From 8070259c3936ee823b758fc1ad1645ae016ba500 Mon Sep 17 00:00:00 2001 From: Apple Date: Fri, 24 Oct 2014 18:09:05 +0000 Subject: [PATCH] objc4-646.tar.gz --- objc.xcodeproj/project.pbxproj | 64 +- runtime/Accessors.subproj/objc-accessors.mm | 9 + runtime/Messengers.subproj/objc-msg-arm.s | 150 +- runtime/Messengers.subproj/objc-msg-arm64.s | 470 ++++ runtime/Messengers.subproj/objc-msg-i386.s | 121 +- .../objc-msg-simulator-i386.s | 217 +- .../objc-msg-simulator-x86_64.s | 1082 ++++++++++ runtime/Messengers.subproj/objc-msg-win32.m | 2 - runtime/Messengers.subproj/objc-msg-x86_64.s | 446 +--- runtime/NSObjCRuntime.h | 1 - runtime/NSObject.h | 39 +- runtime/NSObject.mm | 1101 +++++----- runtime/Object.h | 2 +- runtime/Object.mm | 4 +- runtime/OldClasses.subproj/List.h | 2 +- runtime/Protocol.h | 9 +- runtime/a1a2-blocktramps-arm.s | 669 +----- runtime/a1a2-blocktramps-arm64.s | 134 ++ runtime/a2a3-blocktramps-arm.s | 673 +----- runtime/hashtable2.mm | 4 +- runtime/llvm-DenseMapInfo.h | 20 +- runtime/message.h | 26 +- runtime/objc-abi.h | 27 +- runtime/objc-api.h | 17 +- runtime/objc-auto.mm | 4 +- runtime/objc-block-trampolines.mm | 418 ++-- runtime/objc-cache-old.mm | 10 +- runtime/objc-cache.mm | 336 ++- runtime/objc-class-old.mm | 70 +- runtime/objc-class.mm | 168 +- runtime/objc-config.h | 26 +- runtime/objc-env.h | 3 + runtime/objc-exception.mm | 5 +- runtime/objc-externalref.mm | 2 +- runtime/objc-gdb.h | 33 +- runtime/objc-initialize.mm | 8 +- runtime/objc-internal.h | 89 +- runtime/objc-loadmethod.mm | 10 +- runtime/objc-object.h | 1058 +++++++++ runtime/objc-opt.mm | 85 +- runtime/objc-os.h | 135 +- runtime/objc-os.mm | 44 +- runtime/objc-private.h | 412 +++- runtime/objc-references.mm | 3 +- runtime/objc-runtime-new.h | 694 ++++-- runtime/objc-runtime-new.mm | 1916 ++++++++++++----- runtime/objc-runtime-old.h | 25 +- runtime/objc-runtime-old.mm | 37 +- runtime/objc-runtime.mm | 88 +- runtime/objc-sel-old.mm | 48 +- runtime/objc-sel-set.h | 4 + runtime/objc-sel-set.mm | 7 + runtime/objc-sel-table.s | 14 +- runtime/objc-sel.mm | 23 +- runtime/objc-weak.h | 39 +- runtime/objc-weak.mm | 190 +- runtime/objc.h | 13 +- runtime/runtime.h | 19 +- test/arr-weak.m | 122 +- test/atomicProperty.mm | 26 - test/badCache.m | 2 +- test/badCache2.m | 2 +- test/badTagClass.m | 5 +- test/bigrc.m | 135 ++ test/blocksAsImps.m | 19 +- test/cdtors.mm | 12 +- test/classpair.m | 3 +- test/customrr.m | 10 +- test/designatedinit.m | 27 + test/duplicatedClasses.m | 21 + test/evil-category-def.m | 2 +- test/evil-class-def.m | 171 +- test/forward.m | 48 +- test/forwardDefault.m | 31 + test/forwardDefaultStret.m | 31 + test/getMethod.m | 2 + test/includes.c | 38 + test/load-parallel.m | 5 - test/method_getName.m | 2 + test/msgSend.m | 906 +++++++- test/nonpointerisa.m | 223 ++ test/nscdtors.mm | 6 + test/nsobject.m | 90 +- test/objectCopy.m | 21 + test/protocol.m | 26 +- test/readClassPair.m | 77 + test/rr-autorelease-fast.m | 10 + test/rr-autorelease2.m | 6 + test/runtime.m | 72 +- test/synchronized.m | 1 + test/taggedNSPointers.m | 6 +- test/taggedPointers.m | 57 +- test/tbi.c | 14 + test/test.h | 24 + test/test.pl | 174 +- test/unload.m | 2 +- test/unwind.m | 5 - test/zone.m | 2 +- unexported_symbols | 4 + 99 files changed, 9678 insertions(+), 4087 deletions(-) create mode 100755 runtime/Messengers.subproj/objc-msg-arm64.s create mode 100644 runtime/Messengers.subproj/objc-msg-simulator-x86_64.s create mode 100644 runtime/a1a2-blocktramps-arm64.s create mode 100644 runtime/objc-object.h create mode 100644 
test/bigrc.m create mode 100644 test/designatedinit.m create mode 100644 test/duplicatedClasses.m create mode 100644 test/forwardDefault.m create mode 100644 test/forwardDefaultStret.m create mode 100644 test/includes.c create mode 100644 test/nonpointerisa.m create mode 100644 test/nscdtors.mm create mode 100644 test/objectCopy.m create mode 100644 test/readClassPair.m create mode 100644 test/tbi.c diff --git a/objc.xcodeproj/project.pbxproj b/objc.xcodeproj/project.pbxproj index b426969..b69a9ff 100644 --- a/objc.xcodeproj/project.pbxproj +++ b/objc.xcodeproj/project.pbxproj @@ -29,6 +29,7 @@ 834EC0A411614167009B2563 /* objc-abi.h in Headers */ = {isa = PBXBuildFile; fileRef = 834EC0A311614167009B2563 /* objc-abi.h */; settings = {ATTRIBUTES = (Private, ); }; }; 83725F4A14CA5BFA0014370E /* objc-opt.mm in Sources */ = {isa = PBXBuildFile; fileRef = 83725F4914CA5BFA0014370E /* objc-opt.mm */; }; 83725F4C14CA5C210014370E /* objc-opt.mm in Sources */ = {isa = PBXBuildFile; fileRef = 83725F4914CA5BFA0014370E /* objc-opt.mm */; }; + 8379996E13CBAF6F007C2B5F /* a1a2-blocktramps-arm64.s in Sources */ = {isa = PBXBuildFile; fileRef = 8379996D13CBAF6F007C2B5F /* a1a2-blocktramps-arm64.s */; }; 8383A3A3122600E9009290B8 /* a1a2-blocktramps-arm.s in Sources */ = {isa = PBXBuildFile; fileRef = 8383A3A1122600E9009290B8 /* a1a2-blocktramps-arm.s */; }; 8383A3A4122600E9009290B8 /* a2a3-blocktramps-arm.s in Sources */ = {isa = PBXBuildFile; fileRef = 8383A3A2122600E9009290B8 /* a2a3-blocktramps-arm.s */; }; 8383A3AC122600FB009290B8 /* a1a2-blocktramps-arm.s in Sources */ = {isa = PBXBuildFile; fileRef = 8383A3A1122600E9009290B8 /* a1a2-blocktramps-arm.s */; settings = {COMPILER_FLAGS = " -Qunused-arguments"; }; }; @@ -121,6 +122,10 @@ 83BE02E80FCCB24D00661494 /* objc-file-old.h in Headers */ = {isa = PBXBuildFile; fileRef = 83BE02E50FCCB24D00661494 /* objc-file-old.h */; }; 83BE02E90FCCB24D00661494 /* objc-file.h in Headers */ = {isa = PBXBuildFile; fileRef = 83BE02E60FCCB24D00661494 /* objc-file.h */; }; 83BE02EA0FCCB24D00661494 /* objc-runtime-old.h in Headers */ = {isa = PBXBuildFile; fileRef = 83BE02E70FCCB24D00661494 /* objc-runtime-old.h */; }; + 83C9C3391668B50E00F4E544 /* objc-msg-simulator-x86_64.s in Sources */ = {isa = PBXBuildFile; fileRef = 83C9C3381668B50E00F4E544 /* objc-msg-simulator-x86_64.s */; }; + 83C9C33A1668B56300F4E544 /* objc-msg-simulator-x86_64.s in Sources */ = {isa = PBXBuildFile; fileRef = 83C9C3381668B50E00F4E544 /* objc-msg-simulator-x86_64.s */; }; + 83D49E4F13C7C84F0057F1DD /* objc-msg-arm64.s in Sources */ = {isa = PBXBuildFile; fileRef = 83D49E4E13C7C84F0057F1DD /* objc-msg-arm64.s */; }; + 83D49E5013C7C84F0057F1DD /* objc-msg-arm64.s in Sources */ = {isa = PBXBuildFile; fileRef = 83D49E4E13C7C84F0057F1DD /* objc-msg-arm64.s */; }; 83E50CDB0FF19E8200D74C19 /* hashtable2.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485B70D6D687300CEA253 /* hashtable2.h */; settings = {ATTRIBUTES = (Public, ); }; }; 83E50CDC0FF19E8200D74C19 /* maptable.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485BB0D6D687300CEA253 /* maptable.h */; settings = {ATTRIBUTES = (Private, ); }; }; 83E50CDD0FF19E8200D74C19 /* objc-api.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485C80D6D68A200CEA253 /* objc-api.h */; settings = {ATTRIBUTES = (Public, ); }; }; @@ -205,6 +210,7 @@ 834DF8B615993EE1002F2BC9 /* objc-sel-old.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-sel-old.mm"; path = "runtime/objc-sel-old.mm"; sourceTree = ""; 
}; 834EC0A311614167009B2563 /* objc-abi.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-abi.h"; path = "runtime/objc-abi.h"; sourceTree = ""; }; 83725F4914CA5BFA0014370E /* objc-opt.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-opt.mm"; path = "runtime/objc-opt.mm"; sourceTree = ""; }; + 8379996D13CBAF6F007C2B5F /* a1a2-blocktramps-arm64.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "a1a2-blocktramps-arm64.s"; path = "runtime/a1a2-blocktramps-arm64.s"; sourceTree = ""; }; 8383A3A1122600E9009290B8 /* a1a2-blocktramps-arm.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "a1a2-blocktramps-arm.s"; path = "runtime/a1a2-blocktramps-arm.s"; sourceTree = ""; }; 8383A3A2122600E9009290B8 /* a2a3-blocktramps-arm.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "a2a3-blocktramps-arm.s"; path = "runtime/a2a3-blocktramps-arm.s"; sourceTree = ""; }; 838485B30D6D682B00CEA253 /* libobjc.order */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libobjc.order; sourceTree = ""; }; @@ -260,6 +266,8 @@ 83BE02E50FCCB24D00661494 /* objc-file-old.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-file-old.h"; path = "runtime/objc-file-old.h"; sourceTree = ""; }; 83BE02E60FCCB24D00661494 /* objc-file.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-file.h"; path = "runtime/objc-file.h"; sourceTree = ""; }; 83BE02E70FCCB24D00661494 /* objc-runtime-old.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-runtime-old.h"; path = "runtime/objc-runtime-old.h"; sourceTree = ""; }; + 83C9C3381668B50E00F4E544 /* objc-msg-simulator-x86_64.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "objc-msg-simulator-x86_64.s"; path = "runtime/Messengers.subproj/objc-msg-simulator-x86_64.s"; sourceTree = ""; }; + 83D49E4E13C7C84F0057F1DD /* objc-msg-arm64.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "objc-msg-arm64.s"; path = "runtime/Messengers.subproj/objc-msg-arm64.s"; sourceTree = ""; }; 83E50D2A0FF19E8200D74C19 /* libobjc.A.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libobjc.A.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; 83E50D2B0FF19E9E00D74C19 /* IndigoSDK.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; name = IndigoSDK.xcconfig; path = AppleInternal/XcodeConfig/IndigoSDK.xcconfig; sourceTree = DEVELOPER_DIR; }; 83EB007A121C9EC200B92C16 /* objc-sel-table.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "objc-sel-table.s"; path = "runtime/objc-sel-table.s"; sourceTree = ""; }; @@ -361,13 +369,16 @@ 834DF8B615993EE1002F2BC9 /* objc-sel-old.mm */, 838485EA0D6D68A200CEA253 /* objc-sync.mm */, 838485EB0D6D68A200CEA253 /* objc-typeencoding.mm */, + 8379996D13CBAF6F007C2B5F /* a1a2-blocktramps-arm64.s */, E8923D9C116AB2820071B552 /* a1a2-blocktramps-i386.s */, E8923D9D116AB2820071B552 /* a1a2-blocktramps-x86_64.s */, E8923D9E116AB2820071B552 /* a2a3-blocktramps-i386.s */, E8923D9F116AB2820071B552 /* a2a3-blocktramps-x86_64.s */, 830F2A690D737FB800392440 /* objc-msg-arm.s */, + 
83D49E4E13C7C84F0057F1DD /* objc-msg-arm64.s */, 830F2A6A0D737FB800392440 /* objc-msg-i386.s */, 83B1A8BC0FF1AC0D0019EA5B /* objc-msg-simulator-i386.s */, + 83C9C3381668B50E00F4E544 /* objc-msg-simulator-x86_64.s */, 830F2A720D737FB800392440 /* objc-msg-x86_64.s */, 87BB4E900EC39633005D08E1 /* objc-probes.d */, ); @@ -582,6 +593,7 @@ 83E50CFC0FF19E8200D74C19 /* Sources */, 83E50D240FF19E8200D74C19 /* Frameworks */, 83E50D260FF19E8200D74C19 /* Run Script (symlink) */, + 96895502173DB369006D6747 /* Run Script (RC_HIDE_64) */, ); buildRules = ( ); @@ -601,6 +613,7 @@ D289988505E68E00004EDB86 /* Frameworks */, 830F2AB60D739AB600392440 /* Run Script (markgc) */, 830F2AFA0D73BC5800392440 /* Run Script (symlink) */, + 96BF404516F7DC5300DA41F6 /* Run Script (RC_HIDE_64) */, ); buildRules = ( ); @@ -686,6 +699,34 @@ shellPath = /bin/sh; shellScript = "cd \"${INSTALL_DIR}\"\n/bin/ln -s libobjc.A.dylib libobjc.dylib\n"; }; + 96895502173DB369006D6747 /* Run Script (RC_HIDE_64) */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 8; + files = ( + ); + inputPaths = ( + ); + name = "Run Script (RC_HIDE_64)"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 1; + shellPath = /bin/sh; + shellScript = "cd \"${DSTROOT}\"\nif [ -n \"${RC_HIDE_64}\" ]\nthen\n find . -type f -name \"*.h\" | while read\n do\n unifdef -DOBJC_HIDE_64=1 -o \"$REPLY.tmp\" \"$REPLY\"\n sed 's/OBJC_ARM64_UNAVAILABLE//g' < \"$REPLY.tmp\" > \"$REPLY\"\n rm \"$REPLY.tmp\"\n done\nfi"; + }; + 96BF404516F7DC5300DA41F6 /* Run Script (RC_HIDE_64) */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 8; + files = ( + ); + inputPaths = ( + ); + name = "Run Script (RC_HIDE_64)"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 1; + shellPath = /bin/sh; + shellScript = "cd \"${DSTROOT}\"\nif [ -n \"${RC_HIDE_64}\" ]\nthen\n find . 
-type f -name \"*.h\" | while read\n do\n unifdef -DOBJC_HIDE_64=1 -o \"$REPLY.tmp\" \"$REPLY\"\n sed 's/OBJC_ARM64_UNAVAILABLE//g' < \"$REPLY.tmp\" > \"$REPLY\"\n rm \"$REPLY.tmp\"\n done\nfi"; + }; /* End PBXShellScriptBuildPhase section */ /* Begin PBXSourcesBuildPhase section */ @@ -745,8 +786,10 @@ 8383A3D4122600FB009290B8 /* objc-probes.d in Sources */, 8383A3DC1226291C009290B8 /* objc-externalref.mm in Sources */, 39ABD72612F0B61800D1054C /* objc-weak.mm in Sources */, + 83D49E5013C7C84F0057F1DD /* objc-msg-arm64.s in Sources */, 9672F7EF14D5F488007CEC96 /* NSObject.mm in Sources */, 83725F4C14CA5C210014370E /* objc-opt.mm in Sources */, + 83C9C33A1668B56300F4E544 /* objc-msg-simulator-x86_64.s in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -798,10 +841,13 @@ 8383A3A4122600E9009290B8 /* a2a3-blocktramps-arm.s in Sources */, 399BC72E1224831B007FBDF0 /* objc-externalref.mm in Sources */, 39ABD72412F0B61800D1054C /* objc-weak.mm in Sources */, + 83D49E4F13C7C84F0057F1DD /* objc-msg-arm64.s in Sources */, + 8379996E13CBAF6F007C2B5F /* a1a2-blocktramps-arm64.s in Sources */, 9672F7EE14D5F488007CEC96 /* NSObject.mm in Sources */, 83725F4A14CA5BFA0014370E /* objc-opt.mm in Sources */, 83F550E0155E030800E95D3B /* objc-cache-old.mm in Sources */, 834DF8B715993EE1002F2BC9 /* objc-sel-old.mm in Sources */, + 83C9C3391668B50E00F4E544 /* objc-msg-simulator-x86_64.s in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -836,17 +882,22 @@ "$(CONFIGURATION_BUILD_DIR)/usr/local/include/**", ); INSTALL_PATH = /usr/lib; - ORDER_FILE = libobjc.order; + ORDER_FILE = "$(SDKROOT)/AppleInternal/OrderFiles/libobjc.order"; OTHER_CFLAGS = ( "-fdollars-in-identifiers", "$(OTHER_CFLAGS)", ); - "OTHER_LDFLAGS[sdk=iphoneos*][arch=*]" = "-lc++abi"; + "OTHER_LDFLAGS[sdk=iphoneos*][arch=*]" = ( + "-lc++abi", + "-Wl,-segalign,0x4000", + "-Xlinker -sectalign -Xlinker __DATA -Xlinker __objc_data -Xlinker 0x1000", + ); "OTHER_LDFLAGS[sdk=iphonesimulator*][arch=*]" = "-l_BUILD_objc-simulator_TARGET_INSTEAD"; "OTHER_LDFLAGS[sdk=macosx*]" = ( "-lCrashReporterClient", "-lauto", "-lc++abi", + "-Xlinker -sectalign -Xlinker __DATA -Xlinker __objc_data -Xlinker 0x1000", ); PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/objc; PRODUCT_NAME = objc.A; @@ -870,17 +921,22 @@ "$(CONFIGURATION_BUILD_DIR)/usr/local/include/**", ); INSTALL_PATH = /usr/lib; - ORDER_FILE = libobjc.order; + ORDER_FILE = "$(SDKROOT)/AppleInternal/OrderFiles/libobjc.order"; OTHER_CFLAGS = ( "-fdollars-in-identifiers", "$(OTHER_CFLAGS)", ); - "OTHER_LDFLAGS[sdk=iphoneos*][arch=*]" = "-lc++abi"; + "OTHER_LDFLAGS[sdk=iphoneos*][arch=*]" = ( + "-lc++abi", + "-Wl,-segalign,0x4000", + "-Xlinker -sectalign -Xlinker __DATA -Xlinker __objc_data -Xlinker 0x1000", + ); "OTHER_LDFLAGS[sdk=iphonesimulator*][arch=*]" = "-l_BUILD_objc-simulator_TARGET_INSTEAD"; "OTHER_LDFLAGS[sdk=macosx*]" = ( "-lCrashReporterClient", "-lauto", "-lc++abi", + "-Xlinker -sectalign -Xlinker __DATA -Xlinker __objc_data -Xlinker 0x1000", ); PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/objc; PRODUCT_NAME = objc.A; diff --git a/runtime/Accessors.subproj/objc-accessors.mm b/runtime/Accessors.subproj/objc-accessors.mm index 2e63ece..45d4b08 100644 --- a/runtime/Accessors.subproj/objc-accessors.mm +++ b/runtime/Accessors.subproj/objc-accessors.mm @@ -57,6 +57,10 @@ static spin_lock_t PropertyLocks[1 << GOODPOWER] = { 0 }; #define MUTABLE_COPY 2 id objc_getProperty_non_gc(id self, SEL _cmd, ptrdiff_t offset, BOOL atomic) { + if (offset == 0) { + return 
object_getClass(self); + } + // Retain release world id *slot = (id*) ((char*)self + offset); if (!atomic) return *slot; @@ -76,6 +80,11 @@ static inline void reallySetProperty(id self, SEL _cmd, id newValue, ptrdiff_t o static inline void reallySetProperty(id self, SEL _cmd, id newValue, ptrdiff_t offset, bool atomic, bool copy, bool mutableCopy) { + if (offset == 0) { + object_setClass(self, newValue); + return; + } + id oldValue; id *slot = (id*) ((char*)self + offset); diff --git a/runtime/Messengers.subproj/objc-msg-arm.s b/runtime/Messengers.subproj/objc-msg-arm.s index d1735d9..629a1c6 100644 --- a/runtime/Messengers.subproj/objc-msg-arm.s +++ b/runtime/Messengers.subproj/objc-msg-arm.s @@ -601,7 +601,7 @@ LMsgSendSuper2StretExit: MI_CALL_EXTERNAL(__class_lookupMethodAndLoadCache3) mov r12, r0 // r12 = IMP - teq r12, r12 // set eq for nonstret forwarding + movs r9, #0 // r9=0, Z=1 for nonstret forwarding ldmfd sp!, {r0-r3,r7,lr} bx r12 @@ -623,7 +623,7 @@ LMsgSendSuper2StretExit: MI_CALL_EXTERNAL(__class_lookupMethodAndLoadCache3) mov r12, r0 // r12 = IMP - tst r12, r12 // set ne for stret forwarding (r12!=0) + movs r9, #1 // r9=1, Z=0 for stret forwarding ldmfd sp!, {r0-r3,r7,lr} bx r12 @@ -631,65 +631,19 @@ LMsgSendSuper2StretExit: /******************************************************************** - * - * id _objc_msgForward(id self, - * SEL sel, - * ...); - * struct_type _objc_msgForward_stret (id self, - * SEL sel, - * ...); - * - * Both _objc_msgForward and _objc_msgForward_stret - * send the message to a method having the signature: - * - * - forward:(SEL)sel :(marg_list)args; - * - * The marg_list's layout is: - * d0 <-- args - * d1 - * d2 | increasing address - * d3 v - * d4 - * d5 - * d6 - * d7 - * r0 - * r1 - * r2 - * r3 - * stack args... - * - * typedef struct objc_sendv_margs { - * int a[4]; - * int stackArgs[...]; - * }; - * - ********************************************************************/ - - - .cstring -LUnkSelStr: - .ascii "Does not recognize selector %s\0" - -.private_extern _FwdSel - .data - .align 2 -_FwdSel: - .long 0 - -.private_extern __objc_forward_handler - .data - .align 2 -__objc_forward_handler: - .long 0 - -.private_extern __objc_forward_stret_handler - .data - .align 2 -__objc_forward_stret_handler: - .long 0 - +* +* id _objc_msgForward(id self, SEL _cmd,...); +* +* _objc_msgForward and _objc_msgForward_stret are the externally-callable +* functions returned by things like method_getImplementation(). +* _objc_msgForward_impcache is the function pointer actually stored in +* method caches. +* +********************************************************************/ + MI_EXTERN(__objc_forward_handler) + MI_EXTERN(__objc_forward_stret_handler) + STATIC_ENTRY _objc_msgForward_impcache // Method cache version @@ -713,43 +667,9 @@ __objc_forward_stret_handler: ENTRY _objc_msgForward // Non-stret version -// check for user-installed forwarding handler - MI_GET_ADDRESS(r12, __objc_forward_handler) + MI_GET_EXTERN(r12, __objc_forward_handler) ldr r12, [r12] - teq r12, #0 - it ne - bxne r12 - -// build marg_list - stmfd sp!, {r0-r3} // push args to marg_list - -// build forward::'s parameter list (self, forward::, original sel, marg_list) - // r0 already is self - mov r2, r1 // original sel - MI_GET_ADDRESS(r1, _FwdSel) // "forward::" - ldr r1, [r1] - mov r3, sp // marg_list - -// check for forwarding of forward:: itself - teq r1, r2 - beq LMsgForwardError // original sel == forward:: - give up - -// push stack frame - str lr, [sp, #-(2*4)]! 
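The accessor change above special-cases ivar offset 0, which aliases the isa slot: the getter answers the object's class and the setter routes through object_setClass instead of storing through a raw pointer. A minimal C sketch of that logic follows; sketch_getProperty and sketch_setProperty are illustrative names, not runtime functions, and the real accessors also handle atomicity, retain/copy, and release.

#include <objc/runtime.h>
#include <stddef.h>

static id sketch_getProperty(id self, ptrdiff_t offset) {
    if (offset == 0) {
        // offset 0 is the isa slot: answer the class, never do a raw load
        return (id)object_getClass(self);
    }
    id *slot = (id *)((char *)self + offset);
    return *slot;            // non-atomic path; the real getter also takes
                             // a spin lock for atomic properties
}

static void sketch_setProperty(id self, id newValue, ptrdiff_t offset) {
    if (offset == 0) {
        object_setClass(self, (Class)newValue);   // offset 0 means "set the class"
        return;
    }
    id *slot = (id *)((char *)self + offset);
    *slot = newValue;        // real setter handles retain/copy/release and atomicity
}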
// save lr and align stack - -// send it - bl _objc_msgSend - -// pop stack frame and return - ldr lr, [sp] - add sp, sp, #(4 + 4 + 4*4) // skip lr, pad, r0..r3 - bx lr - -LMsgForwardError: - // currently r0=self, r1=forward::, r2 = original sel, r3 = marg_list - // call __objc_error(self, format, original sel) - MI_GET_ADDRESS(r1, LUnkSelStr) - MI_CALL_EXTERNAL(___objc_error) + bx r12 END_ENTRY _objc_msgForward @@ -757,43 +677,9 @@ LMsgForwardError: ENTRY _objc_msgForward_stret // Struct-return version -// check for user-installed forwarding handler - MI_GET_ADDRESS(r12, __objc_forward_stret_handler) + MI_GET_EXTERN(r12, __objc_forward_stret_handler) ldr r12, [r12] - teq r12, #0 - it ne - bxne r12 - -// build marg_list - stmfd sp!, {r0-r3} // push args to marg_list - -// build forward::'s parameter list (self, forward::, original sel, marg_list) - mov r0, r1 // self - MI_GET_ADDRESS(r1, _FwdSel) // "forward::" - ldr r1, [r1] - // r2 is already original sel - mov r3, sp // marg_list - -// check for forwarding of forward:: itself - teq r1, r2 - beq LMsgForwardStretError // original sel == forward:: - give up - -// push stack frame - str lr, [sp, #-(2*4)]! // save lr and align stack - -// send it - bl _objc_msgSend - -// pop stack frame and return - ldr lr, [sp] - add sp, sp, #(4 + 4 + 4*4) // skip lr, pad, r0..r3 - bx lr - -LMsgForwardStretError: - // currently r0=self, r1=forward::, r2 = original sel, r3 = marglist - // call __objc_error(self, format, original sel) - MI_GET_ADDRESS(r1, LUnkSelStr) - MI_CALL_EXTERNAL(___objc_error) + bx r12 END_ENTRY _objc_msgForward_stret diff --git a/runtime/Messengers.subproj/objc-msg-arm64.s b/runtime/Messengers.subproj/objc-msg-arm64.s new file mode 100755 index 0000000..3017bb2 --- /dev/null +++ b/runtime/Messengers.subproj/objc-msg-arm64.s @@ -0,0 +1,470 @@ +/* + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 2011 Apple Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/******************************************************************** + * + * objc-msg-arm64.s - ARM64 code to support objc messaging + * + ********************************************************************/ + +#ifdef __arm64__ + +#include + + +// _objc_entryPoints and _objc_exitPoints are used by method dispatch +// caching code to figure out whether any threads are actively +// in the cache for dispatching. The labels surround the asm code +// that do cache lookups. The tables are zero-terminated. 
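The comment above states the contract for these tables: a zero-terminated list of messenger entry labels with a parallel list of exit labels, so the cache code can tell whether a thread might be reading a method cache while old cache memory is being reclaimed. Roughly, the consumer side looks like the sketch below; pc_is_in_messenger is an illustrative name, and the real logic lives in the runtime's cache collector.

#include <stdint.h>
#include <stdbool.h>

extern uintptr_t objc_entryPoints[];   // zero-terminated, parallel to the exits
extern uintptr_t objc_exitPoints[];

static bool pc_is_in_messenger(uintptr_t pc) {
    for (int i = 0; objc_entryPoints[i] != 0; i++) {
        if (pc >= objc_entryPoints[i] && pc < objc_exitPoints[i]) {
            return true;   // thread may be mid-way through a cache lookup
        }
    }
    return false;          // safe to reclaim old cache memory as far as this thread goes
}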
+.data +.private_extern _objc_entryPoints +_objc_entryPoints: + .quad _cache_getImp + .quad _objc_msgSend + .quad _objc_msgSendSuper + .quad _objc_msgSendSuper2 + .quad 0 + +.data +.private_extern _objc_exitPoints +_objc_exitPoints: + .quad LExit_cache_getImp + .quad LExit_objc_msgSend + .quad LExit_objc_msgSendSuper + .quad LExit_objc_msgSendSuper2 + .quad 0 + + +/******************************************************************** +* List every exit insn from every messenger for debugger use. +* Format: +* ( +* 1 word instruction's address +* 1 word type (ENTER or FAST_EXIT or SLOW_EXIT or NIL_EXIT) +* ) +* 1 word zero +* +* ENTER is the start of a dispatcher +* FAST_EXIT is method dispatch +* SLOW_EXIT is uncached method lookup +* NIL_EXIT is returning zero from a message sent to nil +* These must match objc-gdb.h. +********************************************************************/ + +#define ENTER 1 +#define FAST_EXIT 2 +#define SLOW_EXIT 3 +#define NIL_EXIT 4 + +.section __DATA,__objc_msg_break +.globl _gdb_objc_messenger_breakpoints +_gdb_objc_messenger_breakpoints: +// contents populated by the macros below + +.macro MESSENGER_START +4: + .section __DATA,__objc_msg_break + .quad 4b + .quad ENTER + .text +.endmacro +.macro MESSENGER_END_FAST +4: + .section __DATA,__objc_msg_break + .quad 4b + .quad FAST_EXIT + .text +.endmacro +.macro MESSENGER_END_SLOW +4: + .section __DATA,__objc_msg_break + .quad 4b + .quad SLOW_EXIT + .text +.endmacro +.macro MESSENGER_END_NIL +4: + .section __DATA,__objc_msg_break + .quad 4b + .quad NIL_EXIT + .text +.endmacro + + +/* objc_super parameter to sendSuper */ +#define RECEIVER 0 +#define CLASS 8 + +/* Selected field offsets in class structure */ +#define SUPERCLASS 8 +#define CACHE 16 + +/* Selected field offsets in isa field */ +#define ISA_MASK 0x00000001fffffff8 + +/* Selected field offsets in method structure */ +#define METHOD_NAME 0 +#define METHOD_TYPES 8 +#define METHOD_IMP 16 + + +/******************************************************************** + * ENTRY functionName + * STATIC_ENTRY functionName + * END_ENTRY functionName + ********************************************************************/ + +.macro ENTRY /* name */ + .text + .align 5 + .globl $0 +$0: +.endmacro + +.macro STATIC_ENTRY /*name*/ + .text + .align 5 + .private_extern $0 +$0: +.endmacro + +.macro END_ENTRY /* name */ +LExit$0: +.endmacro + + +/******************************************************************** + * + * CacheLookup NORMAL|GETIMP + * + * Locate the implementation for a selector in a class method cache. 
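ISA_MASK above encodes the arm64 nonpointer-isa layout: only the masked bits of the object's first word are the class pointer, while the remaining bits carry inline retain-count and flag state. The masking step performed by _objc_msgSend below ("and x9, x13, #ISA_MASK") corresponds to this small C sketch; the constant and function names are illustrative only.

#include <stdint.h>

#define SKETCH_ISA_MASK 0x00000001fffffff8ULL

static uintptr_t sketch_class_bits(uintptr_t isa_word) {
    // the class pointer occupies the masked bits; the rest is
    // inline refcount and flag state
    return isa_word & SKETCH_ISA_MASK;
}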
+ * + * Takes: + * x1 = selector + * x9 = class to be searched + * + * Kills: + * x10,x11,x12, x16,x17 + * + * On exit: (found) exits CacheLookup + * with x9 = class, x17 = IMP + * (not found) jumps to LCacheMiss + * + ********************************************************************/ + +#define NORMAL 0 +#define GETIMP 1 + +.macro CacheHit + MESSENGER_END_FAST +.if $0 == NORMAL + br x17 // call imp +.else + b LGetImpHit +.endif +.endmacro + +.macro CheckMiss +.if $0 == NORMAL // miss if bucket->cls == 0 + cbz x16, __objc_msgSend_uncached_impcache +.else + cbz x16, LGetImpMiss +.endif +.endmacro + +.macro CacheLookup + // x1 = SEL, x9 = isa + ldp x10, x11, [x9, #CACHE] // x10 = buckets, x11 = occupied|mask + and w12, w1, w11 // x12 = _cmd & mask + add x12, x10, x12, LSL #4 // x12 = buckets + ((_cmd & mask)<<4) + + ldp x16, x17, [x12] // {x16, x17} = *bucket +1: cmp x16, x1 // if (bucket->sel != _cmd) + b.ne 2f // scan more + CacheHit $0 // call or return imp + +2: // not hit: x12 = not-hit bucket + CheckMiss $0 // miss if bucket->cls == 0 + cmp x12, x10 // wrap if bucket == buckets + b.eq 3f + ldp x16, x17, [x12, #-16]! // {x16, x17} = *--bucket + b 1b // loop + +3: // wrap: x12 = first bucket, w11 = mask + add x12, x12, w11, UXTW #4 // x12 = buckets+(mask<<4) + + // clone scanning loop to crash instead of hang when cache is corrupt + + ldp x16, x17, [x12] // {x16, x17} = *bucket +1: cmp x16, x1 // if (bucket->sel != _cmd) + b.ne 2f // scan more + CacheHit $0 // call or return imp + +2: // not hit: x12 = not-hit bucket + CheckMiss $0 // miss if bucket->cls == 0 + cmp x12, x10 // wrap if bucket == buckets + b.eq 3f + ldp x16, x17, [x12, #-16]! // {x16, x17} = *--bucket + b 1b // loop + +3: // double wrap - busted + // x0 = receiver + // x1 = SEL + mov x2, x9 // x2 = isa + +.if $0 == GETIMP + mov x0, #0 + b _cache_getImp_corrupt_cache_error +.else + b _objc_msgSend_corrupt_cache_error +.endif + +.endmacro + + + .data + .align 3 + .globl _objc_debug_taggedpointer_classes +_objc_debug_taggedpointer_classes: + .fill 16, 8, 0 + + ENTRY _objc_msgSend + MESSENGER_START + + cmp x0, #0 // nil check and tagged pointer check + b.le LNilOrTagged // (MSB tagged pointer looks negative) + ldr x13, [x0] // x13 = isa + and x9, x13, #ISA_MASK // x9 = class +LGetIsaDone: + CacheLookup NORMAL // calls imp or objc_msgSend_uncached + +LNilOrTagged: + b.eq LReturnZero // nil check + + // tagged + adrp x10, _objc_debug_taggedpointer_classes@PAGE + add x10, x10, _objc_debug_taggedpointer_classes@PAGEOFF + ubfx x11, x0, #60, #4 + ldr x9, [x10, x11, LSL #3] + b LGetIsaDone + +LReturnZero: + // x0 is already zero + mov x1, #0 + movi d0, #0 + movi d1, #0 + movi d2, #0 + movi d3, #0 + MESSENGER_END_NIL + ret + + END_ENTRY _objc_msgSend + + + ENTRY _objc_msgSendSuper + MESSENGER_START + + ldr x9, [x0, #CLASS] // load class to search + ldr x0, [x0, #RECEIVER] // load real receiver + CacheLookup NORMAL // calls imp or objc_msgSend_uncached + + END_ENTRY _objc_msgSendSuper + + + ENTRY _objc_msgSendSuper2 + MESSENGER_START + + ldr x9, [x0, #CLASS] + ldr x9, [x9, #SUPERCLASS] // load class to search + ldr x0, [x0, #RECEIVER] // load real receiver + CacheLookup NORMAL + + END_ENTRY _objc_msgSendSuper2 + + + ENTRY _objc_msgSend_noarg + b _objc_msgSend + END_ENTRY _objc_msgSend_noarg + + + STATIC_ENTRY __objc_msgSend_uncached_impcache + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band x9 is the class to search + + MESSENGER_START + + // push frame + stp fp, lr, [sp, #-16]! 
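For readers not fluent in arm64 assembly, the CacheLookup/CacheHit/CheckMiss macros above implement the scan sketched below in C: hash the selector with the mask, probe downward through 16-byte {SEL, IMP} buckets, wrap once from the first bucket to the last, and treat a second wrap as a corrupt cache. All names here are illustrative; only the layout and control flow follow the assembly.

#include <stdint.h>

typedef struct { uintptr_t sel; uintptr_t imp; } bucket_sketch;   // 16 bytes, read with ldp

typedef struct {
    bucket_sketch *buckets;   // loaded from class + CACHE (x10)
    uint32_t mask;            // low half of occupied|mask (w11)
} cache_sketch;

static uintptr_t sketch_cache_lookup(cache_sketch *cache, uintptr_t sel) {
    bucket_sketch *first = cache->buckets;
    bucket_sketch *b = first + (sel & cache->mask);     // buckets + ((_cmd & mask)<<4)

    for (int pass = 0; pass < 2; pass++) {              // second pass = the cloned loop
        for (;;) {
            if (b->sel == sel) return b->imp;           // CacheHit
            if (b->sel == 0)   return 0;                // CheckMiss: fall into the slow path
            if (b == first) {                           // wrap once, to the last bucket
                b = first + cache->mask;
                break;
            }
            b--;                                        // *--bucket
        }
    }
    // wrapped twice: the assembly calls _objc_msgSend_corrupt_cache_error here
    return 0;
}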
+ mov fp, sp + + MESSENGER_END_SLOW + + // save parameter registers: x0..x8, q0..q7 + sub sp, sp, #(10*8 + 8*16) + stp q0, q1, [sp, #(0*16)] + stp q2, q3, [sp, #(2*16)] + stp q4, q5, [sp, #(4*16)] + stp q6, q7, [sp, #(6*16)] + stp x0, x1, [sp, #(8*16+0*8)] + stp x2, x3, [sp, #(8*16+2*8)] + stp x4, x5, [sp, #(8*16+4*8)] + stp x6, x7, [sp, #(8*16+6*8)] + str x8, [sp, #(8*16+8*8)] + + // receiver and selector already in x0 and x1 + mov x2, x9 + bl __class_lookupMethodAndLoadCache3 + + // imp in x0 + mov x17, x0 + + // restore registers and return + ldp q0, q1, [sp, #(0*16)] + ldp q2, q3, [sp, #(2*16)] + ldp q4, q5, [sp, #(4*16)] + ldp q6, q7, [sp, #(6*16)] + ldp x0, x1, [sp, #(8*16+0*8)] + ldp x2, x3, [sp, #(8*16+2*8)] + ldp x4, x5, [sp, #(8*16+4*8)] + ldp x6, x7, [sp, #(8*16+6*8)] + ldr x8, [sp, #(8*16+8*8)] + + mov sp, fp + ldp fp, lr, [sp], #16 + + br x17 + + END_ENTRY __objc_msgSend_uncached_impcache + + +.section __LD,__compact_unwind,regular,debug + .quad _objc_msgSend + .set LUnwind_objc_msgSend, LExit_objc_msgSend-_objc_msgSend + .long LUnwind_objc_msgSend + .long 0x02000000 // no frame, no SP adjustment + .quad 0 // no personality + .quad 0 // no LSDA + +.section __LD,__compact_unwind,regular,debug + .quad _objc_msgSendSuper + .set LUnwind_objc_msgSendSuper, LExit_objc_msgSendSuper-_objc_msgSendSuper + .long LUnwind_objc_msgSendSuper + .long 0x02000000 // no frame, no SP adjustment + .quad 0 // no personality + .quad 0 // no LSDA + +.section __LD,__compact_unwind,regular,debug + .quad _objc_msgSendSuper2 + .set LUnwind_objc_msgSendSuper2, LExit_objc_msgSendSuper2-_objc_msgSendSuper2 + .long LUnwind_objc_msgSendSuper2 + .long 0x02000000 // no frame, no SP adjustment + .quad 0 // no personality + .quad 0 // no LSDA + +.section __LD,__compact_unwind,regular,debug + .quad __objc_msgSend_uncached_impcache + .set LUnwind__objc_msgSend_uncached_impcache, LExit__objc_msgSend_uncached_impcache-__objc_msgSend_uncached_impcache + .long LUnwind__objc_msgSend_uncached_impcache + .long 0x04000000 // frame, no non-volatile registers saved + .quad 0 // no personality + .quad 0 // no LSDA + + + STATIC_ENTRY _cache_getImp + + and x9, x0, #ISA_MASK + CacheLookup GETIMP + +LGetImpHit: + // imp in x17 + // don't return msgSend_uncached + adrp x16, __objc_msgSend_uncached_impcache@PAGE + add x16, x16, __objc_msgSend_uncached_impcache@PAGEOFF + cmp x16, x17 + csel x0, x17, xzr, ne // if imp!=uncached then imp else 0 + ret + +LGetImpMiss: + mov x0, #0 + ret + + END_ENTRY _cache_getImp + + +/******************************************************************** +* +* id _objc_msgForward(id self, SEL _cmd,...); +* +* _objc_msgForward is the externally-callable +* function returned by things like method_getImplementation(). +* _objc_msgForward_impcache is the function pointer actually stored in +* method caches. +* +********************************************************************/ + + STATIC_ENTRY __objc_msgForward_impcache + + MESSENGER_START + nop + MESSENGER_END_SLOW + + // No stret specialization. 
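Stripped of the register save/restore, the uncached path above reduces to a lookup-then-tail-call, as in the sketch below. _class_lookupMethodAndLoadCache3 is the C function the assembly calls; the (id, SEL) cast and the sketch_msgSend_uncached name are assumptions for illustration, since real messages carry arbitrary argument lists.

#include <objc/runtime.h>

extern IMP _class_lookupMethodAndLoadCache3(id obj, SEL sel, Class cls);

static id sketch_msgSend_uncached(id receiver, SEL sel, Class cls) {
    IMP imp = _class_lookupMethodAndLoadCache3(receiver, sel, cls);  // also fills the cache
    return ((id (*)(id, SEL))imp)(receiver, sel);                    // "br x17"
}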
+ b __objc_msgForward + + END_ENTRY __objc_msgForward_impcache + + + ENTRY __objc_msgForward + + adrp x17, __objc_forward_handler@PAGE + ldr x17, [x17, __objc_forward_handler@PAGEOFF] + br x17 + + END_ENTRY __objc_msgForward + + + ENTRY _objc_msgSend_debug + b _objc_msgSend + END_ENTRY _objc_msgSend_debug + + ENTRY _objc_msgSendSuper2_debug + b _objc_msgSendSuper2 + END_ENTRY _objc_msgSendSuper2_debug + + + ENTRY _method_invoke + // x1 is method triplet instead of SEL + ldr x17, [x1, #METHOD_IMP] + ldr x1, [x1, #METHOD_NAME] + br x17 + END_ENTRY _method_invoke + + + STATIC_ENTRY __objc_ignored_method + + // self is already in x0 + ret + + END_ENTRY __objc_ignored_method + +#endif diff --git a/runtime/Messengers.subproj/objc-msg-i386.s b/runtime/Messengers.subproj/objc-msg-i386.s index b5bf631..d0541b4 100644 --- a/runtime/Messengers.subproj/objc-msg-i386.s +++ b/runtime/Messengers.subproj/objc-msg-i386.s @@ -42,12 +42,6 @@ ********************************************************************/ .data -// Substitute receiver for messages sent to nil (usually also nil) -// id _objc_nilReceiver -.align 4 -.private_extern __objc_nilReceiver -__objc_nilReceiver: - .long 0 // _objc_entryPoints and _objc_exitPoints are used by objc // to get the critical regions for which method caches @@ -193,74 +187,6 @@ _gdb_objc_messenger_breakpoints: #endif -////////////////////////////////////////////////////////////////////// -// -// LOAD_STATIC_WORD targetReg, symbolName, LOCAL_SYMBOL | EXTERNAL_SYMBOL -// -// Load the value of the named static data word. -// -// Takes: targetReg - the register, other than r0, to load -// symbolName - the name of the symbol -// LOCAL_SYMBOL - symbol name used as-is -// EXTERNAL_SYMBOL - symbol name gets nonlazy treatment -// -// Eats: edx and targetReg -////////////////////////////////////////////////////////////////////// - -// Values to specify whether the symbol is plain or nonlazy -LOCAL_SYMBOL = 0 -EXTERNAL_SYMBOL = 1 - -.macro LOAD_STATIC_WORD - -#if defined(__DYNAMIC__) - call 1f -1: popl %edx -.if $2 == EXTERNAL_SYMBOL - movl L$1-1b(%edx),$0 - movl 0($0),$0 -.elseif $2 == LOCAL_SYMBOL - movl $1-1b(%edx),$0 -.else - !!! Unknown symbol type !!! -.endif -#else - movl $1,$0 -#endif - -.endmacro - -////////////////////////////////////////////////////////////////////// -// -// LEA_STATIC_DATA targetReg, symbolName, LOCAL_SYMBOL | EXTERNAL_SYMBOL -// -// Load the address of the named static data. -// -// Takes: targetReg - the register, other than edx, to load -// symbolName - the name of the symbol -// LOCAL_SYMBOL - symbol is local to this module -// EXTERNAL_SYMBOL - symbol is imported from another module -// -// Eats: edx and targetReg -////////////////////////////////////////////////////////////////////// - -.macro LEA_STATIC_DATA -#if defined(__DYNAMIC__) - call 1f -1: popl %edx -.if $2 == EXTERNAL_SYMBOL - movl L$1-1b(%edx),$0 -.elseif $2 == LOCAL_SYMBOL - leal $1-1b(%edx),$0 -.else - !!! Unknown symbol type !!! 
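As the arm64 code above shows, _objc_msgForward is now nothing but an indirect jump through __objc_forward_handler. The handler is normally installed by Foundation; a minimal crash-and-log replacement might look like the sketch below, assuming objc_setForwardHandler as declared in <objc/message.h> and assuming the handler is entered with the original receiver and selector in the first two argument slots.

#include <objc/message.h>
#include <objc/runtime.h>
#include <stdio.h>
#include <stdlib.h>

static id sketch_forward_handler(id self, SEL _cmd) {
    fprintf(stderr, "unrecognized selector %s sent to %p\n",
            sel_getName(_cmd), (void *)self);
    abort();
}

static void install_sketch_handler(void) {
    // the second argument is the struct-return variant; reusing one
    // function is only reasonable for a handler that never returns
    objc_setForwardHandler((void *)sketch_forward_handler,
                           (void *)sketch_forward_handler);
}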
-.endif -#else - leal $1,$0 -#endif - -.endmacro - ////////////////////////////////////////////////////////////////////// // // ENTRY functionName @@ -686,14 +612,6 @@ LMsgSendCacheMiss: // message sent to nil: redirect to nil receiver, if any LMsgSendNilSelf: - call 1f // load new receiver -1: popl %edx - movl __objc_nilReceiver-1b(%edx),%eax - testl %eax, %eax // return nil if no new receiver - je LMsgSendReturnZero - movl %eax, self(%esp) // send to new receiver - jmp LMsgSendReceiverOk // receiver must be in %eax -LMsgSendReturnZero: // %eax is already zero movl $0,%edx LMsgSendDone: @@ -844,14 +762,7 @@ LMsgSendFpretCacheMiss: // message sent to nil: redirect to nil receiver, if any LMsgSendFpretNilSelf: - call 1f // load new receiver -1: popl %edx - movl __objc_nilReceiver-1b(%edx),%eax - testl %eax, %eax // return zero if no new receiver - je LMsgSendFpretReturnZero - movl %eax, self(%esp) // send to new receiver - jmp LMsgSendFpretReceiverOk // receiver must be in %eax -LMsgSendFpretReturnZero: + // %eax is already zero fldz LMsgSendFpretDone: MESSENGER_END_NIL @@ -954,14 +865,6 @@ LMsgSendStretCacheMiss: // message sent to nil: redirect to nil receiver, if any LMsgSendStretNilSelf: - call 1f // load new receiver -1: popl %edx - movl __objc_nilReceiver-1b(%edx),%eax - testl %eax, %eax // return nil if no new receiver - je LMsgSendStretDone - movl %eax, self_stret(%esp) // send to new receiver - jmp LMsgSendStretReceiverOk // receiver must be in %eax -LMsgSendStretDone: MESSENGER_END_NIL ret $4 // pop struct return address (#2995932) @@ -1095,15 +998,13 @@ _FwdSel: .long 0 .align 2 LUnkSelStr: .ascii "Does not recognize selector %s (while forwarding %s)\0" - .data - .align 2 - .private_extern __objc_forward_handler -__objc_forward_handler: .long 0 - - .data - .align 2 - .private_extern __objc_forward_stret_handler -__objc_forward_stret_handler: .long 0 + .non_lazy_symbol_pointer +L_forward_handler: + .indirect_symbol __objc_forward_handler + .long 0 +L_forward_stret_handler: + .indirect_symbol __objc_forward_stret_handler + .long 0 STATIC_ENTRY __objc_msgForward_impcache // Method cache version @@ -1132,7 +1033,8 @@ L__objc_msgForward$pic_base: popl %edx // Call user handler, if any - movl __objc_forward_handler-L__objc_msgForward$pic_base(%edx),%ecx + movl L_forward_handler-L__objc_msgForward$pic_base(%edx),%ecx + movl (%ecx), %ecx testl %ecx, %ecx // if not NULL je 1f // skip to default handler jmp *%ecx // call __objc_forward_handler @@ -1186,7 +1088,8 @@ L__objc_msgForwardStret$pic_base: popl %edx // Call user handler, if any - movl __objc_forward_stret_handler-L__objc_msgForwardStret$pic_base(%edx), %ecx + movl L_forward_stret_handler-L__objc_msgForwardStret$pic_base(%edx), %ecx + movl (%ecx), %ecx testl %ecx, %ecx // if not NULL je 1f // skip to default handler jmp *%ecx // call __objc_forward_stret_handler diff --git a/runtime/Messengers.subproj/objc-msg-simulator-i386.s b/runtime/Messengers.subproj/objc-msg-simulator-i386.s index 431a483..36f1f38 100644 --- a/runtime/Messengers.subproj/objc-msg-simulator-i386.s +++ b/runtime/Messengers.subproj/objc-msg-simulator-i386.s @@ -195,6 +195,7 @@ _gdb_objc_messenger_breakpoints: .globl $0 .align 2, 0x90 $0: + .cfi_startproc .endmacro .macro STATIC_ENTRY @@ -202,6 +203,7 @@ $0: .private_extern $0 .align 4, 0x90 $0: + .cfi_startproc .endmacro ////////////////////////////////////////////////////////////////////// @@ -215,6 +217,7 @@ $0: ////////////////////////////////////////////////////////////////////// .macro END_ENTRY + 
.cfi_endproc .endmacro @@ -293,11 +296,12 @@ $0: 1: // loop - cmpl $$1, (%eax) - je 3f // if (bucket->sel == 1) cache wrap - jb LCacheMiss_f // if (bucket->sel == 0) cache miss + cmpl $$0, (%eax) + je LCacheMiss_f // if (bucket->sel == 0) cache miss + cmpl 8(%edx), %eax + je 3f // if (bucket = cache->buckets) wrap - addl $$8, %eax // bucket++ + subl $$8, %eax // bucket-- 2: cmpl (%eax), %ecx // if (bucket->sel != sel) jne 1b // scan more @@ -306,10 +310,52 @@ $0: 3: // wrap - // eax is last bucket, bucket->imp is first bucket - movl 4(%eax), %eax - jmp 2b + movzwl 12(%edx), %eax // eax = mask + shll $$3, %eax // eax = offset = mask * 8 + addl 8(%edx), %eax // eax = bucket = cache->buckets+offset + jmp 2f + // clone scanning loop to crash instead of hang when cache is corrupt + +1: + // loop + cmpl $$0, (%eax) + je LCacheMiss_f // if (bucket->sel == 0) cache miss + cmpl 8(%edx), %eax + je 3f // if (bucket = cache->buckets) wrap + + subl $$8, %eax // bucket-- +2: + cmpl (%eax), %ecx // if (bucket->sel != sel) + jne 1b // scan more + // The `jne` above sets flags for CacheHit + CacheHit $0 // call or return imp + +3: + // double wrap - busted + + pushl %ebp + movl %esp, %ebp + pushl $$0 + pushl $$0 + pushl $$0 // stack alignment + pushl %edx // isa + pushl %ecx // SEL +.if $0 == STRET || $0 == SUPER_STRET + movl self_stret+4(%ebp), %ecx +.elseif $0 == GETIMP + movl $$0, %ecx +.else + movl self+4(%ebp), %ecx +.endif + pushl %ecx // receiver + +.if $0 == GETIMP + call _cache_getImp_corrupt_cache_error +.else + call _objc_msgSend_corrupt_cache_error +.endif + .endmacro @@ -330,7 +376,12 @@ $0: .macro MethodTableLookup MESSENGER_END_SLOW pushl %ebp + .cfi_def_cfa_offset 8 + .cfi_offset ebp, -8 + movl %esp, %ebp + .cfi_def_cfa_register ebp + sub $$12, %esp // align stack pushl %edx // class @@ -341,6 +392,8 @@ $0: // imp in eax leave + .cfi_def_cfa esp, 4 + .cfi_same_value ebp .if $0 == SUPER // replace "super" arg with "receiver" @@ -403,7 +456,7 @@ LNilTestSlow: ret .elseif $0 == STRET MESSENGER_END_NIL - ret $4 + ret $$4 .elseif $0 == NORMAL // eax is already zero xorl %edx, %edx @@ -422,8 +475,7 @@ LNilTestSlow: * If not found, returns NULL. ********************************************************************/ - .private_extern _cache_getImp - ENTRY _cache_getImp + STATIC_ENTRY _cache_getImp // load the class and selector movl selector(%esp), %ecx @@ -695,36 +747,27 @@ LMsgSendSuper2StretExit: END_ENTRY __objc_msgSend_stret_uncached + /******************************************************************** - * - * id _objc_msgForward(id self, SEL _cmd,...); - * - ********************************************************************/ - -// _FwdSel is @selector(forward::), set up in map_images(). -// ALWAYS dereference _FwdSel to get to "forward::" !! - .data - .align 2 - .private_extern _FwdSel -_FwdSel: .long 0 - - - .cstring - .align 2 -LUnkSelStr: .ascii "Does not recognize selector %s\0" - - .data - .align 2 - .private_extern __objc_forward_handler -__objc_forward_handler: .long 0 +* +* id _objc_msgForward(id self, SEL _cmd,...); +* +* _objc_msgForward and _objc_msgForward_stret are the externally-callable +* functions returned by things like method_getImplementation(). +* _objc_msgForward_impcache is the function pointer actually stored in +* method caches. 
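The comment above distinguishes _objc_msgForward (the value handed out by method_getImplementation and friends) from _objc_msgForward_impcache (the value actually stored in method caches). One practical consequence, sketched below, is that callers can detect "no real implementation" by comparing the IMP they get back against _objc_msgForward; sketch_directly_callable is an illustrative name.

#include <objc/runtime.h>
#include <objc/message.h>
#include <stdbool.h>

static bool sketch_directly_callable(Class cls, SEL sel) {
    IMP imp = class_getMethodImplementation(cls, sel);
    // class_getMethodImplementation can return the forwarding entry point
    return imp != NULL && imp != (IMP)_objc_msgForward;
}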
+* +********************************************************************/ - .data - .align 2 - .private_extern __objc_forward_stret_handler -__objc_forward_stret_handler: .long 0 + .non_lazy_symbol_pointer +L_forward_handler: + .indirect_symbol __objc_forward_handler + .long 0 +L_forward_stret_handler: + .indirect_symbol __objc_forward_stret_handler + .long 0 - ENTRY __objc_msgForward_impcache - .private_extern __objc_msgForward_impcache + STATIC_ENTRY __objc_msgForward_impcache // Method cache version // THIS IS NOT A CALLABLE C FUNCTION @@ -743,52 +786,10 @@ __objc_forward_stret_handler: .long 0 ENTRY __objc_msgForward // Non-struct return version - // Get PIC base into %edx - call L__objc_msgForward$pic_base -L__objc_msgForward$pic_base: - popl %edx - - // Call user handler, if any - movl __objc_forward_handler-L__objc_msgForward$pic_base(%edx),%ecx - testl %ecx, %ecx // if not NULL - je 1f // skip to default handler - jmp *%ecx // call __objc_forward_handler -1: - // No user handler - // Push stack frame - pushl %ebp - movl %esp, %ebp - - // Die if forwarding "forward::" - movl (selector+4)(%ebp), %eax - movl _FwdSel-L__objc_msgForward$pic_base(%edx),%ecx - cmpl %ecx, %eax - je LMsgForwardError - - // Call [receiver forward:sel :margs] - subl $8, %esp // 16-byte align the stack - leal (self+4)(%ebp), %ecx - pushl %ecx // &margs - pushl %eax // sel - movl _FwdSel-L__objc_msgForward$pic_base(%edx),%ecx - pushl %ecx // forward:: - pushl (self+4)(%ebp) // receiver - - call _objc_msgSend - - movl %ebp, %esp - popl %ebp - ret - -LMsgForwardError: - // Call __objc_error(receiver, "unknown selector %s", "forward::") - subl $12, %esp // 16-byte align the stack - movl _FwdSel-L__objc_msgForward$pic_base(%edx),%eax - pushl %eax - leal LUnkSelStr-L__objc_msgForward$pic_base(%edx),%eax - pushl %eax - pushl (self+4)(%ebp) - call ___objc_error // never returns + call 1f +1: popl %edx + movl L_forward_handler-1b(%edx), %edx + jmp *(%edx) END_ENTRY __objc_msgForward @@ -796,52 +797,10 @@ LMsgForwardError: ENTRY __objc_msgForward_stret // Struct return version - // Get PIC base into %edx - call L__objc_msgForwardStret$pic_base -L__objc_msgForwardStret$pic_base: - popl %edx - - // Call user handler, if any - movl __objc_forward_stret_handler-L__objc_msgForwardStret$pic_base(%edx), %ecx - testl %ecx, %ecx // if not NULL - je 1f // skip to default handler - jmp *%ecx // call __objc_forward_stret_handler -1: - // No user handler - // Push stack frame - pushl %ebp - movl %esp, %ebp - - // Die if forwarding "forward::" - movl selector_stret+4(%ebp), %eax - movl _FwdSel-L__objc_msgForwardStret$pic_base(%edx), %ecx - cmpl %ecx, %eax - je LMsgForwardStretError - - // Call [receiver forward:sel :margs] - subl $8, %esp // 16-byte align the stack - leal (self_stret+4)(%ebp), %ecx - pushl %ecx // &margs - pushl %eax // sel - movl _FwdSel-L__objc_msgForwardStret$pic_base(%edx),%ecx - pushl %ecx // forward:: - pushl (self_stret+4)(%ebp) // receiver - - call _objc_msgSend - - movl %ebp, %esp - popl %ebp - ret $4 // pop struct return address (#2995932) - -LMsgForwardStretError: - // Call __objc_error(receiver, "unknown selector %s", "forward::") - subl $12, %esp // 16-byte align the stack - leal _FwdSel-L__objc_msgForwardStret$pic_base(%edx),%eax - pushl %eax - leal LUnkSelStr-L__objc_msgForwardStret$pic_base(%edx),%eax - pushl %eax - pushl (self_stret+4)(%ebp) - call ___objc_error // never returns + call 1f +1: popl %edx + movl L_forward_stret_handler-1b(%edx), %edx + jmp *(%edx) END_ENTRY __objc_msgForward_stret diff 
--git a/runtime/Messengers.subproj/objc-msg-simulator-x86_64.s b/runtime/Messengers.subproj/objc-msg-simulator-x86_64.s new file mode 100644 index 0000000..cbe2b51 --- /dev/null +++ b/runtime/Messengers.subproj/objc-msg-simulator-x86_64.s @@ -0,0 +1,1082 @@ +/* + * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#if __x86_64__ && TARGET_IPHONE_SIMULATOR + +/******************************************************************** + ******************************************************************** + ** + ** objc-msg-x86_64.s - x86-64 code to support objc messaging. + ** + ******************************************************************** + ********************************************************************/ + +.data + +// _objc_entryPoints and _objc_exitPoints are used by objc +// to get the critical regions for which method caches +// cannot be garbage collected. + +.private_extern _objc_entryPoints +_objc_entryPoints: + .quad _cache_getImp + .quad _objc_msgSend + .quad _objc_msgSend_fpret + .quad _objc_msgSend_fp2ret + .quad _objc_msgSend_stret + .quad _objc_msgSendSuper + .quad _objc_msgSendSuper_stret + .quad _objc_msgSendSuper2 + .quad _objc_msgSendSuper2_stret + .quad 0 + +.private_extern _objc_exitPoints +_objc_exitPoints: + .quad LExit_cache_getImp + .quad LExit_objc_msgSend + .quad LExit_objc_msgSend_fpret + .quad LExit_objc_msgSend_fp2ret + .quad LExit_objc_msgSend_stret + .quad LExit_objc_msgSendSuper + .quad LExit_objc_msgSendSuper_stret + .quad LExit_objc_msgSendSuper2 + .quad LExit_objc_msgSendSuper2_stret + .quad 0 + + +/******************************************************************** +* List every exit insn from every messenger for debugger use. +* Format: +* ( +* 1 word instruction's address +* 1 word type (ENTER or FAST_EXIT or SLOW_EXIT or NIL_EXIT) +* ) +* 1 word zero +* +* ENTER is the start of a dispatcher +* FAST_EXIT is method dispatch +* SLOW_EXIT is uncached method lookup +* NIL_EXIT is returning zero from a message sent to nil +* These must match objc-gdb.h. 
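The breakpoint-table comment above specifies a zero-terminated array of (address, kind) pairs kept in __DATA,__objc_msg_break for debugger use. A reader-side sketch follows, with illustrative struct and constant names that mirror the ENTER/FAST_EXIT/SLOW_EXIT/NIL_EXIT values; the authoritative definitions are in objc-gdb.h.

#include <stdint.h>

enum { SKETCH_ENTER = 1, SKETCH_FAST_EXIT = 2,
       SKETCH_SLOW_EXIT = 3, SKETCH_NIL_EXIT = 4 };

typedef struct {
    uint64_t address;   // address of the messenger instruction
    uint64_t kind;      // ENTER / FAST_EXIT / SLOW_EXIT / NIL_EXIT
} messenger_breakpoint_sketch;

static uint64_t sketch_classify_pc(const messenger_breakpoint_sketch *table,
                                   uint64_t pc) {
    for (int i = 0; table[i].address != 0; i++) {        // list ends with a zero word
        if (table[i].address == pc) return table[i].kind;
    }
    return 0;   // pc is not a recorded messenger entry/exit instruction
}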
+********************************************************************/ + +#define ENTER 1 +#define FAST_EXIT 2 +#define SLOW_EXIT 3 +#define NIL_EXIT 4 + +.section __DATA,__objc_msg_break +.globl _gdb_objc_messenger_breakpoints +_gdb_objc_messenger_breakpoints: +// contents populated by the macros below + +.macro MESSENGER_START +4: + .section __DATA,__objc_msg_break + .quad 4b + .quad ENTER + .text +.endmacro +.macro MESSENGER_END_FAST +4: + .section __DATA,__objc_msg_break + .quad 4b + .quad FAST_EXIT + .text +.endmacro +.macro MESSENGER_END_SLOW +4: + .section __DATA,__objc_msg_break + .quad 4b + .quad SLOW_EXIT + .text +.endmacro +.macro MESSENGER_END_NIL +4: + .section __DATA,__objc_msg_break + .quad 4b + .quad NIL_EXIT + .text +.endmacro + + +/******************************************************************** + * Recommended multi-byte NOP instructions + * (Intel 64 and IA-32 Architectures Software Developer's Manual Volume 2B) + ********************************************************************/ +#define nop1 .byte 0x90 +#define nop2 .byte 0x66,0x90 +#define nop3 .byte 0x0F,0x1F,0x00 +#define nop4 .byte 0x0F,0x1F,0x40,0x00 +#define nop5 .byte 0x0F,0x1F,0x44,0x00,0x00 +#define nop6 .byte 0x66,0x0F,0x1F,0x44,0x00,0x00 +#define nop7 .byte 0x0F,0x1F,0x80,0x00,0x00,0x00,0x00 +#define nop8 .byte 0x0F,0x1F,0x84,0x00,0x00,0x00,0x00,0x00 +#define nop9 .byte 0x66,0x0F,0x1F,0x84,0x00,0x00,0x00,0x00,0x00 + + +/******************************************************************** + * Names for parameter registers. + ********************************************************************/ + +#define a1 rdi +#define a1d edi +#define a1b dil +#define a2 rsi +#define a2d esi +#define a2b sil +#define a3 rdx +#define a3d edx +#define a4 rcx +#define a4d ecx +#define a5 r8 +#define a5d r8d +#define a6 r9 +#define a6d r9d + + +/******************************************************************** + * Names for relative labels + * DO NOT USE THESE LABELS ELSEWHERE + * Reserved labels: 6: 7: 8: 9: + ********************************************************************/ +#define LCacheMiss 6 +#define LCacheMiss_f 6f +#define LCacheMiss_b 6b +#define LGetIsaDone 7 +#define LGetIsaDone_f 7f +#define LGetIsaDone_b 7b +#define LNilOrTagged 8 +#define LNilOrTagged_f 8f +#define LNilOrTagged_b 8b +#define LNil 9 +#define LNil_f 9f +#define LNil_b 9b + +/******************************************************************** + * Macro parameters + ********************************************************************/ + +#define NORMAL 0 +#define FPRET 1 +#define FP2RET 2 +#define GETIMP 3 +#define STRET 4 +#define SUPER 5 +#define SUPER_STRET 6 +#define SUPER2 7 +#define SUPER2_STRET 8 + + +/******************************************************************** + * + * Structure definitions. + * + ********************************************************************/ + +// objc_super parameter to sendSuper +#define receiver 0 +#define class 8 + +// Selected field offsets in class structure +// #define isa 0 USE GetIsa INSTEAD + +// Method descriptor +#define method_name 0 +#define method_imp 16 + + +////////////////////////////////////////////////////////////////////// +// +// ENTRY functionName +// +// Assembly directives to begin an exported function. 
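The receiver/class defines above hard-code the layout of the objc_super argument consumed by the super messengers later in this file; objc_msgSendSuper2 additionally chases class->superclass itself before the cache lookup. In C terms, roughly as below; the struct and function names are illustrative.

#include <objc/runtime.h>

struct objc_super_sketch {
    id receiver;        // offset 0, the "receiver" define above
    Class cls;          // offset 8, the "class" define above
};

// what objc_msgSendSuper2 does before its cache lookup, in C terms:
static Class sketch_super2_search_class(struct objc_super_sketch *sup) {
    return class_getSuperclass(sup->cls);   // cls = class->superclass
}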
+// +// Takes: functionName - name of the exported function +////////////////////////////////////////////////////////////////////// + +.macro ENTRY + .text + .globl $0 + .align 6, 0x90 +$0: + .cfi_startproc +.endmacro + +.macro STATIC_ENTRY + .text + .private_extern $0 + .align 2, 0x90 +$0: + .cfi_startproc +.endmacro + +////////////////////////////////////////////////////////////////////// +// +// END_ENTRY functionName +// +// Assembly directives to end an exported function. Just a placeholder, +// a close-parenthesis for ENTRY, until it is needed for something. +// +// Takes: functionName - name of the exported function +////////////////////////////////////////////////////////////////////// + +.macro END_ENTRY + .cfi_endproc +LExit$0: +.endmacro + + +///////////////////////////////////////////////////////////////////// +// +// SaveRegisters +// +// Pushes a stack frame and saves all registers that might contain +// parameter values. +// +// On entry: +// stack = ret +// +// On exit: +// %rsp is 16-byte aligned +// +///////////////////////////////////////////////////////////////////// + +.macro SaveRegisters + + push %rbp + .cfi_def_cfa_offset 16 + .cfi_offset rbp, -16 + + mov %rsp, %rbp + .cfi_def_cfa_register rbp + + sub $$0x80+8, %rsp // +8 for alignment + + movdqa %xmm0, -0x80(%rbp) + push %rax // might be xmm parameter count + movdqa %xmm1, -0x70(%rbp) + push %a1 + movdqa %xmm2, -0x60(%rbp) + push %a2 + movdqa %xmm3, -0x50(%rbp) + push %a3 + movdqa %xmm4, -0x40(%rbp) + push %a4 + movdqa %xmm5, -0x30(%rbp) + push %a5 + movdqa %xmm6, -0x20(%rbp) + push %a6 + movdqa %xmm7, -0x10(%rbp) + +.endmacro + +///////////////////////////////////////////////////////////////////// +// +// RestoreRegisters +// +// Pops a stack frame pushed by SaveRegisters +// +// On entry: +// %rbp unchanged since SaveRegisters +// +// On exit: +// stack = ret +// +///////////////////////////////////////////////////////////////////// + +.macro RestoreRegisters + + movdqa -0x80(%rbp), %xmm0 + pop %a6 + movdqa -0x70(%rbp), %xmm1 + pop %a5 + movdqa -0x60(%rbp), %xmm2 + pop %a4 + movdqa -0x50(%rbp), %xmm3 + pop %a3 + movdqa -0x40(%rbp), %xmm4 + pop %a2 + movdqa -0x30(%rbp), %xmm5 + pop %a1 + movdqa -0x20(%rbp), %xmm6 + pop %rax + movdqa -0x10(%rbp), %xmm7 + + leave + .cfi_def_cfa rsp, 8 + .cfi_same_value rbp + +.endmacro + + +///////////////////////////////////////////////////////////////////// +// +// CacheLookup return-type, caller +// +// Locate the implementation for a class in a selector's method cache. +// +// Takes: +// $0 = NORMAL, FPRET, FP2RET, STRET, SUPER, SUPER_STRET, SUPER2, SUPER2_STRET, GETIMP +// a2 or a3 (STRET) = selector a.k.a. cache +// r11 = class to search +// +// On exit: r10 clobbered +// (found) calls or returns IMP, eq/ne/r11 set for forwarding +// (not found) jumps to LCacheMiss, class still in r11 +// +///////////////////////////////////////////////////////////////////// + +.macro CacheHit + + // CacheHit must always be preceded by a not-taken `jne` instruction + // in order to set the correct flags for _objc_msgForward_impcache. 
+ + // r10 = found bucket + +.if $0 == GETIMP + movq 8(%r10), %rax // return imp + leaq __objc_msgSend_uncached_impcache(%rip), %r11 + cmpq %rax, %r11 + jne 4f + xorl %eax, %eax // don't return msgSend_uncached +4: ret +.elseif $0 == NORMAL || $0 == FPRET || $0 == FP2RET + // eq already set for forwarding by `jne` + MESSENGER_END_FAST + jmp *8(%r10) // call imp + +.elseif $0 == SUPER + movq receiver(%a1), %a1 // load real receiver + cmp %r10, %r10 // set eq for non-stret forwarding + MESSENGER_END_FAST + jmp *8(%r10) // call imp + +.elseif $0 == SUPER2 + movq receiver(%a1), %a1 // load real receiver + cmp %r10, %r10 // set eq for non-stret forwarding + MESSENGER_END_FAST + jmp *8(%r10) // call imp + +.elseif $0 == STRET + test %r10, %r10 // set ne for stret forwarding + MESSENGER_END_FAST + jmp *8(%r10) // call imp + +.elseif $0 == SUPER_STRET + movq receiver(%a2), %a2 // load real receiver + test %r10, %r10 // set ne for stret forwarding + MESSENGER_END_FAST + jmp *8(%r10) // call imp + +.elseif $0 == SUPER2_STRET + movq receiver(%a2), %a2 // load real receiver + test %r10, %r10 // set ne for stret forwarding + MESSENGER_END_FAST + jmp *8(%r10) // call imp +.else +.abort oops +.endif + +.endmacro + + +.macro CacheLookup +.if $0 != STRET && $0 != SUPER_STRET && $0 != SUPER2_STRET + movq %a2, %r10 // r10 = _cmd +.else + movq %a3, %r10 // r10 = _cmd +.endif + andl 24(%r11), %r10d // r10 = _cmd & class->cache.mask + shlq $$4, %r10 // r10 = offset = (_cmd & mask)<<4 + addq 16(%r11), %r10 // r10 = class->cache.buckets + offset + +.if $0 != STRET && $0 != SUPER_STRET && $0 != SUPER2_STRET + cmpq (%r10), %a2 // if (bucket->sel != _cmd) +.else + cmpq (%r10), %a3 // if (bucket->sel != _cmd) +.endif + jne 1f // scan more + // CacheHit must always be preceded by a not-taken `jne` instruction + CacheHit $0 // call or return imp + +1: + // loop + cmpq $$0, (%r10) + je LCacheMiss_f // if (bucket->sel == 0) cache miss + cmpq 16(%r11), %r10 + je 3f // if (bucket == cache->buckets) wrap + + subq $$16, %r10 // bucket-- +2: +.if $0 != STRET && $0 != SUPER_STRET && $0 != SUPER2_STRET + cmpq (%r10), %a2 // if (bucket->sel != _cmd) +.else + cmpq (%r10), %a3 // if (bucket->sel != _cmd) +.endif + jne 1b // scan more + // CacheHit must always be preceded by a not-taken `jne` instruction + CacheHit $0 // call or return imp + +3: + // wrap + movl 24(%r11), %r10d // r10 = mask a.k.a. 
last bucket index + shlq $$4, %r10 // r10 = offset = mask<<4 + addq 16(%r11), %r10 // r10 = &cache->buckets[mask] + jmp 2f + + // clone scanning loop to crash instead of hang when cache is corrupt + +1: + // loop + cmpq $$0, (%r10) + je LCacheMiss_f // if (bucket->sel == 0) cache miss + cmpq 16(%r11), %r10 + je 3f // if (bucket == cache->buckets) wrap + + subq $$16, %r10 // bucket-- +2: +.if $0 != STRET && $0 != SUPER_STRET && $0 != SUPER2_STRET + cmpq (%r10), %a2 // if (bucket->sel != _cmd) +.else + cmpq (%r10), %a3 // if (bucket->sel != _cmd) +.endif + jne 1b // scan more + // CacheHit must always be preceded by a not-taken `jne` instruction + CacheHit $0 // call or return imp + +3: + // double wrap - busted +.if $0 == STRET || $0 == SUPER_STRET || $0 == SUPER2_STRET + movq %a2, %a1 + movq %a3, %a2 +.elseif $0 == GETIMP + movq $$0, %a1 +.endif + // a1 = receiver + // a2 = SEL + movq %r11, %a3 // a3 = isa +.if $0 == GETIMP + jmp _cache_getImp_corrupt_cache_error +.else + jmp _objc_msgSend_corrupt_cache_error +.endif + +.endmacro + + +///////////////////////////////////////////////////////////////////// +// +// MethodTableLookup classRegister, selectorRegister +// +// Takes: $0 = class to search (a1 or a2 or r10 ONLY) +// $1 = selector to search for (a2 or a3 ONLY) +// r11 = class to search +// +// On exit: imp in %r11 +// +///////////////////////////////////////////////////////////////////// +.macro MethodTableLookup + + MESSENGER_END_SLOW + + SaveRegisters + + // _class_lookupMethodAndLoadCache3(receiver, selector, class) + + movq $0, %a1 + movq $1, %a2 + movq %r11, %a3 + call __class_lookupMethodAndLoadCache3 + + // IMP is now in %rax + movq %rax, %r11 + + RestoreRegisters + +.endmacro + + +///////////////////////////////////////////////////////////////////// +// +// GetIsaCheckNil return-type +// GetIsaSupport return-type +// +// Sets r11 = receiver->isa. +// Looks up the real class if receiver is a tagged pointer object. +// Returns zero if obj is nil. +// +// Takes: $0 = NORMAL or FPRET or FP2RET or STRET +// a1 or a2 (STRET) = receiver +// +// On exit: r11 = receiver->isa +// r10 is clobbered +// +///////////////////////////////////////////////////////////////////// + +.macro GetIsaCheckNil +.if $0 == SUPER || $0 == SUPER_STRET + error super dispatch does not test for nil +.endif + +.if $0 != STRET + testq %a1, %a1 +.else + testq %a2, %a2 +.endif + jle LNilOrTagged_f // MSB tagged pointer looks negative + +.if $0 != STRET + movq (%a1), %r11 // r11 = isa +.else + movq (%a2), %r11 // r11 = isa +.endif + +LGetIsaDone: +.endmacro + + +.macro GetIsaSupport + .align 3 +LNilOrTagged: + jz LNil_f // flags set by NilOrTaggedTest + + // tagged + + leaq _objc_debug_taggedpointer_classes(%rip), %r11 +.if $0 != STRET + movq %a1, %r10 +.else + movq %a2, %r10 +.endif + shrq $$60, %r10 + movq (%r11, %r10, 8), %r11 // read isa from table + jmp LGetIsaDone_b + +LNil: + // nil + +.if $0 == FPRET + fldz +.elseif $0 == FP2RET + fldz + fldz +.endif +.if $0 == STRET + movq %rdi, %rax +.else + xorl %eax, %eax + xorl %edx, %edx + xorps %xmm0, %xmm0 + xorps %xmm1, %xmm1 +.endif + MESSENGER_END_NIL + ret +.endmacro + + +/******************************************************************** + * IMP cache_getImp(Class cls, SEL sel) + * + * On entry: a1 = class whose cache is to be searched + * a2 = selector to search for + * + * If found, returns method implementation. + * If not found, returns NULL. 
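The CacheLookup/CacheHit macros above implement the method-cache probe shared by every messenger in this file. As a reading aid, here is a standalone C++ sketch of the same scan (not part of the patch; bucket_t, cache_t, and the selector values below are assumptions, and the real corrupt-cache path tail-calls an error routine instead of returning): hash the selector with the cache mask, start at that 16-byte bucket, walk backwards, wrap once at buckets[0], and treat an empty sel slot as a miss.

#include <cstdint>
#include <cstdio>

typedef void (*IMP)(void);
typedef uintptr_t SEL;

struct bucket_t { SEL sel; IMP imp; };          // 16 bytes on LP64, hence the <<4 scaling
struct cache_t  { bucket_t *buckets; uint32_t mask; };

// Mirror of the CacheLookup scan: start at (sel & mask), walk backwards,
// wrap once at buckets[0], and treat an empty sel slot as a miss.
static IMP cache_getImp_sketch(const cache_t *cache, SEL sel) {
    bucket_t *buckets = cache->buckets;
    bucket_t *b = buckets + (sel & cache->mask);
    bool wrapped = false;
    for (;;) {
        if (b->sel == sel) return b->imp;       // hit: the assembly jumps to *imp
        if (b->sel == 0)   return nullptr;      // empty slot: cache miss
        if (b == buckets) {                     // reached the front of the array
            if (wrapped) return nullptr;        // real code reports a corrupt cache
            wrapped = true;
            b = buckets + cache->mask;          // wrap to the last bucket
        } else {
            b--;                                // bucket--
        }
    }
}

static void example_imp(void) { puts("cache hit"); }

int main() {
    bucket_t buckets[4] = {};                   // mask 3 => capacity 4
    cache_t cache = { buckets, 3 };
    SEL sel = 0x1006;                           // hashes to bucket 2
    buckets[sel & cache.mask] = { sel, example_imp };
    if (IMP imp = cache_getImp_sketch(&cache, sel)) imp();
    return 0;
}

The single-wrap backwards scan lets the assembly reuse the same compare/branch sequence before and after the wrap; the cloned second loop in the real macro exists only so a corrupt cache crashes cleanly instead of hanging.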
+ ********************************************************************/ + + STATIC_ENTRY _cache_getImp + +// do lookup + movq %a1, %r11 // move class to r11 for CacheLookup + CacheLookup GETIMP // returns IMP on success + +LCacheMiss: +// cache miss, return nil + xorl %eax, %eax + ret + +LGetImpExit: + END_ENTRY _cache_getImp + + +/******************************************************************** + * + * id objc_msgSend(id self, SEL _cmd,...); + * + ********************************************************************/ + + .data + .align 3 + .globl _objc_debug_taggedpointer_classes +_objc_debug_taggedpointer_classes: + .fill 16, 8, 0 + + ENTRY _objc_msgSend + MESSENGER_START + + GetIsaCheckNil NORMAL // r11 = self->isa, or return zero + CacheLookup NORMAL // calls IMP on success + + GetIsaSupport NORMAL + +// cache miss: go search the method lists +LCacheMiss: + // isa still in r11 + MethodTableLookup %a1, %a2 // r11 = IMP + cmp %r11, %r11 // set eq (nonstret) for forwarding + jmp *%r11 // goto *imp + + END_ENTRY _objc_msgSend + + + ENTRY _objc_msgSend_fixup + int3 + END_ENTRY _objc_msgSend_fixup + + + STATIC_ENTRY _objc_msgSend_fixedup + // Load _cmd from the message_ref + movq 8(%a2), %a2 + jmp _objc_msgSend + END_ENTRY _objc_msgSend_fixedup + + +/******************************************************************** + * + * id objc_msgSendSuper(struct objc_super *super, SEL _cmd,...); + * + * struct objc_super { + * id receiver; + * Class class; + * }; + ********************************************************************/ + + ENTRY _objc_msgSendSuper + MESSENGER_START + +// search the cache (objc_super in %a1) + movq class(%a1), %r11 // class = objc_super->class + CacheLookup SUPER // calls IMP on success + +// cache miss: go search the method lists +LCacheMiss: + // class still in r11 + movq receiver(%a1), %r10 + MethodTableLookup %r10, %a2 // r11 = IMP + movq receiver(%a1), %a1 // load real receiver + cmp %r11, %r11 // set eq (nonstret) for forwarding + jmp *%r11 // goto *imp + + END_ENTRY _objc_msgSendSuper + + +/******************************************************************** + * id objc_msgSendSuper2 + ********************************************************************/ + + ENTRY _objc_msgSendSuper2 + MESSENGER_START + + // objc_super->class is superclass of class to search + +// search the cache (objc_super in %a1) + movq class(%a1), %r11 // cls = objc_super->class + movq 8(%r11), %r11 // cls = class->superclass + CacheLookup SUPER2 // calls IMP on success + +// cache miss: go search the method lists +LCacheMiss: + // superclass still in r11 + movq receiver(%a1), %r10 + MethodTableLookup %r10, %a2 // r11 = IMP + movq receiver(%a1), %a1 // load real receiver + cmp %r11, %r11 // set eq (nonstret) for forwarding + jmp *%r11 // goto *imp + + END_ENTRY _objc_msgSendSuper2 + + + ENTRY _objc_msgSendSuper2_fixup + int3 + END_ENTRY _objc_msgSendSuper2_fixup + + + STATIC_ENTRY _objc_msgSendSuper2_fixedup + // Load _cmd from the message_ref + movq 8(%a2), %a2 + jmp _objc_msgSendSuper2 + END_ENTRY _objc_msgSendSuper2_fixedup + + +/******************************************************************** + * + * double objc_msgSend_fpret(id self, SEL _cmd,...); + * Used for `long double` return only. `float` and `double` use objc_msgSend. 
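All of the entry points above are variations on one calling convention, chosen by the compiler from the method's return type: objc_msgSend for values returned in registers, objc_msgSend_stret when a struct comes back through a hidden memory pointer (which shifts self and _cmd into a2/a3), objc_msgSend_fpret for long double, and objc_msgSend_fp2ret for _Complex long double. A hedged usage sketch follows (not part of the patch; assumed to be compiled as C++ on a system with <objc/message.h> and linked against libobjc, with ordinary NSObject selectors): from C or C++, each call first casts objc_msgSend to the method's real signature.

#include <objc/runtime.h>
#include <objc/message.h>
#include <cstdio>

int main() {
    Class cls = objc_getClass("NSObject");

    // +new returns an object in %rax, so plain objc_msgSend, cast first.
    id obj = ((id (*)(id, SEL))objc_msgSend)((id)cls, sel_registerName("new"));

    // BOOL also comes back in %rax, so -respondsToSelector: uses objc_msgSend too.
    BOOL ok = ((BOOL (*)(id, SEL, SEL))objc_msgSend)(
        obj, sel_registerName("respondsToSelector:"), sel_registerName("hash"));
    printf("responds to -hash: %d\n", (int)ok);

    // A method returning long double would go through objc_msgSend_fpret,
    // _Complex long double through objc_msgSend_fp2ret, and a memory-returned
    // struct through objc_msgSend_stret, matching the entry points in this file.
    ((void (*)(id, SEL))objc_msgSend)(obj, sel_registerName("release"));
    return 0;
}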
+ * + ********************************************************************/ + + ENTRY _objc_msgSend_fpret + MESSENGER_START + + GetIsaCheckNil FPRET // r11 = self->isa, or return zero + CacheLookup FPRET // calls IMP on success + + GetIsaSupport FPRET + +// cache miss: go search the method lists +LCacheMiss: + // isa still in r11 + MethodTableLookup %a1, %a2 // r11 = IMP + cmp %r11, %r11 // set eq (nonstret) for forwarding + jmp *%r11 // goto *imp + + END_ENTRY _objc_msgSend_fpret + + + ENTRY _objc_msgSend_fpret_fixup + int3 + END_ENTRY _objc_msgSend_fpret_fixup + + + STATIC_ENTRY _objc_msgSend_fpret_fixedup + // Load _cmd from the message_ref + movq 8(%a2), %a2 + jmp _objc_msgSend_fpret + END_ENTRY _objc_msgSend_fpret_fixedup + + +/******************************************************************** + * + * double objc_msgSend_fp2ret(id self, SEL _cmd,...); + * Used for `complex long double` return only. + * + ********************************************************************/ + + ENTRY _objc_msgSend_fp2ret + MESSENGER_START + + GetIsaCheckNil FP2RET // r11 = self->isa, or return zero + CacheLookup FP2RET // calls IMP on success + + GetIsaSupport FP2RET + +// cache miss: go search the method lists +LCacheMiss: + // isa still in r11 + MethodTableLookup %a1, %a2 // r11 = IMP + cmp %r11, %r11 // set eq (nonstret) for forwarding + jmp *%r11 // goto *imp + + END_ENTRY _objc_msgSend_fp2ret + + + ENTRY _objc_msgSend_fp2ret_fixup + int3 + END_ENTRY _objc_msgSend_fp2ret_fixup + + + STATIC_ENTRY _objc_msgSend_fp2ret_fixedup + // Load _cmd from the message_ref + movq 8(%a2), %a2 + jmp _objc_msgSend_fp2ret + END_ENTRY _objc_msgSend_fp2ret_fixedup + + +/******************************************************************** + * + * void objc_msgSend_stret(void *st_addr, id self, SEL _cmd, ...); + * + * objc_msgSend_stret is the struct-return form of msgSend. + * The ABI calls for %a1 to be used as the address of the structure + * being returned, with the parameters in the succeeding locations. + * + * On entry: %a1 is the address where the structure is returned, + * %a2 is the message receiver, + * %a3 is the selector + ********************************************************************/ + + ENTRY _objc_msgSend_stret + MESSENGER_START + + GetIsaCheckNil STRET // r11 = self->isa, or return zero + CacheLookup STRET // calls IMP on success + + GetIsaSupport STRET + +// cache miss: go search the method lists +LCacheMiss: + // isa still in r11 + MethodTableLookup %a2, %a3 // r11 = IMP + test %r11, %r11 // set ne (stret) for forward; r11!=0 + jmp *%r11 // goto *imp + + END_ENTRY _objc_msgSend_stret + + + ENTRY _objc_msgSend_stret_fixup + int3 + END_ENTRY _objc_msgSend_stret_fixup + + + STATIC_ENTRY _objc_msgSend_stret_fixedup + // Load _cmd from the message_ref + movq 8(%a3), %a3 + jmp _objc_msgSend_stret + END_ENTRY _objc_msgSend_stret_fixedup + + +/******************************************************************** + * + * void objc_msgSendSuper_stret(void *st_addr, struct objc_super *super, SEL _cmd, ...); + * + * struct objc_super { + * id receiver; + * Class class; + * }; + * + * objc_msgSendSuper_stret is the struct-return form of msgSendSuper. + * The ABI calls for (sp+4) to be used as the address of the structure + * being returned, with the parameters in the succeeding registers. 
+ * + * On entry: %a1 is the address where the structure is returned, + * %a2 is the address of the objc_super structure, + * %a3 is the selector + * + ********************************************************************/ + + ENTRY _objc_msgSendSuper_stret + MESSENGER_START + +// search the cache (objc_super in %a2) + movq class(%a2), %r11 // class = objc_super->class + CacheLookup SUPER_STRET // calls IMP on success + +// cache miss: go search the method lists +LCacheMiss: + // class still in r11 + movq receiver(%a2), %r10 + MethodTableLookup %r10, %a3 // r11 = IMP + movq receiver(%a2), %a2 // load real receiver + test %r11, %r11 // set ne (stret) for forward; r11!=0 + jmp *%r11 // goto *imp + + END_ENTRY _objc_msgSendSuper_stret + + +/******************************************************************** + * id objc_msgSendSuper2_stret + ********************************************************************/ + + ENTRY _objc_msgSendSuper2_stret + MESSENGER_START + +// search the cache (objc_super in %a2) + movq class(%a2), %r11 // class = objc_super->class + movq 8(%r11), %r11 // class = class->superclass + CacheLookup SUPER2_STRET // calls IMP on success + +// cache miss: go search the method lists +LCacheMiss: + // superclass still in r11 + movq receiver(%a2), %r10 + MethodTableLookup %r10, %a3 // r11 = IMP + movq receiver(%a2), %a2 // load real receiver + test %r11, %r11 // set ne (stret) for forward; r11!=0 + jmp *%r11 // goto *imp + + END_ENTRY _objc_msgSendSuper2_stret + + + ENTRY _objc_msgSendSuper2_stret_fixup + int3 + END_ENTRY _objc_msgSendSuper2_stret_fixup + + + STATIC_ENTRY _objc_msgSendSuper2_stret_fixedup + // Load _cmd from the message_ref + movq 8(%a3), %a3 + jmp _objc_msgSendSuper2_stret + END_ENTRY _objc_msgSendSuper2_stret_fixedup + + +/******************************************************************** + * + * _objc_msgSend_uncached_impcache + * _objc_msgSend_uncached + * _objc_msgSend_stret_uncached + * + * Used to erase method cache entries in-place by + * bouncing them to the uncached lookup. + * + ********************************************************************/ + + STATIC_ENTRY __objc_msgSend_uncached_impcache + // Method cache version + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band condition register is NE for stret, EQ otherwise. + // Out-of-band r11 is the searched class + + MESSENGER_START + nop + MESSENGER_END_SLOW + + jne __objc_msgSend_stret_uncached + jmp __objc_msgSend_uncached + + END_ENTRY __objc_msgSend_uncached_impcache + + + STATIC_ENTRY __objc_msgSend_uncached + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band r11 is the searched class + + // r11 is already the class to search + MethodTableLookup %a1, %a2 // r11 = IMP + cmp %r11, %r11 // set eq (nonstret) for forwarding + jmp *%r11 // goto *imp + + END_ENTRY __objc_msgSend_uncached + + + STATIC_ENTRY __objc_msgSend_stret_uncached + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band r11 is the searched class + + // r11 is already the class to search + MethodTableLookup %a2, %a3 // r11 = IMP + test %r11, %r11 // set ne (stret) for forward; r11!=0 + jmp *%r11 // goto *imp + + END_ENTRY __objc_msgSend_stret_uncached + + +/******************************************************************** +* +* id _objc_msgForward(id self, SEL _cmd,...); +* +* _objc_msgForward and _objc_msgForward_stret are the externally-callable +* functions returned by things like method_getImplementation(). +* _objc_msgForward_impcache is the function pointer actually stored in +* method caches. 
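A few entry points back, _objc_msgSend_uncached_impcache is documented as being used to erase method cache entries in place by bouncing them to the uncached lookup. A minimal C++ sketch of that idea (names and data layout assumed; the runtime's actual cache bookkeeping lives in objc-cache.mm): rather than freeing a bucket array that another thread may still be probing, occupied entries are repointed at the uncached bounce, so a stale hit degrades into a slow table lookup instead of a jump through freed memory.

#include <cstddef>
#include <cstdint>

typedef void (*IMP)(void);
typedef uintptr_t SEL;
struct bucket_t { SEL sel; IMP imp; };

// Repoint every occupied bucket at the uncached bounce. A reader racing with
// this still finds either the old IMP or the bounce, never freed memory, and
// the next call through the bounce refills the entry via the slow lookup.
static void erase_in_place(bucket_t *buckets, size_t count, IMP uncached_bounce) {
    for (size_t i = 0; i < count; i++) {
        if (buckets[i].sel != 0) {
            buckets[i].imp = uncached_bounce;
        }
    }
}

static void old_imp(void) {}
static void bounce(void)  {}   // stands in for __objc_msgSend_uncached_impcache

int main() {
    bucket_t buckets[4] = {};
    buckets[1] = { 0x10, old_imp };
    erase_in_place(buckets, 4, bounce);
    return buckets[1].imp == bounce ? 0 : 1;
}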
+* +********************************************************************/ + + STATIC_ENTRY __objc_msgForward_impcache + // Method cache version + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band condition register is NE for stret, EQ otherwise. + + MESSENGER_START + nop + MESSENGER_END_SLOW + + jne __objc_msgForward_stret + jmp __objc_msgForward + + END_ENTRY __objc_msgForward_impcache + + + ENTRY __objc_msgForward + // Non-stret version + + movq __objc_forward_handler(%rip), %r11 + jmp *%r11 + + END_ENTRY __objc_msgForward + + + ENTRY __objc_msgForward_stret + // Struct-return version + + movq __objc_forward_stret_handler(%rip), %r11 + jmp *%r11 + + END_ENTRY __objc_msgForward_stret + + + ENTRY _objc_msgSend_debug + jmp _objc_msgSend + END_ENTRY _objc_msgSend_debug + + ENTRY _objc_msgSendSuper2_debug + jmp _objc_msgSendSuper2 + END_ENTRY _objc_msgSendSuper2_debug + + ENTRY _objc_msgSend_stret_debug + jmp _objc_msgSend_stret + END_ENTRY _objc_msgSend_stret_debug + + ENTRY _objc_msgSendSuper2_stret_debug + jmp _objc_msgSendSuper2_stret + END_ENTRY _objc_msgSendSuper2_stret_debug + + ENTRY _objc_msgSend_fpret_debug + jmp _objc_msgSend_fpret + END_ENTRY _objc_msgSend_fpret_debug + + ENTRY _objc_msgSend_fp2ret_debug + jmp _objc_msgSend_fp2ret + END_ENTRY _objc_msgSend_fp2ret_debug + + + ENTRY _objc_msgSend_noarg + jmp _objc_msgSend + END_ENTRY _objc_msgSend_noarg + + + ENTRY _method_invoke + + movq method_imp(%a2), %r11 + movq method_name(%a2), %a2 + jmp *%r11 + + END_ENTRY _method_invoke + + + ENTRY _method_invoke_stret + + movq method_imp(%a3), %r11 + movq method_name(%a3), %a3 + jmp *%r11 + + END_ENTRY _method_invoke_stret + + + STATIC_ENTRY __objc_ignored_method + + movq %a1, %rax + ret + + END_ENTRY __objc_ignored_method + + +.section __DATA,__objc_msg_break +.quad 0 +.quad 0 + +#endif diff --git a/runtime/Messengers.subproj/objc-msg-win32.m b/runtime/Messengers.subproj/objc-msg-win32.m index e87bec6..014daa8 100644 --- a/runtime/Messengers.subproj/objc-msg-win32.m +++ b/runtime/Messengers.subproj/objc-msg-win32.m @@ -486,7 +486,6 @@ OBJC_EXPORT __declspec(naked) id _objc_msgForward(id a, SEL b, ...) { __asm { mov ecx, _objc_forward_handler - // forward:: support omitted here jmp ecx } } @@ -495,7 +494,6 @@ OBJC_EXPORT __declspec(naked) id _objc_msgForward_stret(id a, SEL b, ...) 
{ __asm { mov ecx, _objc_forward_stret_handler - // forward:: support omitted here jmp ecx } } diff --git a/runtime/Messengers.subproj/objc-msg-x86_64.s b/runtime/Messengers.subproj/objc-msg-x86_64.s index 5ab603f..55111d2 100644 --- a/runtime/Messengers.subproj/objc-msg-x86_64.s +++ b/runtime/Messengers.subproj/objc-msg-x86_64.s @@ -21,7 +21,8 @@ * @APPLE_LICENSE_HEADER_END@ */ -#ifdef __x86_64__ +#include +#if __x86_64__ && !TARGET_IPHONE_SIMULATOR /******************************************************************** ******************************************************************** @@ -37,12 +38,6 @@ ********************************************************************/ .data -// Substitute receiver for messages sent to nil (usually also nil) -// id _objc_nilReceiver -.align 4 -.private_extern __objc_nilReceiver -__objc_nilReceiver: - .quad 0 // _objc_entryPoints and _objc_exitPoints are used by objc // to get the critical regions for which method caches @@ -176,14 +171,11 @@ _gdb_objc_messenger_breakpoints: /******************************************************************** * Names for relative labels * DO NOT USE THESE LABELS ELSEWHERE - * Reserved labels: 5: 6: 7: 8: 9: + * Reserved labels: 6: 7: 8: 9: ********************************************************************/ -#define LCacheMiss 5 -#define LCacheMiss_f 5f -#define LCacheMiss_b 5b -#define LNilTestDone 6 -#define LNilTestDone_f 6f -#define LNilTestDone_b 6b +#define LCacheMiss 6 +#define LCacheMiss_f 6f +#define LCacheMiss_b 6b #define LNilTestSlow 7 #define LNilTestSlow_f 7f #define LNilTestSlow_b 7b @@ -252,6 +244,7 @@ _gdb_objc_messenger_breakpoints: .globl $0 .align 6, 0x90 $0: + .cfi_startproc .endmacro .macro STATIC_ENTRY @@ -259,6 +252,7 @@ $0: .private_extern $0 .align 2, 0x90 $0: + .cfi_startproc .endmacro ////////////////////////////////////////////////////////////////////// @@ -272,170 +266,19 @@ $0: ////////////////////////////////////////////////////////////////////// .macro END_ENTRY -LExit$0: -.endmacro - - -/* DWARF support - These macros work for objc_msgSend variants and others that call - CacheLookup/MethodTableLookup or SaveRegisters/RestoreRegisters - without otherwise building a frame or clobbering callee-save registers - - The macros build appropriate FDEs and tie them to the CIE. 
-*/ - -#define DW_CFA_offset 0x80 -#define DW_CFA_restore 0xc0 -#define DW_CFA_advance_loc4 0x4 -#define DW_CFA_same_value 0x8 -#define DW_CFA_def_cfa 0xc -#define DW_CFA_def_cfa_register 0xd -#define DW_CFA_def_cfa_offset 0xe -#define DW_CFA_offset_extended_sf 0x11 -#define DW_CFA_def_cfa_offset_sf 0x13 -#define DW_rax 0 -#define DW_rdx 1 -#define DW_rcx 2 -#define DW_rsi 4 -#define DW_rdi 5 -#define DW_rbp 6 -#define DW_rsp 7 -#define DW_r8 8 -#define DW_r9 9 -#define DW_r10 10 -#define DW_ra 16 -#define DW_xmm0 17 -#define DW_xmm1 18 -#define DW_xmm2 19 -#define DW_xmm3 20 -#define DW_xmm4 21 -#define DW_xmm5 22 -#define DW_xmm6 23 -#define DW_xmm7 24 -#define DW_a1 DW_rdi -#define DW_a2 DW_rsi -#define DW_a3 DW_rdx -#define DW_a4 DW_rcx -#define DW_a5 DW_r8 -#define DW_a6 DW_r9 - -// CIE -// 8-byte data multiplier -// 1-byte insn multiplier -// PC-relative everything -// No prologue - - .section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support -CIE: - .set L$set$0,LECIE1-LSCIE1 - .long L$set$0 # Length of Common Information Entry -LSCIE1: - .long 0 # CIE Identifier Tag - .byte 0x3 # CIE Version - .ascii "zPR\0" # CIE Augmentation: size + personality + FDE encoding - .byte 0x1 # uleb128 0x1; CIE Code Alignment Factor - .byte 0x78 # sleb128 -0x8; CIE Data Alignment Factor - .byte 0x10 # CIE RA Column - .byte 0x6 # uleb128 0x1; Augmentation size - // Personality augmentation - .byte 0x9b - .long ___objc_personality_v0+4@GOTPCREL - // FDE-encoding augmentation - .byte 0x10 - // Prefix instructions - // CFA is %rsp+8 - .byte DW_CFA_def_cfa - .byte DW_rsp - .byte 8 - // RA is at 0(%rsp) aka 1*-8(CFA) - .byte DW_CFA_offset | DW_ra - .byte 1 - - .align 3 -LECIE1: - - -.macro EMIT_FDE - - .section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support - -// FDE header -.globl $0.eh -$0.eh: -LSFDE$0: - .set LLENFDE$0, LEFDE$0-LASFDE$0 - .long LLENFDE$0 # FDE Length -LASFDE$0: - .long LASFDE$0-CIE # FDE CIE offset - .quad L_dw_start_$0-. # FDE address start - .quad L_dw_len_$0 # FDE address range - .byte 0x0 # uleb128 0x0; Augmentation size - - // DW_START: set by CIE - -.if $1 == 1 - // Save/RestoreRegisters or MethodTableLookup - - // enter - .byte DW_CFA_advance_loc4 - .long L_dw_enter_$0 - L_dw_start_$0 - .byte DW_CFA_def_cfa_offset - .byte 16 - .byte DW_CFA_offset | DW_rbp // rbp => 2*-8(CFA) - .byte 2 - .byte DW_CFA_def_cfa_register // CFA = rbp+16 (offset unchanged) - .byte DW_rbp - - // leave - .byte DW_CFA_advance_loc4 - .long L_dw_leave_$0 - L_dw_enter_$0 - - .byte DW_CFA_same_value // rbp = original value - .byte DW_rbp - .byte DW_CFA_def_cfa // CFA = rsp+8 - .byte DW_rsp - .byte 8 - -.endif - - .align 3 -LEFDE$0: - .text - -.endmacro - - -// Start of function -.macro DW_START -L_dw_start_$0: -.endmacro - -// After `enter` in SaveRegisters -.macro DW_ENTER -L_dw_enter_$0: -.endmacro - -// After `leave` in RestoreRegisters -.macro DW_LEAVE -L_dw_leave_$0: -.endmacro - -// End of function -// $1 == 1 iff you called MethodTableLookup or Save/RestoreRegsters -.macro DW_END - .set L_dw_len_$0, . - L_dw_start_$0 - EMIT_FDE $0, $1 + .cfi_endproc +LExit$0: .endmacro ///////////////////////////////////////////////////////////////////// // -// SaveRegisters caller +// SaveRegisters // // Pushes a stack frame and saves all registers that might contain // parameter values. 
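SaveRegisters and RestoreRegisters, described above, exist because the messenger must call into the C-level method lookup without disturbing any register that could be carrying an argument for the eventual IMP: %rdi through %r9, %xmm0 through %xmm7, and %rax, which the SysV x86-64 ABI uses to pass the vector-register count to variadic callees (hence the "might be xmm parameter count" note). A small standalone C++ illustration of that last point (not part of the patch; the function below is invented for the example):

#include <cstdarg>
#include <cstdio>

// Invented variadic function, standing in for a variadic Objective-C IMP.
static double sum(int n, ...) {
    va_list ap;
    va_start(ap, n);
    double total = 0;
    for (int i = 0; i < n; i++) total += va_arg(ap, double);
    va_end(ap);
    return total;
}

int main() {
    // For this call the compiler loads %al with 2, because two doubles travel
    // in %xmm0/%xmm1. A messenger that clobbered %rax or the %xmm registers
    // before reaching a variadic IMP would corrupt such calls, which is
    // exactly what SaveRegisters is protecting against.
    printf("%.1f\n", sum(2, 1.5, 2.5));
    return 0;
}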
// -// On entry: %0 = caller's symbol name for DWARF +// On entry: // stack = ret // // On exit: @@ -444,10 +287,15 @@ L_dw_leave_$0: ///////////////////////////////////////////////////////////////////// .macro SaveRegisters - // These instructions must match the DWARF data in EMIT_FDE. + + push %rbp + .cfi_def_cfa_offset 16 + .cfi_offset rbp, -16 - enter $$0x80+8, $$0 // +8 for alignment - DW_ENTER $0 + mov %rsp, %rbp + .cfi_def_cfa_register rbp + + sub $$0x80+8, %rsp // +8 for alignment movdqa %xmm0, -0x80(%rbp) push %rax // might be xmm parameter count @@ -465,7 +313,6 @@ L_dw_leave_$0: push %a6 movdqa %xmm7, -0x10(%rbp) - // These instructions must match the DWARF data in EMIT_FDE. .endmacro ///////////////////////////////////////////////////////////////////// @@ -474,7 +321,7 @@ L_dw_leave_$0: // // Pops a stack frame pushed by SaveRegisters // -// On entry: $0 = caller's symbol name for DWARF +// On entry: // %rbp unchanged since SaveRegisters // // On exit: @@ -483,7 +330,6 @@ L_dw_leave_$0: ///////////////////////////////////////////////////////////////////// .macro RestoreRegisters - // These instructions must match the DWARF data in EMIT_FDE. movdqa -0x80(%rbp), %xmm0 pop %a6 @@ -502,9 +348,9 @@ L_dw_leave_$0: movdqa -0x10(%rbp), %xmm7 leave - DW_LEAVE $0 + .cfi_def_cfa rsp, 8 + .cfi_same_value rbp - // These instructions must match the DWARF data in EMIT_FDE. .endmacro @@ -578,7 +424,7 @@ L_dw_leave_$0: .endmacro - + .macro CacheLookup .if $0 != STRET && $0 != SUPER_STRET && $0 != SUPER2_STRET movq %a2, %r10 // r10 = _cmd @@ -606,6 +452,7 @@ L_dw_leave_$0: je 3f // if (bucket == cache->buckets) wrap subq $$16, %r10 // bucket-- +2: .if $0 != STRET && $0 != SUPER_STRET && $0 != SUPER2_STRET cmpq (%r10), %a2 // if (bucket->sel != _cmd) .else @@ -653,22 +500,21 @@ L_dw_leave_$0: // a1 = receiver // a2 = SEL movq %r11, %a3 // a3 = isa - movq %r10, %a4 // a4 = bucket .if $0 == GETIMP jmp _cache_getImp_corrupt_cache_error .else jmp _objc_msgSend_corrupt_cache_error .endif + .endmacro ///////////////////////////////////////////////////////////////////// // -// MethodTableLookup classRegister, selectorRegister, caller +// MethodTableLookup classRegister, selectorRegister // // Takes: $0 = class to search (a1 or a2 or r10 ONLY) // $1 = selector to search for (a2 or a3 ONLY) -// $2 = caller's symbol name for DWARF // r11 = class to search // // On exit: imp in %r11 @@ -678,7 +524,7 @@ L_dw_leave_$0: MESSENGER_END_SLOW - SaveRegisters $2 + SaveRegisters // _class_lookupMethodAndLoadCache3(receiver, selector, class) @@ -690,7 +536,7 @@ L_dw_leave_$0: // IMP is now in %rax movq %rax, %r11 - RestoreRegisters $2 + RestoreRegisters .endmacro @@ -714,12 +560,14 @@ L_dw_leave_$0: testb $$1, %a1b PN jnz LGetIsaSlow_f - movq (%a1), %r11 + movq $$0x00007ffffffffff8, %r11 + andq (%a1), %r11 .else testb $$1, %a2b PN jnz LGetIsaSlow_f - movq (%a2), %r11 + movq $$0x00007ffffffffff8, %r11 + andq (%a2), %r11 .endif LGetIsaDone: .endmacro @@ -779,21 +627,11 @@ LGetIsaDone: .endif PN jz LNilTestSlow_f -LNilTestDone: .endmacro .macro NilTestSupport .align 3 LNilTestSlow: -.if $0 != STRET - movq __objc_nilReceiver(%rip), %a1 - testq %a1, %a1 // if (receiver != nil) -.else - movq __objc_nilReceiver(%rip), %a2 - testq %a2, %a2 // if (receiver != nil) -.endif - jne LNilTestDone_b // send to new receiver - .if $0 == FPRET fldz .elseif $0 == FP2RET @@ -824,7 +662,6 @@ LNilTestSlow: ********************************************************************/ STATIC_ENTRY _cache_getImp - DW_START _cache_getImp // do lookup 
movq %a1, %r11 // move class to r11 for CacheLookup @@ -836,7 +673,6 @@ LCacheMiss: ret LGetImpExit: - DW_END _cache_getImp, 0 END_ENTRY _cache_getImp @@ -853,7 +689,6 @@ _objc_debug_taggedpointer_classes: .fill 16, 8, 0 ENTRY _objc_msgSend - DW_START _objc_msgSend MESSENGER_START NilTest NORMAL @@ -868,11 +703,10 @@ _objc_debug_taggedpointer_classes: // cache miss: go search the method lists LCacheMiss: // isa still in r11 - MethodTableLookup %a1, %a2, _objc_msgSend // r11 = IMP + MethodTableLookup %a1, %a2 // r11 = IMP cmp %r11, %r11 // set eq (nonstret) for forwarding jmp *%r11 // goto *imp - DW_END _objc_msgSend, 1 END_ENTRY _objc_msgSend @@ -899,7 +733,6 @@ LCacheMiss: ********************************************************************/ ENTRY _objc_msgSendSuper - DW_START _objc_msgSendSuper MESSENGER_START // search the cache (objc_super in %a1) @@ -910,12 +743,11 @@ LCacheMiss: LCacheMiss: // class still in r11 movq receiver(%a1), %r10 - MethodTableLookup %r10, %a2, _objc_msgSendSuper // r11 = IMP + MethodTableLookup %r10, %a2 // r11 = IMP movq receiver(%a1), %a1 // load real receiver cmp %r11, %r11 // set eq (nonstret) for forwarding jmp *%r11 // goto *imp - DW_END _objc_msgSendSuper, 1 END_ENTRY _objc_msgSendSuper @@ -924,7 +756,6 @@ LCacheMiss: ********************************************************************/ ENTRY _objc_msgSendSuper2 - DW_START _objc_msgSendSuper2 MESSENGER_START // objc_super->class is superclass of class to search @@ -938,12 +769,11 @@ LCacheMiss: LCacheMiss: // superclass still in r11 movq receiver(%a1), %r10 - MethodTableLookup %r10, %a2, _objc_msgSendSuper2 // r11 = IMP + MethodTableLookup %r10, %a2 // r11 = IMP movq receiver(%a1), %a1 // load real receiver cmp %r11, %r11 // set eq (nonstret) for forwarding jmp *%r11 // goto *imp - DW_END _objc_msgSendSuper2, 1 END_ENTRY _objc_msgSendSuper2 @@ -967,7 +797,6 @@ LCacheMiss: ********************************************************************/ ENTRY _objc_msgSend_fpret - DW_START _objc_msgSend_fpret MESSENGER_START NilTest FPRET @@ -982,11 +811,10 @@ LCacheMiss: // cache miss: go search the method lists LCacheMiss: // isa still in r11 - MethodTableLookup %a1, %a2, _objc_msgSend_fpret // r11 = IMP + MethodTableLookup %a1, %a2 // r11 = IMP cmp %r11, %r11 // set eq (nonstret) for forwarding jmp *%r11 // goto *imp - DW_END _objc_msgSend_fpret, 1 END_ENTRY _objc_msgSend_fpret @@ -1010,7 +838,6 @@ LCacheMiss: ********************************************************************/ ENTRY _objc_msgSend_fp2ret - DW_START _objc_msgSend_fp2ret MESSENGER_START NilTest FP2RET @@ -1025,11 +852,10 @@ LCacheMiss: // cache miss: go search the method lists LCacheMiss: // isa still in r11 - MethodTableLookup %a1, %a2, _objc_msgSend_fp2ret // r11 = IMP + MethodTableLookup %a1, %a2 // r11 = IMP cmp %r11, %r11 // set eq (nonstret) for forwarding jmp *%r11 // goto *imp - DW_END _objc_msgSend_fp2ret, 1 END_ENTRY _objc_msgSend_fp2ret @@ -1059,7 +885,6 @@ LCacheMiss: ********************************************************************/ ENTRY _objc_msgSend_stret - DW_START _objc_msgSend_stret MESSENGER_START NilTest STRET @@ -1074,11 +899,10 @@ LCacheMiss: // cache miss: go search the method lists LCacheMiss: // isa still in r11 - MethodTableLookup %a2, %a3, _objc_msgSend_stret // r11 = IMP + MethodTableLookup %a2, %a3 // r11 = IMP test %r11, %r11 // set ne (stret) for forward; r11!=0 jmp *%r11 // goto *imp - DW_END _objc_msgSend_stret, 1 END_ENTRY _objc_msgSend_stret @@ -1114,7 +938,6 @@ LCacheMiss: 
********************************************************************/ ENTRY _objc_msgSendSuper_stret - DW_START _objc_msgSendSuper_stret MESSENGER_START // search the cache (objc_super in %a2) @@ -1125,12 +948,11 @@ LCacheMiss: LCacheMiss: // class still in r11 movq receiver(%a2), %r10 - MethodTableLookup %r10, %a3, _objc_msgSendSuper_stret // r11 = IMP + MethodTableLookup %r10, %a3 // r11 = IMP movq receiver(%a2), %a2 // load real receiver test %r11, %r11 // set ne (stret) for forward; r11!=0 jmp *%r11 // goto *imp - DW_END _objc_msgSendSuper_stret, 1 END_ENTRY _objc_msgSendSuper_stret @@ -1139,7 +961,6 @@ LCacheMiss: ********************************************************************/ ENTRY _objc_msgSendSuper2_stret - DW_START _objc_msgSendSuper2_stret MESSENGER_START // search the cache (objc_super in %a2) @@ -1151,12 +972,11 @@ LCacheMiss: LCacheMiss: // superclass still in r11 movq receiver(%a2), %r10 - MethodTableLookup %r10, %a3, _objc_msgSendSuper2_stret // r11 = IMP + MethodTableLookup %r10, %a3 // r11 = IMP movq receiver(%a2), %a2 // load real receiver test %r11, %r11 // set ne (stret) for forward; r11!=0 jmp *%r11 // goto *imp - DW_END _objc_msgSendSuper2_stret, 1 END_ENTRY _objc_msgSendSuper2_stret @@ -1201,61 +1021,40 @@ LCacheMiss: STATIC_ENTRY __objc_msgSend_uncached - DW_START __objc_msgSend_uncached // THIS IS NOT A CALLABLE C FUNCTION // Out-of-band r11 is the searched class // r11 is already the class to search - MethodTableLookup %a1, %a2, __objc_msgSend_uncached // r11 = IMP + MethodTableLookup %a1, %a2 // r11 = IMP cmp %r11, %r11 // set eq (nonstret) for forwarding jmp *%r11 // goto *imp - DW_END __objc_msgSend_uncached, 1 END_ENTRY __objc_msgSend_uncached STATIC_ENTRY __objc_msgSend_stret_uncached - DW_START __objc_msgSend_stret_uncached // THIS IS NOT A CALLABLE C FUNCTION // Out-of-band r11 is the searched class // r11 is already the class to search - MethodTableLookup %a2, %a3, __objc_msgSend_stret_uncached // r11 = IMP + MethodTableLookup %a2, %a3 // r11 = IMP test %r11, %r11 // set ne (stret) for forward; r11!=0 jmp *%r11 // goto *imp - DW_END __objc_msgSend_stret_uncached, 1 END_ENTRY __objc_msgSend_stret_uncached /******************************************************************** - * - * id _objc_msgForward(id self, SEL _cmd,...); - * - ********************************************************************/ - -// _FwdSel is @selector(forward::), set up in map_images(). -// ALWAYS dereference _FwdSel to get to "forward::" !! - .data - .align 3 - .private_extern _FwdSel -_FwdSel: .quad 0 - - .cstring - .align 3 -LUnkSelStr: .ascii "Does not recognize selector %s (while forwarding %s)\0" - - .data - .align 3 - .private_extern __objc_forward_handler -__objc_forward_handler: .quad 0 - - .data - .align 3 - .private_extern __objc_forward_stret_handler -__objc_forward_stret_handler: .quad 0 - +* +* id _objc_msgForward(id self, SEL _cmd,...); +* +* _objc_msgForward and _objc_msgForward_stret are the externally-callable +* functions returned by things like method_getImplementation(). +* _objc_msgForward_impcache is the function pointer actually stored in +* method caches. 
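With the old forward:: marg_list machinery deleted, __objc_msgForward and __objc_msgForward_stret now simply tail-jump through __objc_forward_handler and __objc_forward_stret_handler with the original argument registers intact. A hedged C++ sketch of the shape such a handler takes (illustrative only; this is not the runtime's actual default handler, and in practice Foundation/CoreFoundation normally installs one that builds an NSInvocation and drives forwardInvocation:):

#include <objc/runtime.h>
#include <objc/message.h>
#include <cstdio>
#include <cstdlib>

// Illustrative only: the messenger jumps here without touching the argument
// registers, so the handler sees the original receiver and selector.
extern "C" id sketch_forward_handler(id self, SEL _cmd, ...) {
    fprintf(stderr, "unrecognized selector %s sent to %s instance %p\n",
            sel_getName(_cmd), object_getClassName(self), (void *)self);
    abort();
}

int main() {
    // A real handler would be installed through the runtime's forward-handler
    // hook; calling it directly here only demonstrates the diagnostic path
    // (the program intentionally aborts).
    id obj = ((id (*)(id, SEL))objc_msgSend)(
        (id)objc_getClass("NSObject"), sel_registerName("new"));
    sketch_forward_handler(obj, sel_registerName("someUnimplementedSelector:"));
    return 0;
}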
+* +********************************************************************/ STATIC_ENTRY __objc_msgForward_impcache // Method cache version @@ -1276,152 +1075,17 @@ __objc_forward_stret_handler: .quad 0 ENTRY __objc_msgForward // Non-stret version - // Call user handler, if any movq __objc_forward_handler(%rip), %r11 - testq %r11, %r11 // if (handler == NULL) - je 1f // skip handler - jmp *%r11 // else goto handler -1: - // No user handler - - // Die if forwarding "forward::" - cmpq %a2, _FwdSel(%rip) - je LMsgForwardError - - // Record current return address. It will be copied elsewhere in - // the marg_list because this location is needed for register args - movq (%rsp), %r11 - - // Push stack frame - // Space for: fpArgs + regArgs + linkage - ret (already on stack) - subq $ 8*16 + 6*8 + (4-1)*8, %rsp - - // Save return address in linkage area. - movq %r11, 16+LINK_AREA(%rsp) - - // Save parameter registers - movq %a1, 0+REG_AREA(%rsp) - movq %a2, 8+REG_AREA(%rsp) - movq %a3, 16+REG_AREA(%rsp) - movq %a4, 24+REG_AREA(%rsp) - movq %a5, 32+REG_AREA(%rsp) - movq %a6, 40+REG_AREA(%rsp) - - // Save side parameter registers - // movq %r10, 0+LINK_AREA(%rsp) // static chain pointer == Pascal - movq %rax, 8+LINK_AREA(%rsp) // xmm count - // 16+LINK_AREA is return address - - // Save xmm registers - movdqa %xmm0, 0+FP_AREA(%rsp) - movdqa %xmm1, 16+FP_AREA(%rsp) - movdqa %xmm2, 32+FP_AREA(%rsp) - movdqa %xmm3, 48+FP_AREA(%rsp) - movdqa %xmm4, 64+FP_AREA(%rsp) - movdqa %xmm5, 80+FP_AREA(%rsp) - movdqa %xmm6, 96+FP_AREA(%rsp) - movdqa %xmm7, 112+FP_AREA(%rsp) - - // Call [receiver forward:sel :margs] - movq %rsp, %a4 // marg_list - movq %a2, %a3 // sel - movq _FwdSel(%rip), %a2 // forward:: - // %a1 is already the receiver - - call _objc_msgSend - - // Retrieve return address from linkage area - movq 16+LINK_AREA(%rsp), %r11 - // Pop stack frame - addq $ 8*16 + 6*8 + (4-1)*8, %rsp - // Put return address back - movq %r11, (%rsp) - ret - -LMsgForwardError: - // Tail-call __objc_error(receiver, "unknown selector %s %s", "forward::", forwardedSel) - // %a1 is already the receiver - movq %a3, %a4 // the forwarded selector - leaq LUnkSelStr(%rip), %a2 // "unknown selector %s %s" - movq _FwdSel(%rip), %a3 // forward:: - jmp ___objc_error // never returns + jmp *%r11 END_ENTRY __objc_msgForward ENTRY __objc_msgForward_stret // Struct-return version - - // Call user handler, if any - movq __objc_forward_stret_handler(%rip), %r11 - testq %r11, %r11 // if (handler == NULL) - je 1f // skip handler - jmp *%r11 // else goto handler -1: - // No user handler - // Die if forwarding "forward::" - cmpq %a3, _FwdSel(%rip) - je LMsgForwardStretError - - // Record current return address. It will be copied elsewhere in - // the marg_list because this location is needed for register args - movq (%rsp), %r11 - - // Push stack frame - // Space for: fpArgs + regArgs + linkage - ret (already on stack) - subq $ 8*16 + 6*8 + (4-1)*8, %rsp - - // Save return address in linkage area. 
- movq %r11, 16+LINK_AREA(%rsp) - - // Save parameter registers - movq %a1, 0+REG_AREA(%rsp) // note: used again below - movq %a2, 8+REG_AREA(%rsp) - movq %a3, 16+REG_AREA(%rsp) - movq %a4, 24+REG_AREA(%rsp) - movq %a5, 32+REG_AREA(%rsp) - movq %a6, 40+REG_AREA(%rsp) - - // Save side parameter registers - // movq %r10, 0+LINK_AREA(%rsp) // static chain pointer == Pascal - movq %rax, 8+LINK_AREA(%rsp) // xmm count - // 16+LINK_AREA is return address - - // Save xmm registers - movdqa %xmm0, 0+FP_AREA(%rsp) - movdqa %xmm1, 16+FP_AREA(%rsp) - movdqa %xmm2, 32+FP_AREA(%rsp) - movdqa %xmm3, 48+FP_AREA(%rsp) - movdqa %xmm4, 64+FP_AREA(%rsp) - movdqa %xmm5, 80+FP_AREA(%rsp) - movdqa %xmm6, 96+FP_AREA(%rsp) - movdqa %xmm7, 112+FP_AREA(%rsp) - - // Call [receiver forward:sel :margs] - movq %a2, %a1 // receiver - movq _FwdSel(%rip), %a2 // forward:: - // %a3 is already the selector - movq %rsp, %a4 // marg_list - - call _objc_msgSend // forward:: is NOT struct-return - - // Set return value register to the passed-in struct address - movq 0+REG_AREA(%rsp), %rax - // Retrieve return address from linkage area - movq 16+LINK_AREA(%rsp), %r11 - // Pop stack frame - addq $ 8*16 + 6*8 + (4-1)*8, %rsp - // Put return address back - movq %r11, (%rsp) - ret -LMsgForwardStretError: - // Tail-call __objc_error(receiver, "unknown selector %s %s", "forward::", forwardedSel) - // %a4 is already the forwarded selector - movq %a2, %a1 // receiver - leaq LUnkSelStr(%rip), %a2 // "unknown selector %s %s" - movq _FwdSel(%rip), %a3 // forward:: - jmp ___objc_error // never returns + movq __objc_forward_stret_handler(%rip), %r11 + jmp *%r11 END_ENTRY __objc_msgForward_stret diff --git a/runtime/NSObjCRuntime.h b/runtime/NSObjCRuntime.h index 52ab531..77a8054 100644 --- a/runtime/NSObjCRuntime.h +++ b/runtime/NSObjCRuntime.h @@ -22,5 +22,4 @@ typedef unsigned int NSUInteger; #define NSINTEGER_DEFINED 1 - #endif diff --git a/runtime/NSObject.h b/runtime/NSObject.h index cd35d7e..78ea56a 100644 --- a/runtime/NSObject.h +++ b/runtime/NSObject.h @@ -5,6 +5,8 @@ #ifndef _OBJC_NSOBJECT_H_ #define _OBJC_NSOBJECT_H_ +#if __OBJC__ + #include #include @@ -13,12 +15,11 @@ @protocol NSObject - (BOOL)isEqual:(id)object; -- (NSUInteger)hash; +@property (readonly) NSUInteger hash; -- (Class)superclass; +@property (readonly) Class superclass; - (Class)class; -- (id)self; -- (struct _NSZone *)zone OBJC_ARC_UNAVAILABLE; +- (instancetype)self; - (id)performSelector:(SEL)aSelector; - (id)performSelector:(SEL)aSelector withObject:(id)object; @@ -32,14 +33,16 @@ - (BOOL)respondsToSelector:(SEL)aSelector; -- (id)retain OBJC_ARC_UNAVAILABLE; +- (instancetype)retain OBJC_ARC_UNAVAILABLE; - (oneway void)release OBJC_ARC_UNAVAILABLE; -- (id)autorelease OBJC_ARC_UNAVAILABLE; +- (instancetype)autorelease OBJC_ARC_UNAVAILABLE; - (NSUInteger)retainCount OBJC_ARC_UNAVAILABLE; -- (NSString *)description; +- (struct _NSZone *)zone OBJC_ARC_UNAVAILABLE; + +@property (readonly, copy) NSString *description; @optional -- (NSString *)debugDescription; +@property (readonly, copy) NSString *debugDescription; @end @@ -54,11 +57,11 @@ OBJC_EXPORT + (void)load; + (void)initialize; -- (id)init; +- (instancetype)init; -+ (id)new; -+ (id)allocWithZone:(struct _NSZone *)zone; -+ (id)alloc; ++ (instancetype)new; ++ (instancetype)allocWithZone:(struct _NSZone *)zone; ++ (instancetype)alloc; - (void)dealloc; - (void)finalize; @@ -69,8 +72,6 @@ OBJC_EXPORT + (id)copyWithZone:(struct _NSZone *)zone OBJC_ARC_UNAVAILABLE; + (id)mutableCopyWithZone:(struct _NSZone *)zone 
OBJC_ARC_UNAVAILABLE; -+ (Class)superclass; -+ (Class)class; + (BOOL)instancesRespondToSelector:(SEL)aSelector; + (BOOL)conformsToProtocol:(Protocol *)protocol; - (IMP)methodForSelector:(SEL)aSelector; @@ -86,13 +87,19 @@ OBJC_EXPORT - (BOOL)allowsWeakReference UNAVAILABLE_ATTRIBUTE; - (BOOL)retainWeakReference UNAVAILABLE_ATTRIBUTE; -+ (NSString *)description; - + (BOOL)isSubclassOfClass:(Class)aClass; + (BOOL)resolveClassMethod:(SEL)sel __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); + (BOOL)resolveInstanceMethod:(SEL)sel __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); ++ (NSUInteger)hash; ++ (Class)superclass; ++ (Class)class; ++ (NSString *)description; ++ (NSString *)debugDescription; + @end #endif + +#endif diff --git a/runtime/NSObject.mm b/runtime/NSObject.mm index 56886fe..ccaa4e4 100644 --- a/runtime/NSObject.mm +++ b/runtime/NSObject.mm @@ -45,11 +45,6 @@ - (SEL)selector; @end -// HACK -- the use of these functions must be after the @implementation -id bypass_msgSend_retain(NSObject *obj) asm("-[NSObject retain]"); -void bypass_msgSend_release(NSObject *obj) asm("-[NSObject release]"); -id bypass_msgSend_autorelease(NSObject *obj) asm("-[NSObject autorelease]"); - #if TARGET_OS_MAC @@ -99,12 +94,6 @@ id bypass_msgSend_autorelease(NSObject *obj) asm("-[NSObject autorelease]"); // TARGET_OS_MAC #endif -#if SUPPORT_RETURN_AUTORELEASE -// We cannot peek at where we are returning to unless we always inline this: -__attribute__((always_inline)) -static bool callerAcceptsFastAutorelease(const void * const ra0); -#endif - /*********************************************************************** * Weak ivar support @@ -113,7 +102,7 @@ static bool callerAcceptsFastAutorelease(const void * const ra0); static id defaultBadAllocHandler(Class cls) { _objc_fatal("attempt to allocate object of class '%s' failed", - class_getName(cls)); + cls->nameForLogging()); } static id(*badAllocHandler)(Class) = &defaultBadAllocHandler; @@ -133,23 +122,26 @@ void _objc_setBadAllocHandler(id(*newHandler)(Class)) namespace { #if TARGET_OS_EMBEDDED -# define SIDE_TABLE_STRIPE 1 -#else # define SIDE_TABLE_STRIPE 8 +#else +# define SIDE_TABLE_STRIPE 64 #endif // should be a multiple of cache line size (64) #define SIDE_TABLE_SIZE 128 // The order of these bits is important. -#define SIDE_TABLE_WEAKLY_REFERENCED (1<<0) -#define SIDE_TABLE_DEALLOCATING (1<<1) // MSB-ward of weak bit -#define SIDE_TABLE_RC_ONE (1<<2) // MSB-ward of deallocating bit +#define SIDE_TABLE_WEAKLY_REFERENCED (1UL<<0) +#define SIDE_TABLE_DEALLOCATING (1UL<<1) // MSB-ward of weak bit +#define SIDE_TABLE_RC_ONE (1UL<<2) // MSB-ward of deallocating bit +#define SIDE_TABLE_RC_PINNED (1UL<<(WORD_BITS-1)) #define SIDE_TABLE_RC_SHIFT 2 +#define SIDE_TABLE_FLAG_MASK (SIDE_TABLE_RC_ONE-1) - -typedef objc::DenseMap RefcountMap; +// RefcountMap disguises its pointers because we +// don't want the table to act as a root for `leaks`. 
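The comment above notes that RefcountMap disguises its keys so the side tables do not act as roots for tools like `leaks`. A minimal C++ sketch of that trick (assumed shape, not the runtime's own DisguisedPtr template), using the same bitwise complement as the DISGUISE macro this patch removes: a conservative scan of process memory never sees the raw address, yet the pointer is recovered in one instruction.

#include <cstdint>
#include <cstdio>

// Stores ~ptr instead of ptr, so memory-scanning tools do not mistake the
// stored key for a live reference to the object.
template <typename T>
class DisguisedPtrSketch {
    uintptr_t value;
public:
    explicit DisguisedPtrSketch(T *p = nullptr)
        : value(~reinterpret_cast<uintptr_t>(p)) {}
    T *get() const { return reinterpret_cast<T *>(~value); }
    bool operator==(const DisguisedPtrSketch &other) const {
        return value == other.value;
    }
};

int main() {
    int object = 42;
    DisguisedPtrSketch<int> key(&object);   // a table would store `key`, not &object
    printf("recovered value: %d\n", *key.get());
    return 0;
}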
+typedef objc::DenseMap,size_t,true> RefcountMap; class SideTable { private: @@ -194,9 +186,6 @@ STATIC_ASSERT(sizeof(SideTable) <= SIDE_TABLE_SIZE); __attribute__((aligned(SIDE_TABLE_SIZE))) uint8_t SideTable::table_buf[SIDE_TABLE_STRIPE * SIDE_TABLE_SIZE]; -// Avoid false-negative reports from tools like "leaks" -#define DISGUISE(x) ((id)~(uintptr_t)(x)) - // anonymous namespace }; @@ -223,6 +212,20 @@ objc_retain_autorelease(id obj) return objc_autorelease(objc_retain(obj)); } + +void +objc_storeStrong(id *location, id obj) +{ + id prev = *location; + if (obj == prev) { + return; + } + objc_retain(obj); + *location = obj; + objc_release(prev); +} + + /** * This function stores a new value into a __weak variable. It would * be used anywhere a __weak variable is the target of an assignment. @@ -278,7 +281,7 @@ objc_storeWeak(id *location, id newObj) // Set is-weakly-referenced bit in refcount table. if (newObj && !newObj->isTaggedPointer()) { - newTable->refcnts[DISGUISE(newObj)] |= SIDE_TABLE_WEAKLY_REFERENCED; + newObj->setWeaklyReferenced_nolock(); } // Do not set *location anywhere else. That would introduce a race. @@ -432,7 +435,9 @@ objc_moveWeak(id *to, id *from) } -/* Autorelease pool implementation +/*********************************************************************** + Autorelease pool implementation + A thread's autorelease pool is a stack of pointers. Each pointer is either an object to release, or POOL_SENTINEL which is an autorelease pool boundary. @@ -442,7 +447,7 @@ objc_moveWeak(id *to, id *from) and deleted as necessary. Thread-local storage points to the hot page, where newly autoreleased objects are stored. - */ +**********************************************************************/ BREAKPOINT_FUNCTION(void objc_autoreleaseNoPool(id obj)); @@ -493,9 +498,9 @@ class AutoreleasePoolPage static uint8_t const SCRIBBLE = 0xA3; // 0xA3A3A3A3 after releasing static size_t const SIZE = #if PROTECT_AUTORELEASEPOOL - PAGE_SIZE; // must be multiple of vm page size + PAGE_MAX_SIZE; // must be multiple of vm page size #else - PAGE_SIZE; // size and alignment, power of 2 + PAGE_MAX_SIZE; // size and alignment, power of 2 #endif static size_t const COUNT = SIZE / sizeof(id); @@ -560,11 +565,17 @@ class AutoreleasePoolPage void busted(bool die = true) { + magic_t right; (die ? 
_objc_fatal : _objc_inform) ("autorelease pool page %p corrupted\n" - " magic 0x%08x 0x%08x 0x%08x 0x%08x\n pthread %p\n", - this, magic.m[0], magic.m[1], magic.m[2], magic.m[3], - this->thread); + " magic 0x%08x 0x%08x 0x%08x 0x%08x\n" + " should be 0x%08x 0x%08x 0x%08x 0x%08x\n" + " pthread %p\n" + " should be %p\n", + this, + magic.m[0], magic.m[1], magic.m[2], magic.m[3], + right.m[0], right.m[1], right.m[2], right.m[3], + this->thread, pthread_self()); } void check(bool die = true) @@ -606,9 +617,10 @@ class AutoreleasePoolPage { assert(!full()); unprotect(); + id *ret = next; // faster than `return next-1` because of aliasing *next++ = obj; protect(); - return next-1; + return ret; } void releaseAll() @@ -731,37 +743,20 @@ class AutoreleasePoolPage AutoreleasePoolPage *page = hotPage(); if (page && !page->full()) { return page->add(obj); + } else if (page) { + return autoreleaseFullPage(obj, page); } else { - return autoreleaseSlow(obj); + return autoreleaseNoPage(obj); } } static __attribute__((noinline)) - id *autoreleaseSlow(id obj) + id *autoreleaseFullPage(id obj, AutoreleasePoolPage *page) { - AutoreleasePoolPage *page; - page = hotPage(); - - // The code below assumes some cases are handled by autoreleaseFast() - assert(!page || page->full()); - - if (!page) { - // No pool. Silently push one. - assert(obj != POOL_SENTINEL); - - if (DebugMissingPools) { - _objc_inform("MISSING POOLS: Object %p of class %s " - "autoreleased with no pool in place - " - "just leaking - break on " - "objc_autoreleaseNoPool() to debug", - (void*)obj, object_getClassName(obj)); - objc_autoreleaseNoPool(obj); - return nil; - } - - push(); - page = hotPage(); - } + // The hot page is full. + // Step to the next non-full page, adding a new page if necessary. + // Then add the object to that page. + assert(page == hotPage() && page->full()); do { if (page->child) page = page->child; @@ -772,6 +767,37 @@ class AutoreleasePoolPage return page->add(obj); } + static __attribute__((noinline)) + id *autoreleaseNoPage(id obj) + { + // No pool in place. + assert(!hotPage()); + + if (obj != POOL_SENTINEL && DebugMissingPools) { + // We are pushing an object with no pool in place, + // and no-pool debugging was requested by environment. + _objc_inform("MISSING POOLS: Object %p of class %s " + "autoreleased with no pool in place - " + "just leaking - break on " + "objc_autoreleaseNoPool() to debug", + (void*)obj, object_getClassName(obj)); + objc_autoreleaseNoPool(obj); + return nil; + } + + // Install the first page. + AutoreleasePoolPage *page = new AutoreleasePoolPage(nil); + setHotPage(page); + + // Push an autorelease pool boundary if it wasn't already requested. + if (obj != POOL_SENTINEL) { + page->add(POOL_SENTINEL); + } + + // Push the requested object. 
+ return page->add(obj); + } + public: static inline id autorelease(id obj) { @@ -785,9 +811,6 @@ public: static inline void *push() { - if (!hotPage()) { - setHotPage(new AutoreleasePoolPage(nil)); - } id *dest = autoreleaseFast(POOL_SENTINEL); assert(*dest == POOL_SENTINEL); return dest; @@ -908,34 +931,260 @@ public: // anonymous namespace }; -// API to only be called by root classes like NSObject or NSProxy -extern "C" { -__attribute__((used,noinline,nothrow)) -static id _objc_rootRetain_slow(id obj, SideTable *table); -__attribute__((used,noinline,nothrow)) -static bool _objc_rootReleaseWasZero_slow(id obj, SideTable *table); -}; +/*********************************************************************** +* Slow paths for inline control +**********************************************************************/ -id -_objc_rootRetain_slow(id obj, SideTable *table) +#if SUPPORT_NONPOINTER_ISA + +NEVER_INLINE id +objc_object::rootRetain_overflow(bool tryRetain) +{ + return rootRetain(tryRetain, true); +} + + +NEVER_INLINE bool +objc_object::rootRelease_underflow(bool performDealloc) +{ + return rootRelease(performDealloc, true); +} + + +// Slow path of clearDeallocating() +// for weakly-referenced objects with indexed isa +NEVER_INLINE void +objc_object::clearDeallocating_weak() { + assert(isa.indexed && isa.weakly_referenced); + + SideTable *table = SideTable::tableForPointer(this); spinlock_lock(&table->slock); - table->refcnts[DISGUISE(obj)] += SIDE_TABLE_RC_ONE; + weak_clear_no_lock(&table->weak_table, (id)this); spinlock_unlock(&table->slock); +} - return obj; +#endif + +__attribute__((noinline,used)) +id +objc_object::rootAutorelease2() +{ + assert(!isTaggedPointer()); + return AutoreleasePoolPage::autorelease((id)this); +} + + +BREAKPOINT_FUNCTION( + void objc_overrelease_during_dealloc_error(void) +); + + +NEVER_INLINE +bool +objc_object::overrelease_error() +{ + _objc_inform_now_and_on_crash("%s object %p overreleased while already deallocating; break on objc_overrelease_during_dealloc_error to debug", object_getClassName((id)this), this); + objc_overrelease_during_dealloc_error(); + return false; // allow rootRelease() to tail-call this } + +/*********************************************************************** +* Retain count operations for side table. +**********************************************************************/ + + +#if !NDEBUG +// Used to assert that an object is not present in the side table. bool -_objc_rootTryRetain(id obj) +objc_object::sidetable_present() { - assert(obj); - assert(!UseGC); + bool result = false; + SideTable *table = SideTable::tableForPointer(this); + + spinlock_lock(&table->slock); + + RefcountMap::iterator it = table->refcnts.find(this); + if (it != table->refcnts.end()) result = true; + + if (weak_is_registered_no_lock(&table->weak_table, (id)this)) result = true; + + spinlock_unlock(&table->slock); + + return result; +} +#endif + +#if SUPPORT_NONPOINTER_ISA + +void +objc_object::sidetable_lock() +{ + SideTable *table = SideTable::tableForPointer(this); + spinlock_lock(&table->slock); +} + +void +objc_object::sidetable_unlock() +{ + SideTable *table = SideTable::tableForPointer(this); + spinlock_unlock(&table->slock); +} + + +// Move the entire retain count to the side table, +// as well as isDeallocating and weaklyReferenced. 
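Before the sidetable_moveExtraRC_nolock definition that follows, here is a worked example of the side-table refcount word laid out by the SIDE_TABLE_* constants earlier in this file: bit 0 marks weak references, bit 1 marks deallocation, the extra retain count starts at bit 2 (SIDE_TABLE_RC_SHIFT), and the top bit pins the count once it can no longer grow. The snippet is standalone and not part of the patch; WORD_BITS is approximated here as 8 * sizeof(size_t).

#include <cstddef>
#include <cstdio>

// Mirrors the SIDE_TABLE_* layout defined above.
static const size_t WEAKLY_REFERENCED = 1UL << 0;
static const size_t DEALLOCATING      = 1UL << 1;
static const size_t RC_ONE            = 1UL << 2;                        // one extra retain
static const size_t RC_PINNED         = 1UL << (8 * sizeof(size_t) - 1); // count overflowed
static const int    RC_SHIFT          = 2;

int main() {
    size_t word = 0;
    word |= WEAKLY_REFERENCED;   // someone formed a weak reference
    word += 3 * RC_ONE;          // three extra retains beyond the implicit +1
    if (!(word & RC_PINNED) && !(word & DEALLOCATING)) {
        // The two low flag bits shift out, leaving only the extra count.
        printf("extra retain count = %zu\n", word >> RC_SHIFT);   // prints 3
    }
    return 0;
}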
+void +objc_object::sidetable_moveExtraRC_nolock(size_t extra_rc, + bool isDeallocating, + bool weaklyReferenced) +{ + assert(!isa.indexed); // should already be changed to not-indexed + SideTable *table = SideTable::tableForPointer(this); + + size_t& refcntStorage = table->refcnts[this]; + size_t oldRefcnt = refcntStorage; + // not deallocating - that was in the isa + assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0); + assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0); + + uintptr_t carry; + size_t refcnt = addc(oldRefcnt, extra_rc<refcnts[this]; + size_t oldRefcnt = refcntStorage; + // not deallocating - that is in the isa + assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0); + assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0); + + if (oldRefcnt & SIDE_TABLE_RC_PINNED) return true; + + uintptr_t carry; + size_t newRefcnt = + addc(oldRefcnt, delta_rc << SIDE_TABLE_RC_SHIFT, 0, &carry); + if (carry) { + refcntStorage = + SIDE_TABLE_RC_PINNED | (oldRefcnt & SIDE_TABLE_FLAG_MASK); + return true; + } + else { + refcntStorage = newRefcnt; + return false; + } +} + + +// Move some retain counts from the side table to the isa field. +// Returns true if the sidetable retain count is now 0. +bool +objc_object::sidetable_subExtraRC_nolock(size_t delta_rc) +{ + assert(isa.indexed); + SideTable *table = SideTable::tableForPointer(this); + + size_t& refcntStorage = table->refcnts[this]; + size_t oldRefcnt = refcntStorage; + // not deallocating - that is in the isa + assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0); + assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0); + + if (oldRefcnt < delta_rc) { + _objc_inform_now_and_on_crash("refcount underflow error for object %p", + this); + _objc_fatal("refcount underflow error for %s %p", + object_getClassName((id)this), this); + } + + size_t newRefcnt = oldRefcnt - (delta_rc << SIDE_TABLE_RC_SHIFT); + if (newRefcnt == 0) { + table->refcnts.erase(this); + return true; + } + else { + refcntStorage = newRefcnt; + return false; + } +} + + +size_t +objc_object::sidetable_getExtraRC_nolock() +{ + assert(isa.indexed); + SideTable *table = SideTable::tableForPointer(this); + RefcountMap::iterator it = table->refcnts.find(this); + assert(it != table->refcnts.end()); + return it->second >> SIDE_TABLE_RC_SHIFT; +} - if (obj->isTaggedPointer()) return true; - SideTable *table = SideTable::tableForPointer(obj); +// SUPPORT_NONPOINTER_ISA +#endif + + +__attribute__((used,noinline,nothrow)) +id +objc_object::sidetable_retain_slow(SideTable *table) +{ +#if SUPPORT_NONPOINTER_ISA + assert(!isa.indexed); +#endif + + spinlock_lock(&table->slock); + size_t& refcntStorage = table->refcnts[this]; + if (! (refcntStorage & SIDE_TABLE_RC_PINNED)) { + refcntStorage += SIDE_TABLE_RC_ONE; + } + spinlock_unlock(&table->slock); + + return (id)this; +} + + +id +objc_object::sidetable_retain() +{ +#if SUPPORT_NONPOINTER_ISA + assert(!isa.indexed); +#endif + SideTable *table = SideTable::tableForPointer(this); + + if (spinlock_trylock(&table->slock)) { + size_t& refcntStorage = table->refcnts[this]; + if (! 
(refcntStorage & SIDE_TABLE_RC_PINNED)) { + refcntStorage += SIDE_TABLE_RC_ONE; + } + spinlock_unlock(&table->slock); + return (id)this; + } + return sidetable_retain_slow(table); +} + + +bool +objc_object::sidetable_tryRetain() +{ +#if SUPPORT_NONPOINTER_ISA + assert(!isa.indexed); +#endif + SideTable *table = SideTable::tableForPointer(this); // NO SPINLOCK HERE // _objc_rootTryRetain() is called exclusively by _objc_loadWeak(), @@ -947,27 +1196,41 @@ _objc_rootTryRetain(id obj) // } bool result = true; - RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj)); + RefcountMap::iterator it = table->refcnts.find(this); if (it == table->refcnts.end()) { - table->refcnts[DISGUISE(obj)] = SIDE_TABLE_RC_ONE; + table->refcnts[this] = SIDE_TABLE_RC_ONE; } else if (it->second & SIDE_TABLE_DEALLOCATING) { result = false; - } else { + } else if (! (it->second & SIDE_TABLE_RC_PINNED)) { it->second += SIDE_TABLE_RC_ONE; } return result; } -bool -_objc_rootIsDeallocating(id obj) + +uintptr_t +objc_object::sidetable_retainCount() { - assert(obj); - assert(!UseGC); + SideTable *table = SideTable::tableForPointer(this); + + size_t refcnt_result = 1; + + spinlock_lock(&table->slock); + RefcountMap::iterator it = table->refcnts.find(this); + if (it != table->refcnts.end()) { + // this is valid for SIDE_TABLE_RC_PINNED too + refcnt_result += it->second >> SIDE_TABLE_RC_SHIFT; + } + spinlock_unlock(&table->slock); + return refcnt_result; +} - if (obj->isTaggedPointer()) return false; - SideTable *table = SideTable::tableForPointer(obj); +bool +objc_object::sidetable_isDeallocating() +{ + SideTable *table = SideTable::tableForPointer(this); // NO SPINLOCK HERE // _objc_rootIsDeallocating() is called exclusively by _objc_storeWeak(), @@ -979,135 +1242,261 @@ _objc_rootIsDeallocating(id obj) // _objc_fatal("Do not call -_isDeallocating."); // } - RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj)); + RefcountMap::iterator it = table->refcnts.find(this); return (it != table->refcnts.end()) && (it->second & SIDE_TABLE_DEALLOCATING); } -void -objc_clear_deallocating(id obj) +bool +objc_object::sidetable_isWeaklyReferenced() { - assert(obj); - assert(!UseGC); + bool result = false; - SideTable *table = SideTable::tableForPointer(obj); - - // clear any weak table items - // clear extra retain count and deallocating bit - // (fixme warn or abort if extra retain count == 0 ?) 
+ SideTable *table = SideTable::tableForPointer(this); spinlock_lock(&table->slock); - RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj)); + + RefcountMap::iterator it = table->refcnts.find(this); if (it != table->refcnts.end()) { - if (it->second & SIDE_TABLE_WEAKLY_REFERENCED) { - weak_clear_no_lock(&table->weak_table, obj); - } - table->refcnts.erase(it); + result = it->second & SIDE_TABLE_WEAKLY_REFERENCED; } + spinlock_unlock(&table->slock); + + return result; +} + + +void +objc_object::sidetable_setWeaklyReferenced_nolock() +{ +#if SUPPORT_NONPOINTER_ISA + assert(!isa.indexed); +#endif + + SideTable *table = SideTable::tableForPointer(this); + + table->refcnts[this] |= SIDE_TABLE_WEAKLY_REFERENCED; } +__attribute__((used,noinline,nothrow)) bool -_objc_rootReleaseWasZero_slow(id obj, SideTable *table) +objc_object::sidetable_release_slow(SideTable *table, bool performDealloc) { +#if SUPPORT_NONPOINTER_ISA + assert(!isa.indexed); +#endif bool do_dealloc = false; spinlock_lock(&table->slock); - RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj)); + RefcountMap::iterator it = table->refcnts.find(this); if (it == table->refcnts.end()) { do_dealloc = true; - table->refcnts[DISGUISE(obj)] = SIDE_TABLE_DEALLOCATING; + table->refcnts[this] = SIDE_TABLE_DEALLOCATING; } else if (it->second < SIDE_TABLE_DEALLOCATING) { // SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it. do_dealloc = true; it->second |= SIDE_TABLE_DEALLOCATING; - } else { + } else if (! (it->second & SIDE_TABLE_RC_PINNED)) { it->second -= SIDE_TABLE_RC_ONE; } spinlock_unlock(&table->slock); + if (do_dealloc && performDealloc) { + ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc); + } return do_dealloc; } -bool -_objc_rootReleaseWasZero(id obj) -{ - assert(obj); - assert(!UseGC); - - if (obj->isTaggedPointer()) return false; - SideTable *table = SideTable::tableForPointer(obj); +bool +objc_object::sidetable_release(bool performDealloc) +{ +#if SUPPORT_NONPOINTER_ISA + assert(!isa.indexed); +#endif + SideTable *table = SideTable::tableForPointer(this); bool do_dealloc = false; if (spinlock_trylock(&table->slock)) { - RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj)); + RefcountMap::iterator it = table->refcnts.find(this); if (it == table->refcnts.end()) { do_dealloc = true; - table->refcnts[DISGUISE(obj)] = SIDE_TABLE_DEALLOCATING; + table->refcnts[this] = SIDE_TABLE_DEALLOCATING; } else if (it->second < SIDE_TABLE_DEALLOCATING) { // SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it. do_dealloc = true; it->second |= SIDE_TABLE_DEALLOCATING; - } else { + } else if (! (it->second & SIDE_TABLE_RC_PINNED)) { it->second -= SIDE_TABLE_RC_ONE; } spinlock_unlock(&table->slock); + if (do_dealloc && performDealloc) { + ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc); + } return do_dealloc; } - return _objc_rootReleaseWasZero_slow(obj, table); + return sidetable_release_slow(table, performDealloc); } -__attribute__((noinline,used)) -static id _objc_rootAutorelease2(id obj) -{ - if (obj->isTaggedPointer()) return obj; - return AutoreleasePoolPage::autorelease(obj); -} -uintptr_t -_objc_rootRetainCount(id obj) +void +objc_object::sidetable_clearDeallocating() { - assert(obj); - assert(!UseGC); - - // XXX -- There is no way that anybody can use this API race free in a - // threaded environment because the result is immediately stale by the - // time the caller receives it. 
+ SideTable *table = SideTable::tableForPointer(this); - if (obj->isTaggedPointer()) return (uintptr_t)obj; - - SideTable *table = SideTable::tableForPointer(obj); - - size_t refcnt_result = 1; - + // clear any weak table items + // clear extra retain count and deallocating bit + // (fixme warn or abort if extra retain count == 0 ?) spinlock_lock(&table->slock); - RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj)); + RefcountMap::iterator it = table->refcnts.find(this); if (it != table->refcnts.end()) { - refcnt_result += it->second >> SIDE_TABLE_RC_SHIFT; + if (it->second & SIDE_TABLE_WEAKLY_REFERENCED) { + weak_clear_no_lock(&table->weak_table, (id)this); + } + table->refcnts.erase(it); } spinlock_unlock(&table->slock); - return refcnt_result; } -id -_objc_rootInit(id obj) + +/*********************************************************************** +* Optimized retain/release/autorelease entrypoints +**********************************************************************/ + + +#if __OBJC2__ + +__attribute__((aligned(16))) +id +objc_retain(id obj) { - // In practice, it will be hard to rely on this function. - // Many classes do not properly chain -init calls. - return obj; + if (!obj) return obj; + if (obj->isTaggedPointer()) return obj; + return obj->retain(); } -id -_objc_rootAllocWithZone(Class cls, malloc_zone_t *zone) -{ - id obj; -#if __OBJC2__ - // allocWithZone under __OBJC2__ ignores the zone parameter - (void)zone; - obj = class_createInstance(cls, 0); +__attribute__((aligned(16))) +void +objc_release(id obj) +{ + if (!obj) return; + if (obj->isTaggedPointer()) return; + return obj->release(); +} + + +__attribute__((aligned(16))) +id +objc_autorelease(id obj) +{ + if (!obj) return obj; + if (obj->isTaggedPointer()) return obj; + return obj->autorelease(); +} + + +// OBJC2 +#else +// not OBJC2 + + +id objc_retain(id obj) { return [obj retain]; } +void objc_release(id obj) { [obj release]; } +id objc_autorelease(id obj) { return [obj autorelease]; } + + +#endif + + +/*********************************************************************** +* Basic operations for root class implementations a.k.a. 
_objc_root*() +**********************************************************************/ + +bool +_objc_rootTryRetain(id obj) +{ + assert(obj); + + return obj->rootTryRetain(); +} + +bool +_objc_rootIsDeallocating(id obj) +{ + assert(obj); + + return obj->rootIsDeallocating(); +} + + +void +objc_clear_deallocating(id obj) +{ + assert(obj); + assert(!UseGC); + + if (obj->isTaggedPointer()) return; + obj->clearDeallocating(); +} + + +bool +_objc_rootReleaseWasZero(id obj) +{ + assert(obj); + + return obj->rootReleaseShouldDealloc(); +} + + +id +_objc_rootAutorelease(id obj) +{ + assert(obj); + // assert(!UseGC); + if (UseGC) return obj; // fixme CF calls this when GC is on + + return obj->rootAutorelease(); +} + +uintptr_t +_objc_rootRetainCount(id obj) +{ + assert(obj); + + return obj->rootRetainCount(); +} + + +id +_objc_rootRetain(id obj) +{ + assert(obj); + + return obj->rootRetain(); +} + +void +_objc_rootRelease(id obj) +{ + assert(obj); + + obj->rootRelease(); +} + + +id +_objc_rootAllocWithZone(Class cls, malloc_zone_t *zone) +{ + id obj; + +#if __OBJC2__ + // allocWithZone under __OBJC2__ ignores the zone parameter + (void)zone; + obj = class_createInstance(cls, 0); #else if (!zone || UseGC) { obj = class_createInstance(cls, 0); @@ -1121,63 +1510,71 @@ _objc_rootAllocWithZone(Class cls, malloc_zone_t *zone) return obj; } -id -_objc_rootAlloc(Class cls) + +// Call [cls alloc] or [cls allocWithZone:nil], with appropriate +// shortcutting optimizations. +static ALWAYS_INLINE id +callAlloc(Class cls, bool checkNil, bool allocWithZone=false) { + if (checkNil && !cls) return nil; + #if __OBJC2__ - // Skip over the +allocWithZone: call if the class doesn't override it. if (! cls->ISA()->hasCustomAWZ()) { - id obj = class_createInstance(cls, 0); - if (!obj) obj = callBadAllocHandler(cls); - return obj; + // No alloc/allocWithZone implementation. Go straight to the allocator. + // fixme store hasCustomAWZ in the non-meta class and + // add it to canAllocFast's summary + if (cls->canAllocFast()) { + // No ctors, raw isa, etc. Go straight to the metal. + bool dtor = cls->hasCxxDtor(); + id obj = (id)calloc(1, cls->bits.fastInstanceSize()); + if (!obj) return callBadAllocHandler(cls); + obj->initInstanceIsa(cls, dtor); + return obj; + } + else { + // Has ctor or raw isa or something. Use the slower path. + id obj = class_createInstance(cls, 0); + if (!obj) return callBadAllocHandler(cls); + return obj; + } } #endif - return [cls allocWithZone: nil]; + + // No shortcuts available. + if (allocWithZone) return [cls allocWithZone:nil]; + return [cls alloc]; } + +// Base class implementation of +alloc. cls is not nil. +// Calls [cls allocWithZone:nil]. +id +_objc_rootAlloc(Class cls) +{ + return callAlloc(cls, false/*checkNil*/, true/*allocWithZone*/); +} + +// Calls [cls alloc]. id objc_alloc(Class cls) { -#if __OBJC2__ - // Skip over +alloc and +allocWithZone: if the class doesn't override them. - if (cls && - cls->ISA()->isInitialized_meta() && - ! cls->ISA()->hasCustomAWZ()) - { - id obj = class_createInstance(cls, 0); - if (!obj) obj = callBadAllocHandler(cls); - return obj; - } -#endif - return [cls alloc]; + return callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/); } +// Calls [cls allocWithZone:nil]. id objc_allocWithZone(Class cls) { -#if __OBJC2__ - // Skip over the +allocWithZone: call if the class doesn't override it. - if (cls && - cls->ISA()->isInitialized_meta() && - ! 
cls->ISA()->hasCustomAWZ()) - { - id obj = class_createInstance(cls, 0); - if (!obj) obj = callBadAllocHandler(cls); - return obj; - } -#endif - return [cls allocWithZone: nil]; + return callAlloc(cls, true/*checkNil*/, true/*allocWithZone*/); } + void _objc_rootDealloc(id obj) { assert(obj); - assert(!UseGC); - if (obj->isTaggedPointer()) return; - - object_dispose(obj); + obj->rootDealloc(); } void @@ -1192,6 +1589,16 @@ _objc_rootFinalize(id obj __unused) _objc_fatal("_objc_rootFinalize called with garbage collection off"); } + +id +_objc_rootInit(id obj) +{ + // In practice, it will be hard to rely on this function. + // Many classes do not properly chain -init calls. + return obj; +} + + malloc_zone_t * _objc_rootZone(id obj) { @@ -1217,10 +1624,6 @@ _objc_rootHash(id obj) return (uintptr_t)obj; } -// make CF link for now -void *_objc_autoreleasePoolPush(void) { return objc_autoreleasePoolPush(); } -void _objc_autoreleasePoolPop(void *ctxt) { objc_autoreleasePoolPop(ctxt); } - void * objc_autoreleasePoolPush(void) { @@ -1239,145 +1642,30 @@ objc_autoreleasePoolPop(void *ctxt) AutoreleasePoolPage::pop(ctxt); } -void -_objc_autoreleasePoolPrint(void) -{ - if (UseGC) return; - AutoreleasePoolPage::printAll(); -} - -#if SUPPORT_RETURN_AUTORELEASE - -/* - Fast handling of returned autoreleased values. - The caller and callee cooperate to keep the returned object - out of the autorelease pool. - - Caller: - ret = callee(); - objc_retainAutoreleasedReturnValue(ret); - // use ret here - - Callee: - // compute ret - [ret retain]; - return objc_autoreleaseReturnValue(ret); - - objc_autoreleaseReturnValue() examines the caller's instructions following - the return. If the caller's instructions immediately call - objc_autoreleaseReturnValue, then the callee omits the -autorelease and saves - the result in thread-local storage. If the caller does not look like it - cooperates, then the callee calls -autorelease as usual. - - objc_autoreleaseReturnValue checks if the returned value is the same as the - one in thread-local storage. If it is, the value is used directly. If not, - the value is assumed to be truly autoreleased and is retained again. In - either case, the caller now has a retained reference to the value. - - Tagged pointer objects do participate in the fast autorelease scheme, - because it saves message sends. They are not entered in the autorelease - pool in the slow case. 
-*/ - -# if __x86_64__ - -static bool callerAcceptsFastAutorelease(const void * const ra0) -{ - const uint8_t *ra1 = (const uint8_t *)ra0; - const uint16_t *ra2; - const uint32_t *ra4 = (const uint32_t *)ra1; - const void **sym; - -#define PREFER_GOTPCREL 0 -#if PREFER_GOTPCREL - // 48 89 c7 movq %rax,%rdi - // ff 15 callq *symbol@GOTPCREL(%rip) - if (*ra4 != 0xffc78948) { - return false; - } - if (ra1[4] != 0x15) { - return false; - } - ra1 += 3; -#else - // 48 89 c7 movq %rax,%rdi - // e8 callq symbol - if (*ra4 != 0xe8c78948) { - return false; - } - ra1 += (long)*(const int32_t *)(ra1 + 4) + 8l; - ra2 = (const uint16_t *)ra1; - // ff 25 jmpq *symbol@DYLDMAGIC(%rip) - if (*ra2 != 0x25ff) { - return false; - } -#endif - ra1 += 6l + (long)*(const int32_t *)(ra1 + 2); - sym = (const void **)ra1; - if (*sym != objc_retainAutoreleasedReturnValue) - { - return false; - } - - return true; -} -// __x86_64__ -# elif __arm__ - -static bool callerAcceptsFastAutorelease(const void *ra) +void * +_objc_autoreleasePoolPush(void) { - // if the low bit is set, we're returning to thumb mode - if ((uintptr_t)ra & 1) { - // 3f 46 mov r7, r7 - // we mask off the low bit via subtraction - if (*(uint16_t *)((uint8_t *)ra - 1) == 0x463f) { - return true; - } - } else { - // 07 70 a0 e1 mov r7, r7 - if (*(uint32_t *)ra == 0xe1a07007) { - return true; - } - } - return false; + return objc_autoreleasePoolPush(); } -// __arm__ -# elif __i386__ && TARGET_IPHONE_SIMULATOR - -static bool callerAcceptsFastAutorelease(const void *ra) +void +_objc_autoreleasePoolPop(void *ctxt) { - return false; + objc_autoreleasePoolPop(ctxt); } -// __i386__ && TARGET_IPHONE_SIMULATOR -# else - -#warning unknown architecture - -static bool callerAcceptsFastAutorelease(const void *ra) +void +_objc_autoreleasePoolPrint(void) { - return false; + if (UseGC) return; + AutoreleasePoolPage::printAll(); } -# endif - -// SUPPORT_RETURN_AUTORELEASE -#endif - - id objc_autoreleaseReturnValue(id obj) { -#if SUPPORT_RETURN_AUTORELEASE - assert(tls_get_direct(AUTORELEASE_POOL_RECLAIM_KEY) == nil); - - if (callerAcceptsFastAutorelease(__builtin_return_address(0))) { - tls_set_direct(AUTORELEASE_POOL_RECLAIM_KEY, obj); - return obj; - } -#endif + if (fastAutoreleaseForReturn(obj)) return obj; return objc_autorelease(obj); } @@ -1391,26 +1679,9 @@ objc_retainAutoreleaseReturnValue(id obj) id objc_retainAutoreleasedReturnValue(id obj) { -#if SUPPORT_RETURN_AUTORELEASE - if (obj == tls_get_direct(AUTORELEASE_POOL_RECLAIM_KEY)) { - tls_set_direct(AUTORELEASE_POOL_RECLAIM_KEY, 0); - return obj; - } -#endif - return objc_retain(obj); -} + if (fastRetainFromReturn(obj)) return obj; -void -objc_storeStrong(id *location, id obj) -{ - // XXX FIXME -- GC support? 
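objc_autoreleaseReturnValue() and objc_retainAutoreleasedReturnValue() above now delegate to fastAutoreleaseForReturn()/fastRetainFromReturn(), presumably inlined from the new objc-object.h (their definitions are not in this hunk); the deleted callerAcceptsFastAutorelease() variants show the per-architecture return-address inspection that used to back the same optimization. The removed code makes the handshake itself clear; here it is as a minimal sketch, with the TLS slot and the caller check written as stand-ins rather than the runtime's names:

#include <objc/objc.h>

extern "C" id objc_retain(id);        // defined earlier in this file
extern "C" id objc_autorelease(id);   // defined earlier in this file

static __thread id gReturnSlot;       // stand-in for tls_*_direct(AUTORELEASE_POOL_RECLAIM_KEY)

static bool callerCooperatesSketch(void) { return false; }  // stand-in for the return-address check

id autoreleaseReturnValueSketch(id obj)          // callee side
{
    if (callerCooperatesSketch()) {
        gReturnSlot = obj;                       // park the object in thread-local storage...
        return obj;                              // ...instead of putting it in the pool
    }
    return objc_autorelease(obj);                // uncooperative caller: ordinary autorelease
}

id retainAutoreleasedReturnValueSketch(id obj)   // caller side
{
    if (obj == gReturnSlot) {                    // the callee parked exactly this object
        gReturnSlot = nil;
        return obj;                              // ownership transfers with no retain/autorelease pair
    }
    return objc_retain(obj);                     // genuinely autoreleased: retain as usual
}

Tagged pointers deliberately take the fast path too, as the removed comment notes: that saves message sends, and in the slow case they are never entered in the pool anyway.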
- id prev = *location; - if (obj == prev) { - return; - } - objc_retain(obj); - *location = obj; - objc_release(prev); + return objc_retain(obj); } id @@ -1522,12 +1793,12 @@ void arr_init(void) + (BOOL)respondsToSelector:(SEL)sel { if (!sel) return NO; - return class_respondsToSelector(object_getClass((id)self), sel); + return class_respondsToSelector_inst(object_getClass(self), sel, self); } - (BOOL)respondsToSelector:(SEL)sel { if (!sel) return NO; - return class_respondsToSelector([self class], sel); + return class_respondsToSelector_inst([self class], sel, self); } + (BOOL)conformsToProtocol:(Protocol *)protocol { @@ -1702,7 +1973,7 @@ void arr_init(void) + (id)new { - return [[self alloc] init]; + return [callAlloc(self, false/*checkNil*/) init]; } + (id)retain { @@ -1710,23 +1981,9 @@ void arr_init(void) } // Replaced by ObjectAlloc -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wmismatched-method-attributes" -- (id)retain -__attribute__((aligned(16))) -{ - if (((id)self)->isTaggedPointer()) return self; - - SideTable *table = SideTable::tableForPointer(self); - - if (spinlock_trylock(&table->slock)) { - table->refcnts[DISGUISE(self)] += SIDE_TABLE_RC_ONE; - spinlock_unlock(&table->slock); - return self; - } - return _objc_rootRetain_slow(self, table); +- (id)retain { + return ((id)self)->rootRetain(); } -#pragma clang diagnostic pop + (BOOL)_tryRetain { @@ -1735,7 +1992,7 @@ __attribute__((aligned(16))) // Replaced by ObjectAlloc - (BOOL)_tryRetain { - return _objc_rootTryRetain(self); + return ((id)self)->rootTryRetain(); } + (BOOL)_isDeallocating { @@ -1743,7 +2000,7 @@ __attribute__((aligned(16))) } - (BOOL)_isDeallocating { - return _objc_rootIsDeallocating(self); + return ((id)self)->rootIsDeallocating(); } + (BOOL)allowsWeakReference { @@ -1766,50 +2023,25 @@ __attribute__((aligned(16))) } // Replaced by ObjectAlloc -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wmismatched-method-attributes" -- (oneway void)release -__attribute__((aligned(16))) -{ - // tagged pointer check is inside _objc_rootReleaseWasZero(). 
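+new above now funnels through the same callAlloc() shortcut as the C entrypoints: _objc_rootAlloc uses callAlloc(cls, false, true), objc_alloc uses (true, false), objc_allocWithZone uses (true, true), and +new uses [callAlloc(self, false) init]. The branch structure of that helper, modeled with plain booleans so the shape is visible without the class-bit accessors (hasCustomAWZ() and canAllocFast() are the real bits; here they are just parameters):

enum AllocPathSketch {
    AllocPathFastCalloc,      // calloc + initInstanceIsa: no C++ ctor, non-pointer isa allowed
    AllocPathCreateInstance,  // class_createInstance(), still no +alloc message send
    AllocPathMessageSend      // fall back to [cls alloc] / [cls allocWithZone:nil]
};

static AllocPathSketch callAllocPathSketch(bool hasCustomAWZ, bool canAllocFast)
{
    if (!hasCustomAWZ) {                     // class does not override +alloc or +allocWithZone:
        return canAllocFast ? AllocPathFastCalloc
                            : AllocPathCreateInstance;
    }
    return AllocPathMessageSend;             // overridden: send the message as before
}

Classes that override +alloc or +allocWithZone: therefore lose nothing; the shortcut only skips message sends whose outcome is already known.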
- - if (_objc_rootReleaseWasZero(self) == false) { - return; - } - [self dealloc]; +- (oneway void)release { + ((id)self)->rootRelease(); } -#pragma clang diagnostic pop + (id)autorelease { return (id)self; } // Replaced by ObjectAlloc -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wmismatched-method-attributes" -- (id)autorelease -__attribute__((aligned(16))) -{ - // no tag check here: tagged pointers DO use fast autoreleasing - -#if SUPPORT_RETURN_AUTORELEASE - assert(tls_get_direct(AUTORELEASE_POOL_RECLAIM_KEY) == nil); - - if (callerAcceptsFastAutorelease(__builtin_return_address(0))) { - tls_set_direct(AUTORELEASE_POOL_RECLAIM_KEY, self); - return self; - } -#endif - return _objc_rootAutorelease2(self); +- (id)autorelease { + return ((id)self)->rootAutorelease(); } -#pragma clang diagnostic pop + (NSUInteger)retainCount { return ULONG_MAX; } - (NSUInteger)retainCount { - return _objc_rootRetainCount(self); + return ((id)self)->rootRetainCount(); } + (id)alloc { @@ -1834,6 +2066,7 @@ __attribute__((aligned(16))) + (void)dealloc { } + // Replaced by NSZombies - (void)dealloc { _objc_rootDealloc(self); @@ -1881,100 +2114,4 @@ __attribute__((aligned(16))) @end -__attribute__((aligned(16))) -id -objc_retain(id obj) -{ - if (!obj || obj->isTaggedPointer()) { - goto out_slow; - } -#if __OBJC2__ - if (((Class)obj->isa)->hasCustomRR()) { - return [obj retain]; - } - return bypass_msgSend_retain(obj); -#else - return [obj retain]; -#endif - out_slow: - // clang really wants to reorder the "mov %rdi, %rax" early - // force better code gen with a data barrier - asm volatile(""); - return obj; -} - -__attribute__((aligned(16))) -void -objc_release(id obj) -{ - if (!obj || obj->isTaggedPointer()) { - return; - } -#if __OBJC2__ - if (((Class)obj->isa)->hasCustomRR()) { - return (void)[obj release]; - } - return bypass_msgSend_release(obj); -#else - [obj release]; -#endif -} -__attribute__((aligned(16))) -id -objc_autorelease(id obj) -{ - if (!obj || obj->isTaggedPointer()) { - goto out_slow; - } -#if __OBJC2__ - if (((Class)obj->isa)->hasCustomRR()) { - return [obj autorelease]; - } - return bypass_msgSend_autorelease(obj); -#else - return [obj autorelease]; -#endif - out_slow: - // clang really wants to reorder the "mov %rdi, %rax" early - // force better code gen with a data barrier - asm volatile(""); - return obj; -} - -id -_objc_rootRetain(id obj) -{ - assert(obj); - assert(!UseGC); - - if (obj->isTaggedPointer()) return obj; - - return bypass_msgSend_retain(obj); -} - -void -_objc_rootRelease(id obj) -{ - assert(obj); - assert(!UseGC); - - if (obj->isTaggedPointer()) return; - - bypass_msgSend_release(obj); -} - -id -_objc_rootAutorelease(id obj) -{ - assert(obj); // root classes shouldn't get here, since objc_msgSend ignores nil - // assert(!UseGC); - - if (UseGC) { - return obj; - } - - // no tag check here: tagged pointers DO use fast autoreleasing - - return bypass_msgSend_autorelease(obj); -} diff --git a/runtime/Object.h b/runtime/Object.h index e11ddc0..dd8838d 100644 --- a/runtime/Object.h +++ b/runtime/Object.h @@ -35,7 +35,7 @@ #include #include -#if ! 
__OBJC2__ +#if __OBJC__ && !__OBJC2__ __OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_NA) OBJC_ROOT_CLASS diff --git a/runtime/Object.mm b/runtime/Object.mm index ffdce0c..c4981cf 100644 --- a/runtime/Object.mm +++ b/runtime/Object.mm @@ -229,7 +229,7 @@ static const char - (BOOL)isKindOf:aClass { - register Class cls; + Class cls; for (cls = isa; cls; cls = cls->superclass) if (cls == (Class)aClass) return YES; @@ -243,7 +243,7 @@ static const char - (BOOL)isKindOfClassNamed:(const char *)aClassName { - register Class cls; + Class cls; for (cls = isa; cls; cls = cls->superclass) if (strcmp(aClassName, class_getName(cls)) == 0) return YES; diff --git a/runtime/OldClasses.subproj/List.h b/runtime/OldClasses.subproj/List.h index f223fcd..199a887 100644 --- a/runtime/OldClasses.subproj/List.h +++ b/runtime/OldClasses.subproj/List.h @@ -32,7 +32,7 @@ #ifndef _OBJC_LIST_H_ #define _OBJC_LIST_H_ -#if !__OBJC2__ +#if __OBJC__ && !__OBJC2__ #include #include diff --git a/runtime/Protocol.h b/runtime/Protocol.h index 26b3a97..0e78850 100644 --- a/runtime/Protocol.h +++ b/runtime/Protocol.h @@ -28,7 +28,13 @@ #ifndef _OBJC_PROTOCOL_H_ #define _OBJC_PROTOCOL_H_ -#if __OBJC2__ +#if !__OBJC__ + +// typedef Protocol is here: +#include + + +#elif __OBJC2__ #include @@ -39,6 +45,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0) @interface Protocol : NSObject @end + #else #include diff --git a/runtime/a1a2-blocktramps-arm.s b/runtime/a1a2-blocktramps-arm.s index cfaf5da..9e54078 100644 --- a/runtime/a1a2-blocktramps-arm.s +++ b/runtime/a1a2-blocktramps-arm.s @@ -9,585 +9,138 @@ .private_extern __a1a2_tramphead .private_extern __a1a2_firsttramp - .private_extern __a1a2_nexttramp .private_extern __a1a2_trampend -// This must match a2a3-blocktramps-arm.s -#if defined(_ARM_ARCH_7) -# define THUMB2 1 -#else -# define THUMB2 0 +// Trampoline machinery assumes the trampolines are Thumb function pointers +#if !__thumb2__ +# error sorry #endif - -#if THUMB2 - .thumb - .thumb_func __a1a2_tramphead - .thumb_func __a1a2_firsttramp - .thumb_func __a1a2_nexttramp - .thumb_func __a1a2_trampend -#else - // don't use Thumb-1 - .arm -#endif - -.align PAGE_SHIFT -__a1a2_tramphead_nt: + +.thumb +.thumb_func __a1a2_tramphead +.thumb_func __a1a2_firsttramp +.thumb_func __a1a2_trampend + +.align PAGE_MAX_SHIFT __a1a2_tramphead: /* r0 == self - r1 == pc of trampoline's first instruction + PC bias + r12 == pc of trampoline's first instruction + PC bias lr == original return address */ - // calculate the trampoline's index (512 entries, 8 bytes each) -#if THUMB2 - // PC bias is only 4, no need to correct with 8-byte trampolines - ubfx r1, r1, #3, #9 -#else - sub r1, r1, #8 // correct PC bias - lsl r1, r1, #20 - lsr r1, r1, #23 -#endif - - // load block pointer from trampoline's data - // nt label works around thumb integrated asm bug rdar://11315197 - adr r12, __a1a2_tramphead_nt // text page - sub r12, r12, #PAGE_SIZE // data page precedes text page - ldr r12, [r12, r1, LSL #3] // load block pointer from data + index*8 - - // shuffle parameters mov r1, r0 // _cmd = self - mov r0, r12 // self = block pointer - // tail call block->invoke - ldr pc, [r12, #12] + // Trampoline's data is one page before the trampoline text. + // Also correct PC bias of 4 bytes. + sub r12, #PAGE_MAX_SIZE + ldr r0, [r12, #-4] // self = block object + ldr pc, [r0, #12] // tail call block->invoke // not reached - // Make v6 and v7 match so they have the same number of TrampolineEntry - // below. Debug asserts in objc-block-trampoline.m check this. 
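The rewritten trampoline head above no longer computes a slot index: every 8-byte trampoline captures its own pc into r12 (see the TrampolineEntry macro just below), and the head finds the block pointer by pure address arithmetic one PAGE_MAX_SIZE below the code. The same arithmetic in C, for reference (PAGE_MAX_SIZE is assumed to come from <mach/vm_param.h>, and Thumb reads pc as the current instruction address plus 4):

#include <stdint.h>
#include <mach/vm_param.h>   // PAGE_MAX_SIZE (assumed location of the definition)

// What "mov r12, pc; ...; sub r12, #PAGE_MAX_SIZE; ldr r0, [r12, #-4]" computes.
// `entry` is the address of one TrampolineEntry in the code page.
static uintptr_t blockSlotForArmTrampolineSketch(uintptr_t entry)
{
    uintptr_t pc   = entry + 4;                // Thumb PC bias captured by "mov r12, pc"
    uintptr_t slot = pc - PAGE_MAX_SIZE - 4;   // sub #PAGE_MAX_SIZE, then the ldr's -4 offset
    return slot;                               // == entry - PAGE_MAX_SIZE: the same offset,
                                               // but in the data page one page below
}

This is why the old 512-entry index math and the __a1a2_nexttramp label can go away: the data page carries one 8-byte slot per trampoline at the identical offset, so no index is ever needed.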
-#if THUMB2 - .space 16 -#endif + // Align trampolines to 8 bytes +.align 3 .macro TrampolineEntry - mov r1, pc + mov r12, pc b __a1a2_tramphead - .align 3 +.align 3 +.endmacro + +.macro TrampolineEntryX16 + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry +.endmacro + +.macro TrampolineEntryX256 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 .endmacro -.align 3 .private_extern __a1a2_firsttramp __a1a2_firsttramp: - TrampolineEntry + // 2048-2 trampolines to fill 16K page + TrampolineEntryX256 + TrampolineEntryX256 + TrampolineEntryX256 + TrampolineEntryX256 + + TrampolineEntryX256 + TrampolineEntryX256 + TrampolineEntryX256 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry -.private_extern __a1a2_nexttramp -__a1a2_nexttramp: -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry 
-TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry 
-TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry + TrampolineEntry + TrampolineEntry + // TrampolineEntry + // TrampolineEntry .private_extern __a1a2_trampend __a1a2_trampend: diff --git a/runtime/a1a2-blocktramps-arm64.s b/runtime/a1a2-blocktramps-arm64.s new file mode 100644 index 0000000..139df99 --- /dev/null +++ 
b/runtime/a1a2-blocktramps-arm64.s @@ -0,0 +1,134 @@ +#if __arm64__ + +#include + +.text + + .private_extern __a1a2_tramphead + .private_extern __a1a2_firsttramp + .private_extern __a1a2_trampend + +.align PAGE_MAX_SHIFT +__a1a2_tramphead: +L_a1a2_tramphead: + /* + x0 == self + x17 == address of called trampoline's data (1 page before its code) + lr == original return address + */ + + mov x1, x0 // _cmd = self + ldr x0, [x17] // self = block object + ldr x16, [x0, #16] // tail call block->invoke + br x16 + + // pad up to TrampolineBlockPagePair header size + nop + nop + +.macro TrampolineEntry + // load address of trampoline data (one page before this instruction) + adr x17, -PAGE_MAX_SIZE + b L_a1a2_tramphead +.endmacro + +.macro TrampolineEntryX16 + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry +.endmacro + +.macro TrampolineEntryX256 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 +.endmacro + +.align 3 +.private_extern __a1a2_firsttramp +__a1a2_firsttramp: + // 2048-3 trampolines to fill 16K page + TrampolineEntryX256 + TrampolineEntryX256 + TrampolineEntryX256 + TrampolineEntryX256 + + TrampolineEntryX256 + TrampolineEntryX256 + TrampolineEntryX256 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + // TrampolineEntry + // TrampolineEntry + // TrampolineEntry + +.private_extern __a1a2_trampend +__a1a2_trampend: + +#endif diff --git a/runtime/a2a3-blocktramps-arm.s b/runtime/a2a3-blocktramps-arm.s index 166f833..ac0ce72 100644 --- a/runtime/a2a3-blocktramps-arm.s +++ b/runtime/a2a3-blocktramps-arm.s @@ -7,585 +7,140 @@ .text + .private_extern __a2a3_tramphead + .private_extern __a2a3_firsttramp + .private_extern __a2a3_trampend -// This must match a1a2-blocktramps-arm.s -#if defined(_ARM_ARCH_7) -# define THUMB2 1 -#else -# define THUMB2 0 +// Trampoline machinery assumes the trampolines are Thumb function pointers +#if !__thumb2__ +# error sorry #endif -#if THUMB2 - .thumb - .thumb_func __a2a3_tramphead - .thumb_func __a2a3_firsttramp - .thumb_func __a2a3_nexttramp - .thumb_func __a2a3_trampend -#else - // don't use Thumb-1 - .arm -#endif - -.align PAGE_SHIFT -.private_extern __a2a3_tramphead -__a2a3_tramphead_nt: +.thumb +.thumb_func __a2a3_tramphead +.thumb_func __a2a3_firsttramp +.thumb_func __a2a3_trampend + +.align PAGE_MAX_SHIFT __a2a3_tramphead: /* - r0 == stret r1 == self - r2 == pc of trampoline's first instruction + 4 + r12 == pc of trampoline's first instruction + PC bias lr == original return address */ - // calculate the trampoline's 
index (512 entries, 8 bytes each) -#if THUMB2 - // PC bias is only 4, no need to correct with 8-byte trampolines - ubfx r2, r2, #3, #9 -#else - sub r2, r2, #8 // correct PC bias - lsl r2, r2, #20 - lsr r2, r2, #23 -#endif - - // load block pointer from trampoline's data - // nt label works around thumb integrated asm bug rdar://11315197 - adr r12, __a2a3_tramphead_nt // text page - sub r12, r12, #PAGE_SIZE // data page precedes text page - ldr r12, [r12, r2, LSL #3] // load block pointer from data + index*8 - - // shuffle parameters mov r2, r1 // _cmd = self - mov r1, r12 // self = block pointer - // tail call block->invoke - ldr pc, [r12, #12] + // Trampoline's data is one page before the trampoline text. + // Also correct PC bias of 4 bytes. + sub r12, #PAGE_MAX_SIZE + ldr r1, [r12, #-4] // self = block object + ldr pc, [r1, #12] // tail call block->invoke // not reached - // Make v6 and v7 match so they have the same number of TrampolineEntry - // below. Debug asserts in objc-block-trampoline.m check this. -#if THUMB2 - .space 16 -#endif - + // Align trampolines to 8 bytes +.align 3 + .macro TrampolineEntry - mov r2, pc + mov r12, pc b __a2a3_tramphead - .align 3 +.align 3 +.endmacro + +.macro TrampolineEntryX16 + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry +.endmacro + +.macro TrampolineEntryX256 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 .endmacro -.align 3 .private_extern __a2a3_firsttramp __a2a3_firsttramp: - TrampolineEntry + // 2048-2 trampolines to fill 16K page + TrampolineEntryX256 + TrampolineEntryX256 + TrampolineEntryX256 + TrampolineEntryX256 + + TrampolineEntryX256 + TrampolineEntryX256 + TrampolineEntryX256 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry -.private_extern __a2a3_nexttramp -__a2a3_nexttramp: -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry 
-TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry 
-TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry 
-TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry -TrampolineEntry + TrampolineEntry + TrampolineEntry + // TrampolineEntry + // TrampolineEntry .private_extern __a2a3_trampend __a2a3_trampend: diff --git a/runtime/hashtable2.mm b/runtime/hashtable2.mm index 478f689..20b5255 100644 --- a/runtime/hashtable2.mm +++ b/runtime/hashtable2.mm @@ -485,8 +485,8 @@ uintptr_t NXPtrHash (const void *info, const void *data) { }; uintptr_t NXStrHash (const void *info, const void *data) { - register uintptr_t hash = 0; - register unsigned char *s = (unsigned char *) data; + uintptr_t hash = 0; + unsigned char *s = (unsigned char *) data; /* unsigned to avoid a sign-extend */ /* unroll the loop */ if (s) for (; ; ) { diff --git a/runtime/llvm-DenseMapInfo.h b/runtime/llvm-DenseMapInfo.h index b425662..4b7869f 100644 --- a/runtime/llvm-DenseMapInfo.h +++ b/runtime/llvm-DenseMapInfo.h @@ -41,12 +41,28 @@ struct DenseMapInfo { return reinterpret_cast(Val); } static unsigned getHashValue(const T *PtrVal) { - return (unsigned((uintptr_t)PtrVal) >> 4) ^ - (unsigned((uintptr_t)PtrVal) >> 9); + return ptr_hash((uintptr_t)PtrVal); } static bool isEqual(const T *LHS, const T *RHS) { return LHS == RHS; } }; +// Provide DenseMapInfo for disguised pointers. +template +struct DenseMapInfo> { + static inline DisguisedPtr getEmptyKey() { + return DisguisedPtr((T*)(uintptr_t)-1); + } + static inline DisguisedPtr getTombstoneKey() { + return DisguisedPtr((T*)(uintptr_t)-2); + } + static unsigned getHashValue(const T *PtrVal) { + return ptr_hash((uintptr_t)PtrVal); + } + static bool isEqual(const DisguisedPtr &LHS, const DisguisedPtr &RHS) { + return LHS == RHS; + } +}; + // Provide DenseMapInfo for cstrings. template<> struct DenseMapInfo { static inline const char* getEmptyKey() { diff --git a/runtime/message.h b/runtime/message.h index 538b389..9f2a1c6 100644 --- a/runtime/message.h +++ b/runtime/message.h @@ -29,6 +29,7 @@ #include #include +#pragma GCC system_header #ifndef OBJC_SUPER #define OBJC_SUPER @@ -113,9 +114,11 @@ OBJC_EXPORT id objc_msgSendSuper(struct objc_super *super, SEL op, ...) */ #if !OBJC_OLD_DISPATCH_PROTOTYPES OBJC_EXPORT void objc_msgSend_stret(void /* id self, SEL op, ... */ ) - __OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0); + __OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0) + OBJC_ARM64_UNAVAILABLE; OBJC_EXPORT void objc_msgSendSuper_stret(void /* struct objc_super *super, SEL op, ... */ ) - __OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0); + __OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0) + OBJC_ARM64_UNAVAILABLE; #else /** * Sends a message with a data-structure return value to an instance of a class. @@ -123,14 +126,17 @@ OBJC_EXPORT void objc_msgSendSuper_stret(void /* struct objc_super *super, SEL o * @see objc_msgSend */ OBJC_EXPORT void objc_msgSend_stret(id self, SEL op, ...) 
- __OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0); + __OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0) + OBJC_ARM64_UNAVAILABLE; + /** * Sends a message with a data-structure return value to the superclass of an instance of a class. * * @see objc_msgSendSuper */ OBJC_EXPORT void objc_msgSendSuper_stret(struct objc_super *super, SEL op, ...) - __OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0); + __OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0) + OBJC_ARM64_UNAVAILABLE; #endif @@ -226,12 +232,14 @@ OBJC_EXPORT void objc_msgSend_fp2ret(id self, SEL op, ...) OBJC_EXPORT void method_invoke(void /* id receiver, Method m, ... */ ) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); OBJC_EXPORT void method_invoke_stret(void /* id receiver, Method m, ... */ ) - __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); + __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0) + OBJC_ARM64_UNAVAILABLE; #else OBJC_EXPORT id method_invoke(id receiver, Method m, ...) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); OBJC_EXPORT void method_invoke_stret(id receiver, Method m, ...) - __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); + __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0) + OBJC_ARM64_UNAVAILABLE; #endif @@ -254,12 +262,14 @@ OBJC_EXPORT void method_invoke_stret(id receiver, Method m, ...) OBJC_EXPORT void _objc_msgForward(void /* id receiver, SEL sel, ... */ ) __OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0); OBJC_EXPORT void _objc_msgForward_stret(void /* id receiver, SEL sel, ... */ ) - __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_0); + __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_0) + OBJC_ARM64_UNAVAILABLE; #else OBJC_EXPORT id _objc_msgForward(id receiver, SEL sel, ...) __OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0); OBJC_EXPORT void _objc_msgForward_stret(id receiver, SEL sel, ...) - __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_0); + __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_0) + OBJC_ARM64_UNAVAILABLE; #endif diff --git a/runtime/objc-abi.h b/runtime/objc-abi.h index 46c6d77..809416c 100644 --- a/runtime/objc-abi.h +++ b/runtime/objc-abi.h @@ -48,6 +48,22 @@ OBJC_EXPORT void _objcInit(void) __OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0); +/* Images */ + +// Description of an Objective-C image. +// __DATA,__objc_imageinfo stores one of these. +typedef struct objc_image_info { + uint32_t version; // currently 0 + uint32_t flags; +} objc_image_info; + +// Values for objc_image_info.flags +#define OBJC_IMAGE_IS_REPLACEMENT (1<<0) +#define OBJC_IMAGE_SUPPORTS_GC (1<<1) +#define OBJC_IMAGE_REQUIRES_GC (1<<2) +#define OBJC_IMAGE_OPTIMIZED_BY_DYLD (1<<3) +#define OBJC_IMAGE_SUPPORTS_COMPACTION (1<<4) // might be re-assignable + /* Properties */ @@ -96,7 +112,8 @@ OBJC_EXPORT struct objc_cache _objc_empty_cache OBJC_EXPORT id objc_msgSendSuper2(struct objc_super *super, SEL op, ...) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); OBJC_EXPORT void objc_msgSendSuper2_stret(struct objc_super *super, SEL op,...) - __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); + __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0) + OBJC_ARM64_UNAVAILABLE; // objc_msgSend_noarg() may be faster for methods with no additional arguments. OBJC_EXPORT id objc_msgSend_noarg(id self, SEL _cmd) @@ -113,9 +130,11 @@ OBJC_EXPORT id objc_msgSend_debug(id self, SEL op, ...) OBJC_EXPORT id objc_msgSendSuper2_debug(struct objc_super *super, SEL op, ...) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_5_0); OBJC_EXPORT void objc_msgSend_stret_debug(id self, SEL op, ...) 
- __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_5_0); + __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_5_0) + OBJC_ARM64_UNAVAILABLE; OBJC_EXPORT void objc_msgSendSuper2_stret_debug(struct objc_super *super, SEL op,...) - __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_5_0); + __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_5_0) + OBJC_ARM64_UNAVAILABLE; # if defined(__i386__) OBJC_EXPORT double objc_msgSend_fpret_debug(id self, SEL op, ...) @@ -134,7 +153,7 @@ OBJC_EXPORT void objc_msgSend_fp2ret_debug(id self, SEL op, ...) #endif -#if __OBJC2__ && defined(__x86_64__) +#if defined(__x86_64__) && TARGET_OS_MAC && !TARGET_IPHONE_SIMULATOR // objc_msgSend_fixup() is used for vtable-dispatchable call sites. OBJC_EXPORT void objc_msgSend_fixup(void) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5, __MAC_10_8, __IPHONE_NA, __IPHONE_NA); diff --git a/runtime/objc-api.h b/runtime/objc-api.h index 3779bab..fc36423 100644 --- a/runtime/objc-api.h +++ b/runtime/objc-api.h @@ -65,7 +65,11 @@ /* OBJC_ISA_AVAILABILITY: `isa` will be deprecated or unavailable * in the future */ #if !defined(OBJC_ISA_AVAILABILITY) -# define OBJC_ISA_AVAILABILITY /* still available */ +# if __OBJC2__ +# define OBJC_ISA_AVAILABILITY __attribute__((deprecated)) +# else +# define OBJC_ISA_AVAILABILITY /* still available */ +# endif #endif @@ -92,6 +96,17 @@ # endif #endif +#if !defined(OBJC_HIDE_64) +/* OBJC_ARM64_UNAVAILABLE: unavailable on arm64 (i.e. stret dispatch) */ +#if !defined(OBJC_ARM64_UNAVAILABLE) +# if defined(__arm64__) +# define OBJC_ARM64_UNAVAILABLE __attribute__((unavailable("not available in arm64"))) +# else +# define OBJC_ARM64_UNAVAILABLE +# endif +#endif +#endif + /* OBJC_GC_UNAVAILABLE: unavailable with -fobjc-gc or -fobjc-gc-only */ #if !defined(OBJC_GC_UNAVAILABLE) # if __OBJC_GC__ diff --git a/runtime/objc-auto.mm b/runtime/objc-auto.mm index ab961f3..5655ecd 100644 --- a/runtime/objc-auto.mm +++ b/runtime/objc-auto.mm @@ -816,8 +816,6 @@ void gc_unregister_datasegment(uintptr_t base, size_t size) { auto_zone_unregister_datasegment(gc_zone, (void*)base, size); } -#define countof(array) (sizeof(array) / sizeof(array[0])) - /*********************************************************************** * Initialization @@ -1129,7 +1127,7 @@ static malloc_zone_t *objc_debug_zone(void) { static malloc_zone_t *z = nil; if (!z) { - z = malloc_create_zone(PAGE_SIZE, 0); + z = malloc_create_zone(PAGE_MAX_SIZE, 0); malloc_set_zone_name(z, "objc-auto debug"); } return z; diff --git a/runtime/objc-block-trampolines.mm b/runtime/objc-block-trampolines.mm index 2be58f3..da9f413 100644 --- a/runtime/objc-block-trampolines.mm +++ b/runtime/objc-block-trampolines.mm @@ -46,123 +46,141 @@ // Scalar return TRAMP(a1a2_tramphead); // trampoline header code TRAMP(a1a2_firsttramp); // first trampoline -TRAMP(a1a2_nexttramp); // second trampoline TRAMP(a1a2_trampend); // after the last trampoline +#if SUPPORT_STRET // Struct return TRAMP(a2a3_tramphead); TRAMP(a2a3_firsttramp); -TRAMP(a2a3_nexttramp); TRAMP(a2a3_trampend); +#endif // argument mode identifier typedef enum { ReturnValueInRegisterArgumentMode, +#if SUPPORT_STRET ReturnValueOnStackArgumentMode, +#endif - ArgumentModeMax + ArgumentModeCount } ArgumentMode; -// slot size is 8 bytes on both i386 and x86_64 (because of bytes-per-call instruction is > 4 for both) -#define SLOT_SIZE 8 - -// unsigned value, any value, larger thna # of blocks that fit in the page pair -#define LAST_SLOT_MARKER 4241 -#define TRAMPOLINE_PAGE_PAIR_HEADER_SIZE (sizeof(uint32_t) + sizeof(struct 
_TrampolineBlockPagePair *) + sizeof(struct _TrampolineBlockPagePair *)) -typedef struct _TrampolineBlockPagePair { - struct _TrampolineBlockPagePair *nextPagePair; // linked list of all page pairs - struct _TrampolineBlockPagePair *nextAvailablePage; // linked list of pages with available slots +// We must take care with our data layout on architectures that support +// multiple page sizes. +// +// The trampoline template in __TEXT is sized and aligned with PAGE_MAX_SIZE. +// On some platforms this requires additional linker flags. +// +// When we allocate a page pair, we use PAGE_MAX_SIZE size. +// This allows trampoline code to find its data by subtracting PAGE_MAX_SIZE. +// +// When we allocate a page pair, we use the process's page alignment. +// This simplifies allocation because we don't need to force greater than +// default alignment when running with small pages, but it also means +// the trampoline code MUST NOT look for its data by masking with PAGE_MAX_MASK. + +struct TrampolineBlockPagePair +{ + TrampolineBlockPagePair *nextPagePair; // linked list of all pages + TrampolineBlockPagePair *nextAvailablePage; // linked list of pages with available slots + + uintptr_t nextAvailable; // index of next available slot, endIndex() if no more available + + // Payload data: block pointers and free list. + // Bytes parallel with trampoline header code are the fields above or unused + // uint8_t blocks[ PAGE_MAX_SIZE - sizeof(TrampolineBlockPagePair) ] - uint32_t nextAvailable; // index of next available slot, 0 if no more available - - // Data: block pointers and free list. - // Bytes parallel with trampoline header are the fields above, or unused. - uint8_t blocks[ PAGE_SIZE - TRAMPOLINE_PAGE_PAIR_HEADER_SIZE ] - __attribute__((unavailable)) /* always use _headerSize() */; - // Code: trampoline header followed by trampolines. 
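The layout comment above is the invariant everything else in this file leans on: the data page and the code page are both PAGE_MAX_SIZE, the code page directly follows the data page, and slot i of one corresponds byte-for-byte to trampoline i of the other. That correspondence as a sketch (it mirrors the payload()/trampoline() accessors that appear further down in this hunk, with the same 8-byte slots and the +1 Thumb adjustment; PAGE_MAX_SIZE again assumed from <mach/vm_param.h>):

//   pagePair .............................. pagePair + PAGE_MAX_SIZE
//   [ header fields | block slots ... ]     [ tramphead code | trampolines ... ]
//         slot i at offset i*8                    trampoline i at PAGE_MAX_SIZE + i*8

#include <stdint.h>
#include <mach/vm_param.h>

static uintptr_t payloadAddressSketch(uintptr_t pagePair, uintptr_t i)
{
    return pagePair + i * 8;                        // block pointer or free-list link
}

static uintptr_t trampolineAddressSketch(uintptr_t pagePair, uintptr_t i)
{
    uintptr_t imp = pagePair + PAGE_MAX_SIZE + i * 8;  // matching code slot
#if __arm__
    imp |= 1;                                          // Thumb function pointers are odd
#endif
    return imp;
}

Because the pair is only guaranteed process-page alignment, the trampoline must subtract PAGE_MAX_SIZE rather than mask with PAGE_MAX_MASK, exactly as the comment warns.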
- uint8_t trampolines[PAGE_SIZE]; - + // uint8_t trampolines[PAGE_MAX_SIZE]; + // Per-trampoline block data format: - // initial value is 0 while page pair is filled sequentially (last slot is LAST_SLOT_MARKER to indicate end of page) + // initial value is 0 while page data is filled sequentially // when filled, value is reference to Block_copy()d block - // when empty, value is index of next available slot OR LAST_SLOT_MARKER - -} TrampolineBlockPagePair; - -// two sets of trampoline page pairs; one for stack returns and one for register returns -static TrampolineBlockPagePair *headPagePairs[2]; - -#pragma mark Utility Functions -static inline uint32_t _headerSize() { - uint32_t headerSize = (uint32_t) (a1a2_firsttramp() - a1a2_tramphead()); - - // make sure stret and non-stret sizes match - assert(a2a3_firsttramp() - a2a3_tramphead() == headerSize); + // when empty, value is index of next available slot OR 0 if never used yet + + union Payload { + id block; + uintptr_t nextAvailable; // free list + }; + + static uintptr_t headerSize() { + return (uintptr_t) (a1a2_firsttramp() - a1a2_tramphead()); + } + + static uintptr_t slotSize() { + return 8; + } - return headerSize; -} + static uintptr_t startIndex() { + // headerSize is assumed to be slot-aligned + return headerSize() / slotSize(); + } -static inline uint32_t _slotSize() { - uint32_t slotSize = (uint32_t) (a1a2_nexttramp() - a1a2_firsttramp()); + static uintptr_t endIndex() { + return (uintptr_t)PAGE_MAX_SIZE / slotSize(); + } - // make sure stret and non-stret sizes match - assert(a2a3_nexttramp() - a2a3_firsttramp() == slotSize); + static bool validIndex(uintptr_t index) { + return (index >= startIndex() && index < endIndex()); + } - return slotSize; -} + Payload *payload(uintptr_t index) { + assert(validIndex(index)); + return (Payload *)((char *)this + index*slotSize()); + } -static inline bool trampolinesAreThumb(void) { - extern void *_a1a2_firsttramp; -#if !NDEBUG - extern void *_a1a2_nexttramp; - extern void *_a2a3_firsttramp; - extern void *_a2a3_nexttramp; + IMP trampoline(uintptr_t index) { + assert(validIndex(index)); + char *imp = (char *)this + index*slotSize() + PAGE_MAX_SIZE; +#if __arm__ + imp++; // trampoline is Thumb instructions #endif + return (IMP)imp; + } - // make sure thumb-edness of all trampolines match - assert(((uintptr_t)&_a1a2_firsttramp) % 2 == - ((uintptr_t)&_a2a3_firsttramp) % 2); - assert(((uintptr_t)&_a1a2_firsttramp) % 2 == - ((uintptr_t)&_a1a2_nexttramp) % 2); - assert(((uintptr_t)&_a1a2_firsttramp) % 2 == - ((uintptr_t)&_a2a3_nexttramp) % 2); - - return ((uintptr_t)&_a1a2_firsttramp) % 2; -} - -static inline uint32_t _slotsPerPagePair() { - uint32_t slotSize = _slotSize(); - uint32_t slotsPerPagePair = PAGE_SIZE / slotSize; - return slotsPerPagePair; -} - -static inline uint32_t _paddingSlotCount() { - uint32_t headerSize = _headerSize(); - uint32_t slotSize = _slotSize(); - uint32_t paddingSlots = headerSize / slotSize; - return paddingSlots; -} + uintptr_t indexForTrampoline(IMP tramp) { + uintptr_t tramp0 = (uintptr_t)this + PAGE_MAX_SIZE; + uintptr_t start = tramp0 + headerSize(); + uintptr_t end = tramp0 + PAGE_MAX_SIZE; + uintptr_t address = (uintptr_t)tramp; + if (address >= start && address < end) { + return (uintptr_t)(address - tramp0) / slotSize(); + } + return 0; + } -static inline id *_payloadAddressAtIndex(TrampolineBlockPagePair *pagePair, uint32_t index) { - uint32_t slotSize = _slotSize(); - uintptr_t baseAddress = (uintptr_t) pagePair; - uintptr_t payloadAddress = baseAddress 
+ (slotSize * index); - return (id *)payloadAddress; -} + static void check() { + assert(TrampolineBlockPagePair::slotSize() == 8); + assert(TrampolineBlockPagePair::headerSize() >= sizeof(TrampolineBlockPagePair)); + assert(TrampolineBlockPagePair::headerSize() % TrampolineBlockPagePair::slotSize() == 0); + + // _objc_inform("%p %p %p", a1a2_tramphead(), a1a2_firsttramp(), + // a1a2_trampend()); + assert(a1a2_tramphead() % PAGE_SIZE == 0); // not PAGE_MAX_SIZE + assert(a1a2_tramphead() + PAGE_MAX_SIZE == a1a2_trampend()); +#if SUPPORT_STRET + // _objc_inform("%p %p %p", a2a3_tramphead(), a2a3_firsttramp(), + // a2a3_trampend()); + assert(a2a3_tramphead() % PAGE_SIZE == 0); // not PAGE_MAX_SIZE + assert(a2a3_tramphead() + PAGE_MAX_SIZE == a2a3_trampend()); +#endif + +#if __arm__ + // make sure trampolines are Thumb + extern void *_a1a2_firsttramp; + extern void *_a2a3_firsttramp; + assert(((uintptr_t)&_a1a2_firsttramp) % 2 == 1); + assert(((uintptr_t)&_a2a3_firsttramp) % 2 == 1); +#endif + } -static inline IMP _trampolineAddressAtIndex(TrampolineBlockPagePair *pagePair, uint32_t index) { - uint32_t slotSize = _slotSize(); - uintptr_t baseAddress = (uintptr_t) &(pagePair->trampolines); - uintptr_t trampolineAddress = baseAddress + (slotSize * index); +}; -#if defined(__arm__) - if (trampolinesAreThumb()) trampolineAddress++; -#endif +// two sets of trampoline pages; one for stack returns and one for register returns +static TrampolineBlockPagePair *headPagePairs[ArgumentModeCount]; - return (IMP)trampolineAddress; -} +#pragma mark Utility Functions static inline void _lock() { #if __OBJC2__ @@ -189,21 +207,13 @@ static inline void _assert_locked() { } #pragma mark Trampoline Management Functions -static TrampolineBlockPagePair *_allocateTrampolinesAndData(ArgumentMode aMode) { +static TrampolineBlockPagePair *_allocateTrampolinesAndData(ArgumentMode aMode) +{ _assert_locked(); vm_address_t dataAddress; - // make sure certain assumptions are met - assert(sizeof(TrampolineBlockPagePair) == 2*PAGE_SIZE); - assert(_slotSize() == 8); - assert(_headerSize() >= TRAMPOLINE_PAGE_PAIR_HEADER_SIZE); - assert((_headerSize() % _slotSize()) == 0); - - assert(a1a2_tramphead() % PAGE_SIZE == 0); - assert(a1a2_tramphead() + PAGE_SIZE == a1a2_trampend()); - assert(a2a3_tramphead() % PAGE_SIZE == 0); - assert(a2a3_tramphead() + PAGE_SIZE == a2a3_trampend()); + TrampolineBlockPagePair::check(); TrampolineBlockPagePair *headPagePair = headPagePairs[aMode]; @@ -211,18 +221,18 @@ static TrampolineBlockPagePair *_allocateTrampolinesAndData(ArgumentMode aMode) assert(headPagePair->nextAvailablePage == nil); } - int i; - kern_return_t result = KERN_FAILURE; - for(i = 0; i < 5; i++) { - result = vm_allocate(mach_task_self(), &dataAddress, PAGE_SIZE * 2, + kern_return_t result; + for (int i = 0; i < 5; i++) { + result = vm_allocate(mach_task_self(), &dataAddress, + PAGE_MAX_SIZE * 2, TRUE | VM_MAKE_TAG(VM_MEMORY_FOUNDATION)); if (result != KERN_SUCCESS) { mach_error("vm_allocate failed", result); return nil; } - vm_address_t codeAddress = dataAddress + PAGE_SIZE; - result = vm_deallocate(mach_task_self(), codeAddress, PAGE_SIZE); + vm_address_t codeAddress = dataAddress + PAGE_MAX_SIZE; + result = vm_deallocate(mach_task_self(), codeAddress, PAGE_MAX_SIZE); if (result != KERN_SUCCESS) { mach_error("vm_deallocate failed", result); return nil; @@ -231,44 +241,48 @@ static TrampolineBlockPagePair *_allocateTrampolinesAndData(ArgumentMode aMode) uintptr_t codePage; switch(aMode) { case ReturnValueInRegisterArgumentMode: - 
codePage = a1a2_firsttramp() & ~(PAGE_MASK); + codePage = a1a2_tramphead(); break; +#if SUPPORT_STRET case ReturnValueOnStackArgumentMode: - codePage = a2a3_firsttramp() & ~(PAGE_MASK); + codePage = a2a3_tramphead(); break; +#endif default: _objc_fatal("unknown return mode %d", (int)aMode); break; } vm_prot_t currentProtection, maxProtection; - result = vm_remap(mach_task_self(), &codeAddress, PAGE_SIZE, 0, FALSE, mach_task_self(), - codePage, TRUE, ¤tProtection, &maxProtection, VM_INHERIT_SHARE); + result = vm_remap(mach_task_self(), &codeAddress, PAGE_MAX_SIZE, + 0, FALSE, mach_task_self(), codePage, TRUE, + ¤tProtection, &maxProtection, VM_INHERIT_SHARE); if (result != KERN_SUCCESS) { - result = vm_deallocate(mach_task_self(), dataAddress, PAGE_SIZE); + result = vm_deallocate(mach_task_self(), + dataAddress, PAGE_MAX_SIZE); if (result != KERN_SUCCESS) { mach_error("vm_deallocate for retry failed.", result); return nil; } - } else + } else { break; + } } - if (result != KERN_SUCCESS) + if (result != KERN_SUCCESS) { return nil; + } TrampolineBlockPagePair *pagePair = (TrampolineBlockPagePair *) dataAddress; - pagePair->nextAvailable = _paddingSlotCount(); + pagePair->nextAvailable = pagePair->startIndex(); pagePair->nextPagePair = nil; pagePair->nextAvailablePage = nil; - id *lastPageBlockPtr = _payloadAddressAtIndex(pagePair, _slotsPerPagePair() - 1); - *lastPageBlockPtr = (id)(uintptr_t) LAST_SLOT_MARKER; if (headPagePair) { - TrampolineBlockPagePair *lastPage = headPagePair; - while(lastPage->nextPagePair) - lastPage = lastPage->nextPagePair; + TrampolineBlockPagePair *lastPagePair = headPagePair; + while(lastPagePair->nextPagePair) + lastPagePair = lastPagePair->nextPagePair; - lastPage->nextPagePair = pagePair; + lastPagePair->nextPagePair = pagePair; headPagePairs[aMode]->nextAvailablePage = pagePair; } else { headPagePairs[aMode] = pagePair; @@ -277,7 +291,9 @@ static TrampolineBlockPagePair *_allocateTrampolinesAndData(ArgumentMode aMode) return pagePair; } -static TrampolineBlockPagePair *_getOrAllocatePagePairWithNextAvailable(ArgumentMode aMode) { +static TrampolineBlockPagePair * +_getOrAllocatePagePairWithNextAvailable(ArgumentMode aMode) +{ _assert_locked(); TrampolineBlockPagePair *headPagePair = headPagePairs[aMode]; @@ -285,7 +301,8 @@ static TrampolineBlockPagePair *_getOrAllocatePagePairWithNextAvailable(Argument if (!headPagePair) return _allocateTrampolinesAndData(aMode); - if (headPagePair->nextAvailable) // make sure head page is filled first + // make sure head page is filled first + if (headPagePair->nextAvailable != headPagePair->endIndex()) return headPagePair; if (headPagePair->nextAvailablePage) // check if there is a page w/a hole @@ -294,162 +311,161 @@ static TrampolineBlockPagePair *_getOrAllocatePagePairWithNextAvailable(Argument return _allocateTrampolinesAndData(aMode); // tack on a new one } -static TrampolineBlockPagePair *_pagePairAndIndexContainingIMP(IMP anImp, uint32_t *outIndex, TrampolineBlockPagePair **outHeadPagePair) { +static TrampolineBlockPagePair * +_pageAndIndexContainingIMP(IMP anImp, uintptr_t *outIndex, + TrampolineBlockPagePair **outHeadPagePair) +{ _assert_locked(); - uintptr_t impValue = (uintptr_t) anImp; - uint32_t i; - - for(i = 0; i < ArgumentModeMax; i++) { - TrampolineBlockPagePair *pagePair = headPagePairs[i]; - - while(pagePair) { - uintptr_t startOfTrampolines = (uintptr_t) &(pagePair->trampolines); - uintptr_t endOfTrampolines = ((uintptr_t) startOfTrampolines) + PAGE_SIZE; - - if ( (impValue >=startOfTrampolines) && 
(impValue <= endOfTrampolines) ) { - if (outIndex) { - *outIndex = (uint32_t) ((impValue - startOfTrampolines) / SLOT_SIZE); - } - if (outHeadPagePair) { - *outHeadPagePair = headPagePairs[i]; - } + for (int arg = 0; arg < ArgumentModeCount; arg++) { + for (TrampolineBlockPagePair *pagePair = headPagePairs[arg]; + pagePair; + pagePair = pagePair->nextPagePair) + { + uintptr_t index = pagePair->indexForTrampoline(anImp); + if (index) { + if (outIndex) *outIndex = index; + if (outHeadPagePair) *outHeadPagePair = headPagePairs[arg]; return pagePair; } - - pagePair = pagePair->nextPagePair; } } return nil; } + +static ArgumentMode +_argumentModeForBlock(id block) +{ + ArgumentMode aMode = ReturnValueInRegisterArgumentMode; + +#if SUPPORT_STRET + if (_Block_has_signature(block) && _Block_use_stret(block)) + aMode = ReturnValueOnStackArgumentMode; +#else + assert(! (_Block_has_signature(block) && _Block_use_stret(block))); +#endif + + return aMode; +} + + // `block` must already have been copied -static IMP _imp_implementationWithBlockNoCopy(ArgumentMode aMode, id block) +IMP +_imp_implementationWithBlockNoCopy(id block) { _assert_locked(); - TrampolineBlockPagePair *pagePair = _getOrAllocatePagePairWithNextAvailable(aMode); + ArgumentMode aMode = _argumentModeForBlock(block); + + TrampolineBlockPagePair *pagePair = + _getOrAllocatePagePairWithNextAvailable(aMode); if (!headPagePairs[aMode]) headPagePairs[aMode] = pagePair; - uint32_t index = pagePair->nextAvailable; - id *payloadAddress = _payloadAddressAtIndex(pagePair, index); - assert((index < _slotsPerPagePair()) || (index == LAST_SLOT_MARKER)); - - uint32_t nextAvailableIndex = (uint32_t) *((uintptr_t *) payloadAddress); - if (nextAvailableIndex == 0) - // first time through, slots are filled with zeros, fill sequentially - pagePair->nextAvailable = index + 1; - else if (nextAvailableIndex == LAST_SLOT_MARKER) { - // last slot is filled with this as marker - // page now full, remove from available page linked list - pagePair->nextAvailable = 0; - TrampolineBlockPagePair *iteratorPair = headPagePairs[aMode]; - while(iteratorPair && (iteratorPair->nextAvailablePage != pagePair)) - iteratorPair = iteratorPair->nextAvailablePage; - if (iteratorPair) { - iteratorPair->nextAvailablePage = pagePair->nextAvailablePage; + uintptr_t index = pagePair->nextAvailable; + assert(index >= pagePair->startIndex() && index < pagePair->endIndex()); + TrampolineBlockPagePair::Payload *payload = pagePair->payload(index); + + uintptr_t nextAvailableIndex = payload->nextAvailable; + if (nextAvailableIndex == 0) { + // First time through (unused slots are zero). Fill sequentially. + // If the page is now full this will now be endIndex(), handled below. 
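// Illustrative sketch (not part of the patch): the slot payload scheme used
// above (0 means "never used, keep filling sequentially"; a small integer is
// a free-list link written by imp_removeBlock() further below; anything else
// is the Block_copy()'d block), modeled with a plain array. All names and
// sizes here are hypothetical.
#include <stdint.h>

#define SKETCH_START 1            /* like startIndex(): index 0 never names a slot */
#define SKETCH_END   16           /* like endIndex() */

typedef union { void *block; uintptr_t nextAvailable; } SketchSlot;

static SketchSlot sketchSlots[SKETCH_END];      /* zero-filled, like a fresh page */
static uintptr_t sketchNextAvailable = SKETCH_START;

/* Allocate: reuse a freed slot if its link is set, else take the next
   never-used slot. Returns SKETCH_END when the page is full. */
static uintptr_t sketch_alloc(void *block)
{
    if (sketchNextAvailable == SKETCH_END) return SKETCH_END;   /* full */
    uintptr_t index = sketchNextAvailable;
    uintptr_t link = sketchSlots[index].nextAvailable;
    sketchNextAvailable = link ? link : index + 1;  /* free list, else wilderness */
    sketchSlots[index].block = block;
    return index;
}

/* Free: push the slot onto the LIFO free list, as imp_removeBlock() does. */
static void sketch_free(uintptr_t index)
{
    sketchSlots[index].nextAvailable = sketchNextAvailable;
    sketchNextAvailable = index;
}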
+ nextAvailableIndex = index + 1; + } + pagePair->nextAvailable = nextAvailableIndex; + if (nextAvailableIndex == pagePair->endIndex()) { + // PagePair is now full (free list or wilderness exhausted) + // Remove from available page linked list + TrampolineBlockPagePair *iterator = headPagePairs[aMode]; + while(iterator && (iterator->nextAvailablePage != pagePair)) { + iterator = iterator->nextAvailablePage; + } + if (iterator) { + iterator->nextAvailablePage = pagePair->nextAvailablePage; pagePair->nextAvailablePage = nil; } - } else { - // empty slot at index contains pointer to next available index - pagePair->nextAvailable = nextAvailableIndex; } - *payloadAddress = block; - IMP trampoline = _trampolineAddressAtIndex(pagePair, index); - - return trampoline; + payload->block = block; + return pagePair->trampoline(index); } -static ArgumentMode _argumentModeForBlock(id block) { - ArgumentMode aMode = ReturnValueInRegisterArgumentMode; - - if (_Block_has_signature(block) && _Block_use_stret(block)) - aMode = ReturnValueOnStackArgumentMode; - - return aMode; -} #pragma mark Public API IMP imp_implementationWithBlock(id block) { block = Block_copy(block); _lock(); - IMP returnIMP = _imp_implementationWithBlockNoCopy(_argumentModeForBlock(block), block); + IMP returnIMP = _imp_implementationWithBlockNoCopy(block); _unlock(); return returnIMP; } id imp_getBlock(IMP anImp) { - uint32_t index; + uintptr_t index; TrampolineBlockPagePair *pagePair; if (!anImp) return nil; _lock(); - pagePair = _pagePairAndIndexContainingIMP(anImp, &index, nil); + pagePair = _pageAndIndexContainingIMP(anImp, &index, nil); if (!pagePair) { _unlock(); return nil; } + + TrampolineBlockPagePair::Payload *payload = pagePair->payload(index); - id potentialBlock = *_payloadAddressAtIndex(pagePair, index); - - if ((uintptr_t) potentialBlock == (uintptr_t) LAST_SLOT_MARKER) { - _unlock(); - return nil; - } - - if ((uintptr_t) potentialBlock < (uintptr_t) _slotsPerPagePair()) { + if (payload->nextAvailable <= TrampolineBlockPagePair::endIndex()) { + // unallocated _unlock(); return nil; } _unlock(); - return potentialBlock; + return payload->block; } BOOL imp_removeBlock(IMP anImp) { TrampolineBlockPagePair *pagePair; TrampolineBlockPagePair *headPagePair; - uint32_t index; + uintptr_t index; if (!anImp) return NO; _lock(); - pagePair = _pagePairAndIndexContainingIMP(anImp, &index, &headPagePair); + pagePair = _pageAndIndexContainingIMP(anImp, &index, &headPagePair); if (!pagePair) { _unlock(); return NO; } - - id *payloadAddress = _payloadAddressAtIndex(pagePair, index); - id block = *payloadAddress; + + TrampolineBlockPagePair::Payload *payload = pagePair->payload(index); + id block = payload->block; // block is released below - if (pagePair->nextAvailable) { - *payloadAddress = (id) (uintptr_t) pagePair->nextAvailable; - pagePair->nextAvailable = index; - } else { - *payloadAddress = (id) (uintptr_t) LAST_SLOT_MARKER; // nada after this one is used - pagePair->nextAvailable = index; - } + payload->nextAvailable = pagePair->nextAvailable; + pagePair->nextAvailable = index; // make sure this page is on available linked list TrampolineBlockPagePair *pagePairIterator = headPagePair; - // see if pagePair is the next available page for any existing pages - while(pagePairIterator->nextAvailablePage && (pagePairIterator->nextAvailablePage != pagePair)) + // see if page is the next available page for any existing pages + while (pagePairIterator->nextAvailablePage && + pagePairIterator->nextAvailablePage != pagePair) + { 
pagePairIterator = pagePairIterator->nextAvailablePage; + } - if (! pagePairIterator->nextAvailablePage) { // if iteration stopped because nextAvail was nil + if (! pagePairIterator->nextAvailablePage) { + // if iteration stopped because nextAvail was nil // add to end of list. pagePairIterator->nextAvailablePage = pagePair; pagePair->nextAvailablePage = nil; diff --git a/runtime/objc-cache-old.mm b/runtime/objc-cache-old.mm index d86e5f6..69cfe07 100644 --- a/runtime/objc-cache-old.mm +++ b/runtime/objc-cache-old.mm @@ -1285,16 +1285,16 @@ static void _cache_print(Cache cache) void _class_printMethodCaches(Class cls) { if (_cache_isEmpty(cls->cache)) { - printf("no instance-method cache for class %s\n", cls->getName()); + printf("no instance-method cache for class %s\n",cls->nameForLogging()); } else { - printf("instance-method cache for class %s:\n", cls->getName()); + printf("instance-method cache for class %s:\n", cls->nameForLogging()); _cache_print(cls->cache); } if (_cache_isEmpty(cls->ISA()->cache)) { - printf("no class-method cache for class %s\n", cls->getName()); + printf("no class-method cache for class %s\n", cls->nameForLogging()); } else { - printf ("class-method cache for class %s:\n", cls->getName()); + printf ("class-method cache for class %s:\n", cls->nameForLogging()); _cache_print(cls->ISA()->cache); } } @@ -1356,7 +1356,7 @@ void _class_printDuplicateCacheEntries(BOOL detail) (char *) cache->buckets[index2]->name)) == 0) { if (detail) - printf ("%s %s\n", cls->getName(), sel_getName(cache->buckets[index1]->name)); + printf ("%s %s\n", cls->nameForLogging(), sel_getName(cache->buckets[index1]->name)); duplicates += 1; break; } diff --git a/runtime/objc-cache.mm b/runtime/objc-cache.mm index f0751f9..71a6b1c 100644 --- a/runtime/objc-cache.mm +++ b/runtime/objc-cache.mm @@ -62,7 +62,6 @@ * Cache readers (PC-checked by collecting_in_critical()) * objc_msgSend* * cache_getImp - * cache_getMethod * * Cache writers (hold cacheUpdateLock while reading or writing; not PC-checked) * cache_fill (acquires lock) @@ -79,16 +78,6 @@ * _class_printDuplicateCacheEntries * _class_printMethodCacheStatistics * - * _class_lookupMethodAndLoadCache is a special case. It may read a - * method triplet out of one cache and store it in another cache. This - * is unsafe if the method triplet is a forward:: entry, because the - * triplet itself could be freed unless _class_lookupMethodAndLoadCache - * were PC-checked or used a lock. Additionally, storing the method - * triplet in both caches would result in double-freeing if both caches - * were flushed or expanded. The solution is for cache_getMethod to - * ignore all entries whose implementation is _objc_msgForward_impcache, - * so _class_lookupMethodAndLoadCache cannot look at a forward:: entry - * unsafely or place it in multiple caches. ***********************************************************************/ @@ -149,7 +138,7 @@ asm("\n .section __TEXT,__const" ); -#if __i386__ || __arm__ +#if __arm__ // objc_msgSend has few registers available. // Cache scan increments and wraps at special end-marking bucket. #define CACHE_END_MARKER 1 @@ -157,7 +146,7 @@ static inline mask_t cache_next(mask_t i, mask_t mask) { return (i+1) & mask; } -#elif __x86_64__ +#elif __i386__ || __x86_64__ || __arm64__ // objc_msgSend has lots of registers and/or memory operands available. // Cache scan decrements. No end marker needed. 
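// Illustrative sketch (not part of the patch): the two cache scan orders the
// comments above describe. Only the incrementing helper appears in this hunk;
// the decrementing variant below is an assumption about what the no-marker
// scan looks like. The hash mirrors the new cache_hash() for context.
#include <stdint.h>

typedef uint32_t sketch_mask_t;

/* CACHE_END_MARKER == 1 (__arm__): walk forward, wrap at the mask, and rely
   on a sentinel bucket at the end of the array. */
static inline sketch_mask_t sketch_next_incr(sketch_mask_t i, sketch_mask_t mask)
{
    return (i + 1) & mask;
}

/* CACHE_END_MARKER == 0 (i386/x86_64/arm64): walk backward toward 0, then
   restart at the top; no sentinel bucket is needed. (Assumed form.) */
static inline sketch_mask_t sketch_next_decr(sketch_mask_t i, sketch_mask_t mask)
{
    return i ? i - 1 : mask;
}

/* The new hash: the selector bits are masked directly, with no MASK_SHIFT. */
static inline sketch_mask_t sketch_hash(uintptr_t key, sketch_mask_t mask)
{
    return (sketch_mask_t)(key & mask);
}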
#define CACHE_END_MARKER 0 @@ -185,34 +174,62 @@ static inline mask_t cache_next(mask_t i, mask_t mask) { "cpuid" \ : "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory" \ ); } while(0) + #elif __i386__ #define mega_barrier() \ do { unsigned long _clbr; __asm__ __volatile__( \ "cpuid" \ : "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory" \ ); } while(0) + #elif __arm__ #define mega_barrier() \ __asm__ __volatile__( \ "dsb ish" \ : : : "memory") + +#elif __arm64__ +// Use atomic double-word updates instead. +// This requires cache buckets not cross cache line boundaries. +#undef mega_barrier +#define stp(onep, twop, destp) \ + __asm__ ("stp %[one], %[two], [%[dest]]" \ + : "=m" (((uint64_t *)(destp))[0]), \ + "=m" (((uint64_t *)(destp))[1]) \ + : [one] "r" (onep), \ + [two] "r" (twop), \ + [dest] "r" (destp) \ + : /* no clobbers */ \ + ) +#define ldp(onep, twop, srcp) \ + __asm__ ("ldp %[one], %[two], [%[src]]" \ + : [one] "=r" (onep), \ + [two] "=r" (twop), \ + : "m" (((uint64_t *)(srcp))[0]), \ + "m" (((uint64_t *)(srcp))[1]) \ + [src] "r" (srcp) \ + : /* no clobbers */ \ + ) + #else #error unknown architecture #endif +// Class points to cache. SEL is key. Cache buckets store SEL+IMP. +// Caches are never built in the dyld shared cache. + static inline mask_t cache_hash(cache_key_t key, mask_t mask) { - return (mask_t)((key >> MASK_SHIFT) & mask); + return (mask_t)(key & mask); } - -// Class points to cache. Cache buckets store SEL+IMP. cache_t *getCache(Class cls, SEL sel __unused) { assert(cls); return &cls->cache; } + cache_key_t getKey(Class cls __unused, SEL sel) { assert(sel); @@ -220,28 +237,163 @@ cache_key_t getKey(Class cls __unused, SEL sel) } -struct bucket_t { - cache_key_t key; - IMP imp; - void set(cache_key_t newKey, IMP newImp) - { - // objc_msgSend uses key and imp with no locks. - // It is safe for objc_msgSend to see new imp but NULL key - // (It will get a cache miss but not dispatch to the wrong place.) - // It is unsafe for objc_msgSend to see old imp and new key. - // Therefore we write new imp, wait a lot, then write new key. +#if __arm64__ - assert(key == 0 || key == newKey); - - imp = newImp; +void bucket_t::set(cache_key_t newKey, IMP newImp) +{ + assert(_key == 0 || _key == newKey); - if (key != newKey) { - mega_barrier(); - key = newKey; - } + // LDP/STP guarantees that all observers get + // either key/imp or newKey/newImp + stp(newKey, newImp, this); +} + +void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask) +{ + // ensure other threads see buckets contents before buckets pointer + // see Barrier Litmus Tests and Cookbook, + // "Address Dependency with object construction" + __sync_synchronize(); + + // LDP/STP guarantees that all observers get + // old mask/buckets or new mask/buckets + + mask_t newOccupied = 0; + uint64_t mask_and_occupied = + (uint64_t)newMask | ((uint64_t)newOccupied << 32); + stp(newBuckets, mask_and_occupied, this); +} + +// arm64 +#else +// not arm64 + +void bucket_t::set(cache_key_t newKey, IMP newImp) +{ + assert(_key == 0 || _key == newKey); + + // objc_msgSend uses key and imp with no locks. + // It is safe for objc_msgSend to see new imp but NULL key + // (It will get a cache miss but not dispatch to the wrong place.) + // It is unsafe for objc_msgSend to see old imp and new key. + // Therefore we write new imp, wait a lot, then write new key. 
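// Illustrative sketch (not part of the patch): the publication order the
// comment above requires, restated with C11 atomics instead of the runtime's
// mega_barrier(). On arm64 the same guarantee comes from a single stp, which
// updates key and imp as one unit. Types and names here are models, not the
// real bucket_t.
#include <stdatomic.h>
#include <stdint.h>

typedef struct {
    _Atomic uintptr_t key;   /* 0 means empty */
    _Atomic uintptr_t imp;
} sketch_bucket;

/* Writer: store imp first, then publish key with release ordering, so any
   reader that sees the key is guaranteed to see the matching imp. */
static void sketch_bucket_set(sketch_bucket *b, uintptr_t newKey, uintptr_t newImp)
{
    atomic_store_explicit(&b->imp, newImp, memory_order_relaxed);
    atomic_store_explicit(&b->key, newKey, memory_order_release);
}

/* Reader (objc_msgSend's role): read key first, then imp. A zero or
   mismatched key is only a cache miss, never a dispatch to the wrong imp. */
static uintptr_t sketch_bucket_lookup(sketch_bucket *b, uintptr_t key)
{
    uintptr_t k = atomic_load_explicit(&b->key, memory_order_acquire);
    if (k != key) return 0;
    return atomic_load_explicit(&b->imp, memory_order_relaxed);
}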
+ + _imp = newImp; + + if (_key != newKey) { + mega_barrier(); + _key = newKey; } -}; +} + +void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask) +{ + // objc_msgSend uses mask and buckets with no locks. + // It is safe for objc_msgSend to see new buckets but old mask. + // (It will get a cache miss but not overrun the buckets' bounds). + // It is unsafe for objc_msgSend to see old buckets and new mask. + // Therefore we write new buckets, wait a lot, then write new mask. + // objc_msgSend reads mask first, then buckets. + + // ensure other threads see buckets contents before buckets pointer + mega_barrier(); + + _buckets = newBuckets; + + // ensure other threads see new buckets before new mask + mega_barrier(); + + _mask = newMask; + _occupied = 0; +} + +// not arm64 +#endif + +struct bucket_t *cache_t::buckets() +{ + return _buckets; +} + +mask_t cache_t::mask() +{ + return _mask; +} + +mask_t cache_t::occupied() +{ + return _occupied; +} + +void cache_t::incrementOccupied() +{ + _occupied++; +} + +void cache_t::setEmpty() +{ + bzero(this, sizeof(*this)); + _buckets = (bucket_t *)&_objc_empty_cache; +} + + +mask_t cache_t::capacity() +{ + return mask() ? mask()+1 : 0; +} + + +#if CACHE_END_MARKER + +size_t cache_t::bytesForCapacity(uint32_t cap) +{ + // fixme put end marker inline when capacity+1 malloc is inefficient + return sizeof(cache_t) * (cap + 1); +} + +bucket_t *cache_t::endMarker(struct bucket_t *b, uint32_t cap) +{ + // bytesForCapacity() chooses whether the end marker is inline or not + return (bucket_t *)((uintptr_t)b + bytesForCapacity(cap)) - 1; +} + +bucket_t *allocateBuckets(mask_t newCapacity) +{ + // Allocate one extra bucket to mark the end of the list. + // This can't overflow mask_t because newCapacity is a power of 2. + // fixme instead put the end mark inline when +1 is malloc-inefficient + bucket_t *newBuckets = (bucket_t *) + _calloc_internal(cache_t::bytesForCapacity(newCapacity), 1); + + bucket_t *end = cache_t::endMarker(newBuckets, newCapacity); + +#if __arm__ + // End marker's key is 1 and imp points BEFORE the first bucket. + // This saves an instruction in objc_msgSend. + end->setKey((cache_key_t)(uintptr_t)1); + end->setImp((IMP)(newBuckets - 1)); +#else +# error unknown architecture +#endif + + return newBuckets; +} + +#else + +bucket_t *allocateBuckets(mask_t newCapacity) +{ + return (bucket_t *)_calloc_internal(newCapacity, sizeof(bucket_t)); +} + +#endif + + +bool cache_t::canBeFreed() +{ + return buckets() != (bucket_t *)&_objc_empty_cache; +} void cache_t::reallocate(mask_t oldCapacity, mask_t newCapacity) @@ -260,51 +412,22 @@ void cache_t::reallocate(mask_t oldCapacity, mask_t newCapacity) } } } - - // objc_msgSend uses shiftmask and buckets with no locks. - // It is safe for objc_msgSend to see new buckets but old shiftmask. - // (It will get a cache miss but not overrun the buckets' bounds). - // It is unsafe for objc_msgSend to see old buckets and new shiftmask. - // Therefore we write new buckets, wait a lot, then write new shiftmask. - // objc_msgSend reads shiftmask first, then buckets. - bucket_t *oldBuckets = buckets; - -#if CACHE_END_MARKER - // Allocate one extra bucket to mark the end of the list. - // fixme instead put the end mark inline when +1 is malloc-inefficient - bucket_t *newBuckets = - (bucket_t *)_calloc_internal(newCapacity + 1, sizeof(bucket_t)); - - // End marker's key is 1 and imp points to the first bucket. 
- newBuckets[newCapacity].key = (cache_key_t)(uintptr_t)1; -# if __arm__ - // Point before the first bucket instead to save an instruction in msgSend - newBuckets[newCapacity].imp = (IMP)(newBuckets - 1); -# else - newBuckets[newCapacity].imp = (IMP)newBuckets; -# endif -#else - bucket_t *newBuckets = - (bucket_t *)_calloc_internal(newCapacity, sizeof(bucket_t)); -#endif - + bool freeOld = canBeFreed(); + + bucket_t *oldBuckets = buckets(); + bucket_t *newBuckets = allocateBuckets(newCapacity); + // Cache's old contents are not propagated. // This is thought to save cache memory at the cost of extra cache fills. // fixme re-measure this + + assert(newCapacity > 0); + assert((uintptr_t)(mask_t)(newCapacity-1) == newCapacity-1); + + setBucketsAndMask(newBuckets, newCapacity - 1); - // ensure other threads see buckets contents before buckets pointer - mega_barrier(); - - buckets = newBuckets; - - // ensure other threads see new buckets before new shiftmask - mega_barrier(); - - setCapacity(newCapacity); - occupied = 0; - - if (oldCapacity > 0) { + if (freeOld) { cache_collect_free(oldBuckets, oldCapacity * sizeof(bucket_t)); cache_collect(false); } @@ -313,20 +436,18 @@ void cache_t::reallocate(mask_t oldCapacity, mask_t newCapacity) // called by objc_msgSend extern "C" -void objc_msgSend_corrupt_cache_error(id receiver, SEL sel, Class isa, - bucket_t *bucket) +void objc_msgSend_corrupt_cache_error(id receiver, SEL sel, Class isa) { - cache_t::bad_cache(receiver, sel, isa, bucket); + cache_t::bad_cache(receiver, sel, isa); } extern "C" -void cache_getImp_corrupt_cache_error(id receiver, SEL sel, Class isa, - bucket_t *bucket) +void cache_getImp_corrupt_cache_error(id receiver, SEL sel, Class isa) { - cache_t::bad_cache(receiver, sel, isa, bucket); + cache_t::bad_cache(receiver, sel, isa); } -void cache_t::bad_cache(id receiver, SEL sel, Class isa, bucket_t *bucket) +void cache_t::bad_cache(id receiver, SEL sel, Class isa) { // Log in separate steps in case the logging itself causes a crash. _objc_inform_now_and_on_crash @@ -335,18 +456,18 @@ void cache_t::bad_cache(id receiver, SEL sel, Class isa, bucket_t *bucket) cache_t *cache = &isa->cache; _objc_inform_now_and_on_crash ("%s %p, SEL %p, isa %p, cache %p, buckets %p, " - "mask 0x%x, occupied 0x%x, wrap bucket %p", + "mask 0x%x, occupied 0x%x", receiver ? "receiver" : "unused", receiver, - sel, isa, cache, cache->buckets, - cache->shiftmask >> MASK_SHIFT, cache->occupied, bucket); + sel, isa, cache, cache->_buckets, + cache->_mask, cache->_occupied); _objc_inform_now_and_on_crash ("%s %zu bytes, buckets %zu bytes", receiver ? 
"receiver" : "unused", malloc_size(receiver), - malloc_size(cache->buckets)); + malloc_size(cache->_buckets)); _objc_inform_now_and_on_crash ("selector '%s'", sel_getName(sel)); _objc_inform_now_and_on_crash - ("isa '%s'", isa->getName()); + ("isa '%s'", isa->nameForLogging()); _objc_fatal ("Method cache corrupted."); } @@ -354,18 +475,21 @@ void cache_t::bad_cache(id receiver, SEL sel, Class isa, bucket_t *bucket) bucket_t * cache_t::find(cache_key_t k) { + assert(k != 0); + + bucket_t *b = buckets(); mask_t m = mask(); mask_t begin = cache_hash(k, m); mask_t i = begin; do { - if (buckets[i].key == 0 || buckets[i].key == k) { - return &buckets[i]; + if (b[i].key() == 0 || b[i].key() == k) { + return &b[i]; } } while ((i = cache_next(i, m)) != begin); // hack Class cls = (Class)((uintptr_t)this - offsetof(objc_class, cache)); - cache_t::bad_cache(nil, (SEL)k, cls, nil); + cache_t::bad_cache(nil, (SEL)k, cls); } @@ -373,11 +497,12 @@ void cache_t::expand() { mutex_assert_locked(&cacheUpdateLock); - mask_t oldCapacity = capacity(); - mask_t newCapacity = oldCapacity ? oldCapacity*2 : INIT_CACHE_SIZE; + uint32_t oldCapacity = capacity(); + uint32_t newCapacity = oldCapacity ? oldCapacity*2 : INIT_CACHE_SIZE; - if ((((newCapacity-1) << MASK_SHIFT) >> MASK_SHIFT) != newCapacity-1) { - // shiftmask overflow - can't grow further + if ((uint32_t)(mask_t)newCapacity != newCapacity) { + // mask overflow - can't grow further + // fixme this wastes one bit of mask newCapacity = oldCapacity; } @@ -400,7 +525,7 @@ static void cache_fill_nolock(Class cls, SEL sel, IMP imp) cache_key_t key = getKey(cls, sel); // Use the cache as-is if it is less than 3/4 full - mask_t newOccupied = cache->occupied + 1; + mask_t newOccupied = cache->occupied() + 1; if ((newOccupied * 4) <= (cache->mask() + 1) * 3) { // Cache is less than 3/4 full. } else { @@ -412,7 +537,7 @@ static void cache_fill_nolock(Class cls, SEL sel, IMP imp) // There is guaranteed to be an empty slot because the // minimum size is 4 and we resized at 3/4 full. 
bucket_t *bucket = cache->find(key); - if (bucket->key == 0) cache->occupied++; + if (bucket->key() == 0) cache->incrementOccupied(); bucket->set(key, imp); } @@ -438,8 +563,8 @@ static void cache_eraseMethod_nolock(Class cls, SEL sel) cache_key_t key = getKey(cls, sel); bucket_t *bucket = cache->find(key); - if (bucket->key == key) { - bucket->imp = _objc_msgSend_uncached_impcache; + if (bucket->key() == key) { + bucket->setImp(_objc_msgSend_uncached_impcache); } } @@ -450,7 +575,7 @@ void cache_eraseMethods(Class cls, method_list_t *mlist) rwlock_assert_writing(&runtimeLock); mutex_lock(&cacheUpdateLock); - FOREACH_REALIZED_CLASS_AND_SUBCLASS(c, cls, { + foreach_realized_class_and_subclass(cls, ^(Class c){ for (uint32_t m = 0; m < mlist->count; m++) { SEL sel = mlist->get(m).name; cache_eraseMethod_nolock(c, sel); @@ -468,11 +593,11 @@ void cache_eraseImp_nolock(Class cls, SEL sel, IMP imp) cache_t *cache = getCache(cls, sel); - bucket_t *buckets = cache->buckets; + bucket_t *b = cache->buckets(); mask_t count = cache->capacity(); for (mask_t i = 0; i < count; i++) { - if (buckets[i].imp == imp) { - buckets[i].imp = _objc_msgSend_uncached_impcache; + if (b[i].imp() == imp) { + b[i].setImp(_objc_msgSend_uncached_impcache); } } } @@ -493,7 +618,7 @@ void cache_erase_nolock(cache_t *cache) mutex_assert_locked(&cacheUpdateLock); mask_t capacity = cache->capacity(); - if (capacity > 0 && cache->occupied > 0) { + if (capacity > 0 && cache->occupied() > 0) { cache->reallocate(capacity, capacity); } } @@ -532,6 +657,13 @@ static uintptr_t _get_pc_for_thread(thread_t thread) kern_return_t okay = thread_get_state (thread, ARM_THREAD_STATE, (thread_state_t)&state, &count); return (okay == KERN_SUCCESS) ? state.__pc : PC_SENTINEL; } +#elif defined(__arm64__) +{ + arm_thread_state64_t state; + unsigned int count = ARM_THREAD_STATE64_COUNT; + kern_return_t okay = thread_get_state (thread, ARM_THREAD_STATE64, (thread_state_t)&state, &count); + return (okay == KERN_SUCCESS) ? state.__pc : PC_SENTINEL; +} #else { #error _get_pc_for_thread () not implemented for this architecture diff --git a/runtime/objc-class-old.mm b/runtime/objc-class-old.mm index dc061a6..1ddec04 100644 --- a/runtime/objc-class-old.mm +++ b/runtime/objc-class-old.mm @@ -344,8 +344,8 @@ log_and_fill_cache(Class cls, Class implementer, Method meth, SEL sel) #if SUPPORT_MESSAGE_LOGGING if (objcMsgLogEnabled) { bool cacheIt = logMessageSend(implementer->isMetaClass(), - cls->getName(), - implementer->getName(), + cls->nameForLogging(), + implementer->nameForLogging(), sel); if (!cacheIt) return; } @@ -745,6 +745,16 @@ int class_getVersion(Class cls) } +/*********************************************************************** +* class_getName. +**********************************************************************/ +const char *class_getName(Class cls) +{ + if (!cls) return "nil"; + else return cls->demangledName(); +} + + /*********************************************************************** * _class_getNonMetaClass. * Return the ordinary class for this class or metaclass. @@ -2408,6 +2418,31 @@ void objc_disposeClassPair(Class cls) } +/*********************************************************************** +* objc_constructInstance +* Creates an instance of `cls` at the location pointed to by `bytes`. +* `bytes` must point to at least class_getInstanceSize(cls) bytes of +* well-aligned zero-filled memory. +* The new object's isa is set. Any C++ constructors are called. +* Returns `bytes` if successful. 
Returns nil if `cls` or `bytes` is +* nil, or if C++ constructors fail. +**********************************************************************/ +id +objc_constructInstance(Class cls, void *bytes) +{ + if (!cls || !bytes) return nil; + + id obj = (id)bytes; + + obj->initIsa(cls); + + if (cls->hasCxxCtor()) { + return object_cxxConstructFromClass(obj, cls); + } else { + return obj; + } +} + /*********************************************************************** * _class_createInstanceFromZone. Allocate an instance of the @@ -2418,7 +2453,7 @@ void objc_disposeClassPair(Class cls) id _class_createInstanceFromZone(Class cls, size_t extraBytes, void *zone) { - id obj; + void *bytes; size_t size; // Can't create something for nothing @@ -2432,24 +2467,17 @@ _class_createInstanceFromZone(Class cls, size_t extraBytes, void *zone) #if SUPPORT_GC if (UseGC) { - obj = (id)auto_zone_allocate_object(gc_zone, size, - AUTO_OBJECT_SCANNED, 0, 1); + bytes = auto_zone_allocate_object(gc_zone, size, + AUTO_OBJECT_SCANNED, 0, 1); } else #endif if (zone) { - obj = (id)malloc_zone_calloc((malloc_zone_t *)zone, 1, size); + bytes = malloc_zone_calloc((malloc_zone_t *)zone, 1, size); } else { - obj = (id)calloc(1, size); - } - if (!obj) return nil; - - obj->initIsa(cls); - - if (cls->hasCxxCtor()) { - obj = _objc_constructOrFree(cls, obj); + bytes = calloc(1, size); } - return obj; + return objc_constructInstance(cls, bytes); } @@ -2654,6 +2682,18 @@ id object_reallocFromZone(id obj, size_t nBytes, void *z) } +/*********************************************************************** +* object_getIndexedIvars. +**********************************************************************/ +void *object_getIndexedIvars(id obj) +{ + // ivars are tacked onto the end of the object + if (!obj) return nil; + if (obj->isTaggedPointer()) return nil; + return ((char *) obj) + obj->ISA()->alignedInstanceSize(); +} + + // ProKit SPI Class class_setSuperclass(Class cls, Class newSuper) { diff --git a/runtime/objc-class.mm b/runtime/objc-class.mm index bfd105d..aa09601 100644 --- a/runtime/objc-class.mm +++ b/runtime/objc-class.mm @@ -206,6 +206,16 @@ Class object_setClass(id obj, Class cls) } +/*********************************************************************** +* object_isClass. +**********************************************************************/ +BOOL object_isClass(id obj) +{ + if (!obj) return NO; + return obj->isClass(); +} + + /*********************************************************************** * object_getClassName. **********************************************************************/ @@ -228,23 +238,13 @@ IMP object_getMethodImplementation(id obj, SEL name) /*********************************************************************** * object_getMethodImplementation_stret. **********************************************************************/ +#if SUPPORT_STRET IMP object_getMethodImplementation_stret(id obj, SEL name) { Class cls = (obj ? obj->getIsa() : nil); return class_getMethodImplementation_stret(cls, name); } - - -/*********************************************************************** -* object_getIndexedIvars. 
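// Illustrative sketch (not part of the patch): using the contract documented
// above for objc_constructInstance() together with object_getIndexedIvars(),
// via the public <objc/runtime.h> API. The helper names and the extra-bytes
// pattern are hypothetical.
#include <objc/runtime.h>
#include <stdlib.h>

/* Construct an instance of cls in caller-provided memory, reserving `extra`
   indexed-ivar bytes after the instance proper. Returns nil (and frees the
   buffer) if a C++ constructor throws. */
static id sketch_construct_with_extra(Class cls, size_t extra)
{
    size_t size = class_getInstanceSize(cls) + extra;
    void *bytes = calloc(1, size);          /* well-aligned, zero-filled */
    if (!bytes) return nil;
    id obj = objc_constructInstance(cls, bytes);
    if (!obj) free(bytes);
    /* object_getIndexedIvars(obj) now points at the `extra` bytes. */
    return obj;
}

/* Teardown mirrors construction: run C++ destructors and clear associated
   references with objc_destructInstance(), then release the memory. */
static void sketch_destroy_constructed(id obj)
{
    if (obj) free(objc_destructInstance(obj));
}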
-**********************************************************************/ -void *object_getIndexedIvars(id obj) -{ - // ivars are tacked onto the end of the object - if (!obj) return nil; - if (obj->isTaggedPointer()) return nil; - return ((char *) obj) + obj->ISA()->alignedInstanceSize(); -} +#endif Ivar object_setInstanceVariable(id obj, const char *name, void *value) @@ -392,7 +392,7 @@ static void object_cxxDestructFromClass(id obj, Class cls) if (dtor != (void(*)(id))_objc_msgForward_impcache) { if (PrintCxxCtors) { _objc_inform("CXX: calling C++ destructors for class %s", - cls->getName()); + cls->nameForLogging()); } (*dtor)(obj); } @@ -418,8 +418,9 @@ void object_cxxDestruct(id obj) * Recursively call C++ constructors on obj, starting with base class's * ctor method (if any) followed by subclasses' ctors (if any), stopping * at cls's ctor (if any). -* Returns YES if construction succeeded. -* Returns NO if some constructor threw an exception. The exception is +* Does not check cls->hasCxxCtor(). The caller should preflight that. +* Returns self if construction succeeded. +* Returns nil if some constructor threw an exception. The exception is * caught and discarded. Any partial construction is destructed. * Uses methodListLock and cacheUpdateLock. The caller must hold neither. * @@ -427,52 +428,37 @@ void object_cxxDestruct(id obj) * return self: construction succeeded * return nil: construction failed because a C++ constructor threw an exception **********************************************************************/ -static BOOL object_cxxConstructFromClass(id obj, Class cls) +id +object_cxxConstructFromClass(id obj, Class cls) { + assert(cls->hasCxxCtor()); // required for performance, not correctness + id (*ctor)(id); Class supercls; - // Stop if neither this class nor any superclass has ctors. - if (!cls->hasCxxCtor()) return YES; // no ctor - ok - supercls = cls->superclass; // Call superclasses' ctors first, if any. - if (supercls) { - BOOL ok = object_cxxConstructFromClass(obj, supercls); - if (!ok) return NO; // some superclass's ctor failed - give up + if (supercls && supercls->hasCxxCtor()) { + bool ok = object_cxxConstructFromClass(obj, supercls); + if (!ok) return nil; // some superclass's ctor failed - give up } // Find this class's ctor, if any. ctor = (id(*)(id))lookupMethodInClassAndLoadCache(cls, SEL_cxx_construct); - if (ctor == (id(*)(id))_objc_msgForward_impcache) return YES; // no ctor - ok + if (ctor == (id(*)(id))_objc_msgForward_impcache) return obj; // no ctor - ok // Call this class's ctor. if (PrintCxxCtors) { - _objc_inform("CXX: calling C++ constructors for class %s", cls->getName()); + _objc_inform("CXX: calling C++ constructors for class %s", + cls->nameForLogging()); } - if ((*ctor)(obj)) return YES; // ctor called and succeeded - ok + if ((*ctor)(obj)) return obj; // ctor called and succeeded - ok // This class's ctor was called and failed. // Call superclasses's dtors to clean up. if (supercls) object_cxxDestructFromClass(obj, supercls); - return NO; -} - - -/*********************************************************************** -* object_cxxConstructFromClass. -* Call C++ constructors on obj, if any. -* Returns YES if construction succeeded. -* Returns NO if some constructor threw an exception. The exception is -* caught and discarded. Any partial construction is destructed. -* Uses methodListLock and cacheUpdateLock. The caller must hold neither. 
-**********************************************************************/ -BOOL object_cxxConstruct(id obj) -{ - if (!obj) return YES; - if (obj->isTaggedPointer()) return YES; - return object_cxxConstructFromClass(obj, obj->ISA()); + return nil; } @@ -507,15 +493,15 @@ static void _class_resolveClassMethod(Class cls, SEL sel, id inst) _objc_inform("RESOLVE: method %c[%s %s] " "dynamically resolved to %p", cls->isMetaClass() ? '+' : '-', - cls->getName(), sel_getName(sel), imp); + cls->nameForLogging(), sel_getName(sel), imp); } else { // Method resolver didn't add anything? _objc_inform("RESOLVE: +[%s resolveClassMethod:%s] returned YES" ", but no new implementation of %c[%s %s] was found", - cls->getName(), sel_getName(sel), + cls->nameForLogging(), sel_getName(sel), cls->isMetaClass() ? '+' : '-', - cls->getName(), sel_getName(sel)); + cls->nameForLogging(), sel_getName(sel)); } } } @@ -549,15 +535,15 @@ static void _class_resolveInstanceMethod(Class cls, SEL sel, id inst) _objc_inform("RESOLVE: method %c[%s %s] " "dynamically resolved to %p", cls->isMetaClass() ? '+' : '-', - cls->getName(), sel_getName(sel), imp); + cls->nameForLogging(), sel_getName(sel), imp); } else { // Method resolver didn't add anything? _objc_inform("RESOLVE: +[%s resolveInstanceMethod:%s] returned YES" ", but no new implementation of %c[%s %s] was found", - cls->getName(), sel_getName(sel), + cls->nameForLogging(), sel_getName(sel), cls->isMetaClass() ? '+' : '-', - cls->getName(), sel_getName(sel)); + cls->nameForLogging(), sel_getName(sel)); } } } @@ -644,6 +630,14 @@ BOOL class_respondsToMethod(Class cls, SEL sel) BOOL class_respondsToSelector(Class cls, SEL sel) +{ + return class_respondsToSelector_inst(cls, sel, nil); +} + + +// inst is an instance of cls or a subclass thereof, or nil if none is known. +// Non-nil inst is faster in some cases. See lookUpImpOrForward() for details. +BOOL class_respondsToSelector_inst(Class cls, SEL sel, id inst) { IMP imp; @@ -651,7 +645,7 @@ BOOL class_respondsToSelector(Class cls, SEL sel) // Avoids +initialize because it historically did so. // We're not returning a callable IMP anyway. - imp = lookUpImpOrNil(cls, sel, nil, + imp = lookUpImpOrNil(cls, sel, inst, NO/*initialize*/, YES/*cache*/, YES/*resolver*/); return imp ? YES : NO; } @@ -691,7 +685,7 @@ IMP class_getMethodImplementation(Class cls, SEL sel) return imp; } - +#if SUPPORT_STRET IMP class_getMethodImplementation_stret(Class cls, SEL sel) { IMP imp = class_getMethodImplementation(cls, sel); @@ -702,6 +696,7 @@ IMP class_getMethodImplementation_stret(Class cls, SEL sel) } return imp; } +#endif /*********************************************************************** @@ -856,13 +851,6 @@ Class _calloc_class(size_t size) return (Class) _calloc_internal(1, size); } - -const char *class_getName(Class cls) -{ - if (!cls) return "nil"; - else return cls->getName(); -} - Class class_getSuperclass(Class cls) { if (!cls) return nil; @@ -920,45 +908,18 @@ char * method_copyArgumentType(Method m, unsigned int index) /*********************************************************************** -* objc_constructInstance -* Creates an instance of `cls` at the location pointed to by `bytes`. -* `bytes` must point to at least class_getInstanceSize(cls) bytes of -* well-aligned zero-filled memory. -* The new object's isa is set. Any C++ constructors are called. -* Returns `bytes` if successful. Returns nil if `cls` or `bytes` is -* nil, or if C++ constructors fail. 
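// Illustrative sketch (not part of the patch): the construct-supers-first,
// roll-back-on-failure shape of object_cxxConstructFromClass() and
// object_cxxDestructFromClass() above, restated over a flat array of
// "levels" (root class first). Names are hypothetical.
#include <stdbool.h>
#include <stddef.h>

typedef struct {
    bool (*ctor)(void *obj);    /* returns false if construction failed */
    void (*dtor)(void *obj);
} SketchLevel;

/* Run constructors root-first. If level i fails, destruct levels i-1..0 in
   reverse order and report failure, leaving the object fully unconstructed. */
static bool sketch_construct_chain(void *obj, const SketchLevel *levels, size_t count)
{
    for (size_t i = 0; i < count; i++) {
        if (levels[i].ctor && !levels[i].ctor(obj)) {
            while (i-- > 0) {
                if (levels[i].dtor) levels[i].dtor(obj);
            }
            return false;
        }
    }
    return true;
}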
-* Note: class_createInstance() and class_createInstances() preflight this. +* _objc_constructOrFree +* Call C++ constructors, and free() if they fail. +* bytes->isa must already be set. +* cls must have cxx constructors. +* Returns the object, or nil. **********************************************************************/ -static id -_objc_constructInstance(Class cls, void *bytes) -{ - id obj = (id)bytes; - - // Set the isa pointer - obj->initIsa(cls); - - // Call C++ constructors, if any. - if (!object_cxxConstruct(obj)) { - // Some C++ constructor threw an exception. - return nil; - } - - return obj; -} - - -id -objc_constructInstance(Class cls, void *bytes) -{ - if (!cls || !bytes) return nil; - return _objc_constructInstance(cls, bytes); -} - - id -_objc_constructOrFree(Class cls, void *bytes) +_objc_constructOrFree(id bytes, Class cls) { - id obj = _objc_constructInstance(cls, bytes); + assert(cls->hasCxxCtor()); // for performance, not correctness + + id obj = object_cxxConstructFromClass(bytes, cls); if (!obj) { #if SUPPORT_GC if (UseGC) { @@ -986,9 +947,7 @@ _class_createInstancesFromZone(Class cls, size_t extraBytes, void *zone, unsigned num_allocated; if (!cls) return 0; - size_t size = cls->alignedInstanceSize() + extraBytes; - // CF requires all objects be at least 16 bytes. - if (size < 16) size = 16; + size_t size = cls->instanceSize(extraBytes); #if SUPPORT_GC if (UseGC) { @@ -1014,8 +973,8 @@ _class_createInstancesFromZone(Class cls, size_t extraBytes, void *zone, bool ctor = cls->hasCxxCtor(); for (i = 0; i < num_allocated; i++) { id obj = results[i]; - if (ctor) obj = _objc_constructOrFree(cls, obj); - else if (obj) obj->initIsa(cls); + obj->initIsa(cls); // fixme allow indexed + if (ctor) obj = _objc_constructOrFree(obj, cls); if (obj) { results[i-shift] = obj; @@ -1035,17 +994,18 @@ void inform_duplicate(const char *name, Class oldCls, Class cls) { #if TARGET_OS_WIN32 - _objc_inform ("Class %s is implemented in two different images.", name); + (DebugDuplicateClasses ? _objc_fatal : _objc_inform) + ("Class %s is implemented in two different images.", name); #else const header_info *oldHeader = _headerForClass(oldCls); const header_info *newHeader = _headerForClass(cls); const char *oldName = oldHeader ? oldHeader->fname : "??"; const char *newName = newHeader ? newHeader->fname : "??"; - - _objc_inform ("Class %s is implemented in both %s and %s. " - "One of the two will be used. " - "Which one is undefined.", - name, oldName, newName); + + (DebugDuplicateClasses ? _objc_fatal : _objc_inform) + ("Class %s is implemented in both %s and %s. " + "One of the two will be used. Which one is undefined.", + name, oldName, newName); #endif } diff --git a/runtime/objc-config.h b/runtime/objc-config.h index 3ef50bc..0e33b04 100644 --- a/runtime/objc-config.h +++ b/runtime/objc-config.h @@ -63,10 +63,27 @@ # define SUPPORT_TAGGED_POINTERS 1 #endif +// Define SUPPORT_MSB_TAGGED_POINTERS to use the MSB +// as the tagged pointer marker instead of the LSB. +// Be sure to edit tagged pointer SPI in objc-internal.h as well. +#if !SUPPORT_TAGGED_POINTERS || !TARGET_OS_IPHONE +# define SUPPORT_MSB_TAGGED_POINTERS 0 +#else +# define SUPPORT_MSB_TAGGED_POINTERS 1 +#endif + +// Define SUPPORT_NONPOINTER_ISA=1 to enable extra data in the isa field. +#if !__LP64__ || TARGET_OS_WIN32 || TARGET_IPHONE_SIMULATOR || __x86_64__ +# define SUPPORT_NONPOINTER_ISA 0 +#else +# define SUPPORT_NONPOINTER_ISA 1 +#endif + // Define SUPPORT_FIXUP=1 to repair calls sites for fixup dispatch. 
// Fixup messaging itself is no longer supported. // Be sure to edit objc-abi.h as well (objc_msgSend*_fixup) -#if !__OBJC2__ || !defined(__x86_64__) +// Note TARGET_OS_MAC is also set for iOS simulator. +#if !__x86_64__ || !TARGET_OS_MAC # define SUPPORT_FIXUP 0 #else # define SUPPORT_FIXUP 1 @@ -111,6 +128,13 @@ # define SUPPORT_RETURN_AUTORELEASE 1 #endif +// Define SUPPORT_STRET on architectures that need separate struct-return ABI. +#if defined(__arm64__) +# define SUPPORT_STRET 0 +#else +# define SUPPORT_STRET 1 +#endif + // Define SUPPORT_MESSAGE_LOGGING to enable NSObjCMessageLoggingEnabled #if TARGET_OS_WIN32 || TARGET_OS_EMBEDDED # define SUPPORT_MESSAGE_LOGGING 0 diff --git a/runtime/objc-env.h b/runtime/objc-env.h index d32c245..9f31421 100644 --- a/runtime/objc-env.h +++ b/runtime/objc-env.h @@ -24,6 +24,7 @@ OPTION( PrintDeprecation, OBJC_PRINT_DEPRECATION_WARNINGS, "warn about c OPTION( PrintPoolHiwat, OBJC_PRINT_POOL_HIGHWATER, "log high-water marks for autorelease pools") OPTION( PrintCustomRR, OBJC_PRINT_CUSTOM_RR, "log classes with un-optimized custom retain/release methods") OPTION( PrintCustomAWZ, OBJC_PRINT_CUSTOM_AWZ, "log classes with un-optimized custom allocWithZone methods") +OPTION( PrintRawIsa, OBJC_PRINT_RAW_ISA, "log classes that require raw pointer isa fields") OPTION( DebugUnload, OBJC_DEBUG_UNLOAD, "warn about poorly-behaving bundles when unloaded") OPTION( DebugFragileSuperclasses, OBJC_DEBUG_FRAGILE_SUPERCLASSES, "warn about subclasses that may have been broken by subsequent changes to superclasses") @@ -32,6 +33,7 @@ OPTION( DebugNilSync, OBJC_DEBUG_NIL_SYNC, "warn about @ OPTION( DebugNonFragileIvars, OBJC_DEBUG_NONFRAGILE_IVARS, "capriciously rearrange non-fragile ivars") OPTION( DebugAltHandlers, OBJC_DEBUG_ALT_HANDLERS, "record more info about bad alt handler use") OPTION( DebugMissingPools, OBJC_DEBUG_MISSING_POOLS, "warn about autorelease with no pool in place, which may be a leak") +OPTION( DebugDuplicateClasses, OBJC_DEBUG_DUPLICATE_CLASSES, "halt when multiple classes with the same name are present") OPTION( UseInternalZone, OBJC_USE_INTERNAL_ZONE, "allocate runtime data in a dedicated malloc zone") @@ -39,3 +41,4 @@ OPTION( DisableGC, OBJC_DISABLE_GC, "force GC OFF OPTION( DisableVtables, OBJC_DISABLE_VTABLES, "disable vtable dispatch") OPTION( DisablePreopt, OBJC_DISABLE_PREOPTIMIZATION, "disable preoptimization courtesy of dyld shared cache") OPTION( DisableTaggedPointers, OBJC_DISABLE_TAGGED_POINTERS, "disable tagged pointer optimization of NSNumber et al.") +OPTION( DisableIndexedIsa, OBJC_DISABLE_NONPOINTER_ISA, "disable non-pointer isa fields") diff --git a/runtime/objc-exception.mm b/runtime/objc-exception.mm index 956ef0c..88f69a6 100644 --- a/runtime/objc-exception.mm +++ b/runtime/objc-exception.mm @@ -239,6 +239,7 @@ void _destroyAltHandlerList(struct alt_handler_list *list) #include "objc-private.h" #include +#include #include // unwind library types and functions @@ -631,12 +632,12 @@ static bool _objc_exception_do_catch(struct objc_typeinfo *catch_tinfo, } else if ((*exception_matcher)(handler_cls, exception)) { if (PrintExceptions) _objc_inform("EXCEPTIONS: catch(%s)", - handler_cls->getName()); + handler_cls->nameForLogging()); return true; } if (PrintExceptions) _objc_inform("EXCEPTIONS: skipping catch(%s)", - handler_cls->getName()); + handler_cls->nameForLogging()); return false; } diff --git a/runtime/objc-externalref.mm b/runtime/objc-externalref.mm index d5ae3eb..8511fe9 100644 --- a/runtime/objc-externalref.mm +++ 
b/runtime/objc-externalref.mm @@ -85,7 +85,7 @@ static void _initialize_gc() { // grow the buffer by one page static bool _grow_list(external_ref_list *list) { auto_memory_type_t memory_type = (is_strong(list) ? AUTO_MEMORY_ALL_POINTERS : AUTO_MEMORY_ALL_WEAK_POINTERS); - size_t new_size = list->_size + PAGE_SIZE / sizeof(void *); + size_t new_size = list->_size + PAGE_MAX_SIZE / sizeof(void *); // auto_realloc() has been enhanced to handle strong and weak memory. void **new_list = (void **)(list->_buffer ? malloc_zone_realloc(gc_zone, list->_buffer, new_size * sizeof(void *)) : auto_zone_allocate_object(gc_zone, new_size * sizeof(void *), memory_type, false, false)); if (!new_list) _objc_fatal("unable to allocate, size = %ld\n", new_size); diff --git a/runtime/objc-gdb.h b/runtime/objc-gdb.h index 55304a2..65b3862 100644 --- a/runtime/objc-gdb.h +++ b/runtime/objc-gdb.h @@ -78,6 +78,27 @@ OBJC_EXPORT NXHashTable *_objc_debug_class_hash #endif +/*********************************************************************** +* Non-pointer isa +**********************************************************************/ + +#if __OBJC2__ + +// Extract isa pointer from an isa field. +// (Class)(isa & mask) == class pointer +OBJC_EXPORT const uintptr_t objc_debug_isa_class_mask + __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_7_0); + +// Extract magic cookie from an isa field. +// (isa & magic_mask) == magic_value +OBJC_EXPORT const uintptr_t objc_debug_isa_magic_mask + __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_7_0); +OBJC_EXPORT const uintptr_t objc_debug_isa_magic_value + __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_7_0); + +#endif + + /*********************************************************************** * Tagged pointer decoding **********************************************************************/ @@ -85,24 +106,24 @@ OBJC_EXPORT NXHashTable *_objc_debug_class_hash // if (obj & mask) obj is a tagged pointer object OBJC_EXPORT uintptr_t objc_debug_taggedpointer_mask - __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_NA); + __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0); // tag_slot = (obj >> slot_shift) & slot_mask OBJC_EXPORT unsigned int objc_debug_taggedpointer_slot_shift - __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_NA); + __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0); OBJC_EXPORT uintptr_t objc_debug_taggedpointer_slot_mask - __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_NA); + __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0); // class = classes[tag_slot] OBJC_EXPORT Class objc_debug_taggedpointer_classes[] - __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_NA); + __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0); // payload = (obj << payload_lshift) >> payload_rshift // Payload signedness is determined by the signedness of the right-shift. 
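// Illustrative sketch (not part of the patch): how a debugger-side tool might
// use the objc_debug_isa_* exports declared above to recover the class bits
// from a raw isa field. A real debugger reads these symbols out of the target
// process rather than linking against them; this helper is hypothetical.
#include <stdint.h>

extern const uintptr_t objc_debug_isa_class_mask;
extern const uintptr_t objc_debug_isa_magic_mask;
extern const uintptr_t objc_debug_isa_magic_value;

static uintptr_t sketch_class_bits_from_isa(uintptr_t isaBits)
{
    if ((isaBits & objc_debug_isa_magic_mask) == objc_debug_isa_magic_value) {
        /* non-pointer isa: (Class)(isa & mask), per the comment above */
        return isaBits & objc_debug_isa_class_mask;
    }
    /* otherwise the field already is a plain Class pointer */
    return isaBits;
}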
OBJC_EXPORT unsigned int objc_debug_taggedpointer_payload_lshift - __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_NA); + __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0); OBJC_EXPORT unsigned int objc_debug_taggedpointer_payload_rshift - __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_NA); + __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0); #endif diff --git a/runtime/objc-initialize.mm b/runtime/objc-initialize.mm index a37bd2b..ae7fcc2 100644 --- a/runtime/objc-initialize.mm +++ b/runtime/objc-initialize.mm @@ -281,7 +281,7 @@ static void _finishInitializing(Class cls, Class supercls) if (PrintInitializing) { _objc_inform("INITIALIZE: %s is fully +initialized", - cls->getName()); + cls->nameForLogging()); } // propagate finalization affinity. @@ -329,7 +329,7 @@ static void _finishInitializingAfter(Class cls, Class supercls) if (PrintInitializing) { _objc_inform("INITIALIZE: %s waiting for superclass +[%s initialize]", - cls->getName(), supercls->getName()); + cls->nameForLogging(), supercls->nameForLogging()); } if (!pendingInitializeMap) { @@ -386,14 +386,14 @@ void _class_initialize(Class cls) // this class doesn't implement +initialize. 2157218 if (PrintInitializing) { _objc_inform("INITIALIZE: calling +[%s initialize]", - cls->getName()); + cls->nameForLogging()); } ((void(*)(Class, SEL))objc_msgSend)(cls, SEL_initialize); if (PrintInitializing) { _objc_inform("INITIALIZE: finished +[%s initialize]", - cls->getName()); + cls->nameForLogging()); } // Done initializing. diff --git a/runtime/objc-internal.h b/runtime/objc-internal.h index 9bf9836..e8e36df 100644 --- a/runtime/objc-internal.h +++ b/runtime/objc-internal.h @@ -45,10 +45,34 @@ __BEGIN_DECLS +// This is the allocation size required for each of the class and the metaclass +// with objc_initializeClassPair() and objc_readClassPair(). +// The runtime's class structure will never grow beyond this. +#define OBJC_MAX_CLASS_SIZE (32*sizeof(void*)) + // In-place construction of an Objective-C class. -OBJC_EXPORT Class objc_initializeClassPair(Class superclass_gen, const char *name, Class cls_gen, Class meta_gen) +// cls and metacls must each be OBJC_MAX_CLASS_SIZE bytes. +// Returns nil if a class with the same name already exists. +// Returns nil if the superclass is under construction. +// Call objc_registerClassPair() when you are done. +OBJC_EXPORT Class objc_initializeClassPair(Class superclass, const char *name, Class cls, Class metacls) __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_0); +// Class and metaclass construction from a compiler-generated memory image. +// cls and cls->isa must each be OBJC_MAX_CLASS_SIZE bytes. +// Extra bytes not used the the metadata must be zero. +// info is the same objc_image_info that would be emitted by a static compiler. +// Returns nil if a class with the same name already exists. +// Returns nil if the superclass is nil and the class is not marked as a root. +// Returns nil if the superclass is under construction. +// Do not call objc_registerClassPair(). +#if __OBJC2__ +struct objc_image_info; +OBJC_EXPORT Class objc_readClassPair(Class cls, + const struct objc_image_info *info) + __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); +#endif + // Batch object allocation using malloc_zone_batch_malloc(). 
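// Illustrative sketch (not part of the patch): in-place class construction
// per the objc_initializeClassPair() contract documented above. The SPI
// declaration is repeated here for self-containment; the helper name, the
// calloc allocation, and the failure handling are hypothetical (the buffers
// must stay alive for as long as the class remains registered).
#include <objc/runtime.h>
#include <stdlib.h>

#define SKETCH_MAX_CLASS_SIZE (32 * sizeof(void *))   /* OBJC_MAX_CLASS_SIZE */

extern Class objc_initializeClassPair(Class superclass, const char *name,
                                      Class cls, Class metacls);

static Class sketch_make_in_place_class(Class superclass, const char *name)
{
    /* The class and the metaclass each get OBJC_MAX_CLASS_SIZE zeroed bytes. */
    Class cls  = (Class)calloc(1, SKETCH_MAX_CLASS_SIZE);
    Class meta = (Class)calloc(1, SKETCH_MAX_CLASS_SIZE);

    Class result = objc_initializeClassPair(superclass, name, cls, meta);
    if (!result) {          /* duplicate name, or superclass under construction */
        free(cls);
        free(meta);
        return Nil;
    }
    objc_registerClassPair(result);   /* required before the class is usable */
    return result;
}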
OBJC_EXPORT unsigned class_createInstances(Class cls, size_t extraBytes, id *results, unsigned num_requested) @@ -59,13 +83,6 @@ OBJC_EXPORT unsigned class_createInstances(Class cls, size_t extraBytes, OBJC_EXPORT Class _objc_getFreedObjectClass(void) __OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0); -// Substitute receiver for messages to nil. -// Not supported for all messages to nil. -OBJC_EXPORT id _objc_setNilReceiver(id newNilReceiver) - __OSX_AVAILABLE_STARTING(__MAC_10_3, __IPHONE_NA); -OBJC_EXPORT id _objc_getNilReceiver(void) - __OSX_AVAILABLE_STARTING(__MAC_10_3, __IPHONE_NA); - // Return YES if GC is on and `object` is a GC allocation. OBJC_EXPORT BOOL objc_isAuto(id object) __OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_NA); @@ -154,10 +171,10 @@ typedef enum objc_tag_index_t objc_tag_index_t; #endif OBJC_EXPORT void _objc_registerTaggedPointerClass(objc_tag_index_t tag, Class cls) - __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_NA); + __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0); OBJC_EXPORT Class _objc_getClassForTag(objc_tag_index_t tag) - __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_NA); + __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0); static inline bool _objc_taggedPointersEnabled(void) @@ -166,6 +183,50 @@ _objc_taggedPointersEnabled(void) return (objc_debug_taggedpointer_mask != 0); } +#if TARGET_OS_IPHONE +// tagged pointer marker is MSB + +static inline void * +_objc_makeTaggedPointer(objc_tag_index_t tag, uintptr_t value) +{ + // assert(_objc_taggedPointersEnabled()); + // assert((unsigned int)tag < 8); + // assert(((value << 4) >> 4) == value); + return (void*)((1UL << 63) | ((uintptr_t)tag << 60) | (value & ~(0xFUL << 60))); +} + +static inline bool +_objc_isTaggedPointer(const void *ptr) +{ + return (intptr_t)ptr < 0; // a.k.a. ptr & 0x8000000000000000 +} + +static inline objc_tag_index_t +_objc_getTaggedPointerTag(const void *ptr) +{ + // assert(_objc_isTaggedPointer(ptr)); + return (objc_tag_index_t)(((uintptr_t)ptr >> 60) & 0x7); +} + +static inline uintptr_t +_objc_getTaggedPointerValue(const void *ptr) +{ + // assert(_objc_isTaggedPointer(ptr)); + return (uintptr_t)ptr & 0x0fffffffffffffff; +} + +static inline intptr_t +_objc_getTaggedPointerSignedValue(const void *ptr) +{ + // assert(_objc_isTaggedPointer(ptr)); + return ((intptr_t)ptr << 4) >> 4; +} + +// TARGET_OS_IPHONE +#else +// not TARGET_OS_IPHONE +// tagged pointer marker is LSB + static inline void * _objc_makeTaggedPointer(objc_tag_index_t tag, uintptr_t value) { @@ -202,9 +263,12 @@ _objc_getTaggedPointerSignedValue(const void *ptr) return (intptr_t)ptr >> 4; } +// not TARGET_OS_IPHONE +#endif + OBJC_EXPORT void _objc_insert_tagged_isa(unsigned char slotNumber, Class isa) - __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_7,__MAC_10_9, __IPHONE_4_3,__IPHONE_NA); + __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_7,__MAC_10_9, __IPHONE_4_3,__IPHONE_7_0); #endif @@ -245,7 +309,8 @@ OBJC_EXPORT IMP object_getMethodImplementation(id obj, SEL name) __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0); OBJC_EXPORT IMP object_getMethodImplementation_stret(id obj, SEL name) - __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0); + __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0) + OBJC_ARM64_UNAVAILABLE; // Instance-specific instance variable layout. 
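// Illustrative sketch (not part of the patch): the MSB tagged-pointer layout
// implemented above (marker in bit 63, tag in bits 60..62, payload in the low
// 60 bits), restated as a standalone round trip. It mirrors the inline SPI
// rather than calling it, and assumes an LP64 target as the MSB scheme does.
#include <assert.h>
#include <stdint.h>

static void *sketch_make_tagged(unsigned tag, uintptr_t value)
{
    return (void *)((1UL << 63) |
                    ((uintptr_t)tag << 60) |
                    (value & ~(0xFUL << 60)));
}

static void sketch_tagged_roundtrip(void)
{
    void *p = sketch_make_tagged(3, 0x123456789abcdefUL);

    assert((intptr_t)p < 0);                          /* marker bit 63 is set */
    assert((((uintptr_t)p >> 60) & 0x7) == 3);        /* tag survives */
    assert(((uintptr_t)p & 0x0fffffffffffffffUL)
           == 0x123456789abcdefUL);                   /* payload survives */
}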
diff --git a/runtime/objc-loadmethod.mm b/runtime/objc-loadmethod.mm index bace392..71dc68f 100644 --- a/runtime/objc-loadmethod.mm +++ b/runtime/objc-loadmethod.mm @@ -69,7 +69,8 @@ void add_class_to_loadable_list(Class cls) if (!method) return; // Don't bother if cls has no +load method if (PrintLoading) { - _objc_inform("LOAD: class '%s' scheduled for +load", cls->getName()); + _objc_inform("LOAD: class '%s' scheduled for +load", + cls->nameForLogging()); } if (loadable_classes_used == loadable_classes_allocated) { @@ -137,7 +138,8 @@ void remove_class_from_loadable_list(Class cls) if (loadable_classes[i].cls == cls) { loadable_classes[i].cls = nil; if (PrintLoading) { - _objc_inform("LOAD: class '%s' unscheduled for +load", cls->getName()); + _objc_inform("LOAD: class '%s' unscheduled for +load", + cls->nameForLogging()); } return; } @@ -197,7 +199,7 @@ static void call_class_loads(void) if (!cls) continue; if (PrintLoading) { - _objc_inform("LOAD: +[%s load]\n", cls->getName()); + _objc_inform("LOAD: +[%s load]\n", cls->nameForLogging()); } (*load_method)(cls, SEL_load); } @@ -243,7 +245,7 @@ static BOOL call_category_loads(void) if (cls && cls->isLoadable()) { if (PrintLoading) { _objc_inform("LOAD: +[%s(%s) load]\n", - cls->getName(), + cls->nameForLogging(), _category_getName(cat)); } (*load_method)(cls, SEL_load); diff --git a/runtime/objc-object.h b/runtime/objc-object.h new file mode 100644 index 0000000..6d81181 --- /dev/null +++ b/runtime/objc-object.h @@ -0,0 +1,1058 @@ +/* + * Copyright (c) 2010-2012 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + + +/*********************************************************************** +* Inlineable parts of NSObject / objc_object implementation +**********************************************************************/ + +#ifndef _OBJC_OBJCOBJECT_H_ +#define _OBJC_OBJCOBJECT_H_ + +#include "objc-private.h" + +static ALWAYS_INLINE bool fastAutoreleaseForReturn(id obj); +static ALWAYS_INLINE bool fastRetainFromReturn(id obj); + + +#if SUPPORT_TAGGED_POINTERS + +#define TAG_COUNT 8 +#define TAG_SLOT_MASK 0xf + +#if SUPPORT_MSB_TAGGED_POINTERS +# define TAG_MASK (1ULL<<63) +# define TAG_SLOT_SHIFT 60 +# define TAG_PAYLOAD_LSHIFT 4 +# define TAG_PAYLOAD_RSHIFT 4 +#else +# define TAG_MASK 1 +# define TAG_SLOT_SHIFT 0 +# define TAG_PAYLOAD_LSHIFT 0 +# define TAG_PAYLOAD_RSHIFT 4 +#endif + +extern "C" { extern Class objc_debug_taggedpointer_classes[TAG_COUNT*2]; } +#define objc_tag_classes objc_debug_taggedpointer_classes + +#endif + + +inline bool +objc_object::isClass() +{ + if (isTaggedPointer()) return false; + return ISA()->isMetaClass(); +} + +#if SUPPORT_NONPOINTER_ISA + +# if !SUPPORT_TAGGED_POINTERS +# error sorry +# endif + + +inline Class +objc_object::ISA() +{ + assert(!isTaggedPointer()); + return (Class)(isa.bits & ISA_MASK); +} + + +inline bool +objc_object::hasIndexedIsa() +{ + return isa.indexed; +} + +inline Class +objc_object::getIsa() +{ + if (isTaggedPointer()) { + uintptr_t slot = ((uintptr_t)this >> TAG_SLOT_SHIFT) & TAG_SLOT_MASK; + return objc_tag_classes[slot]; + } + return ISA(); +} + + +inline void +objc_object::initIsa(Class cls) +{ + initIsa(cls, false, false); +} + +inline void +objc_object::initClassIsa(Class cls) +{ + if (DisableIndexedIsa) { + initIsa(cls, false, false); + } else { + initIsa(cls, true, false); + } +} + +inline void +objc_object::initProtocolIsa(Class cls) +{ + return initClassIsa(cls); +} + +inline void +objc_object::initInstanceIsa(Class cls, bool hasCxxDtor) +{ + assert(!UseGC); + assert(!cls->requiresRawIsa()); + assert(hasCxxDtor == cls->hasCxxDtor()); + + initIsa(cls, true, hasCxxDtor); +} + +inline void +objc_object::initIsa(Class cls, bool indexed, bool hasCxxDtor) +{ + assert(!isTaggedPointer()); + + if (!indexed) { + isa.cls = cls; + } else { + assert(!DisableIndexedIsa); + isa.bits = ISA_MAGIC_VALUE; + // isa.magic is part of ISA_MAGIC_VALUE + // isa.indexed is part of ISA_MAGIC_VALUE + isa.has_cxx_dtor = hasCxxDtor; + isa.shiftcls = (uintptr_t)cls >> 3; + } +} + + +inline Class +objc_object::changeIsa(Class newCls) +{ + assert(!isTaggedPointer()); + + isa_t oldisa; + isa_t newisa; + + bool sideTableLocked = false; + bool transcribeToSideTable = false; + + do { + transcribeToSideTable = false; + oldisa = LoadExclusive(&isa.bits); + if ((oldisa.bits == 0 || oldisa.indexed) && + newCls->canAllocIndexed()) + { + // 0 -> indexed + // indexed -> indexed + if (oldisa.bits == 0) newisa.bits = ISA_MAGIC_VALUE; + else newisa = oldisa; + // isa.magic is part of ISA_MAGIC_VALUE + // isa.indexed is part of ISA_MAGIC_VALUE + newisa.has_cxx_dtor = newCls->hasCxxDtor(); + newisa.shiftcls = (uintptr_t)newCls >> 3; + } + else if (oldisa.indexed) { + // indexed -> not indexed + // Need to copy retain count et al to side table. + // Acquire side table lock before setting isa to + // prevent races such as concurrent -release. 
+ if (!sideTableLocked) sidetable_lock(); + sideTableLocked = true; + transcribeToSideTable = true; + newisa.cls = newCls; + } + else { + // not indexed -> not indexed + newisa.cls = newCls; + } + } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)); + + if (transcribeToSideTable) { + // Copy oldisa's retain count et al to side table. + // oldisa.weakly_referenced: nothing to do + // oldisa.has_assoc: nothing to do + // oldisa.has_cxx_dtor: nothing to do + sidetable_moveExtraRC_nolock(oldisa.extra_rc, + oldisa.deallocating, + oldisa.weakly_referenced); + } + + if (sideTableLocked) sidetable_unlock(); + + Class oldCls; + if (oldisa.indexed) oldCls = (Class)((uintptr_t)oldisa.shiftcls << 3); + else oldCls = oldisa.cls; + + return oldCls; +} + + +inline bool +objc_object::isTaggedPointer() +{ + return ((uintptr_t)this & TAG_MASK); +} + + +inline bool +objc_object::hasAssociatedObjects() +{ + if (isTaggedPointer()) return true; + if (isa.indexed) return isa.has_assoc; + return true; +} + + +inline void +objc_object::setHasAssociatedObjects() +{ + if (isTaggedPointer()) return; + + retry: + isa_t oldisa = LoadExclusive(&isa.bits); + isa_t newisa = oldisa; + if (!newisa.indexed) return; + if (newisa.has_assoc) return; + newisa.has_assoc = true; + if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry; +} + + +inline bool +objc_object::isWeaklyReferenced() +{ + assert(!isTaggedPointer()); + if (isa.indexed) return isa.weakly_referenced; + else return sidetable_isWeaklyReferenced(); +} + + +inline void +objc_object::setWeaklyReferenced_nolock() +{ + retry: + isa_t oldisa = LoadExclusive(&isa.bits); + isa_t newisa = oldisa; + if (!newisa.indexed) return sidetable_setWeaklyReferenced_nolock(); + if (newisa.weakly_referenced) return; + newisa.weakly_referenced = true; + if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry; +} + + +inline bool +objc_object::hasCxxDtor() +{ + assert(!isTaggedPointer()); + if (isa.indexed) return isa.has_cxx_dtor; + else return isa.cls->hasCxxDtor(); +} + + + +inline bool +objc_object::rootIsDeallocating() +{ + assert(!UseGC); + + if (isTaggedPointer()) return false; + if (isa.indexed) return isa.deallocating; + return sidetable_isDeallocating(); +} + + +inline void +objc_object::clearDeallocating() +{ + if (!isa.indexed) { + sidetable_clearDeallocating(); + } + else if (isa.weakly_referenced) { + clearDeallocating_weak(); + } + + assert(!sidetable_present()); +} + + +inline void +objc_object::rootDealloc() +{ + assert(!UseGC); + if (isTaggedPointer()) return; + + if (isa.indexed && + !isa.weakly_referenced && + !isa.has_assoc && + !isa.has_cxx_dtor) + { + assert(!sidetable_present()); + free(this); + } + else { + object_dispose((id)this); + } +} + + +// Equivalent to calling [this retain], with shortcuts if there is no override +inline id +objc_object::retain() +{ + // UseGC is allowed here, but requires hasCustomRR. + assert(!UseGC || ISA()->hasCustomRR()); + assert(!isTaggedPointer()); + + if (! ISA()->hasCustomRR()) { + return rootRetain(); + } + + return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_retain); +} + + +// Base retain implementation, ignoring overrides. +// This does not check isa.fast_rr; if there is an RR override then +// it was already called and it chose to call [super retain]. +// +// tryRetain=true is the -_tryRetain path. +// handleOverflow=false is the frameless fast path. 
+// handleOverflow=true is the framed slow path including overflow to side table +// The code is structured this way to prevent duplication. + +ALWAYS_INLINE id +objc_object::rootRetain() +{ + return rootRetain(false, false); +} + +ALWAYS_INLINE bool +objc_object::rootTryRetain() +{ + return rootRetain(true, false) ? true : false; +} + +ALWAYS_INLINE id +objc_object::rootRetain(bool tryRetain, bool handleOverflow) +{ + assert(!UseGC); + if (isTaggedPointer()) return (id)this; + + bool sideTableLocked = false; + bool transcribeToSideTable = false; + + isa_t oldisa; + isa_t newisa; + + do { + transcribeToSideTable = false; + oldisa = LoadExclusive(&isa.bits); + newisa = oldisa; + if (!newisa.indexed) goto unindexed; + // don't check newisa.fast_rr; we already called any RR overrides + if (tryRetain && newisa.deallocating) goto tryfail; + uintptr_t carry; + newisa.bits = addc(newisa.bits, RC_ONE, 0, &carry); // extra_rc++ + + if (carry) { + // newisa.extra_rc++ overflowed + if (!handleOverflow) return rootRetain_overflow(tryRetain); + // Leave half of the retain counts inline and + // prepare to copy the other half to the side table. + if (!tryRetain && !sideTableLocked) sidetable_lock(); + sideTableLocked = true; + transcribeToSideTable = true; + newisa.extra_rc = RC_HALF; + newisa.has_sidetable_rc = true; + } + } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)); + + if (transcribeToSideTable) { + // Copy the other half of the retain counts to the side table. + sidetable_addExtraRC_nolock(RC_HALF); + } + + if (!tryRetain && sideTableLocked) sidetable_unlock(); + return (id)this; + + tryfail: + if (!tryRetain && sideTableLocked) sidetable_unlock(); + return nil; + + unindexed: + if (!tryRetain && sideTableLocked) sidetable_unlock(); + if (tryRetain) return sidetable_tryRetain() ? (id)this : nil; + else return sidetable_retain(); +} + + +// Equivalent to calling [this release], with shortcuts if there is no override +inline void +objc_object::release() +{ + // UseGC is allowed here, but requires hasCustomRR. + assert(!UseGC || ISA()->hasCustomRR()); + assert(!isTaggedPointer()); + + if (! ISA()->hasCustomRR()) { + rootRelease(); + return; + } + + ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_release); +} + + +// Base release implementation, ignoring overrides. +// Does not call -dealloc. +// Returns true if the object should now be deallocated. +// This does not check isa.fast_rr; if there is an RR override then +// it was already called and it chose to call [super release]. +// +// handleUnderflow=false is the frameless fast path. +// handleUnderflow=true is the framed slow path including side table borrow +// The code is structured this way to prevent duplication. 
+ +ALWAYS_INLINE bool +objc_object::rootRelease() +{ + return rootRelease(true, false); +} + +ALWAYS_INLINE bool +objc_object::rootReleaseShouldDealloc() +{ + return rootRelease(false, false); +} + +ALWAYS_INLINE bool +objc_object::rootRelease(bool performDealloc, bool handleUnderflow) +{ + assert(!UseGC); + if (isTaggedPointer()) return false; + + bool sideTableLocked = false; + + isa_t oldisa; + isa_t newisa; + + retry: + do { + oldisa = LoadExclusive(&isa.bits); + newisa = oldisa; + if (!newisa.indexed) goto unindexed; + // don't check newisa.fast_rr; we already called any RR overrides + uintptr_t carry; + newisa.bits = subc(newisa.bits, RC_ONE, 0, &carry); // extra_rc-- + if (carry) goto underflow; + } while (!StoreReleaseExclusive(&isa.bits, oldisa.bits, newisa.bits)); + + if (sideTableLocked) sidetable_unlock(); + return false; + + underflow: + // newisa.extra_rc-- underflowed: borrow from side table or deallocate + + // abandon newisa to undo the decrement + newisa = oldisa; + + if (newisa.has_sidetable_rc) { + if (!handleUnderflow) { + return rootRelease_underflow(performDealloc); + } + // Add some retain counts inline and prepare + // to remove them from the side table. + if (!sideTableLocked) sidetable_lock(); + sideTableLocked = true; + newisa.extra_rc = RC_HALF - 1; // redo the decrement + if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry; + + // Remove the retain counts from the side table. + bool zeroed = sidetable_subExtraRC_nolock(RC_HALF); + if (zeroed) { + // Side table count is now zero. Clear the marker bit. + do { + oldisa = LoadExclusive(&isa.bits); + newisa.has_sidetable_rc = false; + } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)); + } + + // Decrement successful after borrowing from side table. + // This decrement cannot be the deallocating decrement - the side + // table lock and has_sidetable_rc bit ensure that if everyone + // else tried to -release while we worked, the last one would block. + sidetable_unlock(); + return false; + } + + // Really deallocate. + + if (sideTableLocked) sidetable_unlock(); + + if (newisa.deallocating) { + return overrelease_error(); + } + newisa.deallocating = true; + if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry; + __sync_synchronize(); + if (performDealloc) { + ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc); + } + return true; + + unindexed: + if (sideTableLocked) sidetable_unlock(); + return sidetable_release(performDealloc); +} + + +// Equivalent to [this autorelease], with shortcuts if there is no override +inline id +objc_object::autorelease() +{ + // UseGC is allowed here, but requires hasCustomRR. + assert(!UseGC || ISA()->hasCustomRR()); + + if (isTaggedPointer()) return (id)this; + if (! ISA()->hasCustomRR()) return rootAutorelease(); + + return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_autorelease); +} + + +// Base autorelease implementation, ignoring overrides. 
+inline id +objc_object::rootAutorelease() +{ + assert(!UseGC); + + if (isTaggedPointer()) return (id)this; + if (fastAutoreleaseForReturn((id)this)) return (id)this; + + return rootAutorelease2(); +} + + +inline uintptr_t +objc_object::rootRetainCount() +{ + assert(!UseGC); + if (isTaggedPointer()) return (uintptr_t)this; + + sidetable_lock(); + isa_t bits = LoadExclusive(&isa.bits); + if (bits.indexed) { + uintptr_t rc = 1 + bits.extra_rc; + if (bits.has_sidetable_rc) { + rc += sidetable_getExtraRC_nolock(); + } + sidetable_unlock(); + return rc; + } + + sidetable_unlock(); + return sidetable_retainCount(); +} + + +// SUPPORT_NONPOINTER_ISA +#else +// not SUPPORT_NONPOINTER_ISA + + +inline Class +objc_object::ISA() +{ + assert(!isTaggedPointer()); + return isa.cls; +} + + +inline bool +objc_object::hasIndexedIsa() +{ + return false; +} + + +inline Class +objc_object::getIsa() +{ +#if SUPPORT_TAGGED_POINTERS + if (isTaggedPointer()) { + uintptr_t slot = ((uintptr_t)this >> TAG_SLOT_SHIFT) & TAG_SLOT_MASK; + return objc_tag_classes[slot]; + } +#endif + return ISA(); +} + + +inline void +objc_object::initIsa(Class cls) +{ + assert(!isTaggedPointer()); + isa = (uintptr_t)cls; +} + + +inline void +objc_object::initClassIsa(Class cls) +{ + initIsa(cls); +} + + +inline void +objc_object::initProtocolIsa(Class cls) +{ + initIsa(cls); +} + + +inline void +objc_object::initInstanceIsa(Class cls, bool) +{ + initIsa(cls); +} + + +inline void +objc_object::initIsa(Class cls, bool, bool) +{ + initIsa(cls); +} + + +inline Class +objc_object::changeIsa(Class cls) +{ + assert(!isTaggedPointer()); + + isa_t oldisa, newisa; + newisa.cls = cls; + do { + oldisa = LoadExclusive(&isa.bits); + } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)); + + if (oldisa.cls && oldisa.cls->instancesHaveAssociatedObjects()) { + cls->setInstancesHaveAssociatedObjects(); + } + + return oldisa.cls; +} + + +inline bool +objc_object::isTaggedPointer() +{ +#if SUPPORT_TAGGED_POINTERS + return ((uintptr_t)this & TAG_MASK); +#else + return false; +#endif +} + + +inline bool +objc_object::hasAssociatedObjects() +{ + assert(!UseGC); + + return getIsa()->instancesHaveAssociatedObjects(); +} + + +inline void +objc_object::setHasAssociatedObjects() +{ + assert(!UseGC); + + getIsa()->setInstancesHaveAssociatedObjects(); +} + + +inline bool +objc_object::isWeaklyReferenced() +{ + assert(!isTaggedPointer()); + assert(!UseGC); + + return sidetable_isWeaklyReferenced(); +} + + +inline void +objc_object::setWeaklyReferenced_nolock() +{ + assert(!isTaggedPointer()); + assert(!UseGC); + + sidetable_setWeaklyReferenced_nolock(); +} + + +inline bool +objc_object::hasCxxDtor() +{ + assert(!isTaggedPointer()); + return isa.cls->hasCxxDtor(); +} + + +inline bool +objc_object::rootIsDeallocating() +{ + assert(!UseGC); + + if (isTaggedPointer()) return false; + return sidetable_isDeallocating(); +} + + +inline void +objc_object::clearDeallocating() +{ + sidetable_clearDeallocating(); +} + + +inline void +objc_object::rootDealloc() +{ + if (isTaggedPointer()) return; + object_dispose((id)this); +} + + +// Equivalent to calling [this retain], with shortcuts if there is no override +inline id +objc_object::retain() +{ + // UseGC is allowed here, but requires hasCustomRR. + assert(!UseGC || ISA()->hasCustomRR()); + assert(!isTaggedPointer()); + + if (! ISA()->hasCustomRR()) { + return sidetable_retain(); + } + + return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_retain); +} + + +// Base retain implementation, ignoring overrides. 
+// This does not check isa.fast_rr; if there is an RR override then +// it was already called and it chose to call [super retain]. +inline id +objc_object::rootRetain() +{ + assert(!UseGC); + + if (isTaggedPointer()) return (id)this; + return sidetable_retain(); +} + + +// Equivalent to calling [this release], with shortcuts if there is no override +inline void +objc_object::release() +{ + // UseGC is allowed here, but requires hasCustomRR. + assert(!UseGC || ISA()->hasCustomRR()); + assert(!isTaggedPointer()); + + if (! ISA()->hasCustomRR()) { + sidetable_release(); + return; + } + + ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_release); +} + + +// Base release implementation, ignoring overrides. +// Does not call -dealloc. +// Returns true if the object should now be deallocated. +// This does not check isa.fast_rr; if there is an RR override then +// it was already called and it chose to call [super release]. +inline bool +objc_object::rootRelease() +{ + assert(!UseGC); + + if (isTaggedPointer()) return false; + return sidetable_release(true); +} + +inline bool +objc_object::rootReleaseShouldDealloc() +{ + if (isTaggedPointer()) return false; + return sidetable_release(false); +} + + +// Equivalent to [this autorelease], with shortcuts if there is no override +inline id +objc_object::autorelease() +{ + // UseGC is allowed here, but requires hasCustomRR. + assert(!UseGC || ISA()->hasCustomRR()); + + if (isTaggedPointer()) return (id)this; + if (! ISA()->hasCustomRR()) return rootAutorelease(); + + return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_autorelease); +} + + +// Base autorelease implementation, ignoring overrides. +inline id +objc_object::rootAutorelease() +{ + assert(!UseGC); + + if (isTaggedPointer()) return (id)this; + if (fastAutoreleaseForReturn((id)this)) return (id)this; + + return rootAutorelease2(); +} + + +// Base tryRetain implementation, ignoring overrides. +// This does not check isa.fast_rr; if there is an RR override then +// it was already called and it chose to call [super _tryRetain]. +inline bool +objc_object::rootTryRetain() +{ + assert(!UseGC); + + if (isTaggedPointer()) return true; + return sidetable_tryRetain(); +} + + +inline uintptr_t +objc_object::rootRetainCount() +{ + assert(!UseGC); + + if (isTaggedPointer()) return (uintptr_t)this; + return sidetable_retainCount(); +} + + +// not SUPPORT_NONPOINTER_ISA +#endif + + +#if SUPPORT_RETURN_AUTORELEASE + +/*********************************************************************** + Fast handling of returned autoreleased values. + The caller and callee cooperate to keep the returned object + out of the autorelease pool. + + Caller: + ret = callee(); + objc_retainAutoreleasedReturnValue(ret); + // use ret here + + Callee: + // compute ret + [ret retain]; + return objc_autoreleaseReturnValue(ret); + + objc_autoreleaseReturnValue() examines the caller's instructions following + the return. If the caller's instructions immediately call + objc_autoreleaseReturnValue, then the callee omits the -autorelease and saves + the result in thread-local storage. If the caller does not look like it + cooperates, then the callee calls -autorelease as usual. + + objc_autoreleaseReturnValue checks if the returned value is the same as the + one in thread-local storage. If it is, the value is used directly. If not, + the value is assumed to be truly autoreleased and is retained again. In + either case, the caller now has a retained reference to the value. 
+ + Tagged pointer objects do participate in the fast autorelease scheme, + because it saves message sends. They are not entered in the autorelease + pool in the slow case. +**********************************************************************/ + +# if __x86_64__ + +static ALWAYS_INLINE bool +callerAcceptsFastAutorelease(const void * const ra0) +{ + const uint8_t *ra1 = (const uint8_t *)ra0; + const uint16_t *ra2; + const uint32_t *ra4 = (const uint32_t *)ra1; + const void **sym; + +#define PREFER_GOTPCREL 0 +#if PREFER_GOTPCREL + // 48 89 c7 movq %rax,%rdi + // ff 15 callq *symbol@GOTPCREL(%rip) + if (*ra4 != 0xffc78948) { + return false; + } + if (ra1[4] != 0x15) { + return false; + } + ra1 += 3; +#else + // 48 89 c7 movq %rax,%rdi + // e8 callq symbol + if (*ra4 != 0xe8c78948) { + return false; + } + ra1 += (long)*(const int32_t *)(ra1 + 4) + 8l; + ra2 = (const uint16_t *)ra1; + // ff 25 jmpq *symbol@DYLDMAGIC(%rip) + if (*ra2 != 0x25ff) { + return false; + } +#endif + ra1 += 6l + (long)*(const int32_t *)(ra1 + 2); + sym = (const void **)ra1; + if (*sym != objc_retainAutoreleasedReturnValue) + { + return false; + } + + return true; +} + +// __x86_64__ +# elif __arm__ + +static ALWAYS_INLINE bool +callerAcceptsFastAutorelease(const void *ra) +{ + // if the low bit is set, we're returning to thumb mode + if ((uintptr_t)ra & 1) { + // 3f 46 mov r7, r7 + // we mask off the low bit via subtraction + if (*(uint16_t *)((uint8_t *)ra - 1) == 0x463f) { + return true; + } + } else { + // 07 70 a0 e1 mov r7, r7 + if (*(uint32_t *)ra == 0xe1a07007) { + return true; + } + } + return false; +} + +// __arm__ +# elif __arm64__ + +static ALWAYS_INLINE bool +callerAcceptsFastAutorelease(const void *ra) +{ + // fd 03 1d aa mov fp, fp + if (*(uint32_t *)ra == 0xaa1d03fd) { + return true; + } + return false; +} + +// __arm64__ +# elif __i386__ && TARGET_IPHONE_SIMULATOR + +static inline bool +callerAcceptsFastAutorelease(const void *ra) +{ + return false; +} + +// __i386__ && TARGET_IPHONE_SIMULATOR +# else + +#warning unknown architecture + +static ALWAYS_INLINE bool +callerAcceptsFastAutorelease(const void *ra) +{ + return false; +} + +// unknown architecture +# endif + + +static ALWAYS_INLINE +bool fastAutoreleaseForReturn(id obj) +{ + assert(tls_get_direct(AUTORELEASE_POOL_RECLAIM_KEY) == nil); + + if (callerAcceptsFastAutorelease(__builtin_return_address(0))) { + tls_set_direct(AUTORELEASE_POOL_RECLAIM_KEY, obj); + return true; + } + + return false; +} + + +static ALWAYS_INLINE +bool fastRetainFromReturn(id obj) +{ + if (obj == tls_get_direct(AUTORELEASE_POOL_RECLAIM_KEY)) { + tls_set_direct(AUTORELEASE_POOL_RECLAIM_KEY, 0); + return true; + } + + return false; +} + + +// SUPPORT_RETURN_AUTORELEASE +#else +// not SUPPORT_RETURN_AUTORELEASE + + +static ALWAYS_INLINE +bool fastAutoreleaseForReturn(id obj) +{ + return false; +} + + +static ALWAYS_INLINE +bool fastRetainFromReturn(id obj) +{ + return false; +} + + +// not SUPPORT_RETURN_AUTORELEASE +#endif + + +// _OBJC_OBJECT_H_ +#endif diff --git a/runtime/objc-opt.mm b/runtime/objc-opt.mm index 7a79b38..e14e5b3 100644 --- a/runtime/objc-opt.mm +++ b/runtime/objc-opt.mm @@ -28,18 +28,18 @@ #include "objc-private.h" -using namespace objc_opt; - #if !SUPPORT_PREOPT // Preoptimization not supported on this platform. 
+struct objc_selopt_t; + bool isPreoptimized(void) { return false; } -const objc_selopt_t *preoptimizedSelectors(void) +objc_selopt_t *preoptimizedSelectors(void) { return nil; } @@ -49,6 +49,12 @@ Class getPreoptimizedClass(const char *name) return nil; } +Class* copyPreoptimizedClasses(const char *name, int *outCount) +{ + *outCount = 0; + return nil; +} + header_info *preoptimizedHinfoForHeader(const headerType *mhdr) { return nil; @@ -69,44 +75,43 @@ void preopt_init(void) #else // SUPPORT_PREOPT - #include +using objc_opt::objc_clsopt_t; +using objc_opt::objc_headeropt_t; +using objc_opt::objc_opt_t; + __BEGIN_DECLS -// preopt: the actual opt used at runtime +// preopt: the actual opt used at runtime (nil or &_objc_opt_data) // _objc_opt_data: opt data possibly written by dyld -// empty_opt_data: empty data to use if dyld didn't cooperate or DisablePreopt +// opt is initialized to ~0 to detect incorrect use before preopt_init() -static const objc_opt_t *opt = nil; +static const objc_opt_t *opt = (objc_opt_t *)~0; static bool preoptimized; extern const objc_opt_t _objc_opt_data; // in __TEXT, __objc_opt_ro -static const uint32_t empty_opt_data[] = OPT_INITIALIZER; bool isPreoptimized(void) { return preoptimized; } - -const objc_selopt_t *preoptimizedSelectors(void) +objc_selopt_t *preoptimizedSelectors(void) { - assert(opt); - return opt->selopt(); + return opt ? opt->selopt() : nil; } Class getPreoptimizedClass(const char *name) { - assert(opt); - objc_clsopt_t *classes = opt->clsopt(); + objc_clsopt_t *classes = opt ? opt->clsopt() : nil; if (!classes) return nil; void *cls; void *hi; uint32_t count = classes->getClassAndHeader(name, cls, hi); if (count == 1 && ((header_info *)hi)->loaded) { - // exactly one matching class, and it's image is loaded + // exactly one matching class, and its image is loaded return (Class)cls; } else if (count > 1) { @@ -125,6 +130,48 @@ Class getPreoptimizedClass(const char *name) return nil; } + +Class* copyPreoptimizedClasses(const char *name, int *outCount) +{ + *outCount = 0; + + objc_clsopt_t *classes = opt ? opt->clsopt() : nil; + if (!classes) return nil; + + void *cls; + void *hi; + uint32_t count = classes->getClassAndHeader(name, cls, hi); + if (count == 0) return nil; + + Class *result = (Class *)_calloc_internal(count, sizeof(Class)); + if (count == 1 && ((header_info *)hi)->loaded) { + // exactly one matching class, and its image is loaded + result[(*outCount)++] = (Class)cls; + return result; + } + else if (count > 1) { + // more than one matching class - find those that are loaded + void *clslist[count]; + void *hilist[count]; + classes->getClassesAndHeaders(name, clslist, hilist); + for (uint32_t i = 0; i < count; i++) { + if (((header_info *)hilist[i])->loaded) { + result[(*outCount)++] = (Class)clslist[i]; + } + } + + if (*outCount == 0) { + // found multiple classes with that name, but none are loaded + free(result); + result = nil; + } + return result; + } + + // no match that is loaded + return nil; +} + namespace objc_opt { struct objc_headeropt_t { uint32_t count; @@ -163,8 +210,7 @@ struct objc_headeropt_t { header_info *preoptimizedHinfoForHeader(const headerType *mhdr) { - assert(opt); - objc_headeropt_t *hinfos = opt->headeropt(); + objc_headeropt_t *hinfos = opt ? opt->headeropt() : nil; if (hinfos) return hinfos->get(mhdr); else return nil; } @@ -182,8 +228,7 @@ void preopt_init(void) failure = "(by OBJC_DISABLE_PREOPTIMIZATION)"; } else if (opt->version != objc_opt::VERSION) { - // This shouldn't happen. 
You probably forgot to - // change OPT_INITIALIZER and objc-sel-table.s. + // This shouldn't happen. You probably forgot to edit objc-sel-table.s. // If dyld really did write the wrong optimization version, // then we must halt because we don't know what bits dyld twiddled. _objc_fatal("bad objc preopt version (want %d, got %d)", @@ -205,7 +250,7 @@ void preopt_init(void) if (failure) { // All preoptimized selector references are invalid. preoptimized = NO; - opt = (objc_opt_t *)empty_opt_data; + opt = nil; disableSharedCacheOptimizations(); if (PrintPreopt) { diff --git a/runtime/objc-os.h b/runtime/objc-os.h index 14657fc..52b2ef5 100644 --- a/runtime/objc-os.h +++ b/runtime/objc-os.h @@ -35,11 +35,20 @@ #ifdef __LP64__ # define WORD_SHIFT 3UL # define WORD_MASK 7UL +# define WORD_BITS 64 #else # define WORD_SHIFT 2UL # define WORD_MASK 3UL +# define WORD_BITS 32 #endif +static inline uint32_t word_align(uint32_t x) { + return (x + WORD_MASK) & ~WORD_MASK; +} +static inline size_t word_align(size_t x) { + return (x + WORD_MASK) & ~WORD_MASK; +} + #if TARGET_OS_MAC # ifndef __STDC_LIMIT_MACROS @@ -89,6 +98,116 @@ void syslog(int, const char *, ...) UNAVAILABLE_ATTRIBUTE; void vsyslog(int, const char *, va_list) UNAVAILABLE_ATTRIBUTE; +#define ALWAYS_INLINE inline __attribute__((always_inline)) +#define NEVER_INLINE inline __attribute__((noinline)) + + + +static ALWAYS_INLINE uintptr_t +addc(uintptr_t lhs, uintptr_t rhs, uintptr_t carryin, uintptr_t *carryout) +{ + return __builtin_addcl(lhs, rhs, carryin, carryout); +} + +static ALWAYS_INLINE uintptr_t +subc(uintptr_t lhs, uintptr_t rhs, uintptr_t carryin, uintptr_t *carryout) +{ + return __builtin_subcl(lhs, rhs, carryin, carryout); +} + + +#if __arm64__ + +static ALWAYS_INLINE +uintptr_t +LoadExclusive(uintptr_t *src) +{ + uintptr_t result; + asm("ldxr %x0, [%x1]" + : "=r" (result) + : "r" (src), "m" (*src)); + return result; +} + +static ALWAYS_INLINE +bool +StoreExclusive(uintptr_t *dst, uintptr_t oldvalue __unused, uintptr_t value) +{ + uint32_t result; + asm("stxr %w0, %x2, [%x3]" + : "=r" (result), "=m" (*dst) + : "r" (value), "r" (dst)); + return !result; +} + + +static ALWAYS_INLINE +bool +StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue __unused, uintptr_t value) +{ + uint32_t result; + asm("stlxr %w0, %x2, [%x3]" + : "=r" (result), "=m" (*dst) + : "r" (value), "r" (dst)); + return !result; +} + + +#elif __arm__ + +static ALWAYS_INLINE +uintptr_t +LoadExclusive(uintptr_t *src) +{ + return *src; +} + +static ALWAYS_INLINE +bool +StoreExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value) +{ + return OSAtomicCompareAndSwapPtr((void *)oldvalue, (void *)value, + (void **)dst); +} + +static ALWAYS_INLINE +bool +StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value) +{ + return OSAtomicCompareAndSwapPtrBarrier((void *)oldvalue, (void *)value, + (void **)dst); +} + + +#elif __x86_64__ || __i386__ + +static ALWAYS_INLINE +uintptr_t +LoadExclusive(uintptr_t *src) +{ + return *src; +} + +static ALWAYS_INLINE +bool +StoreExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value) +{ + + return __sync_bool_compare_and_swap((void **)dst, (void *)oldvalue, (void *)value); +} + +static ALWAYS_INLINE +bool +StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value) +{ + return StoreExclusive(dst, oldvalue, value); +} + +#else +# error unknown architecture +#endif + + #define spinlock_t os_lock_handoff_s #define spinlock_trylock(l) os_lock_trylock(l) #define spinlock_lock(l) os_lock_lock(l) 
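The objc-os.h hunk above makes LoadExclusive/StoreExclusive/StoreReleaseExclusive the single retry primitive that the new isa and retain-count code is written against: ldxr/stxr on arm64, a plain load plus compare-and-swap on the other architectures. The sketch below uses standalone stand-ins that mirror the x86_64/i386 fallback; set_flag_bit() and the flag constant are invented names, shown only to illustrate the load, modify, store-or-retry idiom that objc-object.h builds on (for example in setHasAssociatedObjects()).

// Standalone illustration of the LoadExclusive/StoreExclusive retry loop.
// These stand-ins mirror the CAS-based fallback above; they are not the
// runtime's definitions.
#include <cstdint>

static inline uintptr_t LoadExclusive(uintptr_t *src) { return *src; }
static inline bool StoreExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
{
    return __sync_bool_compare_and_swap(dst, oldvalue, value);
}

static const uintptr_t EXAMPLE_FLAG = 1;   // hypothetical bit, in the spirit of isa.has_assoc

// Atomically set one bit, retrying if another thread changed the word first.
static void set_flag_bit(uintptr_t *word)
{
    uintptr_t oldbits, newbits;
    do {
        oldbits = LoadExclusive(word);
        if (oldbits & EXAMPLE_FLAG) return;    // already set, nothing to store
        newbits = oldbits | EXAMPLE_FLAG;
    } while (!StoreExclusive(word, oldbits, newbits));
}

int main()
{
    uintptr_t bits = 0;
    set_flag_bit(&bits);
    return bits == EXAMPLE_FLAG ? 0 : 1;
}

On arm64 the same loop maps onto an exclusive-monitor pair, which is why the arm64 StoreExclusive above marks its oldvalue argument __unused: interference is detected by the monitor rather than by a value comparison.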
@@ -123,8 +242,8 @@ void vsyslog(int, const char *, va_list) UNAVAILABLE_ATTRIBUTE; /* Use this for functions that are intended to be breakpoint hooks. If you do not, the compiler may optimize them away. BREAKPOINT_FUNCTION( void stop_on_error(void) ); */ -# define BREAKPOINT_FUNCTION(prototype) \ - OBJC_EXTERN __attribute__((noinline, visibility("hidden"))) \ +# define BREAKPOINT_FUNCTION(prototype) \ + OBJC_EXTERN __attribute__((noinline, used, visibility("hidden"))) \ prototype { asm(""); } #elif TARGET_OS_WIN32 @@ -551,8 +670,8 @@ static bool is_valid_direct_key(tls_key_t k) { // rdar://9162780 _pthread_get/setspecific_direct are inefficient // copied from libdispatch -__attribute__((always_inline)) __attribute__((const)) -static inline void** +__attribute__((const)) +static ALWAYS_INLINE void** tls_base(void) { uintptr_t p; @@ -564,8 +683,8 @@ tls_base(void) #endif } -__attribute__((always_inline)) -static inline void + +static ALWAYS_INLINE void tls_set_direct(void **tsdb, tls_key_t k, void *v) { assert(is_valid_direct_key(k)); @@ -575,8 +694,8 @@ tls_set_direct(void **tsdb, tls_key_t k, void *v) #define tls_set_direct(k, v) \ tls_set_direct(tls_base(), (k), (v)) -__attribute__((always_inline)) -static inline void * + +static ALWAYS_INLINE void * tls_get_direct(void **tsdb, tls_key_t k) { assert(is_valid_direct_key(k)); diff --git a/runtime/objc-os.mm b/runtime/objc-os.mm index a0044f2..06597fb 100644 --- a/runtime/objc-os.mm +++ b/runtime/objc-os.mm @@ -603,6 +603,41 @@ static const char *gc_enforcer(enum dyld_image_states state, #endif + +/*********************************************************************** +* getSDKVersion +* Look up the build-time SDK version for an image. +* Version X.Y.Z is encoded as 0xXXXXYYZZ. +* Images without the load command are assumed to be old (version 0.0.0). +**********************************************************************/ +#if TARGET_OS_IPHONE + // Simulator binaries encode an iOS version +# define LC_VERSION_MIN LC_VERSION_MIN_IPHONEOS +#elif TARGET_OS_MAC +# define LC_VERSION_MIN LC_VERSION_MIN_MACOSX +#else +# error unknown OS +#endif + +static uint32_t +getSDKVersion(const header_info *hi) +{ + const struct version_min_command *cmd; + unsigned long i; + + cmd = (const struct version_min_command *) (hi->mhdr + 1); + for (i = 0; i < hi->mhdr->ncmds; i++){ + if (cmd->cmd == LC_VERSION_MIN && cmd->cmdsize >= 16) { + return cmd->sdk; + } + cmd = (const struct version_min_command *)((char *)cmd + cmd->cmdsize); + } + + // Lack of version load command is assumed to be old. + return 0; +} + + /*********************************************************************** * map_images_nolock * Process the given images which are being mapped in by dyld. 
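getSDKVersion() above walks the Mach-O load commands for LC_VERSION_MIN_MACOSX or LC_VERSION_MIN_IPHONEOS and returns the sdk field, which packs version X.Y.Z as 0xXXXXYYZZ (0 when the load command is absent). A small decoding example follows; the sample value 0x00070102, i.e. SDK 7.1.2, is made up for illustration.

// Unpack the 0xXXXXYYZZ SDK version encoding used by getSDKVersion() above.
#include <cstdio>
#include <cstdint>

int main()
{
    uint32_t sdk = 0x00070102;       // in the runtime this would come from getSDKVersion(hi)
    unsigned major = sdk >> 16;
    unsigned minor = (sdk >> 8) & 0xff;
    unsigned patch = sdk & 0xff;
    printf("built against SDK %u.%u.%u\n", major, minor, patch);   // prints 7.1.2
    return 0;
}

The next hunk records this value for the main executable in AppSDKVersion (declared later in objc-private.h), so a comparison against a packed constant of the same form can express "built against an SDK older than some version".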
@@ -661,6 +696,10 @@ map_images_nolock(enum dyld_image_states state, uint32_t infoCount, continue; } if (mhdr->filetype == MH_EXECUTE) { + // Record main executable's build SDK version + AppSDKVersion = getSDKVersion(hi); + + // Size some data structures based on main executable's size #if __OBJC2__ size_t count; _getObjc2SelectorRefs(hi, &count); @@ -722,10 +761,7 @@ map_images_nolock(enum dyld_image_states state, uint32_t infoCount, #endif if (firstTime) { - extern SEL FwdSel; // in objc-msg-*.s sel_init(wantsGC, selrefCount); - FwdSel = sel_registerName("forward::"); - arr_init(); } @@ -990,7 +1026,7 @@ malloc_zone_t *_objc_internal_zone(void) if (z == (malloc_zone_t *)-1) { if (UseInternalZone) { z = malloc_create_zone(vm_page_size, 0); - malloc_set_zone_name(z, "ObjC"); + malloc_set_zone_name(z, "ObjC_Internal"); } else { z = malloc_default_zone(); } diff --git a/runtime/objc-private.h b/runtime/objc-private.h index 2c642d2..055fc72 100644 --- a/runtime/objc-private.h +++ b/runtime/objc-private.h @@ -51,69 +51,186 @@ struct objc_object; typedef struct objc_class *Class; typedef struct objc_object *id; -#if SUPPORT_TAGGED_POINTERS +namespace { + class SideTable; +}; -#define TAG_COUNT 8 -#define TAG_MASK 1 -#define TAG_SLOT_SHIFT 0 -#define TAG_SLOT_MASK 0xf -#define TAG_PAYLOAD_LSHIFT 0 -#define TAG_PAYLOAD_RSHIFT 4 -extern "C" { extern Class objc_debug_taggedpointer_classes[TAG_COUNT*2]; } -#define objc_tag_classes objc_debug_taggedpointer_classes +union isa_t +{ + isa_t() { } + isa_t(uintptr_t value) : bits(value) { } + + Class cls; + uintptr_t bits; + +#if SUPPORT_NONPOINTER_ISA + + // extra_rc must be the MSB-most field (so it matches carry/overflow flags) + // indexed must be the LSB (fixme or get rid of it) + // shiftcls must occupy the same bits that a real class pointer would + // bits + RC_ONE is equivalent to extra_rc + 1 + // RC_HALF is the high bit of extra_rc (i.e. half of its range) + + // future expansion: + // uintptr_t fast_rr : 1; // no r/r overrides + // uintptr_t lock : 2; // lock for atomic property, @synch + // uintptr_t extraBytes : 1; // allocated with extra bytes + +# if __arm64__ +# define ISA_MASK 0x00000001fffffff8ULL +# define ISA_MAGIC_MASK 0x000003fe00000001ULL +# define ISA_MAGIC_VALUE 0x000001a400000001ULL + struct { + uintptr_t indexed : 1; + uintptr_t has_assoc : 1; + uintptr_t has_cxx_dtor : 1; + uintptr_t shiftcls : 30; // MACH_VM_MAX_ADDRESS 0x1a0000000 + uintptr_t magic : 9; + uintptr_t weakly_referenced : 1; + uintptr_t deallocating : 1; + uintptr_t has_sidetable_rc : 1; + uintptr_t extra_rc : 19; +# define RC_ONE (1ULL<<45) +# define RC_HALF (1ULL<<18) + }; + +# elif __x86_64__ +# define ISA_MASK 0x00007ffffffffff8ULL +# define ISA_MAGIC_MASK 0x0000000000000001ULL +# define ISA_MAGIC_VALUE 0x0000000000000001ULL + struct { + uintptr_t indexed : 1; + uintptr_t has_assoc : 1; + uintptr_t has_cxx_dtor : 1; + uintptr_t shiftcls : 44; // MACH_VM_MAX_ADDRESS 0x7fffffe00000 + uintptr_t weakly_referenced : 1; + uintptr_t deallocating : 1; + uintptr_t has_sidetable_rc : 1; + uintptr_t extra_rc : 14; +# define RC_ONE (1ULL<<50) +# define RC_HALF (1ULL<<13) + }; + +# else + // Available bits in isa field are architecture-specific. 
+# error unknown architecture +# endif +// SUPPORT_NONPOINTER_ISA #endif +}; + struct objc_object { private: - uintptr_t isa; + isa_t isa; public: // ISA() assumes this is NOT a tagged pointer object - Class ISA() - { - assert(!isTaggedPointer()); - return (Class)isa; - } + Class ISA(); // getIsa() allows this to be a tagged pointer object - Class getIsa() - { -#if SUPPORT_TAGGED_POINTERS - if (isTaggedPointer()) { - uintptr_t slot = - ((uintptr_t)this >> TAG_SLOT_SHIFT) & TAG_SLOT_MASK; - return objc_tag_classes[slot]; - } -#endif - return ISA(); - } + Class getIsa(); + + // initIsa() should be used to init the isa of new objects only. + // If this object already has an isa, use changeIsa() for correctness. + // initInstanceIsa(): objects with no custom RR/AWZ + // initClassIsa(): class objects + // initProtocolIsa(): protocol objects + // initIsa(): other objects + void initIsa(Class cls /*indexed=false*/); + void initClassIsa(Class cls /*indexed=maybe*/); + void initProtocolIsa(Class cls /*indexed=maybe*/); + void initInstanceIsa(Class cls, bool hasCxxDtor); // changeIsa() should be used to change the isa of existing objects. // If this is a new object, use initIsa() for performance. - Class changeIsa(Class cls); + Class changeIsa(Class newCls); - // initIsa() should be used to init the isa of new objects only. - // If this object already has an isa, use changeIsa() for correctness. - void initIsa(Class cls) - { - assert(!isTaggedPointer()); - isa = (uintptr_t)cls; - } + bool hasIndexedIsa(); + bool isTaggedPointer(); + bool isClass(); - bool isTaggedPointer() - { -#if SUPPORT_TAGGED_POINTERS - return ((uintptr_t)this & TAG_MASK); -#else - return false; + // object may have associated objects? + bool hasAssociatedObjects(); + void setHasAssociatedObjects(); + + // object may be weakly referenced? + bool isWeaklyReferenced(); + void setWeaklyReferenced_nolock(); + + // object may have -.cxx_destruct implementation? 
+ bool hasCxxDtor(); + + // Optimized calls to retain/release methods + id retain(); + void release(); + id autorelease(); + + // Implementations of retain/release methods + id rootRetain(); + bool rootRelease(); + id rootAutorelease(); + bool rootTryRetain(); + bool rootReleaseShouldDealloc(); + uintptr_t rootRetainCount(); + + // Implementation of dealloc methods + bool rootIsDeallocating(); + void clearDeallocating(); + void rootDealloc(); + +private: + void initIsa(Class newCls, bool indexed, bool hasCxxDtor); + + // Slow paths for inline control + id rootAutorelease2(); + bool overrelease_error(); + +#if SUPPORT_NONPOINTER_ISA + // Unified retain count manipulation for nonpointer isa + id rootRetain(bool tryRetain, bool handleOverflow); + bool rootRelease(bool performDealloc, bool handleUnderflow); + id rootRetain_overflow(bool tryRetain); + bool rootRelease_underflow(bool performDealloc); + + void clearDeallocating_weak(); + + // Side table retain count overflow for nonpointer isa + void sidetable_lock(); + void sidetable_unlock(); + + void sidetable_moveExtraRC_nolock(size_t extra_rc, bool isDeallocating, bool weaklyReferenced); + bool sidetable_addExtraRC_nolock(size_t delta_rc); + bool sidetable_subExtraRC_nolock(size_t delta_rc); + size_t sidetable_getExtraRC_nolock(); +#endif + + // Side-table-only retain count + bool sidetable_isDeallocating(); + void sidetable_clearDeallocating(); + + bool sidetable_isWeaklyReferenced(); + void sidetable_setWeaklyReferenced_nolock(); + + id sidetable_retain(); + id sidetable_retain_slow(SideTable *table); + + bool sidetable_release(bool performDealloc = true); + bool sidetable_release_slow(SideTable *table, bool performDealloc = true); + + bool sidetable_tryRetain(); + + uintptr_t sidetable_retainCount(); +#if !NDEBUG + bool sidetable_present(); #endif - } }; + #if __OBJC2__ typedef struct method_t *Method; typedef struct ivar_t *Ivar; @@ -126,9 +243,28 @@ typedef struct old_category *Category; typedef struct old_property *objc_property_t; #endif +// Public headers + #include "objc.h" #include "runtime.h" #include "objc-os.h" +#include "objc-abi.h" +#include "objc-api.h" +#include "objc-auto.h" +#include "objc-config.h" +#include "objc-internal.h" +#include "maptable.h" +#include "hashtable2.h" + +/* Do not include message.h here. */ +/* #include "message.h" */ + +#define __APPLE_API_PRIVATE +#include "objc-gdb.h" +#undef __APPLE_API_PRIVATE + + +// Private headers #if __OBJC2__ #include "objc-runtime-new.h" @@ -136,44 +272,13 @@ typedef struct old_property *objc_property_t; #include "objc-runtime-old.h" #endif -#include "maptable.h" -#include "hashtable2.h" -#include "objc-api.h" -#include "objc-config.h" #include "objc-references.h" #include "objc-initialize.h" #include "objc-loadmethod.h" -#include "objc-internal.h" -#include "objc-abi.h" - -#include "objc-auto.h" - -#define __APPLE_API_PRIVATE -#include "objc-gdb.h" -#undef __APPLE_API_PRIVATE - -/* Do not include message.h here. 
*/ -/* #include "message.h" */ __BEGIN_DECLS -inline Class objc_object::changeIsa(Class cls) -{ - assert(!isTaggedPointer()); - - Class old; - do { - old = (Class)isa; - } while (!OSAtomicCompareAndSwapPtr(old, cls, (void**)&isa)); - - if (old && old->instancesHaveAssociatedObjects()) { - cls->setInstancesHaveAssociatedObjects(); - } - - return old; -} - #if (defined(OBJC_NO_GC) && SUPPORT_GC) || \ (!defined(OBJC_NO_GC) && !SUPPORT_GC) @@ -203,19 +308,6 @@ inline Class objc_object::changeIsa(Class cls) #endif -typedef struct { - uint32_t version; // currently 0 - uint32_t flags; -} objc_image_info; - -// masks for objc_image_info.flags -#define OBJC_IMAGE_IS_REPLACEMENT (1<<0) -#define OBJC_IMAGE_SUPPORTS_GC (1<<1) -#define OBJC_IMAGE_REQUIRES_GC (1<<2) -#define OBJC_IMAGE_OPTIMIZED_BY_DYLD (1<<3) -#define OBJC_IMAGE_SUPPORTS_COMPACTION (1<<4) // might be re-assignable - - #define _objcHeaderIsReplacement(h) ((h)->info && ((h)->info->flags & OBJC_IMAGE_IS_REPLACEMENT)) /* OBJC_IMAGE_IS_REPLACEMENT: @@ -284,6 +376,8 @@ extern header_info *FirstHeader; extern header_info *LastHeader; extern int HeaderCount; +extern uint32_t AppSDKVersion; // X.Y.Z is 0xXXXXYYZZ + extern void appendHeader(header_info *hi); extern void removeHeader(header_info *hi); @@ -297,7 +391,6 @@ extern SEL sel_registerNameNoLock(const char *str, BOOL copy); extern void sel_lock(void); extern void sel_unlock(void); extern BOOL sel_preoptimizationValid(const header_info *hi); -extern void sel_nuke_nolock(void); extern SEL SEL_load; extern SEL SEL_initialize; @@ -311,10 +404,15 @@ extern SEL SEL_autorelease; extern SEL SEL_retainCount; extern SEL SEL_alloc; extern SEL SEL_allocWithZone; +extern SEL SEL_dealloc; extern SEL SEL_copy; extern SEL SEL_new; extern SEL SEL_finalize; extern SEL SEL_forwardInvocation; +extern SEL SEL_tryRetain; +extern SEL SEL_isDeallocating; +extern SEL SEL_retainWeakReference; +extern SEL SEL_allowsWeakReference; /* preoptimization */ extern void preopt_init(void); @@ -322,12 +420,16 @@ extern void disableSharedCacheOptimizations(void); extern bool isPreoptimized(void); extern header_info *preoptimizedHinfoForHeader(const headerType *mhdr); -#if __cplusplus -namespace objc_opt { struct objc_selopt_t; }; -extern const struct objc_opt::objc_selopt_t *preoptimizedSelectors(void); -extern Class getPreoptimizedClass(const char *name); +#if SUPPORT_PREOPT && __cplusplus +#include +using objc_selopt_t = const objc_opt::objc_selopt_t; +#else +struct objc_selopt_t; #endif +extern objc_selopt_t *preoptimizedSelectors(void); +extern Class getPreoptimizedClass(const char *name); +extern Class* copyPreoptimizedClasses(const char *name, int *outCount); /* optional malloc zone for runtime data */ @@ -349,6 +451,7 @@ extern IMP lookUpImpOrNil(Class, SEL, id obj, bool initialize, bool cache, bool extern IMP lookUpImpOrForward(Class, SEL, id obj, bool initialize, bool cache, bool resolver); extern IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel); +extern BOOL class_respondsToSelector_inst(Class cls, SEL sel, id inst); extern bool objcMsgLogEnabled; extern bool logMessageSend(bool isClassMethod, @@ -508,18 +611,6 @@ extern void _rwlock_assert_unlocked_debug(rwlock_t *l, const char *name); } while (0) -#if !TARGET_OS_WIN32 -/* nil handler object */ -extern id _objc_nilReceiver; -extern id _objc_setNilReceiver(id newNilReceiver); -extern id _objc_getNilReceiver(void); -#endif - -/* forward handler functions */ -extern void *_objc_forward_handler; -extern void *_objc_forward_stret_handler; - - /* ignored 
selector support */ /* Non-GC: no ignored selectors @@ -635,6 +726,7 @@ typedef struct { struct _objc_initializing_classes *initializingClasses; // for +initialize struct SyncCache *syncCache; // for @synchronize struct alt_handler_list *handlerList; // for exception alt handlers + char *printableNames[4]; // temporary demangled names for logging // If you add new fields here, don't forget to update // _objc_pthread_destroyspecific() @@ -660,6 +752,8 @@ extern void _destroySyncCache(struct SyncCache *cache); extern void arr_init(void); extern id objc_autoreleaseReturnValue(id obj); +// block trampolines +extern IMP _imp_implementationWithBlockNoCopy(id block); // layout.h typedef struct { @@ -696,8 +790,6 @@ extern void prepare_load_methods(header_info *hi); extern void _unload_image(header_info *hi); extern const char ** _objc_copyClassNamesForImage(header_info *hi, unsigned int *outCount); -extern Class _objc_allocateFutureClass(const char *name); - extern const header_info *_headerForClass(Class cls); @@ -708,14 +800,14 @@ extern BOOL _class_usesAutomaticRetainRelease(Class cls); extern uint32_t _class_getInstanceStart(Class cls); extern unsigned _class_createInstancesFromZone(Class cls, size_t extraBytes, void *zone, id *results, unsigned num_requested); -extern id _objc_constructOrFree(Class cls, void *bytes); +extern id _objc_constructOrFree(id bytes, Class cls); extern const char *_category_getName(Category cat); extern const char *_category_getClassName(Category cat); extern Class _category_getClass(Category cat); extern IMP _category_getLoadMethod(Category cat); -extern BOOL object_cxxConstruct(id obj); +extern id object_cxxConstructFromClass(id obj, Class cls); extern void object_cxxDestruct(id obj); extern void _class_resolveMethod(Class cls, SEL sel, id inst); @@ -741,10 +833,14 @@ __END_DECLS } _static_assert_ ## line __attribute__((unavailable)) #endif +#define countof(arr) (sizeof(arr) / sizeof((arr)[0])) + // Global operator new and delete. We must not use any app overrides. // This ALSO REQUIRES each of these be in libobjc's unexported symbol list. #if __cplusplus +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Winline-new-delete" #include inline void* operator new(std::size_t size) throw (std::bad_alloc) { return _malloc_internal(size); } inline void* operator new[](std::size_t size) throw (std::bad_alloc) { return _malloc_internal(size); } @@ -754,8 +850,112 @@ inline void operator delete(void* p) throw() { _free_internal(p); } inline void operator delete[](void* p) throw() { _free_internal(p); } inline void operator delete(void* p, const std::nothrow_t&) throw() { _free_internal(p); } inline void operator delete[](void* p, const std::nothrow_t&) throw() { _free_internal(p); } +#pragma clang diagnostic pop #endif +// DisguisedPtr acts like pointer type T*, except the +// stored value is disguised to hide it from tools like `leaks`. 
+// nil is disguised as itself so zero-filled memory works as expected,
+// which means 0x80..00 is also disguised as itself but we don't care
+template <typename T>
+class DisguisedPtr {
+    uintptr_t value;
+
+    static uintptr_t disguise(T* ptr) {
+        return -(uintptr_t)ptr;
+    }
+
+    static T* undisguise(uintptr_t val) {
+        return (T*)-val;
+    }
+
+ public:
+    DisguisedPtr() { }
+    DisguisedPtr(T* ptr)
+        : value(disguise(ptr)) { }
+    DisguisedPtr(const DisguisedPtr& ptr)
+        : value(ptr.value) { }
+
+    DisguisedPtr& operator = (T* rhs) {
+        value = disguise(rhs);
+        return *this;
+    }
+    DisguisedPtr& operator = (const DisguisedPtr& rhs) {
+        value = rhs.value;
+        return *this;
+    }
+
+    operator T* () const {
+        return undisguise(value);
+    }
+    T* operator -> () const {
+        return undisguise(value);
+    }
+    T& operator * () const {
+        return *undisguise(value);
+    }
+    T& operator [] (size_t i) const {
+        return undisguise(value)[i];
+    }
+
+    // pointer arithmetic operators omitted
+    // because we don't currently use them anywhere
+};
+
+
+// Pointer hash function.
+// This is not a terrific hash, but it is fast
+// and not outrageously flawed for our purposes.
+
+// Based on principles from http://locklessinc.com/articles/fast_hash/
+// and evaluation ideas from http://floodyberry.com/noncryptohashzoo/
+#if __LP64__
+static inline uint32_t ptr_hash(uint64_t key)
+{
+    key ^= key >> 4;
+    key *= 0x8a970be7488fda55;
+    key ^= __builtin_bswap64(key);
+    return (uint32_t)key;
+}
+#else
+static inline uint32_t ptr_hash(uint32_t key)
+{
+    key ^= key >> 4;
+    key *= 0x5052acdb;
+    key ^= __builtin_bswap32(key);
+    return key;
+}
+#endif
+
+/*
+  Higher-quality hash function. This is measurably slower in some workloads.
+#if __LP64__
+ uint32_t ptr_hash(uint64_t key)
+{
+    key -= __builtin_bswap64(key);
+    key *= 0x8a970be7488fda55;
+    key ^= __builtin_bswap64(key);
+    key *= 0x8a970be7488fda55;
+    key ^= __builtin_bswap64(key);
+    return (uint32_t)key;
+}
+#else
+static uint32_t ptr_hash(uint32_t key)
+{
+    key -= __builtin_bswap32(key);
+    key *= 0x5052acdb;
+    key ^= __builtin_bswap32(key);
+    key *= 0x5052acdb;
+    key ^= __builtin_bswap32(key);
+    return key;
+}
+#endif
+*/
+
+
+// Inlined parts of objc_object's implementation
+#include "objc-object.h"
+
 #endif /* _OBJC_PRIVATE_H_ */
diff --git a/runtime/objc-references.mm b/runtime/objc-references.mm
index 893bbcb..b971323 100644
--- a/runtime/objc-references.mm
+++ b/runtime/objc-references.mm
@@ -291,8 +291,7 @@ void _object_set_associative_reference(id object, void *key, id value, uintptr_t
             ObjectAssociationMap *refs = new ObjectAssociationMap;
             associations[disguised_object] = refs;
             (*refs)[key] = ObjcAssociation(policy, new_value);
-            Class cls = object->getIsa();
-            cls->setInstancesHaveAssociatedObjects();
+            object->setHasAssociatedObjects();
         }
     } else {
         // setting the association to nil breaks the association.
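DisguisedPtr<T>, added to objc-private.h above, stores the arithmetic negation of a pointer so that tools that scan memory for pointer values (such as leaks) do not see the reference, while undisguising recovers the exact pointer; negation rather than an XOR key is used so that nil disguises to zero and zero-filled memory still reads back as nil. Below is a self-contained sketch of that round trip; the Example struct and local names are invented for illustration.

// Round-trip a pointer through the disguise/undisguise transform used by
// DisguisedPtr above. Everything here is standalone example code.
#include <cassert>
#include <cstdint>

struct Example { int payload; };

static uintptr_t disguise(Example *ptr)  { return -(uintptr_t)ptr; }
static Example  *undisguise(uintptr_t v) { return (Example *)-v; }

int main()
{
    Example obj = { 7 };
    uintptr_t hidden = disguise(&obj);

    assert(undisguise(hidden) == &obj);         // the round trip is exact
    assert(undisguise(hidden)->payload == 7);
    assert(disguise(nullptr) == 0);             // nil disguises to zero, so
                                                // zero-filled memory reads as nil
    return 0;
}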
diff --git a/runtime/objc-runtime-new.h b/runtime/objc-runtime-new.h index ae4e4f6..5825de7 100644 --- a/runtime/objc-runtime-new.h +++ b/runtime/objc-runtime-new.h @@ -26,125 +26,57 @@ __BEGIN_DECLS -// SEL points to characters -// struct objc_cache is stored in class object - -typedef uintptr_t cache_key_t; - #if __LP64__ - typedef uint32_t mask_t; -# define MASK_SHIFT ((mask_t)0) +typedef uint32_t mask_t; // x86_64 & arm64 asm are less efficient with 16-bits #else - typedef uint16_t mask_t; -# define MASK_SHIFT ((mask_t)0) +typedef uint16_t mask_t; #endif +typedef uintptr_t cache_key_t; -struct cache_t { - struct bucket_t *buckets; - mask_t shiftmask; - mask_t occupied; +struct swift_class_t; - mask_t mask() { - return shiftmask >> MASK_SHIFT; - } - mask_t capacity() { - return shiftmask ? (shiftmask >> MASK_SHIFT) + 1 : 0; - } - void setCapacity(uint32_t capacity) { - uint32_t newmask = (capacity - 1) << MASK_SHIFT; - assert(newmask == (uint32_t)(mask_t)newmask); - shiftmask = newmask; - } - void expand(); - void reallocate(mask_t oldCapacity, mask_t newCapacity); - struct bucket_t * find(cache_key_t key); +struct bucket_t { +private: + cache_key_t _key; + IMP _imp; + +public: + inline cache_key_t key() const { return _key; } + inline IMP imp() const { return (IMP)_imp; } + inline void setKey(cache_key_t newKey) { _key = newKey; } + inline void setImp(IMP newImp) { _imp = newImp; } - static void bad_cache(id receiver, SEL sel, Class isa, bucket_t *bucket) __attribute__((noreturn)); + void set(cache_key_t newKey, IMP newImp); }; -// We cannot store flags in the low bits of the 'data' field until we work with -// the 'leaks' team to not think that objc is leaking memory. See radar 8955342 -// for more info. -#define CLASS_FAST_FLAGS_VIA_RW_DATA 0 +struct cache_t { + struct bucket_t *_buckets; + mask_t _mask; + mask_t _occupied; +public: + struct bucket_t *buckets(); + mask_t mask(); + mask_t occupied(); + void incrementOccupied(); + void setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask); + void setEmpty(); -// Values for class_ro_t->flags -// These are emitted by the compiler and are part of the ABI. 
-// class is a metaclass -#define RO_META (1<<0) -// class is a root class -#define RO_ROOT (1<<1) -// class has .cxx_construct/destruct implementations -#define RO_HAS_CXX_STRUCTORS (1<<2) -// class has +load implementation -// #define RO_HAS_LOAD_METHOD (1<<3) -// class has visibility=hidden set -#define RO_HIDDEN (1<<4) -// class has attribute(objc_exception): OBJC_EHTYPE_$_ThisClass is non-weak -#define RO_EXCEPTION (1<<5) -// this bit is available for reassignment -// #define RO_REUSE_ME (1<<6) -// class compiled with -fobjc-arc (automatic retain/release) -#define RO_IS_ARR (1<<7) -// class has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS) -#define RO_HAS_CXX_DTOR_ONLY (1<<8) + mask_t capacity(); + bool canBeFreed(); -// class is in an unloadable bundle - must never be set by compiler -#define RO_FROM_BUNDLE (1<<29) -// class is unrealized future class - must never be set by compiler -#define RO_FUTURE (1<<30) -// class is realized - must never be set by compiler -#define RO_REALIZED (1<<31) + static size_t bytesForCapacity(uint32_t cap); + static struct bucket_t * endMarker(struct bucket_t *b, uint32_t cap); + + void expand(); + void reallocate(mask_t oldCapacity, mask_t newCapacity); + struct bucket_t * find(cache_key_t key); + + static void bad_cache(id receiver, SEL sel, Class isa) __attribute__((noreturn)); +}; -// Values for class_rw_t->flags -// These are not emitted by the compiler and are never used in class_ro_t. -// Their presence should be considered in future ABI versions. -// class_t->data is class_rw_t, not class_ro_t -#define RW_REALIZED (1<<31) -// class is unresolved future class -#define RW_FUTURE (1<<30) -// class is initialized -#define RW_INITIALIZED (1<<29) -// class is initializing -#define RW_INITIALIZING (1<<28) -// class_rw_t->ro is heap copy of class_ro_t -#define RW_COPIED_RO (1<<27) -// class allocated but not yet registered -#define RW_CONSTRUCTING (1<<26) -// class allocated and registered -#define RW_CONSTRUCTED (1<<25) -// GC: class has unsafe finalize method -#define RW_FINALIZE_ON_MAIN_THREAD (1<<24) -// class +load has been called -#define RW_LOADED (1<<23) -// class does not share super's vtable -#define RW_SPECIALIZED_VTABLE (1<<22) -// class instances may have associative references -#define RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS (1<<21) -// class or superclass has .cxx_construct implementation -#define RW_HAS_CXX_CTOR (1<<20) -// class or superclass has .cxx_destruct implementation -#define RW_HAS_CXX_DTOR (1<<19) -// class has instance-specific GC layout -#define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1 << 18) -// class's method list is an array of method lists -#define RW_METHOD_ARRAY (1<<17) -// class or superclass has custom allocWithZone: implementation -#define RW_HAS_CUSTOM_AWZ (1<<16) -// class or superclass has custom retain/release/autorelease/retainCount -#define RW_HAS_CUSTOM_RR (1<<15) - -// Flags may be stored in low bits of rw->data_NEVER_USE for fastest access -#define CLASS_FAST_FLAG_MASK 3 -#if CLASS_FAST_FLAGS_VIA_RW_DATA - // reserved for future expansion -# define CLASS_FAST_FLAG_RESERVED (1<<0) - // class or superclass has custom retain/release/autorelease/retainCount -# define CLASS_FAST_FLAG_HAS_CUSTOM_RR (1<<1) -# undef RW_HAS_CUSTOM_RR -#endif // classref_t is unremapped class_t* typedef struct classref * classref_t; @@ -304,7 +236,7 @@ typedef uintptr_t protocol_ref_t; // protocol_t *, but unremapped #define PROTOCOL_FIXED_UP (1<<31) // must never be set by compiler struct protocol_t : objc_object { - const 
char *name; + const char *mangledName; struct protocol_list_t *protocols; method_list_t *instanceMethods; method_list_t *classMethods; @@ -315,6 +247,16 @@ struct protocol_t : objc_object { uint32_t flags; const char **extendedMethodTypes; + // Fields below this point are allocated at runtime + // and are not present on disk. + const char *_demangledName; + + const char *demangledName(); + + const char *nameForLogging() { + return demangledName(); + } + bool isFixedUp() const { return flags & PROTOCOL_FIXED_UP; } @@ -368,71 +310,471 @@ struct class_rw_t { Class firstSubclass; Class nextSiblingClass; + + char *demangledName; + + void setFlags(uint32_t set) + { + OSAtomicOr32Barrier(set, &flags); + } + + void clearFlags(uint32_t clear) + { + OSAtomicXor32Barrier(clear, &flags); + } + + // set and clear must not overlap + void changeFlags(uint32_t set, uint32_t clear) + { + assert((set & clear) == 0); + + uint32_t oldf, newf; + do { + oldf = flags; + newf = (oldf | set) & ~clear; + } while (!OSAtomicCompareAndSwap32Barrier(oldf, newf, (volatile int32_t *)&flags)); + } +}; + + +// class_data_bits_t is the class_t->data field (class_rw_t pointer plus flags) +// The extra bits are optimized for the retain/release and alloc/dealloc paths. + +// Values for class_ro_t->flags +// These are emitted by the compiler and are part of the ABI. +// class is a metaclass +#define RO_META (1<<0) +// class is a root class +#define RO_ROOT (1<<1) +// class has .cxx_construct/destruct implementations +#define RO_HAS_CXX_STRUCTORS (1<<2) +// class has +load implementation +// #define RO_HAS_LOAD_METHOD (1<<3) +// class has visibility=hidden set +#define RO_HIDDEN (1<<4) +// class has attribute(objc_exception): OBJC_EHTYPE_$_ThisClass is non-weak +#define RO_EXCEPTION (1<<5) +// this bit is available for reassignment +// #define RO_REUSE_ME (1<<6) +// class compiled with -fobjc-arc (automatic retain/release) +#define RO_IS_ARR (1<<7) +// class has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS) +#define RO_HAS_CXX_DTOR_ONLY (1<<8) + +// class is in an unloadable bundle - must never be set by compiler +#define RO_FROM_BUNDLE (1<<29) +// class is unrealized future class - must never be set by compiler +#define RO_FUTURE (1<<30) +// class is realized - must never be set by compiler +#define RO_REALIZED (1<<31) + +// Values for class_rw_t->flags +// These are not emitted by the compiler and are never used in class_ro_t. +// Their presence should be considered in future ABI versions. 
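The setFlags/clearFlags/changeFlags helpers added to class_rw_t here centralize the OSAtomicOr32Barrier and compare-and-swap calls that objc_class previously issued directly against data()->flags. Below is a minimal, self-contained sketch of the same retry-until-it-sticks pattern, written against std::atomic instead of the OSAtomic API the runtime actually uses; the flag values are placeholders, not the real RW_ constants.

#include <atomic>
#include <cassert>
#include <cstdint>
#include <cstdio>

// Toy stand-in for class_rw_t's flag word (bit values are placeholders).
enum : uint32_t {
    DEMO_REALIZED     = 1u << 31,
    DEMO_INITIALIZED  = 1u << 29,
    DEMO_INITIALIZING = 1u << 28,
};

struct demo_rw_t {
    std::atomic<uint32_t> flags{0};

    void setFlags(uint32_t set)     { flags.fetch_or(set, std::memory_order_acq_rel); }
    void clearFlags(uint32_t clear) { flags.fetch_and(~clear, std::memory_order_acq_rel); }

    // set and clear must not overlap; retry until the combined update lands atomically.
    void changeFlags(uint32_t set, uint32_t clear) {
        assert((set & clear) == 0);
        uint32_t oldf = flags.load(std::memory_order_relaxed);
        uint32_t newf;
        do {
            newf = (oldf | set) & ~clear;
        } while (!flags.compare_exchange_weak(oldf, newf, std::memory_order_acq_rel));
    }
};

int main() {
    demo_rw_t rw;
    rw.setFlags(DEMO_REALIZED | DEMO_INITIALIZING);
    rw.changeFlags(DEMO_INITIALIZED, DEMO_INITIALIZING);   // same shape as setInitialized()
    printf("flags = 0x%08x\n", (unsigned)rw.flags.load());
    return 0;
}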
+// class_t->data is class_rw_t, not class_ro_t +#define RW_REALIZED (1<<31) +// class is unresolved future class +#define RW_FUTURE (1<<30) +// class is initialized +#define RW_INITIALIZED (1<<29) +// class is initializing +#define RW_INITIALIZING (1<<28) +// class_rw_t->ro is heap copy of class_ro_t +#define RW_COPIED_RO (1<<27) +// class allocated but not yet registered +#define RW_CONSTRUCTING (1<<26) +// class allocated and registered +#define RW_CONSTRUCTED (1<<25) +// GC: class has unsafe finalize method +#define RW_FINALIZE_ON_MAIN_THREAD (1<<24) +// class +load has been called +#define RW_LOADED (1<<23) +#if !SUPPORT_NONPOINTER_ISA +// class instances may have associative references +#define RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS (1<<22) +#endif +// class has instance-specific GC layout +#define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1 << 21) +// class's method list is an array of method lists +#define RW_METHOD_ARRAY (1<<20) +// class has started realizing but not yet completed it +#define RW_REALIZING (1<<19) + +// NOTE: MORE RW_ FLAGS DEFINED BELOW + + +// Values for class_rw_t->flags or class_t->bits +// These flags are optimized for retain/release and alloc/dealloc +// 64-bit stores more of them in class_t->bits to reduce pointer indirection. + +#if !__LP64__ + +// class or superclass has .cxx_construct implementation +#define RW_HAS_CXX_CTOR (1<<18) +// class or superclass has .cxx_destruct implementation +#define RW_HAS_CXX_DTOR (1<<17) +// class or superclass has default alloc/allocWithZone: implementation +// Note this is is stored in the metaclass. +#define RW_HAS_DEFAULT_AWZ (1<<16) +// class's instances requires raw isa +// not tracked for 32-bit because it only applies to non-pointer isa +// #define RW_REQUIRES_RAW_ISA + +// class is a Swift class +#define FAST_IS_SWIFT (1UL<<0) +// class or superclass has default retain/release/autorelease/retainCount/ +// _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference +#define FAST_HAS_DEFAULT_RR (1UL<<1) +// data pointer +#define FAST_DATA_MASK 0xfffffffcUL + +#elif 1 +// Leaks-compatible version that steals low bits only. + +// class or superclass has .cxx_construct implementation +#define RW_HAS_CXX_CTOR (1<<18) +// class or superclass has .cxx_destruct implementation +#define RW_HAS_CXX_DTOR (1<<17) +// class or superclass has default alloc/allocWithZone: implementation +// Note this is is stored in the metaclass. +#define RW_HAS_DEFAULT_AWZ (1<<16) + +// class is a Swift class +#define FAST_IS_SWIFT (1UL<<0) +// class or superclass has default retain/release/autorelease/retainCount/ +// _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference +#define FAST_HAS_DEFAULT_RR (1UL<<1) +// class's instances requires raw isa +#define FAST_REQUIRES_RAW_ISA (1UL<<2) +// data pointer +#define FAST_DATA_MASK 0x00007ffffffffff8UL + +#else +// Leaks-incompatible version that steals lots of bits. + +// class is a Swift class +#define FAST_IS_SWIFT (1UL<<0) +// class's instances requires raw isa +#define FAST_REQUIRES_RAW_ISA (1UL<<1) +// class or superclass has .cxx_destruct implementation +// This bit is aligned with isa_t->hasCxxDtor to save an instruction. +#define FAST_HAS_CXX_DTOR (1UL<<2) +// data pointer +#define FAST_DATA_MASK 0x00007ffffffffff8UL +// class or superclass has .cxx_construct implementation +#define FAST_HAS_CXX_CTOR (1UL<<47) +// class or superclass has default alloc/allocWithZone: implementation +// Note this is is stored in the metaclass. 
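All of the FAST_ bits in the layouts above share one machine word with the class_rw_t pointer, which is why FAST_DATA_MASK leaves the low bits (and, in the wide layout, the upper bits as well) out of the pointer. A stand-alone sketch of just that packing arithmetic follows; the struct and flag names are illustrative stand-ins for class_data_bits_t, the mask mirrors the 32-bit layout, and it assumes the class_rw_t allocation is at least 4-byte aligned so the stolen bits start out zero.

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Illustrative constants mirroring the 32-bit layout above.
const uintptr_t DEMO_IS_SWIFT       = 1UL << 0;
const uintptr_t DEMO_HAS_DEFAULT_RR = 1UL << 1;
const uintptr_t DEMO_DATA_MASK      = ~(uintptr_t)3;   // pointer lives in the remaining bits

struct demo_bits_t {
    uintptr_t bits = 0;

    void *data() const         { return (void *)(bits & DEMO_DATA_MASK); }
    void  setData(void *p) {
        assert(((uintptr_t)p & ~DEMO_DATA_MASK) == 0);  // relies on aligned allocation
        bits = (bits & ~DEMO_DATA_MASK) | (uintptr_t)p;
    }
    bool  isSwift() const      { return bits & DEMO_IS_SWIFT; }
    void  setIsSwift()         { bits |= DEMO_IS_SWIFT; }
    bool  hasDefaultRR() const { return bits & DEMO_HAS_DEFAULT_RR; }
    void  setHasDefaultRR()    { bits |= DEMO_HAS_DEFAULT_RR; }
};

int main() {
    demo_bits_t b;
    void *rw = malloc(64);          // stand-in for a class_rw_t allocation (malloc is aligned)
    b.setData(rw);
    b.setHasDefaultRR();
    printf("data=%p swift=%d defaultRR=%d\n", b.data(), b.isSwift(), b.hasDefaultRR());
    free(rw);
    return 0;
}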
+#define FAST_HAS_DEFAULT_AWZ (1UL<<48) +// class or superclass has default retain/release/autorelease/retainCount/ +// _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference +#define FAST_HAS_DEFAULT_RR (1UL<<49) +// summary bit for fast alloc path: !hasCxxCtor and +// !requiresRawIsa and instanceSize fits into shiftedSize +#define FAST_ALLOC (1UL<<50) +// instance size in units of 16 bytes +// or 0 if the instance size is too big in this field +// This field must be LAST +#define FAST_SHIFTED_SIZE_SHIFT 51 + +// FAST_ALLOC means +// FAST_HAS_CXX_CTOR is set +// FAST_REQUIRES_RAW_ISA is not set +// FAST_SHIFTED_SIZE is not zero +// FAST_ALLOC does NOT check FAST_HAS_DEFAULT_AWZ because that +// bit is stored on the metaclass. +#define FAST_ALLOC_MASK (FAST_HAS_CXX_CTOR | FAST_REQUIRES_RAW_ISA) +#define FAST_ALLOC_VALUE (0) + +#endif + + +struct class_data_bits_t { + + // Values are the FAST_ flags above. + uintptr_t bits; +private: + bool getBit(uintptr_t bit) + { + return bits & bit; + } + +#if FAST_ALLOC + static uintptr_t updateFastAlloc(uintptr_t oldBits, uintptr_t change) + { + if (change & FAST_ALLOC_MASK) { + if (((oldBits & FAST_ALLOC_MASK) == FAST_ALLOC_VALUE) && + ((oldBits >> FAST_SHIFTED_SIZE_SHIFT) != 0)) + { + oldBits |= FAST_ALLOC; + } else { + oldBits &= ~FAST_ALLOC; + } + } + return oldBits; + } +#else + static uintptr_t updateFastAlloc(uintptr_t oldBits, uintptr_t change) { + return oldBits; + } +#endif + + void setBits(uintptr_t set) + { + uintptr_t oldBits; + uintptr_t newBits; + do { + oldBits = LoadExclusive(&bits); + newBits = updateFastAlloc(oldBits | set, set); + } while (!StoreReleaseExclusive(&bits, oldBits, newBits)); + } + + void clearBits(uintptr_t clear) + { + uintptr_t oldBits; + uintptr_t newBits; + do { + oldBits = LoadExclusive(&bits); + newBits = updateFastAlloc(oldBits & ~clear, clear); + } while (!StoreReleaseExclusive(&bits, oldBits, newBits)); + } + +public: + + class_rw_t* data() { + return (class_rw_t *)(bits & FAST_DATA_MASK); + } + void setData(class_rw_t *newData) + { + assert(!data() || (newData->flags & (RW_REALIZING | RW_FUTURE))); + // Set during realization or construction only. No locking needed. 
+ bits = (bits & ~FAST_DATA_MASK) | (uintptr_t)newData; + } + + bool hasDefaultRR() { + return getBit(FAST_HAS_DEFAULT_RR); + } + void setHasDefaultRR() { + setBits(FAST_HAS_DEFAULT_RR); + } + void setHasCustomRR() { + clearBits(FAST_HAS_DEFAULT_RR); + } + +#if FAST_HAS_DEFAULT_AWZ + bool hasDefaultAWZ() { + return getBit(FAST_HAS_DEFAULT_AWZ); + } + void setHasDefaultAWZ() { + setBits(FAST_HAS_DEFAULT_AWZ); + } + void setHasCustomAWZ() { + clearBits(FAST_HAS_DEFAULT_AWZ); + } +#else + bool hasDefaultAWZ() { + return data()->flags & RW_HAS_DEFAULT_AWZ; + } + void setHasDefaultAWZ() { + data()->setFlags(RW_HAS_DEFAULT_AWZ); + } + void setHasCustomAWZ() { + data()->clearFlags(RW_HAS_DEFAULT_AWZ); + } +#endif + +#if FAST_HAS_CXX_CTOR + bool hasCxxCtor() { + return getBit(FAST_HAS_CXX_CTOR); + } + void setHasCxxCtor() { + setBits(FAST_HAS_CXX_CTOR); + } +#else + bool hasCxxCtor() { + return data()->flags & RW_HAS_CXX_CTOR; + } + void setHasCxxCtor() { + data()->setFlags(RW_HAS_CXX_CTOR); + } +#endif + +#if FAST_HAS_CXX_DTOR + bool hasCxxDtor() { + return getBit(FAST_HAS_CXX_DTOR); + } + void setHasCxxDtor() { + setBits(FAST_HAS_CXX_DTOR); + } +#else + bool hasCxxDtor() { + return data()->flags & RW_HAS_CXX_DTOR; + } + void setHasCxxDtor() { + data()->setFlags(RW_HAS_CXX_DTOR); + } +#endif + +#if FAST_REQUIRES_RAW_ISA + bool requiresRawIsa() { + return getBit(FAST_REQUIRES_RAW_ISA); + } + void setRequiresRawIsa() { + setBits(FAST_REQUIRES_RAW_ISA); + } +#else +# if SUPPORT_NONPOINTER_ISA +# error oops +# endif + bool requiresRawIsa() { + return true; + } + void setRequiresRawIsa() { + // nothing + } +#endif + +#if FAST_ALLOC + size_t fastInstanceSize() + { + assert(bits & FAST_ALLOC); + return (bits >> FAST_SHIFTED_SIZE_SHIFT) * 16; + } + void setFastInstanceSize(size_t newSize) + { + // Set during realization or construction only. No locking needed. 
+ assert(data()->flags & RW_REALIZING); + + // Round up to 16-byte boundary, then divide to get 16-byte units + newSize = ((newSize + 15) & ~15) / 16; + + uintptr_t newBits = newSize << FAST_SHIFTED_SIZE_SHIFT; + if ((newBits >> FAST_SHIFTED_SIZE_SHIFT) == newSize) { + int shift = WORD_BITS - FAST_SHIFTED_SIZE_SHIFT; + uintptr_t oldBits = (bits << shift) >> shift; + if ((oldBits & FAST_ALLOC_MASK) == FAST_ALLOC_VALUE) { + newBits |= FAST_ALLOC; + } + bits = oldBits | newBits; + } + } + + bool canAllocFast() { + return bits & FAST_ALLOC; + } +#else + size_t fastInstanceSize() { + abort(); + } + void setFastInstanceSize(size_t) { + // nothing + } + bool canAllocFast() { + return false; + } +#endif + + bool isSwift() { + return getBit(FAST_IS_SWIFT); + } + + void setIsSwift() { + setBits(FAST_IS_SWIFT); + } }; + struct objc_class : objc_object { // Class ISA; Class superclass; - cache_t cache; - uintptr_t data_NEVER_USE; // class_rw_t * plus custom rr/alloc flags + cache_t cache; // formerly cache pointer and vtable + class_data_bits_t bits; // class_rw_t * plus custom rr/alloc flags class_rw_t *data() { - return (class_rw_t *)(data_NEVER_USE & ~CLASS_FAST_FLAG_MASK); + return bits.data(); } void setData(class_rw_t *newData) { - uintptr_t flags = (uintptr_t)data_NEVER_USE & CLASS_FAST_FLAG_MASK; - data_NEVER_USE = (uintptr_t)newData | flags; + bits.setData(newData); } void setInfo(uint32_t set) { assert(isFuture() || isRealized()); - OSAtomicOr32Barrier(set, (volatile uint32_t *)&data()->flags); + data()->setFlags(set); } void clearInfo(uint32_t clear) { assert(isFuture() || isRealized()); - OSAtomicXor32Barrier(clear, (volatile uint32_t *)&data()->flags); + data()->clearFlags(clear); } // set and clear must not overlap void changeInfo(uint32_t set, uint32_t clear) { assert(isFuture() || isRealized()); assert((set & clear) == 0); - - uint32_t oldf, newf; - do { - oldf = data()->flags; - newf = (oldf | set) & ~clear; - } while (!OSAtomicCompareAndSwap32Barrier(oldf, newf, (volatile int32_t *)&data()->flags)); + data()->changeFlags(set, clear); } bool hasCustomRR() { -#if CLASS_FAST_FLAGS_VIA_RW_DATA - return data_NEVER_USE & CLASS_FAST_FLAG_HAS_CUSTOM_RR; -#else - return data()->flags & RW_HAS_CUSTOM_RR; -#endif + return ! bits.hasDefaultRR(); + } + void setHasDefaultRR() { + assert(isInitializing()); + bits.setHasDefaultRR(); } void setHasCustomRR(bool inherited = false); + void printCustomRR(bool inherited); bool hasCustomAWZ() { - return true; - // return data()->flags & RW_HAS_CUSTOM_AWZ; + return ! bits.hasDefaultAWZ(); + } + void setHasDefaultAWZ() { + assert(isInitializing()); + bits.setHasDefaultAWZ(); } void setHasCustomAWZ(bool inherited = false); + void printCustomAWZ(bool inherited); + + bool requiresRawIsa() { + return bits.requiresRawIsa(); + } + void setRequiresRawIsa(bool inherited = false); + void printRequiresRawIsa(bool inherited); + + bool canAllocIndexed() { + return !requiresRawIsa(); + } + bool canAllocFast() { + return bits.canAllocFast(); + } + bool hasCxxCtor() { // addSubclass() propagates this flag from the superclass. assert(isRealized()); - return data()->flags & RW_HAS_CXX_CTOR; + return bits.hasCxxCtor(); + } + void setHasCxxCtor() { + bits.setHasCxxCtor(); } bool hasCxxDtor() { // addSubclass() propagates this flag from the superclass. 
assert(isRealized()); - return data()->flags & RW_HAS_CXX_DTOR; + return bits.hasCxxDtor(); } + void setHasCxxDtor() { + bits.setHasCxxDtor(); + } + + bool isSwift() { + return bits.isSwift(); + } + + +#if SUPPORT_NONPOINTER_ISA + // Tracked in non-pointer isas; not tracked otherwise +#else bool instancesHaveAssociatedObjects() { // this may be an unrealized future class in the CF-bridged case assert(isFuture() || isRealized()); @@ -444,6 +786,7 @@ struct objc_class : objc_object { assert(isFuture() || isRealized()); setInfo(RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS); } +#endif bool shouldGrowCache() { return true; @@ -477,11 +820,6 @@ struct objc_class : objc_object { return getMeta()->data()->flags & RW_INITIALIZED; } - // assumes this is a metaclass already - bool isInitialized_meta() { - return (data()->flags & RW_INITIALIZED); - } - void setInitialized(); bool isLoadable() { @@ -521,8 +859,7 @@ struct objc_class : objc_object { return ISA() == (Class)this; } - const char *getName() { return name(); } - const char *name() { + const char *mangledName() { // fixme can't assert locks here assert(this); @@ -532,6 +869,9 @@ struct objc_class : objc_object { return ((const class_ro_t *)data())->name; } } + + const char *demangledName(bool realize = false); + const char *nameForLogging(); // May be unaligned depending on class's ivars. uint32_t unalignedInstanceSize() { @@ -541,10 +881,45 @@ struct objc_class : objc_object { // Class's ivar size rounded up to a pointer-size boundary. uint32_t alignedInstanceSize() { - return (unalignedInstanceSize() + WORD_MASK) & ~WORD_MASK; + return word_align(unalignedInstanceSize()); + } + + size_t instanceSize(size_t extraBytes) { + size_t size = alignedInstanceSize() + extraBytes; + // CF requires all objects be at least 16 bytes. + if (size < 16) size = 16; + return size; + } + + void setInstanceSize(uint32_t newSize) { + assert(isRealized()); + if (newSize != data()->ro->instanceSize) { + assert(data()->flags & RW_COPIED_RO); + *const_cast(&data()->ro->instanceSize) = newSize; + } + bits.setFastInstanceSize(newSize); + } +}; + + +struct swift_class_t : objc_class { + uint32_t flags; + uint32_t instanceAddressOffset; + uint32_t instanceSize; + uint16_t instanceAlignMask; + uint16_t reserved; + + uint32_t classSize; + uint32_t classAddressOffset; + void *description; + // ... 
+ + void *baseAddress() { + return (void *)((uint8_t *)this - classAddressOffset); } }; + struct category_t { const char *name; classref_t cls; @@ -567,27 +942,34 @@ struct message_ref_t { extern Method protocol_getMethod(protocol_t *p, SEL sel, bool isRequiredMethod, bool isInstanceMethod, bool recursive); +static inline void +foreach_realized_class_and_subclass_2(Class top, bool (^code)(Class)) +{ + // rwlock_assert_writing(&runtimeLock); + assert(top); + Class cls = top; + while (1) { + if (!code(cls)) break; + + if (cls->data()->firstSubclass) { + cls = cls->data()->firstSubclass; + } else { + while (!cls->data()->nextSiblingClass && cls != top) { + cls = cls->superclass; + } + if (cls == top) break; + cls = cls->data()->nextSiblingClass; + } + } +} -#define FOREACH_REALIZED_CLASS_AND_SUBCLASS(_c, _cls, code) \ - do { \ - rwlock_assert_writing(&runtimeLock); \ - assert(_cls); \ - Class _top = _cls; \ - Class _c = _top; \ - while (1) { \ - code \ - if (_c->data()->firstSubclass) { \ - _c = _c->data()->firstSubclass; \ - } else { \ - while (!_c->data()->nextSiblingClass && _c != _top) { \ - _c = _c->superclass; \ - } \ - if (_c == _top) break; \ - _c = _c->data()->nextSiblingClass; \ - } \ - } \ - } while (0) - +static inline void +foreach_realized_class_and_subclass(Class top, void (^code)(Class)) +{ + foreach_realized_class_and_subclass_2(top, ^bool(Class cls) { + code(cls); return true; + }); +} __END_DECLS diff --git a/runtime/objc-runtime-new.mm b/runtime/objc-runtime-new.mm index c7817a4..a3cc964 100644 --- a/runtime/objc-runtime-new.mm +++ b/runtime/objc-runtime-new.mm @@ -32,6 +32,7 @@ #include "objc-runtime-new.h" #include "objc-file.h" #include "objc-cache.h" +#include #include #include @@ -49,12 +50,18 @@ static IMP addMethod(Class cls, SEL name, IMP imp, const char *types, BOOL repla static NXHashTable *realizedClasses(void); static bool isRRSelector(SEL sel); static bool isAWZSelector(SEL sel); +static bool methodListImplementsRR(const method_list_t *mlist); +static bool methodListImplementsAWZ(const method_list_t *mlist); static void updateCustomRR_AWZ(Class cls, method_t *meth); static method_t *search_method_list(const method_list_t *mlist, SEL sel); #if SUPPORT_FIXUP static void fixupMessageRef(message_ref_t *msg); #endif +static bool MetaclassNSObjectAWZSwizzled; +static bool ClassNSObjectRRSwizzled; + + id objc_noop_imp(id self, SEL _cmd __unused) { return self; } @@ -76,6 +83,34 @@ void lock_init(void) } +/*********************************************************************** +* Non-pointer isa decoding +**********************************************************************/ +#if SUPPORT_NONPOINTER_ISA + +const uintptr_t objc_debug_isa_class_mask = ISA_MASK; +const uintptr_t objc_debug_isa_magic_mask = ISA_MAGIC_MASK; +const uintptr_t objc_debug_isa_magic_value = ISA_MAGIC_VALUE; + +// die if masks overlap +STATIC_ASSERT((ISA_MASK & ISA_MAGIC_MASK) == 0); + +// die if magic is wrong +STATIC_ASSERT((~ISA_MAGIC_MASK & ISA_MAGIC_VALUE) == 0); + +// die if virtual address space bound goes up +STATIC_ASSERT((~ISA_MASK & MACH_VM_MAX_ADDRESS) == 0); + +#else + +// These variables exist but enforce pointer alignment only. 
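The objc_debug_isa_* constants defined in this hunk are given external linkage so out-of-process tools can recover a class pointer from a non-pointer isa without hard-coding the bit layout. The sketch below shows how a debugger-side helper might consume them; the extern declarations mirror the definitions here, but the decode function itself is illustrative, assumes the sketch links against libobjc, and ignores tagged pointers entirely.

#include <cstdint>

// Exported by the runtime (defined in this file; see the declarations in objc-gdb.h).
extern "C" const uintptr_t objc_debug_isa_class_mask;
extern "C" const uintptr_t objc_debug_isa_magic_mask;
extern "C" const uintptr_t objc_debug_isa_magic_value;

// Illustrative decode: given the raw isa word of an object, return the class
// pointer bits, or the word unchanged if it does not carry the non-pointer magic.
static uintptr_t demo_decode_isa(uintptr_t raw_isa)
{
    if ((raw_isa & objc_debug_isa_magic_mask) == objc_debug_isa_magic_value) {
        // Non-pointer isa: the class pointer is packed under the class mask.
        return raw_isa & objc_debug_isa_class_mask;
    }
    // Raw isa: the word already is the class pointer.
    return raw_isa;
}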
+const uintptr_t objc_debug_isa_class_mask = (~WORD_MASK); +const uintptr_t objc_debug_isa_magic_mask = WORD_MASK; +const uintptr_t objc_debug_isa_magic_value = 0; + +#endif + + typedef struct { category_t *cat; BOOL fromBundle; @@ -88,16 +123,48 @@ typedef struct { #define FOREACH_METHOD_LIST(_mlist, _cls, code) \ do { \ + class_rw_t *_data = _cls->data(); \ const method_list_t *_mlist; \ - if (_cls->data()->method_lists) { \ - if (_cls->data()->flags & RW_METHOD_ARRAY) { \ + if (_data->method_lists) { \ + if (_data->flags & RW_METHOD_ARRAY) { \ method_list_t **_mlistp; \ - for (_mlistp=_cls->data()->method_lists; *_mlistp; _mlistp++){\ - _mlist = *_mlistp; \ + for (_mlistp=_data->method_lists; _mlistp[0]; _mlistp++){ \ + _mlist = _mlistp[0]; \ code \ } \ } else { \ - _mlist = _cls->data()->method_list; \ + _mlist = _data->method_list; \ + code \ + } \ + } \ + } while (0) + + +// As above, but skips the class's base method list. +#define FOREACH_CATEGORY_METHOD_LIST(_mlist, _cls, code) \ + do { \ + class_rw_t *_data = _cls->data(); \ + const method_list_t *_mlist; \ + if (_data->method_lists) { \ + if (_data->flags & RW_METHOD_ARRAY) { \ + if (_data->ro->baseMethods) { \ + /* has base methods: use all mlists except the last */ \ + method_list_t **_mlistp; \ + for (_mlistp=_data->method_lists; _mlistp[0] && _mlistp[1]; _mlistp++){ \ + _mlist = _mlistp[0]; \ + code \ + } \ + } else { \ + /* no base methods: use all mlists including the last */ \ + method_list_t **_mlistp; \ + for (_mlistp=_data->method_lists; _mlistp[0]; _mlistp++){ \ + _mlist = _mlistp[0]; \ + code \ + } \ + } \ + } else if (!_data->ro->baseMethods) { \ + /* no base methods: use all mlists including the last */ \ + _mlist = _data->method_list; \ code \ } \ } \ @@ -107,11 +174,14 @@ typedef struct { /* Low two bits of mlist->entsize is used as the fixed-up marker. PREOPTIMIZED VERSION: - Fixed-up method lists get entsize&3 == 3. - dyld shared cache sets this for method lists it preoptimizes. + Method lists from shared cache are 1 (uniqued) or 3 (uniqued and sorted). + (Protocol method lists are not sorted because of their extra parallel data) + Runtime fixed-up method lists get 3. UN-PREOPTIMIZED VERSION: - Fixed-up method lists get entsize&3 == 1. - dyld shared cache uses 3, but those aren't trusted. + Method lists from shared cache are 1 (uniqued) or 3 (uniqued and sorted) + Shared cache's sorting and uniquing are not trusted, but do affect the + location of the selector name string. + Runtime fixed-up method lists get 2. 
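The marker scheme described in the comment above lives in the low two bits of a method list's entsize: the shared cache stamps 1 (uniqued) or 3 (uniqued and sorted), while a runtime that distrusts preoptimization now stamps 2 for its own fix-ups, so the two sources can no longer be confused. A small self-contained sketch of that bookkeeping, with a toy struct standing in for method_list_t and an LP64-sized entry:

#include <cassert>
#include <cstdint>
#include <cstdio>

struct demo_method_list {
    uint32_t entsize_and_marker;   // stand-in for entsize_NEVER_USE
    uint32_t count;
};

// Low two bits carry the fixed-up marker; the rest is the per-entry size.
static uint32_t demo_entsize(const demo_method_list *m) { return m->entsize_and_marker & ~(uint32_t)3; }
static uint32_t demo_marker(const demo_method_list *m)  { return m->entsize_and_marker & 3; }

int main() {
    // A shared-cache list arrives marked 3 (uniqued and sorted); entry is 3 pointers.
    demo_method_list fromCache = { (uint32_t)(3 * sizeof(void*)) | 3, 12 };

    // With preoptimization distrusted, only the runtime's own marker (2) counts
    // as fixed up, so this list gets re-fixed-up and restamped.
    const uint32_t trusted_marker = 2;
    bool fixedUp = (demo_marker(&fromCache) == trusted_marker);
    printf("entsize=%u marker=%u fixedUp=%d\n",
           demo_entsize(&fromCache), demo_marker(&fromCache), fixedUp);

    fromCache.entsize_and_marker =
        (fromCache.entsize_and_marker & ~(uint32_t)3) | trusted_marker;
    assert(demo_marker(&fromCache) == trusted_marker);
    return 0;
}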
*/ static uint32_t fixed_up_method_list = 3; @@ -119,19 +189,29 @@ static uint32_t fixed_up_method_list = 3; void disableSharedCacheOptimizations(void) { - fixed_up_method_list = 1; + fixed_up_method_list = 2; } -static BOOL isMethodListFixedUp(const method_list_t *mlist) +static bool +isMethodListFixedUp(const method_list_t *mlist) { return (mlist->entsize_NEVER_USE & 3) == fixed_up_method_list; } -static void setMethodListFixedUp(method_list_t *mlist) + +static const char *sel_cname(SEL sel) +{ + return (const char *)(void *)sel; +} + + +static void +setMethodListFixedUp(method_list_t *mlist) { rwlock_assert_writing(&runtimeLock); assert(!isMethodListFixedUp(mlist)); - mlist->entsize_NEVER_USE = (mlist->entsize_NEVER_USE & ~3) | fixed_up_method_list; + mlist->entsize_NEVER_USE = + (mlist->entsize_NEVER_USE & ~3) | fixed_up_method_list; } /* @@ -151,7 +231,7 @@ static size_t protocol_list_size(const protocol_list_t *plist) // low bit used by dyld shared cache static uint32_t method_list_entsize(const method_list_t *mlist) { - return mlist->entsize_NEVER_USE & ~(uint32_t)3; + return mlist->entsize_NEVER_USE & ~3; } static size_t method_list_size(const method_list_t *mlist) @@ -240,6 +320,58 @@ static void try_free(const void *p) } +static Class +alloc_class_for_subclass(Class supercls, size_t extraBytes) +{ + if (!supercls || !supercls->isSwift()) { + return _calloc_class(sizeof(objc_class) + extraBytes); + } + + // Superclass is a Swift class. New subclass must duplicate its extra bits. + + // Allocate the new class, with space for super's prefix and suffix + // and self's extraBytes. + swift_class_t *swiftSupercls = (swift_class_t *)supercls; + size_t superSize = swiftSupercls->classSize; + void *superBits = swiftSupercls->baseAddress(); + void *bits = _malloc_internal(superSize + extraBytes); + + // Copy all of the superclass's data to the new class. + memcpy(bits, superBits, superSize); + + // Erase the objc data and the Swift description in the new class. + swift_class_t *swcls = (swift_class_t *) + ((uint8_t *)bits + swiftSupercls->classAddressOffset); + bzero(swcls, sizeof(objc_class)); + swcls->description = nil; + + // Mark this class as Swift-enhanced. + swcls->bits.setIsSwift(); + + return (Class)swcls; +} + + +/*********************************************************************** +* object_getIndexedIvars. +**********************************************************************/ +void *object_getIndexedIvars(id obj) +{ + uint8_t *base = (uint8_t *)obj; + + if (!obj) return nil; + if (obj->isTaggedPointer()) return nil; + + if (!obj->isClass()) return base + obj->ISA()->alignedInstanceSize(); + + Class cls = (Class)obj; + if (!cls->isSwift()) return base + sizeof(objc_class); + + swift_class_t *swcls = (swift_class_t *)cls; + return base - swcls->classAddressOffset + word_align(swcls->classSize); +} + + /*********************************************************************** * make_ro_writeable * Reallocates rw->ro if necessary to make it writeable. 
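alloc_class_for_subclass() and object_getIndexedIvars() above both walk the same Swift metadata layout: the Class pointer lands classAddressOffset bytes into a larger allocation, with Swift-only prefix data in front of the objc_class fields and indexed ivars placed after word_align(classSize). A self-contained sketch of only that address arithmetic, using invented sizes rather than real Swift metadata:

#include <cstdint>
#include <cstdio>
#include <cstdlib>

const size_t DEMO_WORD = sizeof(void*);
static size_t demo_word_align(size_t x) { return (x + DEMO_WORD - 1) & ~(DEMO_WORD - 1); }

int main() {
    // Pretend layout: 32 bytes of Swift prefix, then the objc_class portion,
    // then a Swift suffix; classSize spans all three (sizes are invented).
    const uint32_t classAddressOffset = 32;
    const uint32_t classSize          = 32 + 80 + 24;
    const size_t   extraBytes         = 64;

    uint8_t *allocation = (uint8_t *)calloc(1, classSize + extraBytes);
    uint8_t *clsSymbol  = allocation + classAddressOffset;   // what a Class pointer refers to

    // swift_class_t::baseAddress(): back up to the start of the whole metadata block.
    uint8_t *base = clsSymbol - classAddressOffset;

    // object_getIndexedIvars() for a Swift class object: the extra bytes live after
    // the word-aligned end of the metadata, not right after sizeof(objc_class).
    uint8_t *indexedIvars = base + demo_word_align(classSize);

    printf("base=%p class=%p indexedIvars=%p (offset from class: %zu)\n",
           (void *)base, (void *)clsSymbol, (void *)indexedIvars,
           (size_t)(indexedIvars - clsSymbol));
    free(allocation);
    return 0;
}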
@@ -389,7 +521,7 @@ static void printReplacements(Class cls, category_list *cats) uint32_t c2, m2; method_t *meth2 = nil; method_t *meth = cat_method_nth(cat, isMeta, m); - SEL s = sel_registerName((const char *)meth->name); + SEL s = sel_registerName(sel_cname(meth->name)); // Don't warn about GC-ignored selectors if (ignoreSelector(s)) continue; @@ -400,7 +532,7 @@ static void printReplacements(Class cls, category_list *cats) uint32_t cm2Count = cat_method_count(cat2, isMeta); for (m2 = 0; m2 < cm2Count; m2++) { meth2 = cat_method_nth(cat2, isMeta, m2); - SEL s2 = sel_registerName((const char *)meth2->name); + SEL s2 = sel_registerName(sel_cname(meth2->name)); if (s == s2) goto whine; } } @@ -409,7 +541,7 @@ static void printReplacements(Class cls, category_list *cats) FOREACH_METHOD_LIST(mlist, cls, { for (m2 = 0; m2 < mlist->count; m2++) { meth2 = method_list_nth(mlist, m2); - SEL s2 = sel_registerName((const char *)meth2->name); + SEL s2 = sel_registerName(sel_cname(meth2->name)); if (s == s2) goto whine; } }); @@ -419,7 +551,8 @@ static void printReplacements(Class cls, category_list *cats) whine: // Found an override. - logReplacedMethod(cls->name(), s, cls->isMetaClass(), cat->name, + logReplacedMethod(cls->nameForLogging(), s, + cls->isMetaClass(), cat->name, _method_getImplementation(meth2), _method_getImplementation(meth)); } @@ -444,19 +577,22 @@ fixupMethodList(method_list_t *mlist, bool bundleCopy, bool sort) // fixme lock less in attachMethodLists ? sel_lock(); - + // Unique selectors in list. uint32_t m; for (m = 0; m < mlist->count; m++) { method_t *meth = method_list_nth(mlist, m); - SEL sel = sel_registerNameNoLock((const char *)meth->name, bundleCopy); + + const char *name = sel_cname(meth->name); + + SEL sel = sel_registerNameNoLock(name, bundleCopy); meth->name = sel; - + if (ignoreSelector(sel)) { meth->imp = (IMP)&_objc_ignored_method; } } - + sel_unlock(); // Sort by selector address. @@ -483,31 +619,12 @@ attachMethodLists(Class cls, method_list_t **addedLists, int addedCount, bool scanForCustomRR = !UseGC && !cls->hasCustomRR(); bool scanForCustomAWZ = !UseGC && !cls->hasCustomAWZ(); - // RR special cases: - // GC is custom RR. - // NSObject's base instance methods are not custom RR. - // All other root classes are custom RR. - // updateCustomRR_AWZ also knows about these cases. - if (UseGC) { - cls->setHasCustomRR(); - scanForCustomRR = false; - } - if (baseMethods && scanForCustomRR && cls->isRootClass()) { - if (cls != classNSObject()) { - cls->setHasCustomRR(); - } - scanForCustomRR = false; - } - - // AWZ special cases: - // NSObject's base class methods are not custom AWZ. - // All other root metaclasses are custom AWZ. - // updateCustomRR_AWZ also knows about these cases. - if (baseMethods && scanForCustomAWZ && cls->isRootMetaclass()) { - if (cls != classNSObject()->ISA()) { - cls->setHasCustomAWZ(); - } - scanForCustomAWZ = false; + // There exist RR/AWZ special cases for some class's base methods. + // But this code should never need to scan base methods for RR/AWZ: + // default RR/AWZ cannot be set before setInitialized(). + // Therefore we need not handle any special cases here. + if (baseMethods) { + assert(!scanForCustomRR && !scanForCustomAWZ); } // Method list array is nil-terminated. 
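The simplified scanning below leans on methodListImplementsRR() and methodListImplementsAWZ(), which are only forward-declared in this patch hunk; their bodies live elsewhere in the file and are not reproduced here. Purely as a hedged sketch of what such a scan has to do, and not a claim about the actual implementation, one plausible shape built on the existing search_method_list() helper is:

// Hypothetical sketch only -- the real bodies in objc-runtime-new.mm may use the
// runtime's pre-registered selector globals and cover additional selectors.
static bool demoMethodListImplementsRR(const method_list_t *mlist)
{
    return search_method_list(mlist, sel_registerName("retain"))      ||
           search_method_list(mlist, sel_registerName("release"))     ||
           search_method_list(mlist, sel_registerName("autorelease")) ||
           search_method_list(mlist, sel_registerName("retainCount"));
}

static bool demoMethodListImplementsAWZ(const method_list_t *mlist)
{
    return search_method_list(mlist, sel_registerName("alloc"))       ||
           search_method_list(mlist, sel_registerName("allocWithZone:"));
}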
@@ -557,18 +674,13 @@ attachMethodLists(Class cls, method_list_t **addedLists, int addedCount, } // Scan for method implementations tracked by the class's flags - for (uint32_t m = 0; - (scanForCustomRR || scanForCustomAWZ) && m < mlist->count; - m++) - { - SEL sel = method_list_nth(mlist, m)->name; - if (scanForCustomRR && isRRSelector(sel)) { - cls->setHasCustomRR(); - scanForCustomRR = false; - } else if (scanForCustomAWZ && isAWZSelector(sel)) { - cls->setHasCustomAWZ(); - scanForCustomAWZ = false; - } + if (scanForCustomRR && methodListImplementsRR(mlist)) { + cls->setHasCustomRR(); + scanForCustomRR = false; + } + if (scanForCustomAWZ && methodListImplementsAWZ(mlist)) { + cls->setHasCustomAWZ(); + scanForCustomAWZ = false; } // Update method caches @@ -625,7 +737,6 @@ attachCategoryMethods(Class cls, category_list *cats, bool flushCaches) attachMethodLists(cls, mlists, mcount, NO, fromBundle, flushCaches); _free_internal(mlists); - } @@ -764,7 +875,7 @@ static void methodizeClass(Class cls) // Methodizing for the first time if (PrintConnecting) { _objc_inform("CLASS: methodizing class '%s' %s", - cls->name(), isMeta ? "(meta)" : ""); + cls->nameForLogging(), isMeta ? "(meta)" : ""); } // Build method and protocol and property lists. @@ -800,7 +911,7 @@ static void methodizeClass(Class cls) for (i = 0; i < cats->count; i++) { _objc_inform("CLASS: attached category %c%s(%s)", isMeta ? '+' : '-', - cls->name(), cats->list[i].cat->name); + cls->nameForLogging(), cats->list[i].cat->name); } } } @@ -815,7 +926,7 @@ static void methodizeClass(Class cls) for ( ; iter != end; ++iter) { if (PrintConnecting) { _objc_inform("METHOD %c[%s %s]", isMeta ? '+' : '-', - cls->name(), sel_getName(iter->name)); + cls->nameForLogging(), sel_getName(iter->name)); } assert(ignoreSelector(iter->name) || sel_registerName(sel_getName(iter->name))==iter->name); } @@ -847,7 +958,7 @@ static void remethodizeClass(Class cls) if (PrintConnecting) { _objc_inform("CLASS: attaching categories to class '%s' %s", - cls->name(), isMeta ? "(meta)" : ""); + cls->nameForLogging(), isMeta ? "(meta)" : ""); } // Update methods, properties, protocols @@ -871,9 +982,180 @@ static void remethodizeClass(Class cls) } +/*********************************************************************** +* nonMetaClasses +* Returns the secondary metaclass => class map +* Used for some cases of +initialize and +resolveClassMethod:. +* This map does not contain all class and metaclass pairs. It only +* contains metaclasses whose classes would be in the runtime-allocated +* named-class table, but are not because some other class with the same name +* is in that table. +* Classes with no duplicates are not included. +* Classes in the preoptimized named-class table are not included. +* Classes whose duplicates are in the preoptimized table are not included. +* Most code should use getNonMetaClass() instead of reading this table. 
+* Locking: runtimeLock must be read- or write-locked by the caller +**********************************************************************/ +static NXMapTable *nonmeta_class_map = nil; +static NXMapTable *nonMetaClasses(void) +{ + rwlock_assert_locked(&runtimeLock); + + if (nonmeta_class_map) return nonmeta_class_map; + + // nonmeta_class_map is typically small + INIT_ONCE_PTR(nonmeta_class_map, + NXCreateMapTableFromZone(NXPtrValueMapPrototype, 32, + _objc_internal_zone()), + NXFreeMapTable(v)); + + return nonmeta_class_map; +} + + +/*********************************************************************** +* addNonMetaClass +* Adds metacls => cls to the secondary metaclass map +* Locking: runtimeLock must be held by the caller +**********************************************************************/ +static void addNonMetaClass(Class cls) +{ + rwlock_assert_writing(&runtimeLock); + void *old; + old = NXMapInsert(nonMetaClasses(), cls->ISA(), cls); + + assert(!cls->isMetaClass()); + assert(cls->ISA()->isMetaClass()); + assert(!old); +} + + +static void removeNonMetaClass(Class cls) +{ + rwlock_assert_writing(&runtimeLock); + NXMapRemove(nonMetaClasses(), cls->ISA()); +} + + +static bool scanMangledField(const char *&string, const char *end, + const char *&field, int& length) +{ + // Leading zero not allowed. + if (*string == '0') return false; + + length = 0; + field = string; + while (field < end) { + char c = *field; + if (!isdigit(c)) break; + field++; + if (__builtin_smul_overflow(length, 10, &length)) return false; + if (__builtin_sadd_overflow(length, c - '0', &length)) return false; + } + + string = field + length; + return length > 0 && string <= end; +} + + +/*********************************************************************** +* copySwiftV1DemangledName +* Returns the pretty form of the given Swift-v1-mangled class or protocol name. +* Returns nil if the string doesn't look like a mangled Swift v1 name. +* The result must be freed with free(). +**********************************************************************/ +static char *copySwiftV1DemangledName(const char *string, bool isProtocol = false) +{ + if (!string) return nil; + + // Swift mangling prefix. + if (strncmp(string, isProtocol ? "_TtP" : "_TtC", 4) != 0) return nil; + string += 4; + + const char *end = string + strlen(string); + + // Module name. + const char *prefix; + int prefixLength; + if (strncmp(string, "Ss", 2) == 0) { + prefix = "Swift"; + prefixLength = 5; + string += 2; + } else { + if (! scanMangledField(string, end, prefix, prefixLength)) return nil; + } + + // Class or protocol name. + const char *suffix; + int suffixLength; + if (! scanMangledField(string, end, suffix, suffixLength)) return nil; + + if (isProtocol) { + // Remainder must be "_". + if (strcmp(string, "_") != 0) return nil; + } else { + // Remainder must be empty. + if (string != end) return nil; + } + + char *result; + asprintf(&result, "%.*s.%.*s", prefixLength,prefix, suffixLength,suffix); + return result; +} + + +/*********************************************************************** +* copySwiftV1MangledName +* Returns the Swift 1.0 mangled form of the given class or protocol name. +* Returns nil if the string doesn't look like an unmangled Swift name. +* The result must be freed with free(). 
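copySwiftV1DemangledName() above accepts only the Swift 1.0 scheme: a _TtC (class) or _TtP (protocol) prefix, then either "Ss" for the Swift module or a decimal-length-prefixed module name, then a decimal-length-prefixed type name, with a trailing underscore for protocols. A short usage sketch with invented type names follows; both helpers are file-static, so this would only compile inside this translation unit, and each result must be released with free().

// Hypothetical inputs; the module and type names are invented for illustration.
static void demoSwiftV1Names(void)
{
    // "_TtC5MyApp10Downloader"  ->  "MyApp.Downloader"   (class; 5-char module, 10-char name)
    char *a = copySwiftV1DemangledName("_TtC5MyApp10Downloader");

    // "_TtPSs9Equatable_"       ->  "Swift.Equatable"    ("Ss" abbreviates the Swift module)
    char *b = copySwiftV1DemangledName("_TtPSs9Equatable_", true/*isProtocol*/);

    // Round trip back to the mangled spelling used as the lookup key by getClass().
    char *c = copySwiftV1MangledName("MyApp.Downloader");   // -> "_TtC5MyApp10Downloader"

    if (a) { printf("%s\n", a); free(a); }
    if (b) { printf("%s\n", b); free(b); }
    if (c) { printf("%s\n", c); free(c); }
}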
+**********************************************************************/ +static char *copySwiftV1MangledName(const char *string, bool isProtocol = false) +{ + if (!string) return nil; + + size_t dotCount = 0; + size_t dotIndex; + const char *s; + for (s = string; *s; s++) { + if (*s == '.') { + dotCount++; + dotIndex = s - string; + } + } + size_t stringLength = s - string; + + if (dotCount != 1 || dotIndex == 0 || dotIndex >= stringLength-1) { + return nil; + } + + const char *prefix = string; + size_t prefixLength = dotIndex; + const char *suffix = string + dotIndex + 1; + size_t suffixLength = stringLength - (dotIndex + 1); + + char *name; + + if (strncmp(prefix, "Swift", prefixLength) == 0) { + asprintf(&name, "_Tt%cSs%zu%.*s%s", + isProtocol ? 'P' : 'C', + suffixLength, (int)suffixLength, suffix, + isProtocol ? "_" : ""); + } else { + asprintf(&name, "_Tt%c%zu%.*s%zu%.*s%s", + isProtocol ? 'P' : 'C', + prefixLength, (int)prefixLength, prefix, + suffixLength, (int)suffixLength, suffix, + isProtocol ? "_" : ""); + } + return name; +} + + /*********************************************************************** * getClass * Looks up a class by name. The class MIGHT NOT be realized. +* Demangled Swift names are recognized. * Locking: runtimeLock must be read- or write-locked by the caller. **********************************************************************/ @@ -881,7 +1163,7 @@ static void remethodizeClass(Class cls) // named classes not in the dyld shared cache, whether realized or not. NXMapTable *gdb_objc_realized_classes; // exported for debuggers in objc-gdb.h -static Class getClass(const char *name) +static Class getClass_impl(const char *name) { rwlock_assert_locked(&runtimeLock); @@ -896,6 +1178,24 @@ static Class getClass(const char *name) return getPreoptimizedClass(name); } +static Class getClass(const char *name) +{ + rwlock_assert_locked(&runtimeLock); + + // Try name as-is + Class result = getClass_impl(name); + if (result) return result; + + // Try Swift-mangled equivalent of the given name. + if (char *swName = copySwiftV1MangledName(name)) { + result = getClass_impl(swName); + free(swName); + return result; + } + + return nil; +} + /*********************************************************************** * addNamedClass @@ -909,6 +1209,10 @@ static void addNamedClass(Class cls, const char *name) Class old; if ((old = getClass(name))) { inform_duplicate(name, old, cls); + + // getNonMetaClass uses name lookups. Classes not found by name + // lookup must be in the secondary meta->nonmeta table. + addNonMetaClass(cls); } else { NXMapInsert(gdb_objc_realized_classes, name, cls); } @@ -932,6 +1236,8 @@ static void removeNamedClass(Class cls, const char *name) NXMapRemove(gdb_objc_realized_classes, name); } else { // cls has a name collision with another class - don't remove the other + // but do remove cls from the secondary metaclass->class map. + removeNonMetaClass(cls); } } @@ -1147,7 +1453,7 @@ static void addRemappedClass(Class oldcls, Class newcls) if (PrintFuture) { _objc_inform("FUTURE: using %p instead of %p for %s", - (void*)oldcls, (void*)newcls, oldcls->name()); + (void*)oldcls, (void*)newcls, oldcls->nameForLogging()); } void *old; @@ -1206,58 +1512,6 @@ static void remapClassRef(Class *clsref) } -/*********************************************************************** -* nonMetaClasses -* Returns the memoized metaclass => class map -* Used for some cases of +initialize. -* This map does not contain all classes and metaclasses. 
It only -* contains memoized results from the slow path in getNonMetaClass(), -* and classes that the slow path can't find (like objc_registerClassPair). -* Locking: runtimeLock must be read- or write-locked by the caller -**********************************************************************/ -static NXMapTable *nonmeta_class_map = nil; -static NXMapTable *nonMetaClasses(void) -{ - rwlock_assert_locked(&runtimeLock); - - if (nonmeta_class_map) return nonmeta_class_map; - - // nonmeta_class_map is typically small - INIT_ONCE_PTR(nonmeta_class_map, - NXCreateMapTableFromZone(NXPtrValueMapPrototype, 32, - _objc_internal_zone()), - NXFreeMapTable(v)); - - return nonmeta_class_map; -} - - -/*********************************************************************** -* addNonMetaClass -* Adds metacls => cls to the memoized metaclass map -* Locking: runtimeLock must be held by the caller -**********************************************************************/ -static void addNonMetaClass(Class cls) -{ - rwlock_assert_writing(&runtimeLock); - void *old; - old = NXMapInsert(nonMetaClasses(), cls->ISA(), cls); - - assert(cls->isRealized()); - assert(cls->ISA()->isRealized()); - assert(!cls->isMetaClass()); - assert(cls->ISA()->isMetaClass()); - assert(!old); -} - - -static void removeNonMetaClass(Class cls) -{ - rwlock_assert_writing(&runtimeLock); - NXMapRemove(nonMetaClasses(), cls->ISA()); -} - - /*********************************************************************** * getNonMetaClass * Return the ordinary class for this class or metaclass. @@ -1268,7 +1522,7 @@ static void removeNonMetaClass(Class cls) **********************************************************************/ static Class getNonMetaClass(Class metacls, id inst) { - static int total, slow, memo; + static int total, named, secondary, sharedcache; rwlock_assert_locked(&runtimeLock); realizeClass(metacls); @@ -1311,45 +1565,69 @@ static Class getNonMetaClass(Class metacls, id inst) #endif } - // try memoized table - Class cls = (Class)NXMapGet(nonMetaClasses(), metacls); - if (cls) { - memo++; - if (PrintInitializing) { - _objc_inform("INITIALIZE: %d/%d (%g%%) memoized metaclass lookups", - memo, total, memo*100.0/total); - } + // try name lookup + { + Class cls = getClass(metacls->mangledName()); + if (cls->ISA() == metacls) { + named++; + if (PrintInitializing) { + _objc_inform("INITIALIZE: %d/%d (%g%%) " + "successful by-name metaclass lookups", + named, total, named*100.0/total); + } - assert(cls->isRealized()); - assert(!cls->isMetaClass()); - assert(cls->ISA() == metacls); - return cls; + realizeClass(cls); + return cls; + } } - // try slow lookup - slow++; - if (PrintInitializing) { - _objc_inform("INITIALIZE: %d/%d (%g%%) slow metaclass lookups", - slow, total, slow*100.0/total); + // try secondary table + { + Class cls = (Class)NXMapGet(nonMetaClasses(), metacls); + if (cls) { + secondary++; + if (PrintInitializing) { + _objc_inform("INITIALIZE: %d/%d (%g%%) " + "successful secondary metaclass lookups", + secondary, total, secondary*100.0/total); + } + + assert(cls->ISA() == metacls); + realizeClass(cls); + return cls; + } } - for (header_info *hi = FirstHeader; hi; hi = hi->next) { - size_t count; - classref_t *classlist = _getObjc2ClassList(hi, &count); - for (size_t i = 0; i < count; i++) { - cls = remapClass(classlist[i]); - if (cls && cls->ISA() == metacls) { - // memoize result - realizeClass(cls); - addNonMetaClass(cls); - return cls; + // try any duplicates in the dyld shared cache + { + Class cls = nil; + + int count; + 
Class *classes = copyPreoptimizedClasses(metacls->mangledName(),&count); + if (classes) { + for (int i = 0; i < count; i++) { + if (classes[i]->ISA() == metacls) { + cls = classes[i]; + break; + } } + free(classes); + } + + if (cls) { + sharedcache++; + if (PrintInitializing) { + _objc_inform("INITIALIZE: %d/%d (%g%%) " + "successful shared cache metaclass lookups", + sharedcache, total, sharedcache*100.0/total); + } + + realizeClass(cls); + return cls; } } _objc_fatal("no class for metaclass %p", (void*)metacls); - - return cls; } @@ -1385,12 +1663,12 @@ static void addSubclass(Class supercls, Class subcls) subcls->data()->nextSiblingClass = supercls->data()->firstSubclass; supercls->data()->firstSubclass = subcls; - if (supercls->data()->flags & RW_HAS_CXX_CTOR) { - subcls->data()->flags |= RW_HAS_CXX_CTOR; + if (supercls->hasCxxCtor()) { + subcls->setHasCxxCtor(); } - if (supercls->data()->flags & RW_HAS_CXX_DTOR) { - subcls->data()->flags |= RW_HAS_CXX_DTOR; + if (supercls->hasCxxDtor()) { + subcls->setHasCxxDtor(); } if (supercls->hasCustomRR()) { @@ -1400,6 +1678,10 @@ static void addSubclass(Class supercls, Class subcls) if (supercls->hasCustomAWZ()) { subcls->setHasCustomAWZ(true); } + + if (supercls->requiresRawIsa()) { + subcls->setRequiresRawIsa(true); + } } } @@ -1447,6 +1729,37 @@ static NXMapTable *protocols(void) } +/*********************************************************************** +* getProtocol +* Looks up a protocol by name. Demangled Swift names are recognized. +* Locking: runtimeLock must be read- or write-locked by the caller. +**********************************************************************/ +static Protocol *getProtocol_impl(const char *name) +{ + rwlock_assert_locked(&runtimeLock); + + return (Protocol *)NXMapGet(protocols(), name); +} + +static Protocol *getProtocol(const char *name) +{ + rwlock_assert_locked(&runtimeLock); + + // Try name as-is. + Protocol *result = getProtocol_impl(name); + if (result) return result; + + // Try Swift-mangled equivalent of the given name. + if (char *swName = copySwiftV1MangledName(name, true/*isProtocol*/)) { + result = getProtocol_impl(swName); + free(swName); + return result; + } + + return nil; +} + + /*********************************************************************** * remapProtocol * Returns the live protocol pointer for proto, which may be pointing to @@ -1458,7 +1771,7 @@ static protocol_t *remapProtocol(protocol_ref_t proto) rwlock_assert_locked(&runtimeLock); protocol_t *newproto = (protocol_t *) - NXMapGet(protocols(), ((protocol_t *)proto)->name); + getProtocol(((protocol_t *)proto)->mangledName); return newproto ? 
newproto : (protocol_t *)proto; } @@ -1582,14 +1895,27 @@ static ivar_t *getIvar(Class cls, const char *name) } -static void reconcileInstanceVariables(Class cls, Class supercls) +static void reconcileInstanceVariables(Class cls, Class supercls, const class_ro_t*& ro) { class_rw_t *rw = cls->data(); - const class_ro_t *ro = rw->ro; assert(supercls); assert(!cls->isMetaClass()); + /* debug: print them all before sliding + if (ro->ivars) { + uint32_t i; + for (i = 0; i < ro->ivars->count; i++) { + ivar_t *ivar = ivar_list_nth(ro->ivars, i); + if (!ivar->offset) continue; // anonymous bitfield + + _objc_inform("IVARS: %s.%s (offset %u, size %u, align %u)", + ro->name, ivar->name, + *ivar->offset, ivar->size, ivar->alignment()); + } + } + */ + // Non-fragile ivars - reconcile this class with its superclass layout_bitmap ivarBitmap; layout_bitmap weakBitmap; @@ -1609,10 +1935,11 @@ static void reconcileInstanceVariables(Class cls, Class supercls) // __CF* classes, NSConstantString, NSSimpleCString // (already know it's not root because supercls != nil) - if (!strstr(cls->name(), "NSCF") && - 0 != strncmp(cls->name(), "__CF", 4) && - 0 != strcmp(cls->name(), "NSConstantString") && - 0 != strcmp(cls->name(), "NSSimpleCString")) + const char *clsname = cls->mangledName(); + if (!strstr(clsname, "NSCF") && + 0 != strncmp(clsname, "__CF", 4) && + 0 != strcmp(clsname, "NSConstantString") && + 0 != strcmp(clsname, "NSSimpleCString")) { uint32_t oldStart = ro->instanceStart; uint32_t oldSize = ro->instanceSize; @@ -1639,7 +1966,7 @@ static void reconcileInstanceVariables(Class cls, Class supercls) if (PrintIvars) { _objc_inform("IVARS: DEBUG: forcing ivars for class '%s' " "to slide (instanceStart %zu -> %zu)", - cls->name(), (size_t)oldStart, + cls->nameForLogging(), (size_t)oldStart, (size_t)ro->instanceStart); } @@ -1692,7 +2019,7 @@ static void reconcileInstanceVariables(Class cls, Class supercls) // how long it is. Force a new layout to be created. if (PrintIvars) { _objc_inform("IVARS: instanceStart/Size==0 for class %s; " - "disregarding ivar layout", ro->name); + "disregarding ivar layout", cls->nameForLogging()); } ivarBitmap = layout_bitmap_create_empty(super_ro->instanceSize, NO); weakBitmap = layout_bitmap_create_empty(super_ro->instanceSize, YES); @@ -1718,7 +2045,7 @@ static void reconcileInstanceVariables(Class cls, Class supercls) if (PrintIvars) { _objc_inform("IVARS: sliding ivars for class %s " "(superclass was %u bytes, now %u)", - ro->name, ro->instanceStart, + cls->nameForLogging(), ro->instanceStart, super_ro->instanceSize); } class_ro_t *ro_w = make_ro_writeable(rw); @@ -1753,7 +2080,8 @@ static void reconcileInstanceVariables(Class cls, Class supercls) // Rebuild layout strings if necessary. if (layoutsChanged) { if (PrintIvars) { - _objc_inform("IVARS: gc layout changed for class %s",ro->name); + _objc_inform("IVARS: gc layout changed for class %s", + cls->nameForLogging()); } class_ro_t *ro_w = make_ro_writeable(rw); ro = rw->ro; @@ -1770,6 +2098,7 @@ static void reconcileInstanceVariables(Class cls, Class supercls) } } + /*********************************************************************** * realizeClass * Performs first-time initialization on class cls, @@ -1791,17 +2120,19 @@ static Class realizeClass(Class cls) if (cls->isRealized()) return cls; assert(cls == remapClass(cls)); + // fixme verify class is not in an un-dlopened part of the shared cache? + ro = (const class_ro_t *)cls->data(); if (ro->flags & RO_FUTURE) { // This was a future class. 
rw data is already allocated. rw = cls->data(); ro = cls->data()->ro; - cls->changeInfo(RW_REALIZED, RW_FUTURE); + cls->changeInfo(RW_REALIZED|RW_REALIZING, RW_FUTURE); } else { // Normal class. Allocate writeable class data. rw = (class_rw_t *)_calloc_internal(sizeof(class_rw_t), 1); rw->ro = ro; - rw->flags = RW_REALIZED; + rw->flags = RW_REALIZED|RW_REALIZING; cls->setData(rw); } @@ -1811,7 +2142,8 @@ static Class realizeClass(Class cls) if (PrintConnecting) { _objc_inform("CLASS: realizing class '%s' %s %p %p", - ro->name, isMeta ? "(meta)" : "", (void*)cls, ro); + cls->nameForLogging(), isMeta ? "(meta)" : "", + (void*)cls, ro); } // Realize superclass and metaclass, if they aren't already. @@ -1819,39 +2151,49 @@ static Class realizeClass(Class cls) supercls = realizeClass(remapClass(cls->superclass)); metacls = realizeClass(remapClass(cls->ISA())); - // Check for remapped superclass and metaclass - if (supercls != cls->superclass) { - cls->superclass = supercls; - } - if (metacls != cls->ISA()) { - cls->changeIsa(metacls); - } - - /* debug: print them all - if (ro->ivars) { - uint32_t i; - for (i = 0; i < ro->ivars->count; i++) { - ivar_t *ivar = ivar_list_nth(ro->ivars, i); - if (!ivar->offset) continue; // anonymous bitfield - - _objc_inform("IVARS: %s.%s (offset %u, size %u, align %u)", - ro->name, ivar->name, - *ivar->offset, ivar->size, ivar->alignment()); - } - } - */ + // Update superclass and metaclass in case of remapping + cls->superclass = supercls; + cls->initClassIsa(metacls); // Reconcile instance variable offsets / layout. - if (supercls && !isMeta) reconcileInstanceVariables(cls, supercls); + // This may reallocate class_ro_t, updating our ro variable. + if (supercls && !isMeta) reconcileInstanceVariables(cls, supercls, ro); + + // Set fastInstanceSize if it wasn't set already. + cls->setInstanceSize(ro->instanceSize); // Copy some flags from ro to rw if (ro->flags & RO_HAS_CXX_STRUCTORS) { - rw->flags |= RW_HAS_CXX_DTOR; + cls->setHasCxxDtor(); if (! (ro->flags & RO_HAS_CXX_DTOR_ONLY)) { - rw->flags |= RW_HAS_CXX_CTOR; + cls->setHasCxxCtor(); } } + // Disable non-pointer isa for some classes and/or platforms. +#if SUPPORT_NONPOINTER_ISA + { + bool disable = false; + static bool hackedDispatch = false; + + if (DisableIndexedIsa) { + // Non-pointer isa disabled by environment or GC or app SDK version + disable = true; + } + else if (!hackedDispatch && !(ro->flags & RO_META) && + 0 == strcmp(ro->name, "OS_object")) + { + // hack for libdispatch et al - isa also acts as vtable pointer + hackedDispatch = true; + disable = true; + } + + if (disable) { + cls->setRequiresRawIsa(false/*inherited*/); + } + } +#endif + // Connect this class to its superclass's subclass lists if (supercls) { addSubclass(supercls, cls); @@ -1962,6 +2304,36 @@ Class _objc_allocateFutureClass(const char *name) } +/*********************************************************************** +* objc_getFutureClass. Return the id of the named class. +* If the class does not exist, return an uninitialized class +* structure that will be used for the class when and if it +* does get loaded. +* Not thread safe. 
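objc_getFutureClass(), whose definition follows, lets a framework reserve a class object for a name before the defining image is loaded, so the pointer can be wired into static data now and filled in when the real class arrives. A hedged usage sketch with an invented class name; real callers (toll-free bridging setup, for example) would store the returned Class somewhere longer-lived.

#include <objc/objc.h>

// Declared here only for the sketch; the definition is the function below.
extern "C" Class objc_getFutureClass(const char *name);

// Hypothetical caller: reserve a class object by name. Not thread safe,
// per the comment above.
static Class demoReserveFutureClass(void)
{
    Class placeholder = objc_getFutureClass("DemoCFString");   // invented name
    // The pointer stays valid: when the real class is read in later, its data
    // is copied into this same structure (see readClass further down).
    return placeholder;
}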
+**********************************************************************/ +Class objc_getFutureClass(const char *name) +{ + Class cls; + + // YES unconnected, NO class handler + // (unconnected is OK because it will someday be the real class) + cls = look_up_class(name, YES, NO); + if (cls) { + if (PrintFuture) { + _objc_inform("FUTURE: found %p already in use for %s", + (void*)cls, name); + } + + return cls; + } + + // No class or future class with that name yet. Make one. + // fixme not thread-safe with respect to + // simultaneous library load or getFutureClass. + return _objc_allocateFutureClass(name); +} + + /*********************************************************************** * **********************************************************************/ @@ -1991,14 +2363,14 @@ static void flushCaches(Class cls) mutex_lock(&cacheUpdateLock); if (cls) { - FOREACH_REALIZED_CLASS_AND_SUBCLASS(c, cls, { + foreach_realized_class_and_subclass(cls, ^(Class c){ cache_erase_nolock(&c->cache); }); if (!cls->superclass) { // root; metaclasses are subclasses and were flushed above } else { - FOREACH_REALIZED_CLASS_AND_SUBCLASS(c, cls->ISA(), { + foreach_realized_class_and_subclass(cls->ISA(), ^(Class c){ cache_erase_nolock(&c->cache); }); } @@ -2028,7 +2400,7 @@ static void flushImps(Class cls, SEL sel1, IMP imp1, SEL sel2, IMP imp2) mutex_lock(&cacheUpdateLock); if (cls) { - FOREACH_REALIZED_CLASS_AND_SUBCLASS(c, cls, { + foreach_realized_class_and_subclass(cls, ^(Class c){ cache_eraseImp_nolock(c, sel1, imp1); if (sel2) cache_eraseImp_nolock(c, sel2, imp2); }); @@ -2036,7 +2408,7 @@ static void flushImps(Class cls, SEL sel1, IMP imp1, SEL sel2, IMP imp2) if (!cls->superclass) { // root; metaclasses are subclasses and were flushed above } else { - FOREACH_REALIZED_CLASS_AND_SUBCLASS(c, cls->ISA(), { + foreach_realized_class_and_subclass(cls->ISA(), ^(Class c){ cache_eraseImp_nolock(c, sel1, imp1); if (sel2) cache_eraseImp_nolock(c, sel2, imp2); }); @@ -2150,6 +2522,119 @@ unmap_image(const struct mach_header *mh, intptr_t vmaddr_slide) + +/*********************************************************************** +* readClass +* Read a class and metaclass as written by a compiler. +* Returns the new class pointer. This could be: +* - cls +* - nil (cls has a missing weak-linked superclass) +* - something else (space for this class was reserved by a future class) +* +* Locking: runtimeLock acquired by map_images or objc_readClassPair +**********************************************************************/ +static unsigned int PreoptTotalMethodLists; +static unsigned int PreoptOptimizedMethodLists; +static unsigned int PreoptTotalClasses; +static unsigned int PreoptOptimizedClasses; + +Class readClass(Class cls, bool headerIsBundle, bool headerInSharedCache) +{ + const char *mangledName = cls->mangledName(); + + if (missingWeakSuperclass(cls)) { + // No superclass (probably weak-linked). + // Disavow any knowledge of this subclass. + if (PrintConnecting) { + _objc_inform("CLASS: IGNORING class '%s' with " + "missing weak-linked superclass", + cls->nameForLogging()); + } + addRemappedClass(cls, nil); + cls->superclass = nil; + return nil; + } + + // Note: Class __ARCLite__'s hack does not go through here. + // Class structure fixups that apply to it also need to be + // performed in non-lazy realization below. + + // These fields should be set to zero because of the + // binding of _objc_empty_vtable, but OS X 10.8's dyld + // does not bind shared cache absolute symbols as expected. 
+ // This (and the __ARCLite__ hack below) can be removed + // once the simulator drops 10.8 support. +#if TARGET_IPHONE_SIMULATOR + if (cls->cache._mask) cls->cache._mask = 0; + if (cls->cache._occupied) cls->cache._occupied = 0; + if (cls->ISA()->cache._mask) cls->ISA()->cache._mask = 0; + if (cls->ISA()->cache._occupied) cls->ISA()->cache._occupied = 0; +#endif + + NXMapTable *future_named_class_map = futureNamedClasses(); + + if (NXCountMapTable(future_named_class_map) > 0) { + Class newCls = nil; + newCls = (Class)NXMapGet(future_named_class_map, mangledName); + removeFutureNamedClass(mangledName); + + if (newCls) { + // Copy objc_class to future class's struct. + // Preserve future's rw data block. + + if (newCls->isSwift()) { + _objc_fatal("Can't complete future class request for '%s' " + "because the real class is too big.", + cls->nameForLogging()); + } + + class_rw_t *rw = newCls->data(); + const class_ro_t *old_ro = rw->ro; + memcpy(newCls, cls, sizeof(objc_class)); + rw->ro = (class_ro_t *)newCls->data(); + newCls->setData(rw); + _free_internal((void *)old_ro->name); + _free_internal((void *)old_ro); + + addRemappedClass(cls, newCls); + + cls = newCls; + } + } + + PreoptTotalClasses++; + if (headerInSharedCache && isPreoptimized()) { + // class list built in shared cache + // fixme strict assert doesn't work because of duplicates + // assert(cls == getClass(name)); + assert(getClass(mangledName)); + PreoptOptimizedClasses++; + } else { + addNamedClass(cls, mangledName); + } + + // for future reference: shared cache never contains MH_BUNDLEs + if (headerIsBundle) { + cls->data()->flags |= RO_FROM_BUNDLE; + cls->ISA()->data()->flags |= RO_FROM_BUNDLE; + } + + if (PrintPreopt) { + const method_list_t *mlist; + if ((mlist = ((class_ro_t *)cls->data())->baseMethods)) { + PreoptTotalMethodLists++; + if (isMethodListFixedUp(mlist)) PreoptOptimizedMethodLists++; + } + if ((mlist = ((class_ro_t *)cls->ISA()->data())->baseMethods)) { + PreoptTotalMethodLists++; + if (isMethodListFixedUp(mlist)) PreoptOptimizedMethodLists++; + } + } + + return cls; +} + + /*********************************************************************** * _read_images * Perform initial processing of the headers in the linked @@ -2167,10 +2652,6 @@ void _read_images(header_info **hList, uint32_t hCount) size_t i; Class *resolvedFutureClasses = nil; size_t resolvedFutureClassCount = 0; - static unsigned int totalMethodLists; - static unsigned int preoptimizedMethodLists; - static unsigned int totalClasses; - static unsigned int preoptimizedClasses; static BOOL doneOnce; rwlock_assert_writing(&runtimeLock); @@ -2183,6 +2664,33 @@ void _read_images(header_info **hList, uint32_t hCount) if (!doneOnce) { doneOnce = YES; +#if SUPPORT_NONPOINTER_ISA + +# if TARGET_OS_MAC && !TARGET_OS_IPHONE + // Disable non-pointer isa if the app is too old. + if (AppSDKVersion < INSERT VERSION HERE) { + DisableIndexedIsa = true; + if (PrintRawIsa) { + _objc_inform("RAW ISA: disabling non-pointer isa because " + "the app is too old (SDK version %hu.%hhu.%hhu)", + (unsigned short)(AppSDKVersion>>16), + (unsigned char)(AppSDKVersion>>8), + (unsigned char)(AppSDKVersion)); + } + } +# endif + + // Disable non-pointer isa for all GC apps. 
+ if (UseGC) { + DisableIndexedIsa = true; + if (PrintRawIsa) { + _objc_inform("RAW ISA: disabling non-pointer isa because " + "the app is GC"); + } + } + +#endif + if (DisableTaggedPointers) { disableTaggedPointers(); } @@ -2221,7 +2729,6 @@ void _read_images(header_info **hList, uint32_t hCount) // Discover classes. Fix up unresolved future classes. Mark bundle classes. - NXMapTable *future_named_class_map = futureNamedClasses(); for (EACH_HEADER) { bool headerIsBundle = (hi->mhdr->filetype == MH_BUNDLE); @@ -2230,44 +2737,11 @@ void _read_images(header_info **hList, uint32_t hCount) classref_t *classlist = _getObjc2ClassList(hi, &count); for (i = 0; i < count; i++) { Class cls = (Class)classlist[i]; - const char *name = cls->name(); - - if (missingWeakSuperclass(cls)) { - // No superclass (probably weak-linked). - // Disavow any knowledge of this subclass. - if (PrintConnecting) { - _objc_inform("CLASS: IGNORING class '%s' with " - "missing weak-linked superclass", name); - } - addRemappedClass(cls, nil); - cls->superclass = nil; - continue; - } - - // Note: Class __ARCLite__'s hack does not go through here. - // Class structure fixups that apply to it also need to be - // performed in non-lazy realization below. - - Class newCls = nil; - if (NXCountMapTable(future_named_class_map) > 0) { - newCls = (Class)NXMapGet(future_named_class_map, name); - removeFutureNamedClass(name); - } - - if (newCls) { - // Copy objc_class to future class's struct. - // Preserve future's rw data block. - class_rw_t *rw = newCls->data(); - const class_ro_t *old_ro = rw->ro; - memcpy(newCls, cls, sizeof(objc_class)); - rw->ro = (class_ro_t *)newCls->data(); - newCls->setData(rw); - _free_internal((void *)old_ro->name); - _free_internal((void *)old_ro); - - addRemappedClass(cls, newCls); - cls = newCls; + Class newCls = readClass(cls, headerIsBundle, headerInSharedCache); + if (newCls != cls && newCls) { + // Class was moved but not deleted. Currently this occurs + // only when the new class resolved a future class. // Non-lazily realize the class below. 
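For readers new to future classes: objc_getFutureClass() (added near the top of this file's changes) reserves an uninitialized class structure under a name so that a caller such as CoreFoundation's toll-free bridging can hold the class's address before the defining image loads; readClass() later copies the compiler-written class into that reserved structure and remaps the old pointer to it. A minimal caller-side sketch, assuming a hypothetical class MyLazyClass defined in a library loaded later, and the objc_getFutureClass declaration from this era's <objc/runtime.h> (unavailable under ARC):

    #include <dlfcn.h>
    #include <objc/runtime.h>
    #include <stdio.h>

    static Class gLazyClass;

    void reserveLazyClass(void) {
        // Reserve (or find) the class named "MyLazyClass" before its
        // image is loaded; the returned pointer stays valid afterwards.
        gLazyClass = objc_getFutureClass("MyLazyClass");
    }

    void useLazyClass(void) {
        // Once the defining library is loaded, readClass() has filled in
        // the reserved structure, so the same pointer is the real class.
        dlopen("libMyLazy.dylib", RTLD_NOW);
        printf("%s instance size: %zu\n",
               class_getName(gLazyClass),
               class_getInstanceSize(gLazyClass));
    }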
resolvedFutureClasses = (Class *) _realloc_internal(resolvedFutureClasses, @@ -2275,47 +2749,18 @@ void _read_images(header_info **hList, uint32_t hCount) * sizeof(Class)); resolvedFutureClasses[resolvedFutureClassCount++] = newCls; } - - totalClasses++; - if (headerInSharedCache && isPreoptimized()) { - // class list built in shared cache - // fixme strict assert doesn't work because of duplicates - // assert(cls == getClass(name)); - assert(getClass(name)); - preoptimizedClasses++; - } else { - addNamedClass(cls, name); - } - - // for future reference: shared cache never contains MH_BUNDLEs - if (headerIsBundle) { - cls->data()->flags |= RO_FROM_BUNDLE; - cls->ISA()->data()->flags |= RO_FROM_BUNDLE; - } - - if (PrintPreopt) { - const method_list_t *mlist; - if ((mlist = ((class_ro_t *)cls->data())->baseMethods)) { - totalMethodLists++; - if (isMethodListFixedUp(mlist)) preoptimizedMethodLists++; - } - if ((mlist = ((class_ro_t *)cls->ISA()->data())->baseMethods)) { - totalMethodLists++; - if (isMethodListFixedUp(mlist)) preoptimizedMethodLists++; - } - } } } - if (PrintPreopt && totalMethodLists) { + if (PrintPreopt && PreoptTotalMethodLists) { _objc_inform("PREOPTIMIZATION: %u/%u (%.3g%%) method lists pre-sorted", - preoptimizedMethodLists, totalMethodLists, - 100.0*preoptimizedMethodLists/totalMethodLists); + PreoptOptimizedMethodLists, PreoptTotalMethodLists, + 100.0*PreoptOptimizedMethodLists/PreoptTotalMethodLists); } - if (PrintPreopt && totalClasses) { + if (PrintPreopt && PreoptTotalClasses) { _objc_inform("PREOPTIMIZATION: %u/%u (%.3g%%) classes pre-registered", - preoptimizedClasses, totalClasses, - 100.0*preoptimizedClasses/totalClasses); + PreoptOptimizedClasses, PreoptTotalClasses, + 100.0*PreoptOptimizedClasses/PreoptTotalClasses); } // Fix up remapped classes @@ -2353,10 +2798,11 @@ void _read_images(header_info **hList, uint32_t hCount) if (sel_preoptimizationValid(hi)) continue; + bool isBundle = hi->mhdr->filetype == MH_BUNDLE; SEL *sels = _getObjc2SelectorRefs(hi, &count); - BOOL isBundle = hi->mhdr->filetype == MH_BUNDLE; for (i = 0; i < count; i++) { - sels[i] = sel_registerNameNoLock((const char *)sels[i], isBundle); + const char *name = sel_cname(sels[i]); + sels[i] = sel_registerNameNoLock(name, isBundle); } } sel_unlock(); @@ -2378,35 +2824,43 @@ void _read_images(header_info **hList, uint32_t hCount) #endif // Discover protocols. Fix up protocol refs. 
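The selector-reference fixup just above re-registers every compiler-emitted selector through sel_registerNameNoLock(), which is what guarantees that all images end up sharing a single uniqued SEL per name and that selector comparison is pointer comparison. The public-API form of that invariant, as a quick sketch:

    #include <assert.h>
    #include <objc/runtime.h>
    #include <string.h>

    void selectorUniquingDemo(void) {
        // Registering the same name twice yields the same SEL pointer.
        SEL a = sel_registerName("count");
        SEL b = sel_registerName("count");
        assert(a == b);
        assert(sel_isEqual(a, b));
        assert(strcmp(sel_getName(a), "count") == 0);
    }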
- NXMapTable *protocol_map = protocols(); for (EACH_HEADER) { extern objc_class OBJC_CLASS_$_Protocol; Class cls = (Class)&OBJC_CLASS_$_Protocol; assert(cls); - protocol_t **protocols = _getObjc2ProtocolList(hi, &count); - // fixme duplicate protocol from bundle + protocol_t **protolist = _getObjc2ProtocolList(hi, &count); + NXMapTable *protocol_map = protocols(); + // fixme duplicate protocols from unloadable bundle for (i = 0; i < count; i++) { - if (!NXMapGet(protocol_map, protocols[i]->name)) { - protocols[i]->initIsa(cls); + protocol_t *oldproto = (protocol_t *) + getProtocol(protolist[i]->mangledName); + if (!oldproto) { + size_t size = max(sizeof(protocol_t), + (size_t)protolist[i]->size); + protocol_t *newproto = (protocol_t *)_calloc_internal(size, 1); + memcpy(newproto, protolist[i], protolist[i]->size); + newproto->size = (typeof(newproto->size))size; + + newproto->initIsa(cls); // fixme pinned NXMapKeyCopyingInsert(protocol_map, - protocols[i]->name, protocols[i]); + newproto->mangledName, newproto); if (PrintProtocols) { _objc_inform("PROTOCOLS: protocol at %p is %s", - protocols[i], protocols[i]->name); + newproto, newproto->nameForLogging()); } } else { if (PrintProtocols) { _objc_inform("PROTOCOLS: protocol at %p is %s (duplicate)", - protocols[i], protocols[i]->name); + protolist[i], oldproto->nameForLogging()); } } } } for (EACH_HEADER) { - protocol_t **protocols; - protocols = _getObjc2ProtocolRefs(hi, &count); + protocol_t **protolist; + protolist = _getObjc2ProtocolRefs(hi, &count); for (i = 0; i < count; i++) { - remapProtocolRef(&protocols[i]); + remapProtocolRef(&protolist[i]); } } @@ -2418,6 +2872,22 @@ void _read_images(header_info **hList, uint32_t hCount) Class cls = remapClass(classlist[i]); if (!cls) continue; + // hack for class __ARCLite__, which didn't get this above +#if TARGET_IPHONE_SIMULATOR + if (cls->cache._buckets == (void*)&_objc_empty_cache && + (cls->cache._mask || cls->cache._occupied)) + { + cls->cache._mask = 0; + cls->cache._occupied = 0; + } + if (cls->ISA()->cache._buckets == (void*)&_objc_empty_cache && + (cls->ISA()->cache._mask || cls->ISA()->cache._occupied)) + { + cls->ISA()->cache._mask = 0; + cls->ISA()->cache._occupied = 0; + } +#endif + realizeClass(cls); } } @@ -2426,6 +2896,7 @@ void _read_images(header_info **hList, uint32_t hCount) if (resolvedFutureClasses) { for (i = 0; i < resolvedFutureClassCount; i++) { realizeClass(resolvedFutureClasses[i]); + resolvedFutureClasses[i]->setRequiresRawIsa(false/*inherited*/); } _free_internal(resolvedFutureClasses); } @@ -2465,7 +2936,7 @@ void _read_images(header_info **hList, uint32_t hCount) } if (PrintConnecting) { _objc_inform("CLASS: found category -%s(%s) %s", - cls->name(), cat->name, + cls->nameForLogging(), cat->name, classExists ? "on existing class" : ""); } } @@ -2479,7 +2950,7 @@ void _read_images(header_info **hList, uint32_t hCount) } if (PrintConnecting) { _objc_inform("CLASS: found category +%s(%s)", - cls->name(), cat->name); + cls->nameForLogging(), cat->name); } } } @@ -3112,15 +3583,38 @@ _protocol_getMethodTypeEncoding(Protocol *proto_gen, SEL sel, return enc; } + +/*********************************************************************** +* protocol_t::demangledName +* Returns the (Swift-demangled) name of the given protocol. +* Locking: none +**********************************************************************/ +const char * +protocol_t::demangledName() +{ + assert(size >= offsetof(protocol_t, _demangledName)+sizeof(_demangledName)); + + if (! 
_demangledName) { + char *de = copySwiftV1DemangledName(mangledName, true/*isProtocol*/); + if (! OSAtomicCompareAndSwapPtrBarrier(nil, (void*)(de ?: mangledName), + (void**)&_demangledName)) + { + if (de) free(de); + } + } + return _demangledName; +} + /*********************************************************************** * protocol_getName -* Returns the name of the given protocol. +* Returns the (Swift-demangled) name of the given protocol. * Locking: runtimeLock must not be held by the caller **********************************************************************/ const char * protocol_getName(Protocol *proto) { - return newprotocol(proto)->name; + if (!proto) return "nil"; + else return newprotocol(proto)->demangledName(); } @@ -3157,7 +3651,7 @@ protocol_conformsToProtocol_nolock(protocol_t *self, protocol_t *other) // protocols need not be fixed up - if (0 == strcmp(self->name, other->name)) { + if (0 == strcmp(self->mangledName, other->mangledName)) { return YES; } @@ -3165,7 +3659,7 @@ protocol_conformsToProtocol_nolock(protocol_t *self, protocol_t *other) uintptr_t i; for (i = 0; i < self->protocols->count; i++) { protocol_t *proto = remapProtocol(self->protocols->list[i]); - if (0 == strcmp(other->name, proto->name)) { + if (0 == strcmp(other->mangledName, proto->mangledName)) { return YES; } if (protocol_conformsToProtocol_nolock(proto, other)) { @@ -3416,7 +3910,7 @@ objc_allocateProtocol(const char *name) { rwlock_write(&runtimeLock); - if (NXMapGet(protocols(), name)) { + if (getProtocol(name)) { rwlock_unlock_write(&runtimeLock); return nil; } @@ -3425,8 +3919,10 @@ objc_allocateProtocol(const char *name) extern objc_class OBJC_CLASS_$___IncompleteProtocol; Class cls = (Class)&OBJC_CLASS_$___IncompleteProtocol; - result->initIsa(cls); - result->name = _strdup_internal(name); + result->initProtocolIsa(cls); + result->size = sizeof(protocol_t); + // fixme mangle the name if it looks swift-y? 
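These protocol hunks switch registration and lookup to the protocol's mangledName and make protocol_getName() return a demangled display name, but the public construction API is unchanged. A round-trip sketch using a hypothetical protocol name:

    #include <assert.h>
    #include <objc/runtime.h>
    #include <string.h>

    void buildProtocolAtRuntime(void) {
        // Allocate a protocol under construction; nil means the name is taken.
        Protocol *p = objc_allocateProtocol("MyDemoProtocol");
        if (!p) return;

        // Add one required instance method ("v@:" = void return, self, _cmd),
        // then publish the protocol.
        protocol_addMethodDescription(p, sel_registerName("demoMethod"),
                                      "v@:", YES /*required*/, YES /*instance*/);
        objc_registerProtocol(p);

        // Registered protocols are visible by name; protocol_getName()
        // returns the display (Swift-demangled) name.
        Protocol *q = objc_getProtocol("MyDemoProtocol");
        assert(q == p);
        assert(strcmp(protocol_getName(q), "MyDemoProtocol") == 0);
    }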
+ result->mangledName = _strdup_internal(name); // fixme reserve name without installing @@ -3455,20 +3951,20 @@ void objc_registerProtocol(Protocol *proto_gen) if (proto->ISA() == cls) { _objc_inform("objc_registerProtocol: protocol '%s' was already " - "registered!", proto->name); + "registered!", proto->nameForLogging()); rwlock_unlock_write(&runtimeLock); return; } if (proto->ISA() != oldcls) { _objc_inform("objc_registerProtocol: protocol '%s' was not allocated " - "with objc_allocateProtocol!", proto->name); + "with objc_allocateProtocol!", proto->nameForLogging()); rwlock_unlock_write(&runtimeLock); return; } - proto->initIsa(cls); + proto->initProtocolIsa(cls); - NXMapKeyCopyingInsert(protocols(), proto->name, proto); + NXMapKeyCopyingInsert(protocols(), proto->mangledName, proto); rwlock_unlock_write(&runtimeLock); } @@ -3497,13 +3993,13 @@ protocol_addProtocol(Protocol *proto_gen, Protocol *addition_gen) if (proto->ISA() != cls) { _objc_inform("protocol_addProtocol: modified protocol '%s' is not " - "under construction!", proto->name); + "under construction!", proto->nameForLogging()); rwlock_unlock_write(&runtimeLock); return; } if (addition->ISA() == cls) { _objc_inform("protocol_addProtocol: added protocol '%s' is still " - "under construction!", addition->name); + "under construction!", addition->nameForLogging()); rwlock_unlock_write(&runtimeLock); return; } @@ -3566,7 +4062,7 @@ protocol_addMethodDescription(Protocol *proto_gen, SEL name, const char *types, if (proto->ISA() != cls) { _objc_inform("protocol_addMethodDescription: protocol '%s' is not " - "under construction!", proto->name); + "under construction!", proto->nameForLogging()); rwlock_unlock_write(&runtimeLock); return; } @@ -3628,7 +4124,7 @@ protocol_addProperty(Protocol *proto_gen, const char *name, if (proto->ISA() != cls) { _objc_inform("protocol_addProperty: protocol '%s' is not " - "under construction!", proto->name); + "under construction!", proto->nameForLogging()); rwlock_unlock_write(&runtimeLock); return; } @@ -3777,7 +4273,7 @@ objc_copyProtocolList(unsigned int *outCount) Protocol *objc_getProtocol(const char *name) { rwlock_read(&runtimeLock); - Protocol *result = (Protocol *)NXMapGet(protocols(), name); + Protocol *result = getProtocol(name); rwlock_unlock_read(&runtimeLock); return result; } @@ -3941,10 +4437,13 @@ objc_class::getLoadMethod() assert(ISA()->isMetaClass()); mlist = ISA()->data()->ro->baseMethods; - if (mlist) for (i = 0; i < mlist->count; i++) { - method_t *m = method_list_nth(mlist, i); - if (0 == strcmp((const char *)m->name, "load")) { - return m->imp; + if (mlist) { + for (i = 0; i < mlist->count; i++) { + method_t *m = method_list_nth(mlist, i); + const char *name = sel_cname(m->name); + if (0 == strcmp(name, "load")) { + return m->imp; + } } } @@ -3968,14 +4467,14 @@ _category_getName(Category cat) * _category_getClassName * Returns a category's class's name * Called only from add_category_to_loadable_list and -* remove_category_from_loadable_list. +* remove_category_from_loadable_list for logging purposes. 
* Locking: runtimeLock must be read- or write-locked by the caller **********************************************************************/ const char * _category_getClassName(Category cat) { rwlock_assert_locked(&runtimeLock); - return remapClass(cat->cls)->name(); + return remapClass(cat->cls)->nameForLogging(); } @@ -4011,10 +4510,13 @@ _category_getLoadMethod(Category cat) uint32_t i; mlist = cat->classMethods; - if (mlist) for (i = 0; i < mlist->count; i++) { - method_t *m = method_list_nth(mlist, i); - if (0 == strcmp((const char *)m->name, "load")) { - return m->imp; + if (mlist) { + for (i = 0; i < mlist->count; i++) { + method_t *m = method_list_nth(mlist, i); + const char *name = sel_cname(m->name); + if (0 == strcmp(name, "load")) { + return m->imp; + } } } @@ -4070,7 +4572,7 @@ class_copyProtocolList(Class cls, unsigned int *outCount) /*********************************************************************** * _objc_copyClassNamesForImage * fixme -* Locking: read-locks runtimeLock +* Locking: write-locks runtimeLock **********************************************************************/ const char ** _objc_copyClassNamesForImage(header_info *hi, unsigned int *outCount) @@ -4079,7 +4581,8 @@ _objc_copyClassNamesForImage(header_info *hi, unsigned int *outCount) classref_t *classlist; const char **names; - rwlock_read(&runtimeLock); + // Need to write-lock in case demangledName() needs to realize a class. + rwlock_write(&runtimeLock); classlist = _getObjc2ClassList(hi, &count); names = (const char **)malloc((count+1) * sizeof(const char *)); @@ -4088,7 +4591,7 @@ _objc_copyClassNamesForImage(header_info *hi, unsigned int *outCount) for (i = 0; i < count; i++) { Class cls = remapClass(classlist[i]); if (cls) { - names[i-shift] = cls->name(); + names[i-shift] = cls->demangledName(true/*realize*/); } else { shift++; // ignored weak-linked class } @@ -4096,7 +4599,7 @@ _objc_copyClassNamesForImage(header_info *hi, unsigned int *outCount) count -= shift; names[count] = nil; - rwlock_unlock_read(&runtimeLock); + rwlock_unlock_write(&runtimeLock); if (outCount) *outCount = (unsigned int)count; return names; @@ -4115,7 +4618,7 @@ alignedInstanceStart(Class cls) { assert(cls); assert(cls->isRealized()); - return (uint32_t)((cls->data()->ro->instanceStart + WORD_MASK) & ~WORD_MASK); + return (uint32_t)word_align(cls->data()->ro->instanceStart); } uint32_t _class_getInstanceStart(Class cls) { @@ -4123,6 +4626,115 @@ uint32_t _class_getInstanceStart(Class cls) { } +/*********************************************************************** +* saveTemporaryString +* Save a string in a thread-local FIFO buffer. +* This is suitable for temporary strings generated for logging purposes. +**********************************************************************/ +static void +saveTemporaryString(char *str) +{ + // Fixed-size FIFO. We free the first string, shift + // the rest, and add the new string to the end. + _objc_pthread_data *data = _objc_fetch_pthread_data(true); + if (data->printableNames[0]) { + free(data->printableNames[0]); + } + int last = countof(data->printableNames) - 1; + for (int i = 0; i < last; i++) { + data->printableNames[i] = data->printableNames[i+1]; + } + data->printableNames[last] = str; +} + + +/*********************************************************************** +* objc_class::nameForLogging +* Returns the class's name, suitable for display. +* The returned memory is TEMPORARY. Print it or copy it immediately. 
+* Locking: none +**********************************************************************/ +const char * +objc_class::nameForLogging() +{ + // Handle the easy case directly. + if (isRealized() || isFuture()) { + if (data()->demangledName) return data()->demangledName; + } + + char *result; + + const char *name = mangledName(); + char *de = copySwiftV1DemangledName(name); + if (de) result = de; + else result = strdup(name); + + saveTemporaryString(result); + return result; +} + + +/*********************************************************************** +* objc_class::demangledName +* If realize=false, the class must already be realized or future. +* Locking: If realize=true, runtimeLock must be held for writing by the caller. +**********************************************************************/ +const char * +objc_class::demangledName(bool realize) +{ + // Return previously demangled name if available. + if (isRealized() || isFuture()) { + if (data()->demangledName) return data()->demangledName; + } + + // Try demangling the mangled name. + const char *mangled = mangledName(); + char *de = copySwiftV1DemangledName(mangled); + if (isRealized() || isFuture()) { + // Class is already realized or future. + // Save demangling result in rw data. + // We may not own rwlock for writing so use an atomic operation instead. + if (! OSAtomicCompareAndSwapPtrBarrier(nil, (void*)(de ?: mangled), + (void**)&data()->demangledName)) + { + if (de) free(de); + } + return data()->demangledName; + } + + // Class is not yet realized. + if (!de) { + // Name is not mangled. Return it without caching. + return mangled; + } + + // Class is not yet realized and name is mangled. Realize the class. + // Only objc_copyClassNamesForImage() should get here. + rwlock_assert_writing(&runtimeLock); + assert(realize); + if (realize) { + realizeClass((Class)this); + data()->demangledName = de; + return de; + } else { + return de; // bug - just leak + } +} + + +/*********************************************************************** +* class_getName +* fixme +* Locking: acquires runtimeLock +**********************************************************************/ +const char *class_getName(Class cls) +{ + if (!cls) return "nil"; + assert(cls->isRealized() || cls->isFuture()); + return cls->demangledName(); +} + + /*********************************************************************** * class_getVersion * fixme @@ -4315,8 +4927,8 @@ log_and_fill_cache(Class cls, Class implementer, IMP imp, SEL sel) #if SUPPORT_MESSAGE_LOGGING if (objcMsgLogEnabled) { bool cacheIt = logMessageSend(implementer->isMetaClass(), - cls->getName(), - implementer->getName(), + cls->nameForLogging(), + implementer->nameForLogging(), sel); if (!cacheIt) return; } @@ -4332,7 +4944,7 @@ log_and_fill_cache(Class cls, Class implementer, IMP imp, SEL sel) * already tried that. 
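Both protocol_t::demangledName() earlier and objc_class::demangledName() here publish a lazily computed name with a compare-and-swap instead of taking a lock: each thread may compute its own copy, but only one copy is installed and the losers free theirs. A standalone sketch of that publish-once idiom (computeName() is a hypothetical stand-in for the demangler):

    #include <libkern/OSAtomic.h>
    #include <stdlib.h>
    #include <string.h>

    static char * volatile cachedName;

    static char *computeName(void) {
        return strdup("DemangledName");   // stand-in for the real work
    }

    const char *cachedNameOnce(void) {
        if (!cachedName) {
            char *mine = computeName();
            // Install our copy only if nobody beat us to it; otherwise
            // discard it and fall through to the published value.
            if (!OSAtomicCompareAndSwapPtrBarrier(NULL, mine,
                                                  (void * volatile *)&cachedName)) {
                free(mine);
            }
        }
        return cachedName;
    }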
**********************************************************************/ IMP _class_lookupMethodAndLoadCache3(id obj, SEL sel, Class cls) -{ +{ return lookUpImpOrForward(cls, sel, obj, YES/*initialize*/, NO/*cache*/, YES/*resolver*/); } @@ -4559,7 +5171,7 @@ objc_property_t class_getProperty(Class cls, const char *name) Class gdb_class_getClass(Class cls) { - const char *className = cls->name(); + const char *className = cls->mangledName(); if(!className || !strlen(className)) return Nil; Class rCls = look_up_class(className, NO, NO); return rCls; @@ -4579,12 +5191,121 @@ void objc_class::setInitialized() { Class metacls; + Class cls; assert(!isMetaClass()); - metacls = this->ISA(); + cls = (Class)this; + metacls = cls->ISA(); + + rwlock_read(&runtimeLock); + + // Scan metaclass for custom AWZ. + // Scan metaclass for custom RR. + // Scan class for custom RR. + // Also print custom RR/AWZ because we probably haven't done it yet. + + // Special cases: + // GC's RR and AWZ are never default. + // NSObject AWZ class methods are default. + // NSObject RR instance methods are default. + // updateCustomRR_AWZ() also knows these special cases. + // attachMethodLists() also knows these special cases. + + bool inherited; + bool metaCustomAWZ = NO; + if (UseGC) { + // GC is always custom AWZ + metaCustomAWZ = YES; + inherited = NO; + } + else if (MetaclassNSObjectAWZSwizzled) { + // Somebody already swizzled NSObject's methods + metaCustomAWZ = YES; + inherited = NO; + } + else if (metacls == classNSObject()->ISA()) { + // NSObject's metaclass AWZ is default, but we still need to check cats + FOREACH_CATEGORY_METHOD_LIST(mlist, metacls, { + if (methodListImplementsAWZ(mlist)) { + metaCustomAWZ = YES; + inherited = NO; + break; + } + }); + } + else if (metacls->superclass->hasCustomAWZ()) { + // Superclass is custom AWZ, therefore we are too. + metaCustomAWZ = YES; + inherited = YES; + } + else { + // Not metaclass NSObject. + FOREACH_METHOD_LIST(mlist, metacls, { + if (methodListImplementsAWZ(mlist)) { + metaCustomAWZ = YES; + inherited = NO; + break; + } + }); + } + if (!metaCustomAWZ) metacls->setHasDefaultAWZ(); + + if (PrintCustomAWZ && metaCustomAWZ) metacls->printCustomAWZ(inherited); + // metacls->printCustomRR(); + + + bool clsCustomRR = NO; + if (UseGC) { + // GC is always custom RR + clsCustomRR = YES; + inherited = NO; + } + else if (ClassNSObjectRRSwizzled) { + // Somebody already swizzled NSObject's methods + clsCustomRR = YES; + inherited = NO; + } + if (cls == classNSObject()) { + // NSObject's RR is default, but we still need to check categories + FOREACH_CATEGORY_METHOD_LIST(mlist, cls, { + if (methodListImplementsRR(mlist)) { + clsCustomRR = YES; + inherited = NO; + break; + } + }); + } + else if (!cls->superclass) { + // Custom root class + clsCustomRR = YES; + inherited = NO; + } + else if (cls->superclass->hasCustomRR()) { + // Superclass is custom RR, therefore we are too. + clsCustomRR = YES; + inherited = YES; + } + else { + // Not class NSObject. + FOREACH_METHOD_LIST(mlist, cls, { + if (methodListImplementsRR(mlist)) { + clsCustomRR = YES; + inherited = NO; + break; + } + }); + } + if (!clsCustomRR) cls->setHasDefaultRR(); + + // cls->printCustomAWZ(); + if (PrintCustomRR && clsCustomRR) cls->printCustomRR(inherited); + // Update the +initialize flags. + // Do this last. 
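The scan above runs once per class as +initialize completes: if neither the class nor anything it inherits from overrides the retain/release family (RR) or alloc/allocWithZone (AWZ), the class keeps its default bits and the runtime's objc_retain()/objc_alloc() entry points can avoid a full objc_msgSend. Overriding any of those selectors, whether in the class, a superclass, or a category, flips the bit for that class and its subclasses. A class that would be marked custom RR under this scheme (MRC code, purely illustrative):

    #import <Foundation/Foundation.h>

    // Overriding -retain (or -release, -autorelease, -retainCount, ...)
    // makes this class "custom RR": objc_retain() must message it rather
    // than take the default fast path.
    @interface CountingObject : NSObject
    @end

    @implementation CountingObject
    - (id)retain {
        NSLog(@"retain %p", self);
        return [super retain];
    }
    @end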
metacls->changeInfo(RW_INITIALIZED, RW_INITIALIZING); + + rwlock_unlock_read(&runtimeLock); } @@ -4601,64 +5322,165 @@ BOOL _class_usesAutomaticRetainRelease(Class cls) /*********************************************************************** * Return YES if sel is used by retain/release implementors **********************************************************************/ -static bool isRRSelector(SEL sel) +static bool +isRRSelector(SEL sel) +{ + return (sel == SEL_retain || sel == SEL_release || + sel == SEL_autorelease || sel == SEL_retainCount || + sel == SEL_tryRetain || sel == SEL_retainWeakReference || + sel == SEL_isDeallocating || sel == SEL_allowsWeakReference); +} + + +/*********************************************************************** +* Return YES if mlist implements one of the isRRSelector() methods +**********************************************************************/ +static bool +methodListImplementsRR(const method_list_t *mlist) { - return (sel == SEL_retain || sel == SEL_release || - sel == SEL_autorelease || sel == SEL_retainCount); + return (search_method_list(mlist, SEL_retain) || + search_method_list(mlist, SEL_release) || + search_method_list(mlist, SEL_autorelease) || + search_method_list(mlist, SEL_retainCount) || + search_method_list(mlist, SEL_tryRetain) || + search_method_list(mlist, SEL_isDeallocating) || + search_method_list(mlist, SEL_retainWeakReference) || + search_method_list(mlist, SEL_allowsWeakReference)); } /*********************************************************************** * Return YES if sel is used by alloc or allocWithZone implementors **********************************************************************/ -static bool isAWZSelector(SEL sel) +static bool +isAWZSelector(SEL sel) { return (sel == SEL_allocWithZone || sel == SEL_alloc); } +/*********************************************************************** +* Return YES if mlist implements one of the isAWZSelector() methods +**********************************************************************/ +static bool +methodListImplementsAWZ(const method_list_t *mlist) +{ + return (search_method_list(mlist, SEL_allocWithZone) || + search_method_list(mlist, SEL_alloc)); +} + + +void +objc_class::printCustomRR(bool inherited) +{ + assert(PrintCustomRR); + assert(hasCustomRR()); + _objc_inform("CUSTOM RR: %s%s%s", nameForLogging(), + isMetaClass() ? " (meta)" : "", + inherited ? " (inherited)" : ""); +} + +void +objc_class::printCustomAWZ(bool inherited) +{ + assert(PrintCustomAWZ); + assert(hasCustomAWZ()); + _objc_inform("CUSTOM AWZ: %s%s%s", nameForLogging(), + isMetaClass() ? " (meta)" : "", + inherited ? " (inherited)" : ""); +} + +void +objc_class::printRequiresRawIsa(bool inherited) +{ + assert(PrintRawIsa); + assert(requiresRawIsa()); + _objc_inform("RAW ISA: %s%s%s", nameForLogging(), + isMetaClass() ? " (meta)" : "", + inherited ? " (inherited)" : ""); +} + + /*********************************************************************** * Mark this class and all of its subclasses as implementors or * inheritors of custom RR (retain/release/autorelease/retainCount) **********************************************************************/ void objc_class::setHasCustomRR(bool inherited) { + Class cls = (Class)this; rwlock_assert_writing(&runtimeLock); if (hasCustomRR()) return; - FOREACH_REALIZED_CLASS_AND_SUBCLASS(c, (Class)this, { - if (PrintCustomRR && !c->hasCustomRR()) { - _objc_inform("CUSTOM RR: %s%s%s", c->name(), - c->isMetaClass() ? " (meta)" : "", - (inherited || c != (Class)this) ? 
" (inherited)" : ""); - } -#if CLASS_FAST_FLAGS_VIA_RW_DATA - c->data_NEVER_USE |= CLASS_FAST_FLAG_HAS_CUSTOM_RR; -#else - c->data()->flags |= RW_HAS_CUSTOM_RR; -#endif + foreach_realized_class_and_subclass(cls, ^(Class c){ + if (c != cls && !c->isInitialized()) { + // Subclass not yet initialized. Wait for setInitialized() to do it + // fixme short circuit recursion? + return; + } + if (c->hasCustomRR()) { + // fixme short circuit recursion? + return; + } + + c->bits.setHasCustomRR(); + + if (PrintCustomRR) c->printCustomRR(inherited || c != cls); }); } - /*********************************************************************** * Mark this class and all of its subclasses as implementors or * inheritors of custom alloc/allocWithZone: **********************************************************************/ -void objc_class::setHasCustomAWZ(bool inherited ) +void objc_class::setHasCustomAWZ(bool inherited) { + Class cls = (Class)this; rwlock_assert_writing(&runtimeLock); if (hasCustomAWZ()) return; - FOREACH_REALIZED_CLASS_AND_SUBCLASS(c, (Class)this, { - if (PrintCustomAWZ && !c->hasCustomAWZ()) { - _objc_inform("CUSTOM AWZ: %s%s%s", c->name(), - c->isMetaClass() ? " (meta)" : "", - (inherited || c != (Class)this) ? " (inherited)" : ""); + foreach_realized_class_and_subclass(cls, ^(Class c){ + if (c != cls && !c->isInitialized()) { + // Subclass not yet initialized. Wait for setInitialized() to do it + // fixme short circuit recursion? + return; + } + if (c->hasCustomAWZ()) { + // fixme short circuit recursion? + return; + } + + c->bits.setHasCustomAWZ(); + + if (PrintCustomAWZ) c->printCustomAWZ(inherited || c != cls); + }); +} + + +/*********************************************************************** +* Mark this class and all of its subclasses as requiring raw isa pointers +**********************************************************************/ +void objc_class::setRequiresRawIsa(bool inherited) +{ + Class cls = (Class)this; + rwlock_assert_writing(&runtimeLock); + + if (requiresRawIsa()) return; + + foreach_realized_class_and_subclass(cls, ^(Class c){ + if (c->isInitialized()) { + _objc_fatal("too late to require raw isa"); + return; + } + if (c->requiresRawIsa()) { + // fixme short circuit recursion? + return; } - c->data()->flags |= RW_HAS_CUSTOM_AWZ; + + c->bits.setRequiresRawIsa(); + + if (PrintRawIsa) c->printRequiresRawIsa(inherited || c != cls); }); } @@ -4670,47 +5492,75 @@ static void updateCustomRR_AWZ(Class cls, method_t *meth) { // In almost all cases, IMP swizzling does not affect custom RR/AWZ bits. - // The class is already marked for custom RR/AWZ, so changing the IMP - // does not transition from non-custom to custom. + // Custom RR/AWZ search will already find the method whether or not + // it is swizzled, so it does not transition from non-custom to custom. // // The only cases where IMP swizzling can affect the RR/AWZ bits is // if the swizzled method is one of the methods that is assumed to be - // non-custom. These special cases come from attachMethodLists(). - // We look for such cases here if we do not know the affected class. + // non-custom. These special cases are listed in setInitialized(). + // We look for such cases here. if (isRRSelector(meth->name)) { - if (cls) { - cls->setHasCustomRR(); + // already custom, nothing would change + if (classNSObject()->hasCustomRR()) return; + + bool swizzlingNSObject = NO; + if (cls == classNSObject()) { + swizzlingNSObject = YES; } else { // Don't know the class. // The only special case is class NSObject. 
FOREACH_METHOD_LIST(mlist, classNSObject(), { for (uint32_t i = 0; i < mlist->count; i++) { if (meth == method_list_nth(mlist, i)) { - // Yep, they're swizzling NSObject. - classNSObject()->setHasCustomRR(); - return; + swizzlingNSObject = YES; + break; } } + if (swizzlingNSObject) break; }); } + if (swizzlingNSObject) { + if (classNSObject()->isInitialized()) { + classNSObject()->setHasCustomRR(); + } else { + // NSObject not yet +initialized, so custom RR has not yet + // been checked, and setInitialized() will not notice the + // swizzle. + ClassNSObjectRRSwizzled = YES; + } + } } else if (isAWZSelector(meth->name)) { - if (cls) { - cls->setHasCustomAWZ(); + // already custom, nothing would change + if (classNSObject()->ISA()->hasCustomAWZ()) return; + + bool swizzlingNSObject = NO; + if (cls == classNSObject()->ISA()) { + swizzlingNSObject = YES; } else { // Don't know the class. // The only special case is metaclass NSObject. FOREACH_METHOD_LIST(mlist, classNSObject()->ISA(), { for (uint32_t i = 0; i < mlist->count; i++) { if (meth == method_list_nth(mlist, i)) { - // Yep, they're swizzling metaclass NSObject. - classNSObject()->ISA()->setHasCustomAWZ(); - return; + swizzlingNSObject = YES; + break; } } + if (swizzlingNSObject) break; }); } + if (swizzlingNSObject) { + if (classNSObject()->ISA()->isInitialized()) { + classNSObject()->ISA()->setHasCustomAWZ(); + } else { + // NSObject not yet +initialized, so custom RR has not yet + // been checked, and setInitialized() will not notice the + // swizzle. + MetaclassNSObjectAWZSwizzled = YES; + } + } } } @@ -4764,7 +5614,7 @@ class_setIvarLayout(Class cls, const uint8_t *layout) // allowed, there would be a race below (us vs. concurrent GC scan) if (!(cls->data()->flags & RW_CONSTRUCTING)) { _objc_inform("*** Can't set ivar layout for already-registered " - "class '%s'", cls->name()); + "class '%s'", cls->nameForLogging()); rwlock_unlock_write(&runtimeLock); return; } @@ -4830,7 +5680,7 @@ class_setWeakIvarLayout(Class cls, const uint8_t *layout) // allowed, there would be a race below (us vs. concurrent GC scan) if (!(cls->data()->flags & RW_CONSTRUCTING)) { _objc_inform("*** Can't set weak ivar layout for already-registered " - "class '%s'", cls->name()); + "class '%s'", cls->nameForLogging()); rwlock_unlock_write(&runtimeLock); return; } @@ -5049,7 +5899,7 @@ class_addIvar(Class cls, const char *name, size_t size, ivar->size = (uint32_t)size; ro_w->ivars = newlist; - ro_w->instanceSize = (uint32_t)(offset + size); + cls->setInstanceSize((uint32_t)(offset + size)); // Ivar layout updated in registerClass. @@ -5206,48 +6056,46 @@ objc_duplicateClass(Class original, const char *name, assert(original->isRealized()); assert(!original->isMetaClass()); - duplicate = _calloc_class(original->ISA()->alignedInstanceSize()+extraBytes); - if (original->ISA()->unalignedInstanceSize() < sizeof(objc_class)) { - _objc_inform("busted! 
%s\n", original->data()->ro->name); - } - + duplicate = alloc_class_for_subclass(original, extraBytes); - duplicate->initIsa(original->ISA()); + duplicate->initClassIsa(original->ISA()); duplicate->superclass = original->superclass; - duplicate->cache.buckets = (bucket_t *)&_objc_empty_cache; - // cache.shiftmask and cache.occupied are already zero + duplicate->cache.setEmpty(); - duplicate->setData((class_rw_t *)_calloc_internal(sizeof(*original->data()), 1)); - duplicate->data()->flags = (original->data()->flags | RW_COPIED_RO); - duplicate->data()->version = original->data()->version; - duplicate->data()->firstSubclass = nil; - duplicate->data()->nextSiblingClass = nil; + class_rw_t *rw = (class_rw_t *)_calloc_internal(sizeof(*original->data()), 1); + rw->flags = (original->data()->flags | RW_COPIED_RO | RW_REALIZING); + rw->version = original->data()->version; + rw->firstSubclass = nil; + rw->nextSiblingClass = nil; - duplicate->data()->ro = (class_ro_t *) + duplicate->bits = original->bits; + duplicate->setData(rw); + + rw->ro = (class_ro_t *) _memdup_internal(original->data()->ro, sizeof(*original->data()->ro)); - *(char **)&duplicate->data()->ro->name = _strdup_internal(name); + *(char **)&rw->ro->name = _strdup_internal(name); if (original->data()->flags & RW_METHOD_ARRAY) { - duplicate->data()->method_lists = (method_list_t **) + rw->method_lists = (method_list_t **) _memdup_internal(original->data()->method_lists, malloc_size(original->data()->method_lists)); method_list_t **mlistp; - for (mlistp = duplicate->data()->method_lists; *mlistp; mlistp++) { + for (mlistp = rw->method_lists; *mlistp; mlistp++) { *mlistp = (method_list_t *) _memdup_internal(*mlistp, method_list_size(*mlistp)); } } else { if (original->data()->method_list) { - duplicate->data()->method_list = (method_list_t *) + rw->method_list = (method_list_t *) _memdup_internal(original->data()->method_list, method_list_size(original->data()->method_list)); } } // fixme dies when categories are added to the base - duplicate->data()->properties = original->data()->properties; - duplicate->data()->protocols = original->data()->protocols; + rw->properties = original->data()->properties; + rw->protocols = original->data()->protocols; if (duplicate->superclass) { addSubclass(duplicate->superclass, duplicate); @@ -5262,10 +6110,12 @@ objc_duplicateClass(Class original, const char *name, if (PrintConnecting) { _objc_inform("CLASS: realizing class '%s' (duplicate of %s) %p %p", - name, original->data()->ro->name, + name, original->nameForLogging(), (void*)duplicate, duplicate->data()->ro); } + duplicate->clearInfo(RW_REALIZING); + rwlock_unlock_write(&runtimeLock); return duplicate; @@ -5285,9 +6135,8 @@ static void objc_initializeClassPair_internal(Class superclass, const char *name class_ro_t *cls_ro_w, *meta_ro_w; - cls->cache.buckets = (bucket_t *)&_objc_empty_cache; - meta->cache.buckets = (bucket_t *)&_objc_empty_cache; - // cache.shiftmask and cache.occupied are already zero + cls->cache.setEmpty(); + meta->cache.setEmpty(); cls->setData((class_rw_t *)_calloc_internal(sizeof(class_rw_t), 1)); meta->setData((class_rw_t *)_calloc_internal(sizeof(class_rw_t), 1)); @@ -5298,8 +6147,8 @@ static void objc_initializeClassPair_internal(Class superclass, const char *name // Set basic info - cls->data()->flags = RW_CONSTRUCTING | RW_COPIED_RO | RW_REALIZED; - meta->data()->flags = RW_CONSTRUCTING | RW_COPIED_RO | RW_REALIZED; + cls->data()->flags = RW_CONSTRUCTING | RW_COPIED_RO | RW_REALIZED | RW_REALIZING; + meta->data()->flags 
= RW_CONSTRUCTING | RW_COPIED_RO | RW_REALIZED | RW_REALIZING; cls->data()->version = 0; meta->data()->version = 7; @@ -5312,13 +6161,13 @@ static void objc_initializeClassPair_internal(Class superclass, const char *name if (superclass) { cls_ro_w->instanceStart = superclass->unalignedInstanceSize(); meta_ro_w->instanceStart = superclass->ISA()->unalignedInstanceSize(); - cls_ro_w->instanceSize = cls_ro_w->instanceStart; - meta_ro_w->instanceSize = meta_ro_w->instanceStart; + cls->setInstanceSize(cls_ro_w->instanceStart); + meta->setInstanceSize(meta_ro_w->instanceStart); } else { cls_ro_w->instanceStart = 0; meta_ro_w->instanceStart = (uint32_t)sizeof(objc_class); - cls_ro_w->instanceSize = (uint32_t)sizeof(id); // just an isa - meta_ro_w->instanceSize = meta_ro_w->instanceStart; + cls->setInstanceSize((uint32_t)sizeof(id)); // just an isa + meta->setInstanceSize(meta_ro_w->instanceStart); } cls_ro_w->name = _strdup_internal(name); @@ -5328,53 +6177,68 @@ static void objc_initializeClassPair_internal(Class superclass, const char *name cls_ro_w->weakIvarLayout = &UnsetLayout; // Connect to superclasses and metaclasses - cls->initIsa(meta); + cls->initClassIsa(meta); if (superclass) { - meta->initIsa(superclass->ISA()->ISA()); + meta->initClassIsa(superclass->ISA()->ISA()); cls->superclass = superclass; meta->superclass = superclass->ISA(); addSubclass(superclass, cls); addSubclass(superclass->ISA(), meta); } else { - meta->initIsa(meta); + meta->initClassIsa(meta); cls->superclass = Nil; meta->superclass = cls; addSubclass(cls, meta); } } + +/*********************************************************************** +* verifySuperclass +* Sanity-check the superclass provided to +* objc_allocateClassPair, objc_initializeClassPair, or objc_readClassPair. +**********************************************************************/ +bool +verifySuperclass(Class superclass, bool rootOK) +{ + if (!superclass) { + // Superclass does not exist. + // If subclass may be a root class, this is OK. + // If subclass must not be a root class, this is bad. + return rootOK; + } + + // Superclass must be realized. + if (! superclass->isRealized()) return false; + + // Superclass must not be under construction. + if (superclass->data()->flags & RW_CONSTRUCTING) return false; + + return true; +} + + /*********************************************************************** * objc_initializeClassPair **********************************************************************/ Class objc_initializeClassPair(Class superclass, const char *name, Class cls, Class meta) { rwlock_write(&runtimeLock); - - // - // Common superclass integrity checks with objc_allocateClassPair - // - if (getClass(name)) { - rwlock_unlock_write(&runtimeLock); - return Nil; - } - // fixme reserve class against simultaneous allocation - - if (superclass) assert(superclass->isRealized()); - if (superclass && superclass->data()->flags & RW_CONSTRUCTING) { - // Can't make subclass of an in-construction class + // Fail if the class name is in use. + // Fail if the superclass isn't kosher. 
+ if (getClass(name) || !verifySuperclass(superclass, true/*rootOK*/)) { rwlock_unlock_write(&runtimeLock); - return Nil; + return nil; } - - // just initialize what was supplied objc_initializeClassPair_internal(superclass, name, cls, meta); rwlock_unlock_write(&runtimeLock); return cls; } + /*********************************************************************** * objc_allocateClassPair * fixme @@ -5387,35 +6251,18 @@ Class objc_allocateClassPair(Class superclass, const char *name, rwlock_write(&runtimeLock); - // - // Common superclass integrity checks with objc_initializeClassPair - // - if (getClass(name)) { - rwlock_unlock_write(&runtimeLock); - return Nil; - } - // fixme reserve class against simmultaneous allocation - - if (superclass) assert(superclass->isRealized()); - - if (superclass && superclass->data()->flags & RW_CONSTRUCTING) { - // Can't make subclass of an in-construction class + // Fail if the class name is in use. + // Fail if the superclass isn't kosher. + if (getClass(name) || !verifySuperclass(superclass, true/*rootOK*/)) { rwlock_unlock_write(&runtimeLock); - return Nil; + return nil; } - - // Allocate new classes. - size_t size = sizeof(objc_class); - size_t metasize = sizeof(objc_class); - if (superclass) { - size = superclass->ISA()->alignedInstanceSize(); - metasize = superclass->ISA()->ISA()->alignedInstanceSize(); - } - cls = _calloc_class(size + extraBytes); - meta = _calloc_class(metasize + extraBytes); + cls = alloc_class_for_subclass(superclass, extraBytes); + meta = alloc_class_for_subclass(superclass, extraBytes); + // fixme mangle the name if it looks swift-y? objc_initializeClassPair_internal(superclass, name, cls, meta); rwlock_unlock_write(&runtimeLock); @@ -5507,21 +6354,58 @@ void objc_registerClassPair(Class cls) } // Clear "under construction" bit, set "done constructing" bit - cls->data()->flags &= ~RW_CONSTRUCTING; - cls->ISA()->data()->flags &= ~RW_CONSTRUCTING; - cls->data()->flags |= RW_CONSTRUCTED; - cls->ISA()->data()->flags |= RW_CONSTRUCTED; + cls->ISA()->changeInfo(RW_CONSTRUCTED, RW_CONSTRUCTING | RW_REALIZING); + cls->changeInfo(RW_CONSTRUCTED, RW_CONSTRUCTING | RW_REALIZING); // Add to named and realized classes addNamedClass(cls, cls->data()->ro->name); addRealizedClass(cls); addRealizedMetaclass(cls->ISA()); - addNonMetaClass(cls); rwlock_unlock_write(&runtimeLock); } +/*********************************************************************** +* objc_readClassPair() +* Read a class and metaclass as written by a compiler. +* Assumes the class and metaclass are not referenced by other things +* that might need to be fixed up (such as categories and subclasses). +* Does not call +load. +* Returns the class pointer, or nil. +* +* Locking: runtimeLock acquired by map_images +**********************************************************************/ +Class objc_readClassPair(Class bits, const struct objc_image_info *info) +{ + rwlock_write(&runtimeLock); + + // No info bits are significant yet. + (void)info; + + // Fail if the class name is in use. + // Fail if the superclass isn't kosher. + const char *name = bits->mangledName(); + bool rootOK = bits->data()->flags & RO_ROOT; + if (getClass(name) || !verifySuperclass(bits->superclass, rootOK)){ + rwlock_unlock_write(&runtimeLock); + return nil; + } + + Class cls = readClass(bits, false/*bundle*/, false/*shared cache*/); + if (cls != bits) { + // This function isn't allowed to remap anything. 
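objc_allocateClassPair() and objc_initializeClassPair() now share their name and superclass checks through verifySuperclass(), and the new objc_readClassPair() lets an embedder hand the runtime a compiler-style class structure directly. For comparison, the ordinary dynamic-class path through the public API (names are illustrative):

    #include <assert.h>
    #include <objc/runtime.h>

    Class makeDemoSubclass(void) {
        // Returns Nil if the name is taken or the superclass is
        // unrealized / still under construction (see verifySuperclass).
        Class cls = objc_allocateClassPair(objc_getClass("NSObject"),
                                           "DemoSubclass", 0);
        if (!cls) return Nil;

        // Ivars may only be added between allocate and register.
        // The alignment argument is expressed as log2(alignment).
        class_addIvar(cls, "_value", sizeof(int), 2, "i");

        objc_registerClassPair(cls);
        assert(objc_getClass("DemoSubclass") == cls);
        return cls;
    }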
+ _objc_fatal("objc_readClassPair for class %s changed %p to %p", + cls->nameForLogging(), bits, cls); + } + realizeClass(cls); + + rwlock_unlock_write(&runtimeLock); + + return cls; +} + + /*********************************************************************** * detach_class * Disconnect a class from other data structures. @@ -5548,9 +6432,8 @@ static void detach_class(Class cls, BOOL isMeta) // class tables and +load queue if (!isMeta) { - removeNamedClass(cls, cls->name()); + removeNamedClass(cls, cls->mangledName()); removeRealizedClass(cls); - removeNonMetaClass(cls); } else { removeRealizedMetaclass(cls); } @@ -5571,8 +6454,8 @@ static void free_class(Class cls) uint32_t i; - if (cls->cache.buckets != (bucket_t *)&_objc_empty_cache) { - free(cls->cache.buckets); + if (cls->cache.canBeFreed()) { + free(cls->cache.buckets()); } FOREACH_METHOD_LIST(mlist, cls, { @@ -5653,12 +6536,12 @@ void objc_disposeClassPair(Class cls) if (cls->data()->firstSubclass) { _objc_inform("objc_disposeClassPair: class '%s' still has subclasses, " "including '%s'!", cls->data()->ro->name, - cls->data()->firstSubclass->name()); + cls->data()->firstSubclass->nameForLogging()); } if (cls->ISA()->data()->firstSubclass) { _objc_inform("objc_disposeClassPair: class '%s' still has subclasses, " "including '%s'!", cls->data()->ro->name, - cls->ISA()->data()->firstSubclass->name()); + cls->ISA()->data()->firstSubclass->nameForLogging()); } // don't remove_class_from_loadable_list() @@ -5672,45 +6555,90 @@ void objc_disposeClassPair(Class cls) } +/*********************************************************************** +* objc_constructInstance +* Creates an instance of `cls` at the location pointed to by `bytes`. +* `bytes` must point to at least class_getInstanceSize(cls) bytes of +* well-aligned zero-filled memory. +* The new object's isa is set. Any C++ constructors are called. +* Returns `bytes` if successful. Returns nil if `cls` or `bytes` is +* nil, or if C++ constructors fail. +* Note: class_createInstance() and class_createInstances() preflight this. +**********************************************************************/ +id +objc_constructInstance(Class cls, void *bytes) +{ + if (!cls || !bytes) return nil; + + id obj = (id)bytes; + + // Read class's info bits all at once for performance + bool hasCxxCtor = cls->hasCxxCtor(); + bool hasCxxDtor = cls->hasCxxDtor(); + bool fast = cls->canAllocIndexed(); + + if (!UseGC && fast) { + obj->initInstanceIsa(cls, hasCxxDtor); + } else { + obj->initIsa(cls); + } + + if (hasCxxCtor) { + return object_cxxConstructFromClass(obj, cls); + } else { + return obj; + } +} + + /*********************************************************************** * class_createInstance * fixme * Locking: none **********************************************************************/ -static id -_class_createInstanceFromZone(Class cls, size_t extraBytes, void *zone) - __attribute__((always_inline)); -static id +static __attribute__((always_inline)) +id _class_createInstanceFromZone(Class cls, size_t extraBytes, void *zone) { if (!cls) return nil; assert(cls->isRealized()); - size_t size = cls->alignedInstanceSize() + extraBytes; + // Read class's info bits all at once for performance + bool hasCxxCtor = cls->hasCxxCtor(); + bool hasCxxDtor = cls->hasCxxDtor(); + bool fast = cls->canAllocIndexed(); - // CF requires all object be at least 16 bytes. 
- if (size < 16) size = 16; + size_t size = cls->instanceSize(extraBytes); id obj; + if (!UseGC && !zone && fast) { + obj = (id)calloc(1, size); + if (!obj) return nil; + obj->initInstanceIsa(cls, hasCxxDtor); + } + else { #if SUPPORT_GC - if (UseGC) { - obj = (id)auto_zone_allocate_object(gc_zone, size, - AUTO_OBJECT_SCANNED, 0, 1); - } else + if (UseGC) { + obj = (id)auto_zone_allocate_object(gc_zone, size, + AUTO_OBJECT_SCANNED, 0, 1); + } else #endif - if (zone) { - obj = (id)malloc_zone_calloc ((malloc_zone_t *)zone, 1, size); + if (zone) { + obj = (id)malloc_zone_calloc ((malloc_zone_t *)zone, 1, size); } else { - obj = (id)calloc(1, size); - } - if (!obj) return nil; + obj = (id)calloc(1, size); + } + if (!obj) return nil; - obj->initIsa(cls); + // Use non-indexed isa on the assumption that they might be + // doing something weird with the zone or RR. + obj->initIsa(cls); + } - if (cls->hasCxxCtor()) { - obj = _objc_constructOrFree(cls, obj); + if (hasCxxCtor) { + obj = _objc_constructOrFree(obj, cls); } return obj; @@ -5723,11 +6651,15 @@ class_createInstance(Class cls, size_t extraBytes) return _class_createInstanceFromZone(cls, extraBytes, nil); } + /*********************************************************************** * class_createInstances * fixme * Locking: none **********************************************************************/ +#if SUPPORT_NONPOINTER_ISA +#warning fixme optimize class_createInstances +#endif unsigned class_createInstances(Class cls, size_t extraBytes, id *results, unsigned num_requested) @@ -5802,7 +6734,7 @@ _object_copyFromZone(id oldObj, size_t extraBytes, void *zone) if (!oldObj) return nil; if (oldObj->isTaggedPointer()) return oldObj; - size = oldObj->ISA()->alignedInstanceSize() + extraBytes; + size = oldObj->ISA()->instanceSize(extraBytes); #if SUPPORT_GC if (UseGC) { obj = (id) auto_zone_allocate_object(gc_zone, size, @@ -5822,12 +6754,10 @@ _object_copyFromZone(id oldObj, size_t extraBytes, void *zone) #if SUPPORT_GC if (UseGC) gc_fixup_weakreferences(obj, oldObj); - else if (classOrSuperClassesUseARR(obj->ISA())) - arr_fixup_copied_references(obj, oldObj); -#else + else +#endif if (classOrSuperClassesUseARR(obj->ISA())) arr_fixup_copied_references(obj, oldObj); -#endif return obj; } @@ -5885,17 +6815,15 @@ object_copyFromZone(id oldObj, size_t extraBytes, void *zone) void *objc_destructInstance(id obj) { if (obj) { - Class cls = obj->getIsa(); - // Read all of the flags at once for performance. - bool cxx = cls->hasCxxDtor(); - bool assoc = !UseGC && cls->instancesHaveAssociatedObjects(); + bool cxx = obj->hasCxxDtor(); + bool assoc = !UseGC && obj->hasAssociatedObjects(); + bool dealloc = !UseGC; // This order is important. 
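objc_constructInstance() and objc_destructInstance() above separate construction and destruction from allocation, which is what allows callers such as class_createInstances() (or CoreFoundation-style inline storage) to place objects in memory they manage themselves. A sketch over plain heap memory:

    #include <objc/runtime.h>
    #include <stdlib.h>

    id constructInPlace(Class cls) {
        // objc_constructInstance requires zero-filled, well-aligned memory
        // of at least class_getInstanceSize(cls) bytes.
        void *bytes = calloc(1, class_getInstanceSize(cls));
        if (!bytes) return nil;

        id obj = objc_constructInstance(cls, bytes); // sets isa, runs C++ ctors
        if (!obj) free(bytes);
        return obj;
    }

    void destroyInPlace(id obj) {
        // Runs C++ dtors, removes associated references, clears weak
        // references; the caller then frees the memory itself.
        free(objc_destructInstance(obj));
    }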
if (cxx) object_cxxDestruct(obj); if (assoc) _object_remove_assocations(obj); - - if (!UseGC) objc_clear_deallocating(obj); + if (dealloc) obj->clearDeallocating(); } return obj; @@ -5995,10 +6923,10 @@ disableTaggedPointers() static int tagSlotForTagIndex(objc_tag_index_t tag) { -#if TAG_MASK == 1 - return (tag << 1) | 1; +#if SUPPORT_MSB_TAGGED_POINTERS + return 0x8 | tag; #else -# error unimplemented + return (tag << 1) | 1; #endif } @@ -6026,7 +6954,8 @@ _objc_registerTaggedPointerClass(objc_tag_index_t tag, Class cls) if (cls && oldCls && cls != oldCls) { _objc_fatal("tag index %u used for two different classes " "(was %p %s, now %p %s)", tag, - oldCls, class_getName(oldCls), cls, class_getName(cls)); + oldCls, oldCls->nameForLogging(), + cls, cls->nameForLogging()); } objc_tag_classes[slot] = cls; @@ -6057,6 +6986,17 @@ _objc_getClassForTag(objc_tag_index_t tag) #if SUPPORT_FIXUP +OBJC_EXTERN void objc_msgSend_fixup(void); +OBJC_EXTERN void objc_msgSendSuper2_fixup(void); +OBJC_EXTERN void objc_msgSend_stret_fixup(void); +OBJC_EXTERN void objc_msgSendSuper2_stret_fixup(void); +#if defined(__i386__) || defined(__x86_64__) +OBJC_EXTERN void objc_msgSend_fpret_fixup(void); +#endif +#if defined(__x86_64__) +OBJC_EXTERN void objc_msgSend_fp2ret_fixup(void); +#endif + OBJC_EXTERN void objc_msgSend_fixedup(void); OBJC_EXTERN void objc_msgSendSuper2_fixedup(void); OBJC_EXTERN void objc_msgSend_stret_fixedup(void); @@ -6083,7 +7023,19 @@ fixupMessageRef(message_ref_t *msg) msg->imp = (IMP)&_objc_ignored_method; } else if (msg->imp == &objc_msgSend_fixup) { - msg->imp = &objc_msgSend_fixedup; + if (msg->sel == SEL_alloc) { + msg->imp = (IMP)&objc_alloc; + } else if (msg->sel == SEL_allocWithZone) { + msg->imp = (IMP)&objc_allocWithZone; + } else if (msg->sel == SEL_retain) { + msg->imp = (IMP)&objc_retain; + } else if (msg->sel == SEL_release) { + msg->imp = (IMP)&objc_release; + } else if (msg->sel == SEL_autorelease) { + msg->imp = (IMP)&objc_autorelease; + } else { + msg->imp = &objc_msgSend_fixedup; + } } else if (msg->imp == &objc_msgSendSuper2_fixup) { msg->imp = &objc_msgSendSuper2_fixedup; @@ -6151,4 +7103,6 @@ Class class_setSuperclass(Class cls, Class newSuper) return oldSuper; } + +// __OBJC2__ #endif diff --git a/runtime/objc-runtime-old.h b/runtime/objc-runtime-old.h index ad5e99e..31e1335 100644 --- a/runtime/objc-runtime-old.h +++ b/runtime/objc-runtime-old.h @@ -120,6 +120,20 @@ struct objc_class : objc_object { return hasCxxCtor(); // one bit for both ctor and dtor } + bool hasCustomRR() { + return true; + } + void setHasCustomRR(bool = false) { } + void setHasDefaultRR() { } + void printCustomRR(bool) { } + + bool hasCustomAWZ() { + return true; + } + void setHasCustomAWZ(bool = false) { } + void setHasDefaultAWZ() { } + void printCustomAWZ(bool) { } + bool instancesHaveAssociatedObjects() { return info & CLS_INSTANCES_HAVE_ASSOCIATED_OBJECTS; } @@ -175,7 +189,9 @@ struct objc_class : objc_object { bool isConnected(); - const char *getName() { return name; } + const char *mangledName() { return name; } + const char *demangledName() { return name; } + const char *nameForLogging() { return name; } bool isMetaClass() { return info & CLS_META; @@ -197,6 +213,13 @@ struct objc_class : objc_object { return (unalignedInstanceSize() + WORD_MASK) & ~WORD_MASK; } + size_t instanceSize(size_t extraBytes) { + size_t size = alignedInstanceSize() + extraBytes; + // CF requires all objects be at least 16 bytes. 
+ if (size < 16) size = 16; + return size; + } + }; struct old_class_ext { diff --git a/runtime/objc-runtime-old.mm b/runtime/objc-runtime-old.mm index bf5d5c5..9002bdc 100644 --- a/runtime/objc-runtime-old.mm +++ b/runtime/objc-runtime-old.mm @@ -243,7 +243,7 @@ void objc_dump_class_hash(void) count = 0; state = NXInitHashState (table); while (NXNextHashState (table, &state, (void **) &data)) - printf ("class %d: %s\n", ++count, data->getName()); + printf ("class %d: %s\n", ++count, data->nameForLogging()); } @@ -408,7 +408,7 @@ static uintptr_t classHash(void *info, Class data) return 0; // Call through to real hash function - return _objc_strhash (data->getName()); + return _objc_strhash (data->mangledName()); } /*********************************************************************** @@ -419,7 +419,7 @@ static uintptr_t classHash(void *info, Class data) static int classIsEqual(void *info, Class name, Class cls) { // Standard string comparison - return strcmp(name->getName(), cls->getName()) == 0; + return strcmp(name->mangledName(), cls->mangledName()) == 0; } @@ -532,6 +532,35 @@ Class _objc_allocateFutureClass(const char *name) } +/*********************************************************************** +* objc_getFutureClass. Return the id of the named class. +* If the class does not exist, return an uninitialized class +* structure that will be used for the class when and if it +* does get loaded. +* Not thread safe. +**********************************************************************/ +Class objc_getFutureClass(const char *name) +{ + Class cls; + + // YES unconnected, NO class handler + // (unconnected is OK because it will someday be the real class) + cls = look_up_class(name, YES, NO); + if (cls) { + if (PrintFuture) { + _objc_inform("FUTURE: found %p already in use for %s", + (void*)cls, name); + } + return cls; + } + + // No class or future class with that name yet. Make one. + // fixme not thread-safe with respect to + // simultaneous library load or getFutureClass. + return _objc_allocateFutureClass(name); +} + + /*********************************************************************** * objc_setFutureClass. * Like objc_getFutureClass, but uses the provided memory block. @@ -2111,7 +2140,7 @@ static BOOL versionIsExt(uintptr_t version, const char *names, size_t size) // the only version number used on Mac OS X was 2. // gcc (10.5 and later) uses isa field for ext pointer - if (version < PAGE_SIZE) { + if (version < 4096 /* not PAGE_SIZE */) { return NO; } diff --git a/runtime/objc-runtime.mm b/runtime/objc-runtime.mm index 04c2571..86e1c43 100644 --- a/runtime/objc-runtime.mm +++ b/runtime/objc-runtime.mm @@ -79,15 +79,22 @@ SEL SEL_autorelease = NULL; SEL SEL_retainCount = NULL; SEL SEL_alloc = NULL; SEL SEL_allocWithZone = NULL; +SEL SEL_dealloc = NULL; SEL SEL_copy = NULL; SEL SEL_new = NULL; SEL SEL_finalize = NULL; SEL SEL_forwardInvocation = NULL; +SEL SEL_tryRetain = NULL; +SEL SEL_isDeallocating = NULL; +SEL SEL_retainWeakReference = NULL; +SEL SEL_allowsWeakReference = NULL; + header_info *FirstHeader = 0; // NULL means empty list header_info *LastHeader = 0; // NULL means invalid; recompute it int HeaderCount = 0; +uint32_t AppSDKVersion = 0; /*********************************************************************** @@ -134,34 +141,6 @@ Class objc_lookUpClass(const char *aClassName) return look_up_class(aClassName, NO, NO); } -/*********************************************************************** -* objc_getFutureClass. Return the id of the named class. 
-* If the class does not exist, return an uninitialized class -* structure that will be used for the class when and if it -* does get loaded. -* Not thread safe. -**********************************************************************/ -Class objc_getFutureClass(const char *name) -{ - Class cls; - - // YES unconnected, NO class handler - // (unconnected is OK because it will someday be the real class) - cls = look_up_class(name, YES, NO); - if (cls) { - if (PrintFuture) { - _objc_inform("FUTURE: found %p already in use for %s", - (void*)cls, name); - } - return cls; - } - - // No class or future class with that name yet. Make one. - // fixme not thread-safe with respect to - // simultaneous library load or getFutureClass. - return _objc_allocateFutureClass(name); -} - /*********************************************************************** * objc_getMetaClass. Return the id of the meta class the named class. @@ -384,6 +363,11 @@ void _objc_pthread_destroyspecific(void *arg) _destroyInitializingClassList(data->initializingClasses); _destroySyncCache(data->syncCache); _destroyAltHandlerList(data->handlerList); + for (int i = 0; i < (int)countof(data->printableNames); i++) { + if (data->printableNames[i]) { + free(data->printableNames[i]); + } + } // add further cleanup here... @@ -415,37 +399,47 @@ void _objcInit(void) } -#if !(TARGET_OS_WIN32 || TARGET_OS_EMBEDDED || TARGET_OS_IPHONE) /*********************************************************************** -* _objc_setNilReceiver +* objc_setForwardHandler **********************************************************************/ -id _objc_setNilReceiver(id newNilReceiver) -{ - id oldNilReceiver; - oldNilReceiver = _objc_nilReceiver; - _objc_nilReceiver = newNilReceiver; +#if !__OBJC2__ + +// Default forward handler (nil) goes to forward:: dispatch. +void *_objc_forward_handler = nil; +void *_objc_forward_stret_handler = nil; - return oldNilReceiver; +#else + +// Default forward handler halts the process. +__attribute__((noreturn)) void +objc_defaultForwardHandler(id self, SEL sel) +{ + _objc_fatal("%c[%s %s]: unrecognized selector sent to instance %p " + "(no message forward handler is installed)", + class_isMetaClass(object_getClass(self)) ? 
'+' : '-', + object_getClassName(self), sel_getName(sel), self); } +void *_objc_forward_handler = (void*)objc_defaultForwardHandler; -/*********************************************************************** -* _objc_getNilReceiver -**********************************************************************/ -id _objc_getNilReceiver(void) +#if SUPPORT_STRET +struct stret { int i[100]; }; +__attribute__((noreturn)) struct stret +objc_defaultForwardStretHandler(id self, SEL sel) { - return _objc_nilReceiver; + objc_defaultForwardHandler(self, sel); } +void *_objc_forward_stret_handler = (void*)objc_defaultForwardStretHandler; #endif +#endif -/*********************************************************************** -* objc_setForwardHandler -**********************************************************************/ void objc_setForwardHandler(void *fwd, void *fwd_stret) { _objc_forward_handler = fwd; +#if SUPPORT_STRET _objc_forward_stret_handler = fwd_stret; +#endif } @@ -465,13 +459,13 @@ const char *class_getImageName(Class cls) if (!cls) return NULL; #if !__OBJC2__ - cls = _objc_getOrigClass(cls->getName()); + cls = _objc_getOrigClass(cls->demangledName()); #endif #if TARGET_OS_WIN32 charactersCopied = 0; szFileName = malloc(MAX_PATH * sizeof(TCHAR)); - origCls = objc_getOrigClass(cls->getName()); + origCls = objc_getOrigClass(cls->demangledName()); classModule = NULL; res = GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS, (LPCTSTR)origCls, &classModule); if (res && classModule) { @@ -639,7 +633,7 @@ void objc_removeAssociatedObjects(id object) } else #endif { - if (object && object->getIsa()->instancesHaveAssociatedObjects()) { + if (object && object->hasAssociatedObjects()) { _object_remove_assocations(object); } } diff --git a/runtime/objc-sel-old.mm b/runtime/objc-sel-old.mm index d16bbcd..57e7b9f 100644 --- a/runtime/objc-sel-old.mm +++ b/runtime/objc-sel-old.mm @@ -35,7 +35,6 @@ #if SUPPORT_PREOPT #include -using namespace objc_opt; static const objc_selopt_t *builtins = NULL; #endif @@ -47,42 +46,6 @@ static const char *_objc_empty_selector = ""; static struct __objc_sel_set *_objc_selectors = NULL; -#if SUPPORT_PREOPT -void dump_builtins(void) -{ - uint32_t occupied = builtins->occupied; - uint32_t capacity = builtins->capacity; - - _objc_inform("BUILTIN SELECTORS: %d selectors", occupied); - _objc_inform("BUILTIN SELECTORS: %d/%d (%d%%) hash table occupancy", - occupied, capacity, (int)(occupied/(double)capacity * 100)); - _objc_inform("BUILTIN SELECTORS: using __TEXT,__objc_selopt at %p", - builtins); - _objc_inform("BUILTIN SELECTORS: capacity: %u", builtins->capacity); - _objc_inform("BUILTIN SELECTORS: occupied: %u", builtins->occupied); - _objc_inform("BUILTIN SELECTORS: shift: %u", builtins->shift); - _objc_inform("BUILTIN SELECTORS: mask: 0x%x", builtins->mask); - _objc_inform("BUILTIN SELECTORS: zero: %u", builtins->zero); - _objc_inform("BUILTIN SELECTORS: salt: 0x%llx", builtins->salt); - - const int32_t *offsets = builtins->offsets(); - uint32_t i; - for (i = 0; i < capacity; i++) { - if (offsets[i] != offsetof(objc_stringhash_t, zero)) { - const char *str = (const char *)builtins + offsets[i]; - _objc_inform("BUILTIN SELECTORS: %6d: %+8d %s", - i, offsets[i], str); - if ((const char *)sel_registerName(str) != str) { - _objc_fatal("bogus"); - } - } else { - _objc_inform("BUILTIN SELECTORS: %6d: ", i); - } - } -} -#endif - - static SEL _objc_search_builtins(const char *key) { #if defined(DUMP_SELECTORS) @@ -97,8 +60,7 @@ static SEL _objc_search_builtins(const char *key) 
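The getName()-to-demangledName() switch above only changes how the old runtime keys its internal lookup; callers of the public API see no difference. For reference, a small usage sketch of class_getImageName() (the class chosen is arbitrary):

    #include <objc/runtime.h>
    #include <stdio.h>

    int main(void)
    {
        // Returns the path of the image that provides the class's
        // implementation, or NULL if cls is Nil.
        const char *image = class_getImageName(objc_getClass("NSObject"));
        printf("NSObject comes from: %s\n", image ? image : "(none)");
        return 0;
    }
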
if ('\0' == *key) return (SEL)_objc_empty_selector; #if SUPPORT_PREOPT - assert(builtins); - return (SEL)builtins->get(key); + if (builtins) return (SEL)builtins->get(key); #endif return (SEL)0; @@ -277,10 +239,18 @@ void sel_init(BOOL wantsGC, size_t selrefCount) s(retainCount); s(alloc); t(allocWithZone:, allocWithZone); + s(dealloc); s(copy); s(new); s(finalize); t(forwardInvocation:, forwardInvocation); + t(_tryRetain, tryRetain); + t(_isDeallocating, isDeallocating); + s(retainWeakReference); + s(allowsWeakReference); + + extern SEL FwdSel; + FwdSel = sel_registerNameNoLock("forward::", NO); sel_unlock(); diff --git a/runtime/objc-sel-set.h b/runtime/objc-sel-set.h index 517123e..f4540db 100644 --- a/runtime/objc-sel-set.h +++ b/runtime/objc-sel-set.h @@ -29,6 +29,8 @@ #ifndef _OBJC_SEL_SET_H_ #define _OBJC_SEL_SET_H_ +#if !__OBJC2__ + #include #include "objc-os.h" @@ -43,3 +45,5 @@ extern void __objc_sel_set_add(struct __objc_sel_set *sset, SEL value); __END_DECLS #endif + +#endif diff --git a/runtime/objc-sel-set.mm b/runtime/objc-sel-set.mm index 7be1545..93b9248 100644 --- a/runtime/objc-sel-set.mm +++ b/runtime/objc-sel-set.mm @@ -34,6 +34,9 @@ #include "objc-private.h" #include "objc-sel-set.h" +#if !__OBJC2__ + + #if !SUPPORT_MOD // mod-free power of 2 version @@ -167,3 +170,7 @@ void __objc_sel_set_add(struct __objc_sel_set *sset, SEL value) { sset->_count++; } } + + +// !__OBJC2__ +#endif diff --git a/runtime/objc-sel-table.s b/runtime/objc-sel-table.s index ce78c9c..0b1cdbb 100644 --- a/runtime/objc-sel-table.s +++ b/runtime/objc-sel-table.s @@ -7,7 +7,7 @@ __objc_opt_data: .long 0 /* table.selopt_offset */ .long 0 /* table.headeropt_offset */ .long 0 /* table.clsopt_offset */ -.space PAGE_SIZE-16 +.space PAGE_MAX_SIZE-16 /* space for selopt, smax/capacity=262144, blen/mask=65535+1 */ .space 65536 @@ -15,11 +15,11 @@ __objc_opt_data: .space 262144*4 /* offsets */ -/* space for clsopt, smax/capacity=16384, blen/mask=4095+1 */ -.space PAGE_SIZE -.space 16384 /* checkbytes */ -.space 16384*12 /* offsets to name and class and header_info */ -.space PAGE_SIZE /* some duplicate classes */ +/* space for clsopt, smax/capacity=32768, blen/mask=4095+1 */ +.space PAGE_MAX_SIZE +.space 32768 /* checkbytes */ +.space 32768*12 /* offsets to name and class and header_info */ +.space PAGE_MAX_SIZE /* some duplicate classes */ .section __DATA,__objc_opt_rw @@ -27,4 +27,4 @@ __objc_opt_data: .private_extern __objc_opt_rw_data __objc_opt_rw_data: /* space for header_info structures */ -.space 16384 +.space 32768 diff --git a/runtime/objc-sel.mm b/runtime/objc-sel.mm index 31182f5..c1c3fa5 100644 --- a/runtime/objc-sel.mm +++ b/runtime/objc-sel.mm @@ -27,8 +27,6 @@ #include "objc-cache.h" #if SUPPORT_PREOPT -#include -using namespace objc_opt; static const objc_selopt_t *builtins = NULL; #endif @@ -55,6 +53,17 @@ void sel_init(BOOL wantsGC, size_t selrefCount) #if SUPPORT_PREOPT builtins = preoptimizedSelectors(); + + if (PrintPreopt && builtins) { + uint32_t occupied = builtins->occupied; + uint32_t capacity = builtins->capacity; + + _objc_inform("PREOPTIMIZATION: using selopt at %p", builtins); + _objc_inform("PREOPTIMIZATION: %u selectors", occupied); + _objc_inform("PREOPTIMIZATION: %u/%u (%u%%) hash table occupancy", + occupied, capacity, + (unsigned)(occupied/(double)capacity*100)); + } #endif // Register selectors used by libobjc @@ -82,10 +91,15 @@ void sel_init(BOOL wantsGC, size_t selrefCount) s(retainCount); s(alloc); t(allocWithZone:, allocWithZone); + s(dealloc); s(copy); s(new); 
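sel_init() above interns libobjc's own selectors up front so that SEL_dealloc, SEL_tryRetain and friends can be compared by pointer. The same uniqueness guarantee holds for every selector, whichever path registered it; a small self-contained check (the selector name is arbitrary):

    #include <objc/runtime.h>
    #include <assert.h>
    #include <string.h>

    int main(void)
    {
        // All registration paths (compiler @selector, sel_registerName,
        // sel_getUid, the preoptimized table) yield the same uniqued SEL.
        SEL a = sel_registerName("dealloc");
        SEL b = sel_getUid("dealloc");
        assert(a == b);
        assert(sel_isEqual(a, b));
        assert(strcmp(sel_getName(a), "dealloc") == 0);
        return 0;
    }
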
s(finalize); t(forwardInvocation:, forwardInvocation); + t(_tryRetain, tryRetain); + t(_isDeallocating, isDeallocating); + s(retainWeakReference); + s(allowsWeakReference); sel_unlock(); @@ -112,12 +126,13 @@ BOOL sel_isMapped(SEL sel) { if (!sel) return NO; - const char *name = (const char *)sel; + const char *name = (const char *)(void *)sel; if (sel == search_builtins(name)) return YES; + bool result = false; rwlock_read(&selLock); - bool result = (sel == (SEL)NXMapGet(namedSelectors, name)); + if (namedSelectors) result = (sel == (SEL)NXMapGet(namedSelectors, name)); rwlock_unlock_read(&selLock); return result; diff --git a/runtime/objc-weak.h b/runtime/objc-weak.h index 7e497ad..e40e99f 100644 --- a/runtime/objc-weak.h +++ b/runtime/objc-weak.h @@ -31,25 +31,27 @@ __BEGIN_DECLS /* The weak table is a hash table governed by a single spin lock. -An allocated blob of memory, most often an object, but under GC any such allocation, -may have its address stored in a __weak marked storage location through use of -compiler generated write-barriers or hand coded uses of the register weak primitive. -Associated with the registration can be a callback block for the case when one of - the allocated chunks of memory is reclaimed. -The table is hashed on the address of the allocated memory. When __weak marked memory - changes its reference, we count on the fact that we can still see its previous reference. - -So, in the hash table, indexed by the weakly referenced item, is a list of all locations - where this address is currently being stored. - -For ARR, we also keep track of whether an arbitrary object is being deallocated by - briefly placing it in the table just prior to invoking dealloc, and removing it - via objc_clear_deallocating just prior to memory reclamation. +An allocated blob of memory, most often an object, but under GC any such +allocation, may have its address stored in a __weak marked storage location +through use of compiler generated write-barriers or hand coded uses of the +register weak primitive. Associated with the registration can be a callback +block for the case when one of the allocated chunks of memory is reclaimed. +The table is hashed on the address of the allocated memory. When __weak +marked memory changes its reference, we count on the fact that we can still +see its previous reference. + +So, in the hash table, indexed by the weakly referenced item, is a list of +all locations where this address is currently being stored. +For ARR, we also keep track of whether an arbitrary object is being +deallocated by briefly placing it in the table just prior to invoking +dealloc, and removing it via objc_clear_deallocating just prior to memory +reclamation. + */ /// The address of a __weak object reference -typedef id * weak_referrer_t; +typedef objc_object ** weak_referrer_t; #if __LP64__ #define PTR_MINUS_1 63 @@ -65,7 +67,7 @@ typedef id * weak_referrer_t; */ #define WEAK_INLINE_COUNT 4 struct weak_entry_t { - id referent; + DisguisedPtr referent; union { struct { weak_referrer_t *referrers; @@ -98,6 +100,11 @@ id weak_register_no_lock(weak_table_t *weak_table, id referent, id *referrer); /// Removes an (object, weak pointer) pair from the weak table. void weak_unregister_no_lock(weak_table_t *weak_table, id referent, id *referrer); +#if !NDEBUG +/// Returns true if an object is weakly referenced somewhere. +bool weak_is_registered_no_lock(weak_table_t *weak_table, id referent); +#endif + /// Assert a weak pointer is valid and retain the object during its use. 
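The declarations above are the lock-free core that the weak entry points call with the appropriate side-table lock held. A minimal MRC sketch of the public-facing calls; the extern declarations are an assumption of this sketch, since these functions belong to the ARC runtime interface:

    // Build with -fno-objc-arc; ARC emits these calls itself for __weak.
    #include <Foundation/Foundation.h>
    #include <assert.h>

    extern id objc_storeWeak(id *location, id obj);
    extern id objc_loadWeakRetained(id *location);
    extern void objc_destroyWeak(id *location);

    int main(void)
    {
        id weak = nil;
        NSObject *obj = [[NSObject alloc] init];

        objc_storeWeak(&weak, obj);                 // registers &weak for obj
        id strong = objc_loadWeakRetained(&weak);   // obj, returned retained
        assert(strong == obj);
        [strong release];

        [obj release];                              // dealloc clears the entry
        assert(objc_loadWeakRetained(&weak) == nil);

        objc_destroyWeak(&weak);                    // unregister before &weak dies
        return 0;
    }
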
id weak_read_no_lock(weak_table_t *weak_table, id *referrer); diff --git a/runtime/objc-weak.mm b/runtime/objc-weak.mm index 1241948..d7dfccc 100644 --- a/runtime/objc-weak.mm +++ b/runtime/objc-weak.mm @@ -32,7 +32,11 @@ #define TABLE_SIZE(entry) (entry->mask ? entry->mask + 1 : 0) -static void append_referrer(weak_entry_t *entry, id *new_referrer); +static void append_referrer(weak_entry_t *entry, objc_object **new_referrer); + +BREAKPOINT_FUNCTION( + void objc_weak_error(void) +); /** * Unique hash function for object pointers only. @@ -41,9 +45,8 @@ static void append_referrer(weak_entry_t *entry, id *new_referrer); * * @return Size unrestricted hash of pointer. */ -static inline uintptr_t hash_pointer(id key) { - uintptr_t k = (uintptr_t)key; - return (k >> 4) ^ (k >> 9); +static inline uintptr_t hash_pointer(objc_object *key) { + return ptr_hash((uintptr_t)key); } /** @@ -53,9 +56,8 @@ static inline uintptr_t hash_pointer(id key) { * * @return Size unrestricted hash of pointer. */ -static inline uintptr_t w_hash_pointer(id *key) { - uintptr_t k = (uintptr_t)key; - return (sizeof(size_t) == 8) ? (k >> 3) : (k >> 2); +static inline uintptr_t w_hash_pointer(objc_object **key) { + return ptr_hash((uintptr_t)key); } /** @@ -65,7 +67,8 @@ static inline uintptr_t w_hash_pointer(id *key) { * @param entry Weak pointer hash set for a particular object. */ __attribute__((noinline, used)) -static void grow_refs_and_insert(weak_entry_t *entry, id *new_referrer) +static void grow_refs_and_insert(weak_entry_t *entry, + objc_object **new_referrer) { assert(entry->out_of_line); @@ -100,7 +103,7 @@ static void grow_refs_and_insert(weak_entry_t *entry, id *new_referrer) * @param entry The entry holding the set of weak pointers. * @param new_referrer The new weak pointer to be added. */ -static void append_referrer(weak_entry_t *entry, id *new_referrer) +static void append_referrer(weak_entry_t *entry, objc_object **new_referrer) { if (! entry->out_of_line) { // Try to insert inline. @@ -149,12 +152,12 @@ static void append_referrer(weak_entry_t *entry, id *new_referrer) * Remove old_referrer from set of referrers, if it's present. * Does not remove duplicates, because duplicates should not exist. * - * @todo this is slow if old_referrer is not present. But, is this ever the case? + * @todo this is slow if old_referrer is not present. Is this ever the case? * * @param entry The entry holding the referrers. * @param old_referrer The referrer to remove. */ -static void remove_referrer(weak_entry_t *entry, id *old_referrer) +static void remove_referrer(weak_entry_t *entry, objc_object **old_referrer) { if (! entry->out_of_line) { for (size_t i = 0; i < WEAK_INLINE_COUNT; i++) { @@ -163,8 +166,13 @@ static void remove_referrer(weak_entry_t *entry, id *old_referrer) return; } } - _objc_inform("attempted to remove unregistered weak referrer %p\n", + _objc_inform("Attempted to unregister unknown __weak variable " + "at %p. This is probably incorrect use of " + "objc_storeWeak() and objc_loadWeak(). " + "Break on objc_weak_error to debug.\n", old_referrer); + objc_weak_error(); + return; } size_t index = w_hash_pointer(old_referrer) & (entry->mask); @@ -173,8 +181,12 @@ static void remove_referrer(weak_entry_t *entry, id *old_referrer) index = (index+1) & entry->mask; hash_displacement++; if (hash_displacement > entry->max_hash_displacement) { - _objc_inform("attempted to remove unregistered weak referrer %p\n", + _objc_inform("Attempted to unregister unknown __weak variable " + "at %p. 
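Both probe loops above stop early by consulting max_hash_displacement, the farthest any entry has ever been pushed from its home slot. A generic sketch of that bookkeeping, using toy types rather than the runtime's own (capacity and hash function are arbitrary):

    #include <stdbool.h>
    #include <stdint.h>

    #define CAP 8                       /* power of two; mask = CAP-1 */

    typedef struct {
        void *slots[CAP];               /* NULL means empty */
        uintptr_t mask;                 /* CAP - 1 */
        uintptr_t max_hash_displacement;
    } probe_table;

    static uintptr_t hashp(void *p) {
        uintptr_t k = (uintptr_t)p;
        return (k >> 4) ^ (k >> 9);
    }

    /* Assumes the table never becomes completely full. */
    static void probe_insert(probe_table *t, void *p)
    {
        uintptr_t i = hashp(p) & t->mask, disp = 0;
        while (t->slots[i]) { i = (i + 1) & t->mask; disp++; }
        t->slots[i] = p;
        if (disp > t->max_hash_displacement) t->max_hash_displacement = disp;
    }

    static bool probe_find(probe_table *t, void *p)
    {
        uintptr_t i = hashp(p) & t->mask, disp = 0;
        while (t->slots[i] != p) {
            i = (i + 1) & t->mask;
            /* Nothing was ever displaced farther than this: definite miss. */
            if (++disp > t->max_hash_displacement) return false;
        }
        return true;
    }
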
This is probably incorrect use of " + "objc_storeWeak() and objc_loadWeak(). " + "Break on objc_weak_error to debug.\n", old_referrer); + objc_weak_error(); return; } } @@ -281,7 +293,8 @@ static void weak_entry_remove(weak_table_t *weak_table, weak_entry_t *entry) * * @return The table of weak referrers to this object. */ -static weak_entry_t *weak_entry_for_referent(weak_table_t *weak_table, id referent) +static weak_entry_t * +weak_entry_for_referent(weak_table_t *weak_table, objc_object *referent) { assert(referent); @@ -317,9 +330,13 @@ static weak_entry_t *weak_entry_for_referent(weak_table_t *weak_table, id refere * @param referent The object. * @param referrer The weak reference. */ -void -weak_unregister_no_lock(weak_table_t *weak_table, id referent, id *referrer) +void +weak_unregister_no_lock(weak_table_t *weak_table, id referent_id, + id *referrer_id) { + objc_object *referent = (objc_object *)referent_id; + objc_object **referrer = (objc_object **)referrer_id; + weak_entry_t *entry; if (!referent) return; @@ -357,45 +374,70 @@ weak_unregister_no_lock(weak_table_t *weak_table, id referent, id *referrer) * @param referrer The weak pointer address. */ id -weak_register_no_lock(weak_table_t *weak_table, id referent, id *referrer) +weak_register_no_lock(weak_table_t *weak_table, id referent_id, id *referrer_id) { - if (referent && !referent->isTaggedPointer()) { - // ensure that the referenced object is viable - BOOL (*allowsWeakReference)(id, SEL) = (BOOL(*)(id, SEL)) - object_getMethodImplementation(referent, - @selector(allowsWeakReference)); - if ((IMP)allowsWeakReference != _objc_msgForward) { - if (! (*allowsWeakReference)(referent, @selector(allowsWeakReference))) { - _objc_fatal("Cannot form weak reference to instance (%p) of class %s. It is possible that this object was over-released, or is in the process of deallocation.", (void*)referent, object_getClassName(referent)); - } - } - else { + objc_object *referent = (objc_object *)referent_id; + objc_object **referrer = (objc_object **)referrer_id; + + if (!referent || referent->isTaggedPointer()) return referent_id; + + // ensure that the referenced object is viable + bool deallocating; + if (!referent->ISA()->hasCustomRR()) { + deallocating = referent->rootIsDeallocating(); + } + else { + BOOL (*allowsWeakReference)(objc_object *, SEL) = + (BOOL(*)(objc_object *, SEL)) + object_getMethodImplementation((id)referent, + SEL_allowsWeakReference); + if ((IMP)allowsWeakReference == _objc_msgForward) { return nil; } - // now remember it and where it is being stored - weak_entry_t *entry; - if ((entry = weak_entry_for_referent(weak_table, referent))) { - append_referrer(entry, referrer); - } - else { - weak_entry_t new_entry; - new_entry.referent = referent; - new_entry.out_of_line = 0; - new_entry.inline_referrers[0] = referrer; - for (size_t i = 1; i < WEAK_INLINE_COUNT; i++) { - new_entry.inline_referrers[i] = nil; - } + deallocating = + ! (*allowsWeakReference)(referent, SEL_allowsWeakReference); + } - weak_grow_maybe(weak_table); - weak_entry_insert(weak_table, &new_entry); + if (deallocating) { + _objc_fatal("Cannot form weak reference to instance (%p) of " + "class %s. 
It is possible that this object was " + "over-released, or is in the process of deallocation.", + (void*)referent, object_getClassName((id)referent)); + } + + // now remember it and where it is being stored + weak_entry_t *entry; + if ((entry = weak_entry_for_referent(weak_table, referent))) { + append_referrer(entry, referrer); + } + else { + weak_entry_t new_entry; + new_entry.referent = referent; + new_entry.out_of_line = 0; + new_entry.inline_referrers[0] = referrer; + for (size_t i = 1; i < WEAK_INLINE_COUNT; i++) { + new_entry.inline_referrers[i] = nil; } + + weak_grow_maybe(weak_table); + weak_entry_insert(weak_table, &new_entry); } // Do not set *referrer. objc_storeWeak() requires that the // value not change. - return referent; + return referent_id; +} + + +#if !NDEBUG +bool +weak_is_registered_no_lock(weak_table_t *weak_table, id referent_id) +{ + return weak_entry_for_referent(weak_table, (objc_object *)referent_id); } +#endif + /** * Called by dealloc; nils out all weak pointers that point to the @@ -405,8 +447,10 @@ weak_register_no_lock(weak_table_t *weak_table, id referent, id *referrer) * @param referent The object being deallocated. */ void -weak_clear_no_lock(weak_table_t *weak_table, id referent) +weak_clear_no_lock(weak_table_t *weak_table, id referent_id) { + objc_object *referent = (objc_object *)referent_id; + weak_entry_t *entry = weak_entry_for_referent(weak_table, referent); if (entry == nil) { /// XXX shouldn't happen, but does with mismatched CF/objc @@ -428,13 +472,18 @@ weak_clear_no_lock(weak_table_t *weak_table, id referent) } for (size_t i = 0; i < count; ++i) { - id *referrer = referrers[i]; + objc_object **referrer = referrers[i]; if (referrer) { if (*referrer == referent) { *referrer = nil; } else if (*referrer) { - _objc_inform("__weak variable @ %p holds %p instead of %p\n", referrer, (void*)*referrer, (void*)referent); + _objc_inform("__weak variable at %p holds %p instead of %p. " + "This is probably incorrect use of " + "objc_storeWeak() and objc_loadWeak(). " + "Break on objc_weak_error to debug.\n", + referrer, (void*)*referrer, (void*)referent); + objc_weak_error(); } } } @@ -454,29 +503,46 @@ weak_clear_no_lock(weak_table_t *weak_table, id referent) * @param weak_table * @param referrer The weak pointer address. */ +/* + Once upon a time we eagerly cleared *referrer if we saw the referent + was deallocating. This confuses code like NSPointerFunctions which + tries to pre-flight the raw storage and assumes if the storage is + zero then the weak system is done interfering. That is false: the + weak system is still going to check and clear the storage later. + This can cause objc_weak_error complaints and crashes. + So we now don't touch the storage until deallocation completes. 
+*/ id -weak_read_no_lock(weak_table_t *weak_table, id *referrer) +weak_read_no_lock(weak_table_t *weak_table, id *referrer_id) { - id referent = *referrer; - if (referent->isTaggedPointer()) return referent; + objc_object **referrer = (objc_object **)referrer_id; + objc_object *referent = *referrer; + if (referent->isTaggedPointer()) return (id)referent; weak_entry_t *entry; - if (referent == nil || !(entry = weak_entry_for_referent(weak_table, referent))) { - *referrer = nil; + if (referent == nil || + !(entry = weak_entry_for_referent(weak_table, referent))) + { return nil; } - BOOL (*tryRetain)(id, SEL) = (BOOL(*)(id, SEL)) - object_getMethodImplementation(referent, - @selector(retainWeakReference)); - if ((IMP)tryRetain == _objc_msgForward) { - *referrer = nil; - return nil; + if (! referent->ISA()->hasCustomRR()) { + if (! referent->rootTryRetain()) { + return nil; + } } - if (! (*tryRetain)(referent, @selector(retainWeakReference))) { - return nil; + else { + BOOL (*tryRetain)(objc_object *, SEL) = (BOOL(*)(objc_object *, SEL)) + object_getMethodImplementation((id)referent, + SEL_retainWeakReference); + if ((IMP)tryRetain == _objc_msgForward) { + return nil; + } + if (! (*tryRetain)(referent, SEL_retainWeakReference)) { + return nil; + } } - - return referent; + + return (id)referent; } diff --git a/runtime/objc.h b/runtime/objc.h index 1dd926c..be2f29b 100644 --- a/runtime/objc.h +++ b/runtime/objc.h @@ -31,6 +31,7 @@ #include // for __DARWIN_NULL #include #include +#include #if !OBJC_TYPES_DEFINED /// An opaque type that represents an Objective-C class. @@ -58,16 +59,20 @@ typedef id (*IMP)(id, SEL, ...); #define OBJC_BOOL_DEFINED /// Type to represent a boolean value. +#if !defined(OBJC_HIDE_64) && TARGET_OS_IPHONE && __LP64__ +typedef bool BOOL; +#else typedef signed char BOOL; // BOOL is explicitly signed so @encode(BOOL) == "c" rather than "C" // even if -funsigned-char is used. +#endif #if __has_feature(objc_bool) -#define YES __objc_yes -#define NO __objc_no +#define YES __objc_yes +#define NO __objc_no #else -#define YES ((BOOL)1) -#define NO ((BOOL)0) +#define YES ((BOOL)1) +#define NO ((BOOL)0) #endif #ifndef Nil diff --git a/runtime/runtime.h b/runtime/runtime.h index 1125ace..6001f82 100644 --- a/runtime/runtime.h +++ b/runtime/runtime.h @@ -140,6 +140,18 @@ OBJC_EXPORT Class object_getClass(id obj) OBJC_EXPORT Class object_setClass(id obj, Class cls) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); + +/** + * Returns whether an object is a class object. + * + * @param obj An Objective-C object. + * + * @return true if the object is a class or metaclass, false otherwise. + */ +OBJC_EXPORT BOOL object_isClass(id obj) + __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); + + /** * Returns the class name of a given object. * @@ -517,7 +529,8 @@ OBJC_EXPORT IMP class_getMethodImplementation(Class cls, SEL name) * with an instance of the class, or \c NULL if \e cls is \c Nil. */ OBJC_EXPORT IMP class_getMethodImplementation_stret(Class cls, SEL name) - __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); + __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0) + OBJC_ARM64_UNAVAILABLE; /** * Returns a Boolean value that indicates whether instances of a class respond to a particular selector. @@ -796,8 +809,6 @@ OBJC_EXPORT id class_createInstance(Class cls, size_t extraBytes) * @return \e bytes on success, \c nil otherwise. (For example, \e cls or \e bytes * might be \c nil) * - * @note \c class_createInstance and \c class_createInstances preflight this. 
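object_isClass() above is new API in this release. A short usage sketch (Foundation is assumed only to supply NSObject; any class would do):

    #include <objc/runtime.h>
    #include <Foundation/Foundation.h>
    #include <assert.h>

    int main(void)
    {
        @autoreleasepool {
            NSObject *obj = [[NSObject alloc] init];
            Class cls = [NSObject class];

            assert(object_isClass((id)cls));                    // class object
            assert(object_isClass((id)object_getClass(cls)));   // metaclass
            assert(!object_isClass(obj));                       // plain instance
        }
        return 0;
    }
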
- * * @see class_createInstance */ OBJC_EXPORT id objc_constructInstance(Class cls, void *bytes) @@ -1163,7 +1174,7 @@ OBJC_EXPORT const char *protocol_getName(Protocol *p) * If the protocol does not contain the specified method, returns an \c objc_method_description structure * with the value \c {NULL, \c NULL}. * - * @note Methods in other protocols adopted by this protocol are not included. + * @note This function recursively searches any protocols that this protocol conforms to. */ OBJC_EXPORT struct objc_method_description protocol_getMethodDescription(Protocol *p, SEL aSel, BOOL isRequiredMethod, BOOL isInstanceMethod) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); diff --git a/test/arr-weak.m b/test/arr-weak.m index 06e91da..3dab99f 100644 --- a/test/arr-weak.m +++ b/test/arr-weak.m @@ -15,26 +15,63 @@ static id weak; static id weak2; static bool did_dealloc; +static int state; + +@interface NSObject (WeakInternals) +-(BOOL)_tryRetain; +-(BOOL)_isDeallocating; +@end + @interface Test : NSObject @end @implementation Test -(void)dealloc { + // The value returned by objc_loadWeak() is now nil, + // but the storage is not yet cleared. testassert(weak == self); testassert(weak2 == self); + // objc_loadWeak() does not eagerly clear the storage. + testassert(objc_loadWeakRetained(&weak) == nil); + testassert(weak != nil); + + // dealloc clears the storage. testprintf("Weak references clear during super dealloc\n"); - testassert(weak2 != NULL); + testassert(weak2 != nil); [super dealloc]; - testassert(weak2 == NULL); + testassert(weak == nil); + testassert(weak2 == nil); did_dealloc = true; } @end +@interface CustomTryRetain : Test @end +@implementation CustomTryRetain +-(BOOL)_tryRetain { state++; return [super _tryRetain]; } +@end + +@interface CustomIsDeallocating : Test @end +@implementation CustomIsDeallocating +-(BOOL)_isDeallocating { state++; return [super _isDeallocating]; } +@end + +@interface CustomAllowsWeakReference : Test @end +@implementation CustomAllowsWeakReference +-(BOOL)allowsWeakReference { state++; return [super allowsWeakReference]; } +@end + +@interface CustomRetainWeakReference : Test @end +@implementation CustomRetainWeakReference +-(BOOL)retainWeakReference { state++; return [super retainWeakReference]; } +@end + @interface Crash : NSObject @end @implementation Crash -(void)dealloc { testassert(weak == self); testassert(weak2 == self); + testassert(objc_loadWeakRetained(&weak) == nil); + testassert(objc_loadWeakRetained(&weak2) == nil); testprintf("Weak store crashes while deallocating\n"); objc_storeWeak(&weak, self); @@ -44,50 +81,97 @@ static bool did_dealloc; @end -void cycle(Test *obj, Test *obj2) +void cycle(Class cls, Test *obj, Test *obj2) { + testprintf("Cycling class %s\n", class_getName(cls)); + id result; + // state counts calls to custom weak methods + // Difference test classes have different expected values. 
+ int storeTarget; + int loadTarget; + if (cls == [Test class]) { + storeTarget = 0; + loadTarget = 0; + } + else if (cls == [CustomTryRetain class] || + cls == [CustomRetainWeakReference class]) + { + storeTarget = 0; + loadTarget = 1; + } + else if (cls == [CustomIsDeallocating class] || + cls == [CustomAllowsWeakReference class]) + { + storeTarget = 1; + loadTarget = 0; + } + else fail("wut"); + testprintf("Weak assignment\n"); + state = 0; result = objc_storeWeak(&weak, obj); + testassert(state == storeTarget); testassert(result == obj); testassert(weak == obj); testprintf("Weak assignment to the same value\n"); + state = 0; result = objc_storeWeak(&weak, obj); + testassert(state == storeTarget); testassert(result == obj); testassert(weak == obj); + testprintf("Weak load\n"); + state = 0; + result = objc_loadWeakRetained(&weak); + if (state != loadTarget) testprintf("state %d target %d\n", state, loadTarget); + testassert(state == loadTarget); + testassert(result == obj); + testassert(result == weak); + [result release]; + testprintf("Weak assignment to different value\n"); + state = 0; result = objc_storeWeak(&weak, obj2); + testassert(state == storeTarget); testassert(result == obj2); testassert(weak == obj2); testprintf("Weak assignment to NULL\n"); + state = 0; result = objc_storeWeak(&weak, NULL); + testassert(state == 0); testassert(result == NULL); testassert(weak == NULL); testprintf("Weak re-assignment to NULL\n"); + state = 0; result = objc_storeWeak(&weak, NULL); + testassert(state == 0); testassert(result == NULL); testassert(weak == NULL); testprintf("Weak move\n"); + state = 0; result = objc_storeWeak(&weak, obj); + testassert(state == storeTarget); testassert(result == obj); testassert(weak == obj); - weak2 = (id)(PAGE_SIZE-16); + weak2 = (id)(PAGE_MAX_SIZE-16); objc_moveWeak(&weak2, &weak); testassert(weak == nil); testassert(weak2 == obj); objc_storeWeak(&weak2, NULL); testprintf("Weak copy\n"); + state = 0; result = objc_storeWeak(&weak, obj); + testassert(state == storeTarget); testassert(result == obj); testassert(weak == obj); - weak2 = (id)(PAGE_SIZE-16); + weak2 = (id)(PAGE_MAX_SIZE-16); objc_copyWeak(&weak2, &weak); testassert(weak == obj); testassert(weak2 == obj); @@ -96,13 +180,17 @@ void cycle(Test *obj, Test *obj2) testprintf("Weak clear\n"); - id obj3 = [Test new]; + id obj3 = [cls new]; + state = 0; result = objc_storeWeak(&weak, obj3); + testassert(state == storeTarget); testassert(result == obj3); testassert(weak == obj3); + state = 0; result = objc_storeWeak(&weak2, obj3); + testassert(state == storeTarget); testassert(result == obj3); testassert(weak2 == obj3); @@ -114,19 +202,18 @@ void cycle(Test *obj, Test *obj2) } -int main() +void test_class(Class cls) { - Test *obj = [Test new]; - Test *obj2 = [Test new]; - id result; + Test *obj = [cls new]; + Test *obj2 = [cls new]; for (int i = 0; i < 100000; i++) { if (i == 10) leak_mark(); - cycle(obj, obj2); + cycle(cls, obj, obj2); } // allow some slop for [Test new] inside cycle() // to land in different side table stripes - leak_check(3072); + leak_check(8192); // rdar://14105994 @@ -137,7 +224,18 @@ int main() for (size_t i = 0; i < sizeof(weaks)/sizeof(weaks[0]); i++) { objc_storeWeak(&weaks[i], nil); } +} +int main() +{ + test_class([Test class]); + test_class([CustomTryRetain class]); + test_class([CustomIsDeallocating class]); + test_class([CustomAllowsWeakReference class]); + test_class([CustomRetainWeakReference class]); + + + id result; Crash *obj3 = [Crash new]; result = objc_storeWeak(&weak, 
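The move and copy steps above also show that the destination slot need not hold a valid weak reference beforehand; both calls initialize it. A hedged MRC sketch of the difference between the two (the extern declarations are assumptions of the sketch):

    // Build with -fno-objc-arc.
    #include <Foundation/Foundation.h>
    #include <assert.h>

    extern id objc_storeWeak(id *location, id obj);
    extern void objc_copyWeak(id *to, id *from);
    extern void objc_moveWeak(id *to, id *from);
    extern void objc_destroyWeak(id *location);

    int main(void)
    {
        id src = nil, dst = nil;
        NSObject *obj = [[NSObject alloc] init];

        objc_storeWeak(&src, obj);

        objc_copyWeak(&dst, &src);      // both slots now track obj
        assert(src == obj && dst == obj);
        objc_destroyWeak(&dst);

        objc_moveWeak(&dst, &src);      // dst tracks obj, src is left nil
        assert(dst == obj && src == nil);
        objc_destroyWeak(&dst);

        [obj release];
        return 0;
    }
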
obj3); diff --git a/test/atomicProperty.mm b/test/atomicProperty.mm index aeb06e0..ea8bade 100644 --- a/test/atomicProperty.mm +++ b/test/atomicProperty.mm @@ -24,33 +24,7 @@ public: @implementation TestAtomicProperty -#if 1 // with new enough compiler, this will be synthesized automatically. - -extern void objc_copyCppObjectAtomic(void *dest, const void *src, void (*copyHelper) (void *dest, const void *source)); - -static void copySerialNumber(void *d, const void *s) { - SerialNumber *dest = (SerialNumber *)d; - const SerialNumber *src = (const SerialNumber *)s; - dest->operator=(*src); -} - -- (SerialNumber)number { - SerialNumber result; - objc_copyCppObjectAtomic(&result, &number, copySerialNumber); - return result; -} - -- (void)setNumber:(SerialNumber)aNumber { - objc_copyCppObjectAtomic(&number, &aNumber, copySerialNumber); -} - -+(void)initialize { - testwarn("rdar://6137845 compiler should synthesize calls to objc_copyCppObjectAtomic"); -} - -#else @synthesize number; -#endif @end diff --git a/test/badCache.m b/test/badCache.m index a65b6b6..b9dfddb 100644 --- a/test/badCache.m +++ b/test/badCache.m @@ -18,7 +18,7 @@ END #include "test.h" -#if !__OBJC2__ +#if !__OBJC2__ || __arm__ int main() { diff --git a/test/badCache2.m b/test/badCache2.m index 609ef38..0532fef 100644 --- a/test/badCache2.m +++ b/test/badCache2.m @@ -18,7 +18,7 @@ END #include "test.h" -#if !__OBJC2__ +#if !__OBJC2__ || __arm__ int main() { diff --git a/test/badTagClass.m b/test/badTagClass.m index 0884c25..d7d10be 100644 --- a/test/badTagClass.m +++ b/test/badTagClass.m @@ -1,7 +1,7 @@ /* TEST_CRASHES TEST_RUN_OUTPUT -objc\[\d+\]: tag index 7 used for two different classes \(was 0x[0-9a-fA-F]+ NSObject, now 0x[0-9a-fA-F]+ Protocol\) +objc\[\d+\]: tag index 7 used for two different classes \(was 0x[0-9a-fA-F]+ NSObject, now 0x[0-9a-fA-F]+ TestRoot\) CRASHED: SIG(ILL|TRAP) OR no tagged pointers @@ -10,6 +10,7 @@ END */ #include "test.h" +#include "testroot.i" #include #include @@ -25,7 +26,7 @@ int main() _objc_registerTaggedPointerClass(OBJC_TAG_7, [NSObject class]); // colliding registration disallowed - _objc_registerTaggedPointerClass(OBJC_TAG_7, [Protocol class]); + _objc_registerTaggedPointerClass(OBJC_TAG_7, [TestRoot class]); fail(__FILE__); } diff --git a/test/bigrc.m b/test/bigrc.m new file mode 100644 index 0000000..84310b4 --- /dev/null +++ b/test/bigrc.m @@ -0,0 +1,135 @@ +// TEST_CONFIG MEM=mrc +/* +TEST_RUN_OUTPUT +objc\[\d+\]: Deallocator object 0x[0-9a-fA-F]+ overreleased while already deallocating; break on objc_overrelease_during_dealloc_error to debug +OK: bigrc.m +OR +no overrelease enforcement +OK: bigrc.m +END + */ + +#include "test.h" +#include "testroot.i" + +static size_t LOTS; + +@interface Deallocator : TestRoot @end +@implementation Deallocator + +-(void)dealloc +{ + id o = self; + size_t rc = 1; + + + testprintf("Retain a lot during dealloc\n"); + + testassert(rc == 1); + testassert([o retainCount] == rc); + do { + [o retain]; + if (rc % 0x100000 == 0) testprintf("%zx/%zx ++\n", rc, LOTS); + } while (++rc < LOTS); + + testassert([o retainCount] == rc); + + do { + [o release]; + if (rc % 0x100000 == 0) testprintf("%zx/%zx --\n", rc, LOTS); + } while (--rc > 1); + + testassert(rc == 1); + testassert([o retainCount] == rc); + + + testprintf("Overrelease during dealloc\n"); + + // Not all architectures enforce this. 
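badTagClass.m above exercises the tagged-pointer SPI from objc-internal.h. A hedged sketch of the legitimate, non-colliding half of that flow; the tag slot and class choice are arbitrary, and the call only means something on targets where tagged pointers exist:

    #include <objc/runtime.h>
    #include <objc/objc-internal.h>   // SPI header; not part of the public SDK

    static void claim_tag_slot(void)
    {
        // Each tag index may be bound to at most one class. Binding the
        // same index twice, as the test does, is a fatal runtime error.
        _objc_registerTaggedPointerClass(OBJC_TAG_7, objc_getClass("NSObject"));
    }
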
+#if !SUPPORT_NONPOINTER_ISA + testwarn("no overrelease enforcement"); + fprintf(stderr, "no overrelease enforcement\n"); +#endif + [o release]; + + [super dealloc]; +} + +@end + + +int main() +{ + Deallocator *o = [Deallocator new]; + size_t rc = 1; + + [o retain]; + + uintptr_t isa = *(uintptr_t *)o; + if (isa & 1) { + // Assume refcount in high bits. + LOTS = 1 << (4 + __builtin_clzll(isa)); + testprintf("LOTS %zu via cntlzw\n", LOTS); + } else { + LOTS = 0x1000000; + testprintf("LOTS %zu via guess\n", LOTS); + } + + [o release]; + + + testprintf("Retain a lot\n"); + + testassert(rc == 1); + testassert([o retainCount] == rc); + do { + [o retain]; + if (rc % 0x100000 == 0) testprintf("%zx/%zx ++\n", rc, LOTS); + } while (++rc < LOTS); + + testassert([o retainCount] == rc); + + do { + [o release]; + if (rc % 0x100000 == 0) testprintf("%zx/%zx --\n", rc, LOTS); + } while (--rc > 1); + + testassert(rc == 1); + testassert([o retainCount] == rc); + + + testprintf("tryRetain a lot\n"); + + id w; + objc_storeWeak(&w, o); + testassert(w == o); + + testassert(rc == 1); + testassert([o retainCount] == rc); + do { + objc_loadWeakRetained(&w); + if (rc % 0x100000 == 0) testprintf("%zx/%zx ++\n", rc, LOTS); + } while (++rc < LOTS); + + testassert([o retainCount] == rc); + + do { + [o release]; + if (rc % 0x100000 == 0) testprintf("%zx/%zx --\n", rc, LOTS); + } while (--rc > 1); + + testassert(rc == 1); + testassert([o retainCount] == rc); + + objc_storeWeak(&w, nil); + + + testprintf("dealloc\n"); + + testassert(TestRootDealloc == 0); + [o release]; + testassert(TestRootDealloc == 1); + + + succeed(__FILE__); +} diff --git a/test/blocksAsImps.m b/test/blocksAsImps.m index 5376efb..360321a 100644 --- a/test/blocksAsImps.m +++ b/test/blocksAsImps.m @@ -13,8 +13,15 @@ #if !__clang__ // gcc and llvm-gcc will never support struct-return marking # define STRET_OK 0 +# define STRET_SPECIAL 0 +#elif __arm64__ + // stret supported, but is identical to non-stret +# define STRET_OK 1 +# define STRET_SPECIAL 0 #else + // stret supported and distinct from non-stret # define STRET_OK 1 +# define STRET_SPECIAL 1 #endif typedef struct BigStruct { @@ -71,15 +78,21 @@ void dealloc_imp(Deallocator *self, SEL _cmd) { /* Code copied from objc-block-trampolines.m to test Block innards */ typedef enum { ReturnValueInRegisterArgumentMode, +#if STRET_SPECIAL ReturnValueOnStackArgumentMode, +#endif ArgumentModeMax } ArgumentMode; static ArgumentMode _argumentModeForBlock(id block) { ArgumentMode aMode = ReturnValueInRegisterArgumentMode; +#if STRET_SPECIAL if ( _Block_use_stret((__bridge void *)block) ) aMode = ReturnValueOnStackArgumentMode; +#else + testassert(!_Block_use_stret((__bridge void *)block)); +#endif return aMode; } @@ -96,9 +109,13 @@ int main () { #if STRET_OK BigStruct (^stackReturn)() = ^() { BigStruct k; return k; }; aMode = _argumentModeForBlock(stackReturn); +# if STRET_SPECIAL testassert(aMode == ReturnValueOnStackArgumentMode); +# else + testassert(aMode == ReturnValueInRegisterArgumentMode); +# endif #endif - + #define TEST_QUANTITY 100000 static FuncPtr funcArray[TEST_QUANTITY]; diff --git a/test/cdtors.mm b/test/cdtors.mm index 597fc5a..9fc7688 100644 --- a/test/cdtors.mm +++ b/test/cdtors.mm @@ -1,5 +1,13 @@ // TEST_CONFIG +#if USE_FOUNDATION +#define SUPERCLASS NSObject +#define FILENAME "nscdtors.mm" +#else +#define SUPERCLASS TestRoot +#define FILENAME "cdtors.mm" +#endif + #include "test.h" #include @@ -41,7 +49,7 @@ class cxx2 { */ -@interface CXXBase : TestRoot { +@interface CXXBase : 
SUPERCLASS { cxx1 baseIvar; } @end @@ -293,5 +301,5 @@ int main() // Batch allocation, ctors fail for every object // Batch allocation, ctors fail for every other object - succeed(__FILE__); + succeed(FILENAME); } diff --git a/test/classpair.m b/test/classpair.m index 9ff124e..a4dff14 100644 --- a/test/classpair.m +++ b/test/classpair.m @@ -354,10 +354,11 @@ static void cycle(void) int main() { + int count = 1000; + cycle(); cycle(); - int count = 1000; leak_mark(); while (count--) { testonthread(^{ cycle(); }); diff --git a/test/customrr.m b/test/customrr.m index 1a1f0c2..7fa21ae 100644 --- a/test/customrr.m +++ b/test/customrr.m @@ -259,6 +259,9 @@ int main(int argc __unused, char **argv) void *dlh; +#if __x86_64__ + // vtable dispatch can introduce bypass just like the ARC entrypoints +#else testprintf("method dispatch does not bypass\n"); zero(); @@ -310,6 +313,7 @@ int main(int argc __unused, char **argv) testassert(PlusReleases == 3); [UnrealizedSubA3 autorelease]; testassert(PlusAutoreleases == 3); +#endif testprintf("objc_msgSend() does not bypass\n"); @@ -417,9 +421,6 @@ int main(int argc __unused, char **argv) testassert(SubPlusAutoreleases == 1); #if __OBJC2__ -#if 1 - testwarn("rdar://12961688 CustomRR is wrong for unrealized classes"); -#else objc_retain((Class)&OBJC_CLASS_$_UnrealizedSubC1); testassert(PlusRetains == 3); objc_release((Class)&OBJC_CLASS_$_UnrealizedSubC2); @@ -427,7 +428,6 @@ int main(int argc __unused, char **argv) objc_autorelease((Class)&OBJC_CLASS_$_UnrealizedSubC3); testassert(PlusAutoreleases == 3); #endif -#endif testprintf("unrelated addMethod does not clobber\n"); @@ -453,7 +453,7 @@ int main(int argc __unused, char **argv) objc_autorelease(obj); testassert(Autoreleases == 0); - class_addMethod(cls->isa, @selector(retain), (IMP)imp_fn, ""); + class_addMethod(object_getClass(cls), @selector(retain), (IMP)imp_fn, ""); objc_retain(obj); testassert(Retains == 0); diff --git a/test/designatedinit.m b/test/designatedinit.m new file mode 100644 index 0000000..27d6ffd --- /dev/null +++ b/test/designatedinit.m @@ -0,0 +1,27 @@ +// TEST_CONFIG +/* TEST_BUILD_OUTPUT +.*designatedinit.m:\d+:\d+: warning: designated initializer should only invoke a designated initializer on 'super'.* +.*designatedinit.m:\d+:\d+: note: .* +.*designatedinit.m:\d+:\d+: warning: designated initializer missing a 'super' call to a designated initializer of the super class.* +.*designatedinit.m:\d+:\d+: note: .* +.*designatedinit.m:\d+:\d+: warning: method override for the designated initializer of the superclass '-init' not found.* +.*NSObject.h:\d+:\d+: note: .* +END */ + +#include "test.h" +#include + +@interface C : NSObject +-(id) initWithInt:(int)i NS_DESIGNATED_INITIALIZER; +@end + +@implementation C +-(id) initWithInt:(int)__unused i { + return [self init]; +} +@end + +int main() +{ + succeed(__FILE__); +} diff --git a/test/duplicatedClasses.m b/test/duplicatedClasses.m new file mode 100644 index 0000000..30aea0b --- /dev/null +++ b/test/duplicatedClasses.m @@ -0,0 +1,21 @@ +// TEST_ENV OBJC_DEBUG_DUPLICATE_CLASSES=YES +// TEST_CRASHES +/* +TEST_RUN_OUTPUT +objc\[\d+\]: Class GKScore is implemented in both [^\s]+ and [^\s]+ One of the two will be used. Which one is undefined. 
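blocksAsImps.m peeks at the trampoline's argument-mode logic; from the API side the same machinery is reached through imp_implementationWithBlock(), where the trampoline substitutes the message receiver for the block literal so the block's first parameter acts as self. A sketch (method name and type encoding are illustrative):

    #include <objc/runtime.h>
    #include <objc/message.h>
    #include <Foundation/Foundation.h>

    int main(void)
    {
        @autoreleasepool {
            // Wrap a block in an IMP. The block takes self first; _cmd is
            // not available to block-based methods.
            IMP imp = imp_implementationWithBlock(^int(id self, int x) {
                NSLog(@"%@ received %d", self, x);
                return x * 2;
            });

            class_addMethod([NSObject class], sel_registerName("doubled:"),
                            imp, "i@:i");

            int r = ((int (*)(id, SEL, int))objc_msgSend)(
                        [[NSObject alloc] init], sel_registerName("doubled:"), 21);
            NSLog(@"doubled: returned %d", r);   // 42
        }
        return 0;
    }
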
+CRASHED: SIG(ILL|TRAP) +END + */ + +#include "test.h" +#include "testroot.i" + +@interface GKScore : TestRoot @end +@implementation GKScore @end + +int main() +{ + void *dl = dlopen("/System/Library/Frameworks/GameKit.framework/GameKit", RTLD_LAZY); + if (!dl) fail("couldn't open GameKit"); + fail("should have crashed already"); +} diff --git a/test/evil-category-def.m b/test/evil-category-def.m index 4f8bcb0..a4bd1dc 100644 --- a/test/evil-category-def.m +++ b/test/evil-category-def.m @@ -40,7 +40,7 @@ asm( ".long 1 \n" PTR "L_load \n" PTR "L_load \n" - PTR str2(SHARED_REGION_BASE+SHARED_REGION_SIZE-PAGE_SIZE) " \n" + PTR str2(SHARED_REGION_BASE+SHARED_REGION_SIZE-PAGE_MAX_SIZE) " \n" "L_good_methods: \n" ".long 24 \n" diff --git a/test/evil-class-def.m b/test/evil-class-def.m index 392e524..a5a5ba7 100644 --- a/test/evil-class-def.m +++ b/test/evil-class-def.m @@ -4,15 +4,19 @@ #if __LP64__ # define PTR " .quad " +# define PTRSIZE "8" +# define LOGPTRSIZE "3" #else # define PTR " .long " +# define PTRSIZE "4" +# define LOGPTRSIZE "2" #endif #define str(x) #x #define str2(x) str(x) __BEGIN_DECLS -void nop(void) { } +id nop(id self) { return self; } __END_DECLS asm( @@ -25,6 +29,34 @@ asm( PTR "__objc_empty_cache \n" PTR "0 \n" PTR "L_ro \n" + // pad to OBJC_MAX_CLASS_SIZE + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" "" "_OBJC_METACLASS_$_Super: \n" PTR "_OBJC_METACLASS_$_Super \n" @@ -32,11 +64,39 @@ asm( PTR "__objc_empty_cache \n" PTR "0 \n" PTR "L_meta_ro \n" + // pad to OBJC_MAX_CLASS_SIZE + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" "" "L_ro: \n" ".long 2 \n" ".long 0 \n" - ".long 0 \n" + ".long "PTRSIZE" \n" #if __LP64__ ".long 0 \n" #endif @@ -48,7 +108,7 @@ asm( PTR "L_good_methods \n" #endif PTR "0 \n" - PTR "0 \n" + PTR "L_super_ivars \n" PTR "0 \n" PTR "0 \n" "" @@ -80,6 +140,34 @@ asm( PTR "__objc_empty_cache \n" PTR "0 \n" PTR "L_sub_ro \n" + // pad to OBJC_MAX_CLASS_SIZE + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" "" "_OBJC_METACLASS_$_Sub: \n" PTR "_OBJC_METACLASS_$_Super \n" @@ -87,11 +175,39 @@ asm( PTR "__objc_empty_cache \n" PTR "0 \n" PTR "L_sub_meta_ro \n" + // pad to OBJC_MAX_CLASS_SIZE + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" + PTR "0 \n" "" "L_sub_ro: \n" ".long 2 \n" ".long 0 \n" - ".long 0 \n" + ".long "PTRSIZE" \n" #if __LP64__ ".long 0 \n" #endif @@ -103,7 +219,7 @@ 
asm( PTR "L_good_methods \n" #endif PTR "0 \n" - PTR "0 \n" + PTR "L_sub_ivars \n" PTR "0 \n" PTR "0 \n" "" @@ -127,23 +243,54 @@ asm( PTR "0 \n" "L_evil_methods: \n" - ".long 24 \n" + ".long 3*"PTRSIZE" \n" ".long 1 \n" PTR "L_load \n" PTR "L_load \n" - PTR str2(SHARED_REGION_BASE+SHARED_REGION_SIZE-PAGE_SIZE) " \n" + PTR str2(SHARED_REGION_BASE+SHARED_REGION_SIZE-PAGE_MAX_SIZE) " \n" "L_good_methods: \n" - ".long 24 \n" - ".long 1 \n" + ".long 3*"PTRSIZE" \n" + ".long 2 \n" PTR "L_load \n" PTR "L_load \n" PTR "_nop \n" + PTR "L_self \n" + PTR "L_self \n" + PTR "_nop \n" + + "L_super_ivars: \n" + ".long 4*"PTRSIZE" \n" + ".long 1 \n" + PTR "L_super_ivar_offset \n" + PTR "L_super_ivar_name \n" + PTR "L_super_ivar_type \n" + ".long "LOGPTRSIZE" \n" + ".long "PTRSIZE" \n" + + "L_sub_ivars: \n" + ".long 4*"PTRSIZE" \n" + ".long 1 \n" + PTR "L_sub_ivar_offset \n" + PTR "L_sub_ivar_name \n" + PTR "L_sub_ivar_type \n" + ".long "LOGPTRSIZE" \n" + ".long "PTRSIZE" \n" + + "L_super_ivar_offset: \n" + ".long 0 \n" + "L_sub_ivar_offset: \n" + ".long "PTRSIZE" \n" ".cstring \n" - "L_super_name: .ascii \"Super\\0\" \n" - "L_sub_name: .ascii \"Sub\\0\" \n" - "L_load: .ascii \"load\\0\" \n" + "L_super_name: .ascii \"Super\\0\" \n" + "L_sub_name: .ascii \"Sub\\0\" \n" + "L_load: .ascii \"load\\0\" \n" + "L_self: .ascii \"self\\0\" \n" + "L_super_ivar_name: .ascii \"super_ivar\\0\" \n" + "L_super_ivar_type: .ascii \"c\\0\" \n" + "L_sub_ivar_name: .ascii \"sub_ivar\\0\" \n" + "L_sub_ivar_type: .ascii \"@\\0\" \n" ".section __DATA,__objc_classlist \n" diff --git a/test/forward.m b/test/forward.m index 9f0c49e..1b87a8b 100644 --- a/test/forward.m +++ b/test/forward.m @@ -71,6 +71,11 @@ OBJC_ROOT_CLASS long long forward_handler(id self, SEL _cmd, long i1, long i2, long i3, long i4, long i5, long i6, long i7, long i8, long i9, long i10, long i11, long i12, long i13, double f1, double f2, double f3, double f4, double f5, double f6, double f7, double f8, double f9, double f10, double f11, double f12, double f13, double f14, double f15) { +#if __arm64__ + void *struct_addr; + __asm__ volatile("mov %0, x8" : "=r" (struct_addr) : : "x8"); +#endif + testassert(self == receiver); testassert(i1 == 1); @@ -141,6 +146,8 @@ long long forward_handler(id self, SEL _cmd, long i1, long i2, long i3, long i4, } result; result.fpval = FP_RESULT; return result.llval; +#elif defined(__arm64__) + __asm__ volatile("ldr d0, %0" : : "m" (FP_RESULT)); #else # error unknown architecture #endif @@ -150,7 +157,16 @@ long long forward_handler(id self, SEL _cmd, long i1, long i2, long i3, long i4, _cmd == @selector(stre2::::::::::::::::::::::::::::) || _cmd == @selector(stre3::::::::::::::::::::::::::::)) { +#if __i386__ || __x86_64__ || __arm__ fail("stret message sent to non-stret forward_handler"); +#elif __arm64__ + testassert(state == 17); + state = 18; + memcpy(struct_addr, &STRET_RESULT, sizeof(STRET_RESULT)); + return 0; +#else +# error unknown architecture +#endif } else { fail("unknown selector %s in forward_handler", sel_getName(_cmd)); @@ -223,6 +239,9 @@ struct stret forward_stret_handler(id self, SEL _cmd, long i1, long i2, long i3, +(void)initialize { } +(id)class { return self; } +#if __OBJC2__ +// forward:: not supported +#else -(long long) forward:(SEL)sel :(marg_list)args { char *p; @@ -378,6 +397,8 @@ struct stret forward_stret_handler(id self, SEL _cmd, long i1, long i2, long i3, return 0; } +#endif + @end typedef id (*id_fn_t)(id self, SEL _cmd, long i1, long i2, long i3, long i4, long i5, long i6, long i7, long i8, 
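The hand-assembled class and ivar lists above mirror the layout that objc_allocateClassPair() and friends build dynamically. For contrast, a sketch of the dynamic equivalent (class, ivar, and method names are illustrative):

    #include <objc/runtime.h>
    #include <Foundation/Foundation.h>

    static id dyn_self(id self, SEL _cmd) { return self; }   // like the test's _nop

    int main(void)
    {
        @autoreleasepool {
            Class cls = objc_allocateClassPair([NSObject class], "DynSub", 0);

            // One id-sized ivar; alignment is passed as log2(bytes).
            class_addIvar(cls, "sub_ivar", sizeof(id),
                          (uint8_t)__builtin_ctzl(sizeof(id)), "@");
            class_addMethod(cls, sel_registerName("dynSelf"),
                            (IMP)dyn_self, "@@:");
            objc_registerClassPair(cls);

            Ivar iv = class_getInstanceVariable(cls, "sub_ivar");
            NSLog(@"%s is at offset %td", ivar_getName(iv), ivar_getOffset(iv));
        }
        return 0;
    }
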
long i9, long i10, long i11, long i12, long i13, double f1, double f2, double f3, double f4, double f5, double f6, double f7, double f8, double f9, double f10, double f11, double f12, double f13, double f14, double f15); @@ -402,6 +423,8 @@ __END_DECLS asm(".text \n _getSP: movl %esp, %eax \n ret \n"); #elif defined(__arm__) asm(".text \n _getSP: mov r0, sp \n bx lr \n"); +#elif defined(__arm64__) + asm(".text \n _getSP: mov x0, sp \n ret \n"); #else # error unknown architecture #endif @@ -418,8 +441,18 @@ int main() void *sp1 = (void*)1; void *sp2 = (void*)2; + st_fn_t stret_fwd; +#if __arm64__ + stret_fwd = (st_fn_t)_objc_msgForward; +#else + stret_fwd = (st_fn_t)_objc_msgForward_stret; +#endif + receiver = [Super class]; +#if __OBJC2__ + // forward:: not supported +#else // Test default forward handler state = 1; @@ -591,7 +624,7 @@ int main() state = 7; sp1 = getSP(); - stval = ((st_fn_t)_objc_msgForward_stret)(receiver, @selector(stre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0); + stval = stret_fwd(receiver, @selector(stre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0); sp2 = getSP(); testassert(sp1 == sp2); testassert(state == 8); @@ -638,7 +671,7 @@ int main() state = 7; sp1 = getSP(); - stval = ((st_fn_t)_objc_msgForward_stret)(receiver, @selector(stre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0); + stval = stret_fwd(receiver, @selector(stre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0); sp2 = getSP(); testassert(sp1 == sp2); testassert(state == 8); @@ -687,7 +720,7 @@ int main() state = 7; sp1 = getSP(); - stval = ((st_fn_t)_objc_msgForward_stret)(receiver, @selector(stre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0); + stval = stret_fwd(receiver, @selector(stre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0); sp2 = getSP(); testassert(sp1 == sp2); testassert(state == 8); @@ -705,6 +738,9 @@ int main() testassert(stptr == &stval); #endif +// !__OBJC2__ +#endif + // Test user-defined forward handler @@ -880,7 +916,7 @@ int main() state = 17; sp1 = getSP(); - stval = ((st_fn_t)_objc_msgForward_stret)(receiver, @selector(stre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0); + stval = stret_fwd(receiver, @selector(stre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0); sp2 = getSP(); testassert(sp1 == sp2); testassert(state == 18); @@ -915,7 +951,7 @@ int main() state = 17; sp1 = getSP(); - stval = ((st_fn_t)_objc_msgForward_stret)(receiver, @selector(stre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0); + stval = stret_fwd(receiver, @selector(stre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 
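forward.m drives _objc_msgForward and the installed handler directly. With Foundation loaded, the handler it installs ends up in the familiar NSInvocation-based path, which at the source level looks roughly like this (a sketch compiled with ARC; the relay design is illustrative):

    #import <Foundation/Foundation.h>

    @interface Relay : NSObject
    @property (nonatomic, strong) id target;
    @end

    @implementation Relay
    // Reached after resolveInstanceMethod: and forwardingTargetForSelector:
    // decline to handle the message.
    - (NSMethodSignature *)methodSignatureForSelector:(SEL)sel
    {
        NSMethodSignature *sig = [super methodSignatureForSelector:sel];
        if (sig) return sig;
        return [self.target methodSignatureForSelector:sel];
    }

    - (void)forwardInvocation:(NSInvocation *)invocation
    {
        if ([self.target respondsToSelector:invocation.selector]) {
            [invocation invokeWithTarget:self.target];
        } else {
            [super forwardInvocation:invocation];   // raises the usual exception
        }
    }
    @end
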
3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0); sp2 = getSP(); testassert(sp1 == sp2); testassert(state == 18); @@ -952,7 +988,7 @@ int main() state = 17; sp1 = getSP(); - stval = ((st_fn_t)_objc_msgForward_stret)(receiver, @selector(stre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0); + stval = stret_fwd(receiver, @selector(stre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0); sp2 = getSP(); testassert(sp1 == sp2); testassert(state == 18); diff --git a/test/forwardDefault.m b/test/forwardDefault.m new file mode 100644 index 0000000..2d8b968 --- /dev/null +++ b/test/forwardDefault.m @@ -0,0 +1,31 @@ +/* +no arc, rdar://11368528 confused by Foundation +TEST_CONFIG MEM=mrc,gc +TEST_CRASHES +TEST_RUN_OUTPUT +objc\[\d+\]: \+\[NSObject fakeorama\]: unrecognized selector sent to instance 0x[0-9a-fA-F]+ \(no message forward handler is installed\) +CRASHED: SIG(ILL|TRAP) +OR +not OBJC2 +objc\[\d+\]: NSObject: Does not recognize selector forward:: \(while forwarding fakeorama\) +CRASHED: SIG(ILL|TRAP) +END +*/ + +#include "test.h" + +#include + +@interface NSObject (Fake) +-(void)fakeorama; +@end + +int main() +{ +#if !__OBJC2__ + fprintf(stderr, "not OBJC2\n"); +#endif + [NSObject fakeorama]; + fail("should have crashed"); +} + diff --git a/test/forwardDefaultStret.m b/test/forwardDefaultStret.m new file mode 100644 index 0000000..6c6229d --- /dev/null +++ b/test/forwardDefaultStret.m @@ -0,0 +1,31 @@ +/* +no arc, rdar://11368528 confused by Foundation +TEST_CONFIG MEM=mrc,gc +TEST_CRASHES +TEST_RUN_OUTPUT +objc\[\d+\]: \+\[NSObject fakeorama\]: unrecognized selector sent to instance 0x[0-9a-fA-F]+ \(no message forward handler is installed\) +CRASHED: SIG(ILL|TRAP) +OR +not OBJC2 +objc\[\d+\]: NSObject: Does not recognize selector forward:: \(while forwarding fakeorama\) +CRASHED: SIG(ILL|TRAP) +END +*/ + +#include "test.h" + +#include + +@interface NSObject (Fake) +-(struct stret)fakeorama; +@end + +int main() +{ +#if !__OBJC2__ + fprintf(stderr, "not OBJC2\n"); +#endif + [NSObject fakeorama]; + fail("should have crashed"); +} + diff --git a/test/getMethod.m b/test/getMethod.m index 7568330..84408a8 100644 --- a/test/getMethod.m +++ b/test/getMethod.m @@ -114,8 +114,10 @@ int main() testassert(class_getMethodImplementation(Sub_cls, sel) == (IMP)&_objc_msgForward); buf[0] = Sub_cls; testassert(object_getMethodImplementation(objc_unretainedObject(buf), sel) == (IMP)&_objc_msgForward); +#if !__arm64__ testassert(class_getMethodImplementation_stret(Sub_cls, sel) == (IMP)&_objc_msgForward_stret); testassert(object_getMethodImplementation_stret(objc_unretainedObject(buf), sel) == (IMP)&_objc_msgForward_stret); +#endif testassert(! class_getInstanceMethod(NULL, NULL)); testassert(! class_getInstanceMethod(NULL, sel)); diff --git a/test/includes.c b/test/includes.c new file mode 100644 index 0000000..01c1686 --- /dev/null +++ b/test/includes.c @@ -0,0 +1,38 @@ +// TEST_CONFIG + +// Verify that all headers can be included in any language. 
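getMethod.m above leans on the fact that class_getMethodImplementation() does not return NULL for an unimplemented selector on a live class; it returns the forwarding entry point instead. A small check built on the same idea (the selector name is arbitrary):

    #include <objc/runtime.h>
    #include <objc/message.h>
    #include <Foundation/Foundation.h>

    int main(void)
    {
        SEL sel = sel_registerName("definitelyNotImplementedAnywhere");

        IMP imp = class_getMethodImplementation([NSObject class], sel);
        BOOL implemented = (imp != (IMP)_objc_msgForward);
        NSLog(@"implemented = %d", implemented);   // 0

        // There is no _objc_msgForward_stret on arm64, which is why the
        // stret half of the test is compiled out there.
        return 0;
    }
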
+ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#if !TARGET_OS_IPHONE +#include +#include +#include +#endif + +#include "test.h" + +int main() +{ + succeed(__FILE__); +} diff --git a/test/load-parallel.m b/test/load-parallel.m index 54b0f6b..f050ea5 100644 --- a/test/load-parallel.m +++ b/test/load-parallel.m @@ -47,10 +47,6 @@ void *thread(void *arg) int main() { -#if TARGET_IPHONE_SIMULATOR - testwarn("simulator hangs calling dlopen() from +load"); - succeed(__FILE__); -#else pthread_t t[COUNT]; uintptr_t i; @@ -66,5 +62,4 @@ int main() testassert(state == COUNT*26); succeed(__FILE__); -#endif } diff --git a/test/method_getName.m b/test/method_getName.m index 45f6581..f3f7413 100644 --- a/test/method_getName.m +++ b/test/method_getName.m @@ -3,6 +3,8 @@ #include "test.h" #include #include + +#undef SUPPORT_NONPOINTER_ISA // remove test.h's definition #include "../runtime/objc-config.h" int main() { diff --git a/test/msgSend.m b/test/msgSend.m index fd71539..833f53f 100644 --- a/test/msgSend.m +++ b/test/msgSend.m @@ -18,6 +18,19 @@ int main() #include #include +#if __arm64__ + // no stret dispatchers +# define SUPPORT_STRET 0 +# define objc_msgSend_stret objc_msgSend +# define objc_msgSendSuper2_stret objc_msgSendSuper2 +# define objc_msgSend_stret_debug objc_msgSend_debug +# define objc_msgSendSuper2_stret_debug objc_msgSendSuper2_debug +# define method_invoke_stret method_invoke +#else +# define SUPPORT_STRET 1 +#endif + + #if defined(__arm__) // rdar://8331406 # define ALIGN_() @@ -90,6 +103,130 @@ long double LFP_RESULT = __LDBL_MIN__ + __LDBL_EPSILON__; static struct stret zero; +struct stret_i1 { + uintptr_t i1; +}; +struct stret_i2 { + uintptr_t i1; + uintptr_t i2; +}; +struct stret_i3 { + uintptr_t i1; + uintptr_t i2; + uintptr_t i3; +}; +struct stret_i4 { + uintptr_t i1; + uintptr_t i2; + uintptr_t i3; +}; +struct stret_i5 { + uintptr_t i1; + uintptr_t i2; + uintptr_t i3; + uintptr_t i4; + uintptr_t i5; +}; +struct stret_i6 { + uintptr_t i1; + uintptr_t i2; + uintptr_t i3; + uintptr_t i4; + uintptr_t i5; + uintptr_t i6; +}; +struct stret_i7 { + uintptr_t i1; + uintptr_t i2; + uintptr_t i3; + uintptr_t i4; + uintptr_t i5; + uintptr_t i6; + uintptr_t i7; +}; +struct stret_i8 { + uintptr_t i1; + uintptr_t i2; + uintptr_t i3; + uintptr_t i4; + uintptr_t i5; + uintptr_t i8; + uintptr_t i9; +}; +struct stret_i9 { + uintptr_t i1; + uintptr_t i2; + uintptr_t i3; + uintptr_t i4; + uintptr_t i5; + uintptr_t i6; + uintptr_t i7; + uintptr_t i8; + uintptr_t i9; +}; + +struct stret_d1 { + double d1; +}; +struct stret_d2 { + double d1; + double d2; +}; +struct stret_d3 { + double d1; + double d2; + double d3; +}; +struct stret_d4 { + double d1; + double d2; + double d3; +}; +struct stret_d5 { + double d1; + double d2; + double d3; + double d4; + double d5; +}; +struct stret_d6 { + double d1; + double d2; + double d3; + double d4; + double d5; + double d6; +}; +struct stret_d7 { + double d1; + double d2; + double d3; + double d4; + double d5; + double d6; + double d7; +}; +struct stret_d8 { + double d1; + double d2; + double d3; + double d4; + double d5; + double d8; + double d9; +}; +struct stret_d9 { + double d1; + double d2; + double d3; + double d4; + double d5; + double d6; + double d7; + double d8; + double d9; +}; + @implementation Super -(struct stret)stret { return STRET_RESULT; } @@ -176,6 +313,11 @@ static struct stret zero; return; } 
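The #defines at the top of msgSend.m fold the stret dispatchers into the plain ones on arm64. From the caller's side the rule is unchanged: cast objc_msgSend to the real function type before calling, and only reach for objc_msgSend_stret on targets that have it. A minimal sketch:

    #include <objc/message.h>
    #include <Foundation/Foundation.h>

    int main(void)
    {
        @autoreleasepool {
            NSObject *obj = [[NSObject alloc] init];

            // Cast to the exact signature; never call the vararg prototype raw.
            NSUInteger (*hashMsg)(id, SEL) =
                (NSUInteger (*)(id, SEL))objc_msgSend;
            NSLog(@"hash = %lu", (unsigned long)hashMsg(obj, @selector(hash)));

            // For memory-returned structs, i386/x86_64/arm32 use
            // objc_msgSend_stret; arm64 has no stret variant at all, so the
            // plain objc_msgSend is cast to the struct-returning type instead.
        }
        return 0;
    }
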
+-(void)voidret_nop2 +{ + return; +} + -(id)idret_nop { return ID_RESULT; @@ -201,6 +343,39 @@ static struct stret zero; return LFP_RESULT; } +#define STRET_IMP(n) \ ++(struct stret_##n)stret_##n##_zero \ +{ \ + struct stret_##n ret; \ + bzero(&ret, sizeof(ret)); \ + return ret; \ +} \ ++(struct stret_##n)stret_##n##_nonzero \ +{ \ + struct stret_##n ret; \ + memset(&ret, 0xff, sizeof(ret)); \ + return ret; \ +} + +STRET_IMP(i1) +STRET_IMP(i2) +STRET_IMP(i3) +STRET_IMP(i4) +STRET_IMP(i5) +STRET_IMP(i6) +STRET_IMP(i7) +STRET_IMP(i8) +STRET_IMP(i9) + +STRET_IMP(d1) +STRET_IMP(d2) +STRET_IMP(d3) +STRET_IMP(d4) +STRET_IMP(d5) +STRET_IMP(d6) +STRET_IMP(d7) +STRET_IMP(d8) +STRET_IMP(d9) +(id)idret: @@ -415,8 +590,33 @@ static struct stret zero; @end +#endif + + // DWARF checking machinery +#if TARGET_OS_WIN32 +// unimplemented on this platform +#elif !__OBJC2__ +// 32-bit Mac doesn't use DWARF unwind +#elif TARGET_OS_IPHONE && __arm__ +// 32-bit iOS device doesn't use DWARF unwind +#elif __has_feature(objc_arc) +// ARC's extra RR calls hit the traps at the wrong times +#else + +#define TEST_DWARF 1 + +// Classes with no implementations and no cache contents from elsewhere. +@interface SuperDW : TestRoot @end +@implementation SuperDW @end + +@interface Sub0DW : SuperDW @end +@implementation Sub0DW @end + +@interface SubDW : Sub0DW @end +@implementation SubDW @end + #include #include #include @@ -428,6 +628,18 @@ static struct stret zero; bool caught = false; uintptr_t clobbered; +__BEGIN_DECLS +extern void callit(void *obj, void *sel, void *fn); +extern struct stret callit_stret(void *obj, void *sel, void *fn); +__END_DECLS + +#if __x86_64__ + +#define OTOOL "/usr/bin/xcrun otool -arch x86_64 " + +typedef uint8_t insn_t; +#define BREAK_INSN ((insn_t)0xcc) // int3 + uintptr_t r12 = 0; uintptr_t r13 = 0; uintptr_t r14 = 0; @@ -511,39 +723,11 @@ void sigtrap(int sig, siginfo_t *info, void *cc) // handle_exception changed register state for continuation } - -uint8_t set(uintptr_t dst, uint8_t newvalue) -{ - uintptr_t start = dst & ~(PAGE_SIZE-1); - mprotect((void*)start, PAGE_SIZE, PROT_READ|PROT_WRITE); - // int3 - uint8_t oldvalue = *(uint8_t *)dst; - *(uint8_t *)dst = newvalue; - mprotect((void*)start, PAGE_SIZE, PROT_READ|PROT_EXEC); - return oldvalue; -} - -uint8_t clobber(void *fn, uintptr_t offset) -{ - clobbered = (uintptr_t)fn + offset; - return set((uintptr_t)fn + offset, 0xcc /*int3*/); -} - -void unclobber(void *fn, uintptr_t offset, uint8_t oldvalue) -{ - set((uintptr_t)fn + offset, oldvalue); -} - -__BEGIN_DECLS -extern void callit(void *obj, void *sel, void *fn); -extern struct stret callit_stret(void *obj, void *sel, void *fn); -__END_DECLS - __asm__( "\n .text" "\n .globl _callit" "\n _callit:" -// save rsp and rip registers to variables +// save sp and return address to variables "\n movq (%rsp), %r10" "\n movq %r10, _rip(%rip)" "\n movq %rsp, _rsp(%rip)" @@ -562,7 +746,7 @@ __asm__( "\n .text" "\n .globl _callit_stret" "\n _callit_stret:" -// save rsp and rip registers to variables +// save sp and return address to variables "\n movq (%rsp), %r10" "\n movq %r10, _rip(%rip)" "\n movq %rsp, _rsp(%rip)" @@ -577,10 +761,330 @@ __asm__( "\n jmpq *%rcx" ); -uintptr_t *getOffsets(void *symbol, const char *symname) + +// x86_64 + +#elif __i386__ + +#define OTOOL "/usr/bin/xcrun otool -arch i386 " + +typedef uint8_t insn_t; +#define BREAK_INSN ((insn_t)0xcc) // int3 + +uintptr_t eip = 0; +uintptr_t esp = 0; +uintptr_t ebx = 0; +uintptr_t ebp = 0; +uintptr_t edi = 0; +uintptr_t esi = 0; 
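Each per-architecture block here repeats the same recipe: _callit stashes the callee-saved registers and the return address, the planted break instruction raises SIGTRAP, and handle_exception() steps one frame with libunwind and checks that every stashed register is recovered exactly. The core libunwind sequence, shown standalone using the public entry points rather than the thread-state cast the test performs:

```c
#include <libunwind.h>
#include <stdio.h>

// Walk one frame up from the current function and report the caller's IP and SP --
// the same init/step/get_reg sequence handle_exception() performs on the trapped state.
static void show_caller(void)
{
    unw_context_t ctx;
    unw_cursor_t  curs;
    unw_word_t    ip, sp;

    unw_getcontext(&ctx);               // capture current register state
    unw_init_local(&curs, &ctx);        // begin an in-process unwind
    if (unw_step(&curs) > 0) {          // step to the caller's frame
        unw_get_reg(&curs, UNW_REG_IP, &ip);
        unw_get_reg(&curs, UNW_REG_SP, &sp);
        printf("caller ip=%#lx sp=%#lx\n", (unsigned long)ip, (unsigned long)sp);
    }
}

int main(void)
{
    show_caller();
    return 0;
}
```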
+uintptr_t espfix = 0; + +void handle_exception(i386_thread_state_t *state) +{ + unw_cursor_t curs; + unw_word_t reg; + int err; + int step; + + err = unw_init_local(&curs, (unw_context_t *)state); + testassert(!err); + + step = unw_step(&curs); + testassert(step == UNW_STEP_SUCCESS); + + err = unw_get_reg(&curs, UNW_REG_IP, ®); + testassert(!err); + testassert(reg == eip); + + err = unw_get_reg(&curs, UNW_X86_ESP, ®); + testassert(!err); + testassert(reg == esp); + + err = unw_get_reg(&curs, UNW_X86_EBX, ®); + testassert(!err); + testassert(reg == ebx); + + err = unw_get_reg(&curs, UNW_X86_EBP, ®); + testassert(!err); + testassert(reg == ebp); + + err = unw_get_reg(&curs, UNW_X86_EDI, ®); + testassert(!err); + testassert(reg == edi); + + err = unw_get_reg(&curs, UNW_X86_ESI, ®); + testassert(!err); + testassert(reg == esi); + + + // set thread state to unwound state + state->__eip = eip; + state->__esp = esp + espfix; + state->__ebx = ebx; + state->__ebp = ebp; + state->__edi = edi; + state->__esi = esi; + + caught = true; +} + + +void sigtrap(int sig, siginfo_t *info, void *cc) +{ + ucontext_t *uc = (ucontext_t *)cc; + mcontext_t mc = (mcontext_t)uc->uc_mcontext; + + testprintf(" handled\n"); + + testassert(sig == SIGTRAP); + testassert((uintptr_t)info->si_addr-1 == clobbered); + + handle_exception(&mc->__ss); + // handle_exception changed register state for continuation +} + +__asm__( +"\n .text" +"\n .globl _callit" +"\n _callit:" +// save sp and return address to variables +"\n call 1f" +"\n 1: popl %edx" +"\n movl (%esp), %eax" +"\n movl %eax, _eip-1b(%edx)" +"\n movl %esp, _esp-1b(%edx)" +"\n addl $4, _esp-1b(%edx)" // rewind to pre-call value +"\n movl $0, _espfix-1b(%edx)" +// save other non-volatile registers to variables +"\n movl %ebx, _ebx-1b(%edx)" +"\n movl %ebp, _ebp-1b(%edx)" +"\n movl %edi, _edi-1b(%edx)" +"\n movl %esi, _esi-1b(%edx)" +"\n jmpl *12(%esp)" + ); + +__asm__( +"\n .text" +"\n .globl _callit_stret" +"\n _callit_stret:" +// save sp and return address to variables +"\n call 1f" +"\n 1: popl %edx" +"\n movl (%esp), %eax" +"\n movl %eax, _eip-1b(%edx)" +"\n movl %esp, _esp-1b(%edx)" +"\n addl $4, _esp-1b(%edx)" // rewind to pre-call value +"\n movl $4, _espfix-1b(%edx)" +// save other non-volatile registers to variables +"\n movl %ebx, _ebx-1b(%edx)" +"\n movl %ebp, _ebp-1b(%edx)" +"\n movl %edi, _edi-1b(%edx)" +"\n movl %esi, _esi-1b(%edx)" +"\n jmpl *16(%esp)" + ); + + +// i386 +#elif __arm64__ + +#include + +// runs on iOS device, no xcrun command present +#define OTOOL "/usr/bin/otool -arch arm64 " + +typedef uint32_t insn_t; +#define BREAK_INSN ((insn_t)0xd4200020) // brk #1 + +uintptr_t x19 = 0; +uintptr_t x20 = 0; +uintptr_t x21 = 0; +uintptr_t x22 = 0; +uintptr_t x23 = 0; +uintptr_t x24 = 0; +uintptr_t x25 = 0; +uintptr_t x26 = 0; +uintptr_t x27 = 0; +uintptr_t x28 = 0; +uintptr_t fp = 0; +uintptr_t sp = 0; +uintptr_t pc = 0; + +void handle_exception(arm_thread_state64_t *state) +{ + unw_cursor_t curs; + unw_word_t reg; + int err; + int step; + + err = unw_init_local(&curs, (unw_context_t *)state); + testassert(!err); + + step = unw_step(&curs); + testassert(step == UNW_STEP_SUCCESS); + + err = unw_get_reg(&curs, UNW_ARM64_X19, ®); + testassert(!err); + testassert(reg == x19); + + err = unw_get_reg(&curs, UNW_ARM64_X20, ®); + testassert(!err); + testassert(reg == x20); + + err = unw_get_reg(&curs, UNW_ARM64_X21, ®); + testassert(!err); + testassert(reg == x21); + + err = unw_get_reg(&curs, UNW_ARM64_X22, ®); + testassert(!err); + testassert(reg == x22); + 
+ err = unw_get_reg(&curs, UNW_ARM64_X23, ®); + testassert(!err); + testassert(reg == x23); + + err = unw_get_reg(&curs, UNW_ARM64_X24, ®); + testassert(!err); + testassert(reg == x24); + + err = unw_get_reg(&curs, UNW_ARM64_X25, ®); + testassert(!err); + testassert(reg == x25); + + err = unw_get_reg(&curs, UNW_ARM64_X26, ®); + testassert(!err); + testassert(reg == x26); + + err = unw_get_reg(&curs, UNW_ARM64_X27, ®); + testassert(!err); + testassert(reg == x27); + + err = unw_get_reg(&curs, UNW_ARM64_X28, ®); + testassert(!err); + testassert(reg == x28); + + err = unw_get_reg(&curs, UNW_ARM64_FP, ®); + testassert(!err); + testassert(reg == fp); + + err = unw_get_reg(&curs, UNW_ARM64_SP, ®); + testassert(!err); + testassert(reg == sp); + + err = unw_get_reg(&curs, UNW_REG_IP, ®); + testassert(!err); + testassert(reg == pc); + + // libunwind restores PC into LR and doesn't track LR + // err = unw_get_reg(&curs, UNW_ARM64_LR, ®); + // testassert(!err); + // testassert(reg == lr); + + // set thread state to unwound state + state->__x[19] = x19; + state->__x[20] = x20; + state->__x[20] = x21; + state->__x[22] = x22; + state->__x[23] = x23; + state->__x[24] = x24; + state->__x[25] = x25; + state->__x[26] = x26; + state->__x[27] = x27; + state->__x[28] = x28; + state->__fp = fp; + state->__lr = pc; // libunwind restores PC into LR + state->__sp = sp; + state->__pc = pc; + + caught = true; +} + + +void sigtrap(int sig, siginfo_t *info, void *cc) +{ + ucontext_t *uc = (ucontext_t *)cc; + struct __darwin_mcontext64 *mc = (struct __darwin_mcontext64 *)uc->uc_mcontext; + + testprintf(" handled\n"); + + testassert(sig == SIGTRAP); + testassert((uintptr_t)info->si_addr == clobbered); + + handle_exception(&mc->__ss); + // handle_exception changed register state for continuation +} + + +__asm__( +"\n .text" +"\n .globl _callit" +"\n _callit:" +// save sp and return address to variables +"\n mov x16, sp" +"\n adrp x17, _sp@PAGE" +"\n str x16, [x17, _sp@PAGEOFF]" +"\n adrp x17, _pc@PAGE" +"\n str lr, [x17, _pc@PAGEOFF]" +// save other non-volatile registers to variables +"\n adrp x17, _x19@PAGE" +"\n str x19, [x17, _x19@PAGEOFF]" +"\n adrp x17, _x19@PAGE" +"\n str x20, [x17, _x20@PAGEOFF]" +"\n adrp x17, _x19@PAGE" +"\n str x21, [x17, _x21@PAGEOFF]" +"\n adrp x17, _x19@PAGE" +"\n str x22, [x17, _x22@PAGEOFF]" +"\n adrp x17, _x19@PAGE" +"\n str x23, [x17, _x23@PAGEOFF]" +"\n adrp x17, _x19@PAGE" +"\n str x24, [x17, _x24@PAGEOFF]" +"\n adrp x17, _x19@PAGE" +"\n str x25, [x17, _x25@PAGEOFF]" +"\n adrp x17, _x19@PAGE" +"\n str x26, [x17, _x26@PAGEOFF]" +"\n adrp x17, _x19@PAGE" +"\n str x27, [x17, _x27@PAGEOFF]" +"\n adrp x17, _x19@PAGE" +"\n str x28, [x17, _x28@PAGEOFF]" +"\n adrp x17, _x19@PAGE" +"\n str fp, [x17, _fp@PAGEOFF]" +"\n br x2" + ); + + +// arm64 +#else + +#error unknown architecture + +#endif + + +insn_t set(uintptr_t dst, insn_t newvalue) { - uintptr_t *result = (uintptr_t *)malloc(PAGE_SIZE * sizeof(uintptr_t)); - uintptr_t *end = result + PAGE_SIZE; + uintptr_t start = dst & ~(PAGE_MAX_SIZE-1); + mprotect((void*)start, PAGE_MAX_SIZE, PROT_READ|PROT_WRITE); + insn_t oldvalue = *(insn_t *)dst; + *(insn_t *)dst = newvalue; + mprotect((void*)start, PAGE_MAX_SIZE, PROT_READ|PROT_EXEC); + return oldvalue; +} + +insn_t clobber(void *fn, uintptr_t offset) +{ + clobbered = (uintptr_t)fn + offset; + return set((uintptr_t)fn + offset, BREAK_INSN); +} + +void unclobber(void *fn, uintptr_t offset, insn_t oldvalue) +{ + set((uintptr_t)fn + offset, oldvalue); +} + + +uintptr_t *getOffsets(void *symbol, 
const char *symname, uintptr_t *outBase) +{ + uintptr_t *result = (uintptr_t *)malloc(1000 * sizeof(uintptr_t)); + uintptr_t *end = result + 1000; uintptr_t *p = result; // find library @@ -591,8 +1095,11 @@ uintptr_t *getOffsets(void *symbol, const char *symname) unsetenv("DYLD_LIBRARY_PATH"); unsetenv("DYLD_ROOT_PATH"); unsetenv("DYLD_INSERT_LIBRARIES"); + unsetenv("DYLD_SHARED_REGION"); + unsetenv("DYLD_SHARED_CACHE_DIR"); + unsetenv("DYLD_SHARED_CACHE_DONT_VALIDATE"); char *cmd; - asprintf(&cmd, "/usr/bin/xcrun otool -arch x86_64 -tv -p _%s %s", + asprintf(&cmd, OTOOL "-tv -p _%s %s", symname, dl.dli_fname); testprintf("%s\n", cmd); FILE *disa = popen(cmd, "r"); @@ -621,17 +1128,39 @@ uintptr_t *getOffsets(void *symbol, const char *symname) } pclose(disa); +#if __arm64__ + // Also add breakpoints in _objc_msgSend_uncached_impcache + // (which is the slow path and has a frame to unwind) + if (0 != strcmp(symname, "_objc_msgSend_uncached_impcache")) { + uintptr_t base2; + uintptr_t *more_offsets = getOffsets(symbol, "_objc_msgSend_uncached_impcache", &base2); + uintptr_t *q = more_offsets; + // Skip prologue because it's imprecisely modeled in compact unwind + testassert(*q != ~0UL); + q++; + testassert(*q != ~0UL); + q++; + while (*q != ~0UL) *p++ = *q++ + base2 - base; + // Skip return because it's imprecisely modeled in compact unwind + p--; + free(more_offsets); + } +#endif + testassert(p > result); testassert(p < end); *p = ~0UL; +#if __x86_64__ // hack: skip last instruction because libunwind blows up if it's // one byte long and followed by the next function with no NOPs first - if (p > result) p[-1] = ~0UL; + p[-1] = ~0UL; +#endif + if (outBase) *outBase = base; return result; } -void CALLIT(void *o, void *sel_arg, SEL s, void *f) __attribute__((noinline)); -void CALLIT(void *o, void *sel_arg, SEL s, void *f) +void CALLIT(void *o, void *sel_arg, SEL s, void *f, bool stret) __attribute__((noinline)); +void CALLIT(void *o, void *sel_arg, SEL s, void *f, bool stret) { uintptr_t message_ref[2]; if (sel_arg != s) { @@ -640,10 +1169,22 @@ void CALLIT(void *o, void *sel_arg, SEL s, void *f) memcpy(message_ref, sel_arg, sizeof(message_ref)); sel_arg = message_ref; } - if (s == @selector(idret_nop)) callit(o, sel_arg, f); - else if (s == @selector(fpret_nop)) callit(o, sel_arg, f); - else if (s == @selector(stret_nop)) callit_stret(o, sel_arg, f); - else fail("test_dw selector"); + if (!stret) callit(o, sel_arg, f); +#if SUPPORT_STRET + else callit_stret(o, sel_arg, f); +#else + else fail("stret?"); +#endif +} + +void test_dw_forward(void) +{ + return; +} + +struct stret test_dw_forward_stret(void) +{ + return zero; } // sub = ordinary receiver object @@ -652,9 +1193,60 @@ void CALLIT(void *o, void *sel_arg, SEL s, void *f) // sub_arg = arg to pass in receiver register (may be objc_super struct) // tagged_arg = arg to pass in receiver register (may be objc_super struct) // sel_arg = arg to pass in sel register (may be message_ref) -void test_dw(const char *name, id sub, id tagged, SEL sel) +// uncaughtAllowed is the number of acceptable unreachable instructions +// (for example, the ones that handle the corrupt-cache-error case) +void test_dw(const char *name, id sub, id tagged, bool stret, + int uncaughtAllowed) { - testprintf("DWARF FOR %s\n", name); + SEL sel = @selector(a); + + testprintf("DWARF FOR %s%s\n", name, stret ? " (stret)" : ""); + + // We need 2 SELs of each alignment so we can generate hash collisions. 
+ // sel_registerName() never returns those alignments because they + // differ from malloc's alignment. So we create lots of compiled-in + // SELs here and hope something fits. + SEL lotsOfSels[] = { + @selector(a1), @selector(a2), @selector(a3), @selector(a4), + @selector(a5), @selector(a6), @selector(a7), @selector(a8), + @selector(aa), @selector(ab), @selector(ac), @selector(ad), + @selector(ae), @selector(af), @selector(ag), @selector(ah), + @selector(A1), @selector(A2), @selector(A3), @selector(A4), + @selector(A5), @selector(A6), @selector(A7), @selector(A8), + @selector(AA), @selector(Ab), @selector(Ac), @selector(Ad), + @selector(Ae), @selector(Af), @selector(Ag), @selector(Ah), + @selector(bb1), @selector(bb2), @selector(bb3), @selector(bb4), + @selector(bb5), @selector(bb6), @selector(bb7), @selector(bb8), + @selector(bba), @selector(bbb), @selector(bbc), @selector(bbd), + @selector(bbe), @selector(bbf), @selector(bbg), @selector(bbh), + @selector(BB1), @selector(BB2), @selector(BB3), @selector(BB4), + @selector(BB5), @selector(BB6), @selector(BB7), @selector(BB8), + @selector(BBa), @selector(BBb), @selector(BBc), @selector(BBd), + @selector(BBe), @selector(BBf), @selector(BBg), @selector(BBh), + @selector(ccc1), @selector(ccc2), @selector(ccc3), @selector(ccc4), + @selector(ccc5), @selector(ccc6), @selector(ccc7), @selector(ccc8), + @selector(ccca), @selector(cccb), @selector(cccc), @selector(cccd), + @selector(ccce), @selector(cccf), @selector(cccg), @selector(ccch), + @selector(CCC1), @selector(CCC2), @selector(CCC3), @selector(CCC4), + @selector(CCC5), @selector(CCC6), @selector(CCC7), @selector(CCC8), + @selector(CCCa), @selector(CCCb), @selector(CCCc), @selector(CCCd), + @selector(CCCe), @selector(CCCf), @selector(CCCg), @selector(CCCh), + }; + #define ALIGNCOUNT 16 + SEL sels[ALIGNCOUNT][2] = {{0}}; + for (int align = 0; align < ALIGNCOUNT; align++) { + for (size_t i = 0; i < sizeof(lotsOfSels)/sizeof(lotsOfSels[0]); i++) { + if ((uintptr_t)(void*)lotsOfSels[i] % ALIGNCOUNT == align) { + if (sels[align][0]) { + sels[align][1] = lotsOfSels[i]; + } else { + sels[align][0] = lotsOfSels[i]; + } + } + } + if (!sels[align][0]) fail("no SEL with alignment %d", align); + if (!sels[align][1]) fail("only one SEL with alignment %d", align); + } void *fn = dlsym(RTLD_DEFAULT, name); testassert(fn); @@ -669,8 +1261,12 @@ void test_dw(const char *name, id sub, id tagged, SEL sel) struct objc_super tagged_sup_st = { tagged, object_getClass(tagged) }; struct { void *imp; SEL sel; } message_ref = { fn, sel }; + Class cache_cls = object_getClass(sub); + if (strstr(name, "Super")) { // super version - replace receiver with objc_super + // clear caches of superclass + cache_cls = class_getSuperclass(cache_cls); sub_arg = &sup_st; tagged_arg = &tagged_sup_st; } @@ -681,60 +1277,109 @@ void test_dw(const char *name, id sub, id tagged, SEL sel) } - uintptr_t *insnOffsets = getOffsets(fn, name); - uintptr_t *offsetp = insnOffsets; + uintptr_t *insnOffsets = getOffsets(fn, name, nil); uintptr_t offset; - while ((offset = *offsetp++) != ~0UL) { + int uncaughtCount = 0; + for (int oo = 0; insnOffsets[oo] != ~0UL; oo++) { + offset = insnOffsets[oo]; testprintf("OFFSET %lu\n", offset); - uint8_t insn_byte = clobber(fn, offset); + insn_t saved_insn = clobber(fn, offset); caught = false; // nil if ((void*)objc_unretainedPointer(sub) == sub_arg) { SELF = nil; testprintf(" nil\n"); - CALLIT(nil, sel_arg, sel, fn); - CALLIT(nil, sel_arg, sel, fn); + CALLIT(nil, sel_arg, sel, fn, stret); + CALLIT(nil, 
sel_arg, sel, fn, stret); } // uncached SELF = sub; testprintf(" uncached\n"); - _objc_flush_caches(object_getClass(sub)); - CALLIT(sub_arg, sel_arg, sel, fn); - _objc_flush_caches(object_getClass(sub)); - CALLIT(sub_arg, sel_arg, sel, fn); + _objc_flush_caches(cache_cls); + CALLIT(sub_arg, sel_arg, sel, fn, stret); + _objc_flush_caches(cache_cls); + CALLIT(sub_arg, sel_arg, sel, fn, stret); // cached SELF = sub; testprintf(" cached\n"); - CALLIT(sub_arg, sel_arg, sel, fn); - CALLIT(sub_arg, sel_arg, sel, fn); + CALLIT(sub_arg, sel_arg, sel, fn, stret); + CALLIT(sub_arg, sel_arg, sel, fn, stret); // uncached,tagged SELF = tagged; testprintf(" uncached,tagged\n"); - _objc_flush_caches(object_getClass(tagged)); - CALLIT(tagged_arg, sel_arg, sel, fn); - _objc_flush_caches(object_getClass(tagged)); - CALLIT(tagged_arg, sel_arg, sel, fn); + _objc_flush_caches(cache_cls); + CALLIT(tagged_arg, sel_arg, sel, fn, stret); + _objc_flush_caches(cache_cls); + CALLIT(tagged_arg, sel_arg, sel, fn, stret); // cached,tagged SELF = tagged; testprintf(" cached,tagged\n"); - CALLIT(tagged_arg, sel_arg, sel, fn); - CALLIT(tagged_arg, sel_arg, sel, fn); + CALLIT(tagged_arg, sel_arg, sel, fn, stret); + CALLIT(tagged_arg, sel_arg, sel, fn, stret); + + // multiple SEL alignments, collisions, wraps + SELF = sub; + for (int a = 0; a < ALIGNCOUNT; a++) { + testprintf(" cached, SEL alignment %d\n", a); + + // Count both up and down to be independent of + // implementation's cache scan direction + + _objc_flush_caches(cache_cls); + for (int x2 = 0; x2 < 1; x2++) { + for (int s = 0; s < 4; s++) { + int align = (a+s) % ALIGNCOUNT; + CALLIT(sub_arg, sels[align][0], sels[align][0], fn, stret); + CALLIT(sub_arg, sels[align][1], sels[align][1], fn, stret); + } + } + + _objc_flush_caches(cache_cls); + for (int x2 = 0; x2 < 1; x2++) { + for (int s = 0; s < 4; s++) { + int align = abs(a-s) % ALIGNCOUNT; + CALLIT(sub_arg, sels[align][0], sels[align][0], fn, stret); + CALLIT(sub_arg, sels[align][1], sels[align][1], fn, stret); + } + } + } - unclobber(fn, offset, insn_byte); + unclobber(fn, offset, saved_insn); + + // remember offsets that were caught by none of the above + if (caught) { + insnOffsets[oo] = 0; + } else { + uncaughtCount++; + testprintf("offset %s+%lu not caught (%d/%d)\n", + name, offset, uncaughtCount, uncaughtAllowed); + } + } - // require at least one path above to trip this offset - if (!caught) fprintf(stderr, "OFFSET %s+%lu NOT CAUGHT\n", name, offset); + // Complain if too many offsets went uncaught. + // Acceptably-uncaught offsets include the corrupt-cache-error handler. + if (uncaughtCount != uncaughtAllowed) { + for (int oo = 0; insnOffsets[oo] != ~0UL; oo++) { + if (insnOffsets[oo]) { + fprintf(stderr, "BAD: offset %s+%lu not caught\n", + name, insnOffsets[oo]); + } + } + fail("wrong instructions not reached for %s (missed %d, expected %d)", + name, uncaughtCount, uncaughtAllowed); } + free(insnOffsets); } -// x86_64 + +// TEST_DWARF #endif @@ -761,27 +1406,32 @@ void test_basic(id receiver) // fixme verify that uncached lookup didn't happen the 2nd time? 
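Stripped of the receiver and selector permutations, the heart of test_dw() above is a clobber/call/restore loop over every disassembled instruction offset of the messenger; an offset only counts as covered if one of the calls actually hit the planted break and the SIGTRAP handler unwound cleanly. A condensed restatement (names as in the test; a fragment, not a self-contained program):

```c
// Condensed view of test_dw()'s per-offset loop.
for (int oo = 0; insnOffsets[oo] != ~0UL; oo++) {
    insn_t saved = clobber(fn, insnOffsets[oo]);  // plant BREAK_INSN at fn+offset
    caught = false;

    CALLIT(sub_arg, sel_arg, sel, fn, stret);     // trap -> sigtrap() -> handle_exception()

    unclobber(fn, insnOffsets[oo], saved);        // restore the original instruction
    if (caught) insnOffsets[oo] = 0;              // offset was reached and unwound
    else        uncaughtCount++;                  // tolerated only up to uncaughtAllowed
}
```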
SELF = receiver; for (int i = 0; i < 5; i++) { + testprintf("idret\n"); state = 0; idval = nil; idval = [receiver idret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0]; testassert(state == 101); testassert(idval == ID_RESULT); + testprintf("llret\n"); llval = 0; llval = [receiver llret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0]; testassert(state == 102); testassert(llval == LL_RESULT); + testprintf("stret\n"); stretval = zero; stretval = [receiver stret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0]; testassert(state == 103); testassert(stret_equal(stretval, STRET_RESULT)); + testprintf("fpret\n"); fpval = 0; fpval = [receiver fpret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0]; testassert(state == 104); testassert(fpval == FP_RESULT); + testprintf("lfpret\n"); lfpval = 0; lfpval = [receiver lfpret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0]; testassert(state == 105); @@ -790,12 +1440,14 @@ void test_basic(id receiver) #if __OBJC2__ // explicitly call noarg messenger, even if compiler doesn't emit it state = 0; + testprintf("idret noarg\n"); idval = nil; idval = ((typeof(idmsg0))objc_msgSend_noarg)(receiver, @selector(idret_noarg)); testassert(state == 111); testassert(idval == ID_RESULT); llval = 0; + testprintf("llret noarg\n"); llval = ((typeof(llmsg0))objc_msgSend_noarg)(receiver, @selector(llret_noarg)); testassert(state == 112); testassert(llval == LL_RESULT); @@ -808,12 +1460,14 @@ void test_basic(id receiver) testassert(stret_equal(stretval, STRET_RESULT)); */ # if !__i386__ + testprintf("fpret noarg\n"); fpval = 0; fpval = ((typeof(fpmsg0))objc_msgSend_noarg)(receiver, @selector(fpret_noarg)); testassert(state == 114); testassert(fpval == FP_RESULT); # endif # if !__i386__ && !__x86_64__ + testprintf("lfpret noarg\n"); lfpval = 0; lfpval = ((typeof(lfpmsg0))objc_msgSend_noarg)(receiver, @selector(lfpret_noarg)); testassert(state == 115); @@ -821,6 +1475,8 @@ void test_basic(id receiver) # endif #endif } + + testprintf("basic done\n"); } int main() @@ -911,16 +1567,19 @@ int main() SELF = sub; [sub voidret_nop]; + [sub voidret_nop2]; [sub llret_nop]; [sub stret_nop]; [sub fpret_nop]; [sub lfpret_nop]; [sub voidret_nop]; + [sub voidret_nop2]; [sub llret_nop]; [sub stret_nop]; [sub fpret_nop]; [sub lfpret_nop]; [sub voidret_nop]; + [sub voidret_nop2]; [sub llret_nop]; [sub stret_nop]; [sub fpret_nop]; @@ -930,24 +1589,30 @@ int main() // The errors we're trying to catch should be catastrophically slow, // so the margins here are generous to avoid false failures. + // Use voidret because id return is too slow for perf test with ARC. + + // Pick smallest of voidret_nop and voidret_nop2 time + // in the hopes that one of them didn't collide in the method cache. 
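The performance checks below compare raw mach_absolute_time() tick counts against each other, so no unit conversion is needed; if wall-clock units were ever wanted, the conventional conversion goes through mach_timebase_info(). A standalone sketch (the busy loop merely stands in for the message-send loop being timed):

```c
#include <mach/mach_time.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    mach_timebase_info_data_t tb;
    mach_timebase_info(&tb);                    // ratio of ticks to nanoseconds

    uint64_t t0 = mach_absolute_time();
    for (volatile int i = 0; i < 1000000; i++)  // stand-in for the measured work
        ;
    uint64_t t1 = mach_absolute_time();

    uint64_t ns = (t1 - t0) * tb.numer / tb.denom;
    printf("elapsed: %llu ns\n", (unsigned long long)ns);
    return 0;
}
```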
+ #define COUNT 1000000 + startTime = mach_absolute_time(); ALIGN_(); for (i = 0; i < COUNT; i++) { - [sub voidret_nop]; // id return is too slow for perf test with ARC + [sub voidret_nop]; } totalTime = mach_absolute_time() - startTime; - testprintf("time: idret %llu\n", totalTime); + testprintf("time: voidret %llu\n", totalTime); targetTime = totalTime; startTime = mach_absolute_time(); ALIGN_(); for (i = 0; i < COUNT; i++) { - [sub voidret_nop]; // id return is too slow for perf test with ARC + [sub voidret_nop2]; } totalTime = mach_absolute_time() - startTime; - testprintf("time: idret %llu\n", totalTime); - targetTime = totalTime; + testprintf("time: voidret2 %llu\n", totalTime); + if (totalTime < targetTime) targetTime = totalTime; startTime = mach_absolute_time(); ALIGN_(); @@ -980,6 +1645,13 @@ int main() } totalTime = mach_absolute_time() - startTime; timecheck("lfpret", totalTime, targetTime * 0.7, targetTime * 4.0); + +#if __arm64__ + // Removing this testwarn(), or changing voidret_nop to nop;ret, + // changes the voidret_nop and stret_nop times above by a factor of 2. + testwarn("rdar://13896922 nop;ret is faster than ret?"); +#endif + #undef COUNT // method_invoke @@ -1059,6 +1731,15 @@ int main() // no stret result guarantee for hand-written calls, even with clang #endif +#if __i386__ + // check struct-return address stack pop + for (int i = 0; i < 10000000; i++) { + state = 0; + ((struct stret (*)(id, SEL))objc_msgSend_stret) + (nil, @selector(stret_nop)); + } +#endif + state = 0; fpval = FP_RESULT; fpval = [(id)NIL_RECEIVER fpret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0]; @@ -1071,6 +1752,44 @@ int main() testassert(state == 0); testassert(lfpval == 0.0); + // message to nil, different struct types + // This verifies that ordinary objc_msgSend() erases enough registers + // for structs that return in registers. +#define TEST_NIL_STRUCT(i,n) \ + do { \ + struct stret_##i##n z; \ + bzero(&z, sizeof(z)); \ + [Super stret_i##n##_nonzero]; \ + [Super stret_d##n##_nonzero]; \ + struct stret_##i##n val = [(id)NIL_RECEIVER stret_##i##n##_zero]; \ + testassert(0 == memcmp(&z, &val, sizeof(val))); \ + } while (0) + + TEST_NIL_STRUCT(i,1); + TEST_NIL_STRUCT(i,2); + TEST_NIL_STRUCT(i,3); + TEST_NIL_STRUCT(i,4); + TEST_NIL_STRUCT(i,5); + TEST_NIL_STRUCT(i,6); + TEST_NIL_STRUCT(i,7); + TEST_NIL_STRUCT(i,8); + TEST_NIL_STRUCT(i,9); + +#if __i386__ + testwarn("rdar://16267205 i386 struct{float} and struct{double}"); +#else + TEST_NIL_STRUCT(d,1); +#endif + TEST_NIL_STRUCT(d,2); + TEST_NIL_STRUCT(d,3); + TEST_NIL_STRUCT(d,4); + TEST_NIL_STRUCT(d,5); + TEST_NIL_STRUCT(d,6); + TEST_NIL_STRUCT(d,7); + TEST_NIL_STRUCT(d,8); + TEST_NIL_STRUCT(d,9); + + #if __OBJC2__ // message to nil noarg // explicitly call noarg messenger, even if compiler doesn't emit it @@ -1132,7 +1851,7 @@ int main() testassert(sup_st.super_class == object_getClass(sub)); #endif -#if __OBJC2__ +#if __OBJC2__ && !__arm64__ // Debug messengers. 
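The TEST_NIL_STRUCT checks above depend on the messenger's nil path clearing every register a small aggregate could be returned in. The same guarantee can be observed from plain C by casting objc_msgSend directly, since a nil receiver short-circuits before any method lookup; in this sketch the selector name is arbitrary and never implemented anywhere:

```c
#include <objc/message.h>
#include <objc/runtime.h>
#include <string.h>
#include <assert.h>

struct two_words { unsigned long a, b; };   // small enough to return in registers

int main(void)
{
    SEL sel = sel_registerName("anySelectorAtAll");   // arbitrary, unimplemented selector
    struct two_words v =
        ((struct two_words (*)(id, SEL))objc_msgSend)(nil, sel);

    struct two_words zero;
    memset(&zero, 0, sizeof(zero));
    assert(0 == memcmp(&zero, &v, sizeof(v)));        // nil receiver -> zero-filled result
    return 0;
}
```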
testprintf("debug messengers\n"); @@ -1198,9 +1917,10 @@ int main() #endif -#if __x86_64__ && !__has_feature(objc_arc) +#if !TEST_DWARF + testwarn("no unwind tables in this configuration"); +#else // DWARF unwind tables - // Not for ARC because the extra RR calls hit the traps at the wrong times testprintf("unwind tables\n"); // install exception handler @@ -1210,20 +1930,38 @@ int main() act.sa_flags = SA_SIGINFO; sigaction(SIGTRAP, &act, NULL); - // use _nop methods because other methods make more calls - // which can die in the trapped messenger - - test_dw("objc_msgSend", sub,tagged,@selector(idret_nop)); - test_dw("objc_msgSend_stret", sub,tagged,@selector(stret_nop)); - test_dw("objc_msgSend_fpret", sub,tagged,@selector(fpret_nop)); - // fixme fp2ret - test_dw("objc_msgSendSuper", sub,tagged,@selector(idret_nop)); - test_dw("objc_msgSendSuper2", sub,tagged,@selector(idret_nop)); - test_dw("objc_msgSendSuper_stret", sub,tagged,@selector(stret_nop)); - test_dw("objc_msgSendSuper2_stret", sub,tagged,@selector(stret_nop)); + SubDW *dw = [[SubDW alloc] init]; + + objc_setForwardHandler((void*)test_dw_forward, (void*)test_dw_forward_stret); + +# if __x86_64__ + test_dw("objc_msgSend", dw, tagged, false, 2); + test_dw("objc_msgSend_stret", dw, tagged, true, 4); + test_dw("objc_msgSend_fpret", dw, tagged, false, 2); + test_dw("objc_msgSend_fp2ret", dw, tagged, false, 2); + test_dw("objc_msgSendSuper", dw, tagged, false, 2); + test_dw("objc_msgSendSuper2", dw, tagged, false, 2); + test_dw("objc_msgSendSuper_stret", dw, tagged, true, 4); + test_dw("objc_msgSendSuper2_stret", dw, tagged, true, 4); +# elif __i386__ + test_dw("objc_msgSend", dw, dw, false, 10); + test_dw("objc_msgSend_stret", dw, dw, true, 10); + test_dw("objc_msgSend_fpret", dw, dw, false, 10); + test_dw("objc_msgSendSuper", dw, dw, false, 10); + test_dw("objc_msgSendSuper2", dw, dw, false, 10); + test_dw("objc_msgSendSuper_stret", dw, dw, true, 10); + test_dw("objc_msgSendSuper2_stret", dw, dw, true, 10); +# elif __arm64__ + test_dw("objc_msgSend", dw, tagged, false, 2); + test_dw("objc_msgSendSuper", dw, tagged, false, 2); + test_dw("objc_msgSendSuper2", dw, tagged, false, 2); +# else +# error unknown architecture +# endif // DWARF unwind tables #endif + } POP_POOL; succeed(__FILE__); } diff --git a/test/nonpointerisa.m b/test/nonpointerisa.m new file mode 100644 index 0000000..83ba2ce --- /dev/null +++ b/test/nonpointerisa.m @@ -0,0 +1,223 @@ +// TEST_CFLAGS -framework Foundation +// TEST_CONFIG MEM=mrc + +#include "test.h" + +#if !__OBJC2__ + +int main() +{ + succeed(__FILE__); +} + +#else + +#include + +#include +#include + +#define ISA(x) (*((uintptr_t *)(x))) +#define INDEXED(x) (ISA(x) & 1) + +#if SUPPORT_NONPOINTER_ISA +# if __x86_64__ +# define RC_ONE (1ULL<<50) +# elif __arm64__ +# define RC_ONE (1ULL<<45) +# else +# error unknown architecture +# endif +#endif + + +void check_unindexed(id obj, Class cls) +{ + testassert(object_getClass(obj) == cls); + testassert(!INDEXED(obj)); + + uintptr_t isa = ISA(obj); + testassert((Class)isa == cls); + testassert((Class)(isa & objc_debug_isa_class_mask) == cls); + testassert((Class)(isa & ~objc_debug_isa_class_mask) == 0); + + CFRetain(obj); + testassert(ISA(obj) == isa); + testassert([obj retainCount] == 2); + [obj retain]; + testassert(ISA(obj) == isa); + testassert([obj retainCount] == 3); + CFRelease(obj); + testassert(ISA(obj) == isa); + testassert([obj retainCount] == 2); + [obj release]; + testassert(ISA(obj) == isa); + testassert([obj retainCount] == 1); +} + + +#if ! 
SUPPORT_NONPOINTER_ISA + +int main() +{ + testprintf("Isa with index\n"); + id index_o = [NSObject new]; + check_unindexed(index_o, [NSObject class]); + + // These variables DO exist even without non-pointer isa support + testassert(dlsym(RTLD_DEFAULT, "objc_debug_isa_class_mask")); + testassert(dlsym(RTLD_DEFAULT, "objc_debug_isa_magic_mask")); + testassert(dlsym(RTLD_DEFAULT, "objc_debug_isa_magic_value")); + + succeed(__FILE__); +} + +#else +// SUPPORT_NONPOINTER_ISA + +void check_indexed(id obj, Class cls) +{ + testassert(object_getClass(obj) == cls); + testassert(INDEXED(obj)); + + uintptr_t isa = ISA(obj); + testassert((Class)(isa & objc_debug_isa_class_mask) == cls); + testassert((Class)(isa & ~objc_debug_isa_class_mask) != 0); + testassert((isa & objc_debug_isa_magic_mask) == objc_debug_isa_magic_value); + + CFRetain(obj); + testassert(ISA(obj) == isa + RC_ONE); + testassert([obj retainCount] == 2); + [obj retain]; + testassert(ISA(obj) == isa + RC_ONE*2); + testassert([obj retainCount] == 3); + CFRelease(obj); + testassert(ISA(obj) == isa + RC_ONE); + testassert([obj retainCount] == 2); + [obj release]; + testassert(ISA(obj) == isa); + testassert([obj retainCount] == 1); +} + + +@interface OS_object ++(id)new; +@end + +@interface Fake_OS_object : NSObject { + int refcnt; + int xref_cnt; +} +@end + +@implementation Fake_OS_object ++(void)initialize { + static bool initialized; + if (!initialized) { + initialized = true; + testprintf("Indexed during +initialize\n"); + testassert(INDEXED(self)); + id o = [Fake_OS_object new]; + check_indexed(o, self); + [o release]; + } +} +@end + +@interface Sub_OS_object : OS_object @end + +@implementation Sub_OS_object +@end + + + +int main() +{ + uintptr_t isa; + + testprintf("Isa with index\n"); + id index_o = [Fake_OS_object new]; + check_indexed(index_o, [Fake_OS_object class]); + + testprintf("Weakly referenced\n"); + isa = ISA(index_o); + id weak; + objc_storeWeak(&weak, index_o); + testassert(__builtin_popcountl(isa ^ ISA(index_o)) == 1); + + testprintf("Has associated references\n"); + id assoc = @"thing"; + isa = ISA(index_o); + objc_setAssociatedObject(index_o, assoc, assoc, OBJC_ASSOCIATION_ASSIGN); + testassert(__builtin_popcountl(isa ^ ISA(index_o)) == 1); + + + testprintf("Isa without index\n"); + id unindex_o = [OS_object new]; + check_unindexed(unindex_o, [OS_object class]); + + + id buf[4]; + id bufo = (id)buf; + + testprintf("Change isa 0 -> unindexed\n"); + bzero(buf, sizeof(buf)); + object_setClass(bufo, [OS_object class]); + check_unindexed(bufo, [OS_object class]); + + testprintf("Change isa 0 -> indexed\n"); + bzero(buf, sizeof(buf)); + object_setClass(bufo, [NSObject class]); + check_indexed(bufo, [NSObject class]); + + testprintf("Change isa indexed -> indexed\n"); + testassert(INDEXED(bufo)); + _objc_rootRetain(bufo); + testassert(_objc_rootRetainCount(bufo) == 2); + object_setClass(bufo, [Fake_OS_object class]); + testassert(_objc_rootRetainCount(bufo) == 2); + _objc_rootRelease(bufo); + testassert(_objc_rootRetainCount(bufo) == 1); + check_indexed(bufo, [Fake_OS_object class]); + + testprintf("Change isa indexed -> unindexed\n"); + // Retain count must be preserved. + // Use root* to avoid OS_object's overrides. 
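check_indexed() and check_unindexed() come down to bit arithmetic on the raw isa word: the low bit marks a nonpointer ("indexed") isa, objc_debug_isa_class_mask recovers the class, and each retain adds RC_ONE to the remaining bits. A standalone restatement of that decoding (the bit layout is an implementation detail of this release, not API):

```c
#include <stdint.h>
#include <stdio.h>

// Exported by libobjc for debuggers and used directly by this test.
extern const uintptr_t objc_debug_isa_class_mask;

static void describe_isa(uintptr_t isa)
{
    if (isa & 1) {
        // Nonpointer isa: class bits plus packed flags and an inline retain count.
        printf("nonpointer isa: class %#lx, extra bits %#lx\n",
               (unsigned long)(isa &  objc_debug_isa_class_mask),
               (unsigned long)(isa & ~objc_debug_isa_class_mask));
    } else {
        // Plain isa: the whole word is the Class pointer.
        printf("raw class pointer %#lx\n", (unsigned long)isa);
    }
}

int main(void)
{
    describe_isa(0);   // illustrative input only
    return 0;
}
```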
+ testassert(INDEXED(bufo)); + _objc_rootRetain(bufo); + testassert(_objc_rootRetainCount(bufo) == 2); + object_setClass(bufo, [OS_object class]); + testassert(_objc_rootRetainCount(bufo) == 2); + _objc_rootRelease(bufo); + testassert(_objc_rootRetainCount(bufo) == 1); + check_unindexed(bufo, [OS_object class]); + + testprintf("Change isa unindexed -> indexed (doesn't happen)\n"); + testassert(!INDEXED(bufo)); + _objc_rootRetain(bufo); + testassert(_objc_rootRetainCount(bufo) == 2); + object_setClass(bufo, [Fake_OS_object class]); + testassert(_objc_rootRetainCount(bufo) == 2); + _objc_rootRelease(bufo); + testassert(_objc_rootRetainCount(bufo) == 1); + check_unindexed(bufo, [Fake_OS_object class]); + + testprintf("Change isa unindexed -> unindexed\n"); + testassert(!INDEXED(bufo)); + _objc_rootRetain(bufo); + testassert(_objc_rootRetainCount(bufo) == 2); + object_setClass(bufo, [Sub_OS_object class]); + testassert(_objc_rootRetainCount(bufo) == 2); + _objc_rootRelease(bufo); + testassert(_objc_rootRetainCount(bufo) == 1); + check_unindexed(bufo, [Sub_OS_object class]); + + + succeed(__FILE__); +} + +// SUPPORT_NONPOINTER_ISA +#endif + +// __OBJC2__ +#endif diff --git a/test/nscdtors.mm b/test/nscdtors.mm new file mode 100644 index 0000000..876f6b0 --- /dev/null +++ b/test/nscdtors.mm @@ -0,0 +1,6 @@ +// TEST_CONFIG +// test cdtors, with NSObject instead of TestRoot as the root class + +#define USE_FOUNDATION 1 +#include "cdtors.mm" + diff --git a/test/nsobject.m b/test/nsobject.m index e85ffe2..4a79f32 100644 --- a/test/nsobject.m +++ b/test/nsobject.m @@ -4,7 +4,7 @@ #import -@interface Sub : NSObject { } @end +@interface Sub : NSObject @end @implementation Sub +(id)allocWithZone:(NSZone *)zone { testprintf("in +[Sub alloc]\n"); @@ -16,11 +16,99 @@ } @end + +// These declarations and definitions can be used +// to check the compile-time type of an object. +@interface NSObject (Checker) +// fixme this isn't actually enforced ++(void)NSObjectInstance __attribute__((unavailable)); +@end +@implementation NSObject (Checker) +-(void)NSObjectInstance { } ++(void)NSObjectClass { } +@end +@interface Sub (Checker) +-(void)NSObjectInstance __attribute__((unavailable)); ++(void)NSObjectClass __attribute__((unavailable)); +@end +@implementation Sub (Checker) +-(void)SubInstance { } ++(void)SubClass { } +@end + int main() { PUSH_POOL { [[Sub new] autorelease]; } POP_POOL; + // Verify that dot syntax on class objects works with some instance methods + // (void)NSObject.self; fixme + (void)NSObject.class; + (void)NSObject.superclass; + (void)NSObject.hash; + (void)NSObject.description; + (void)NSObject.debugDescription; + + // Verify that some methods return the correct type. 
+ Class cls; + NSObject *nsobject = nil; + Sub *subobject = nil; + + cls = [NSObject self]; + cls = [Sub self]; + nsobject = [nsobject self]; + subobject = [subobject self]; + [[NSObject self] NSObjectClass]; + [[nsobject self] NSObjectInstance]; + [[Sub self] SubClass]; + [[subobject self] SubInstance]; + + // fixme + // cls = NSObject.self; + // cls = Sub.self; + // [NSObject.self NSObjectClass]; + // [nsobject.self NSObjectInstance]; + // [Sub.self SubClass]; + // [subobject.self SubInstance]; + + cls = [NSObject class]; + cls = [nsobject class]; + cls = [Sub class]; + cls = [subobject class]; + [[NSObject class] NSObjectClass]; + [[nsobject class] NSObjectClass]; + [[Sub class] SubClass]; + [[subobject class] SubClass]; + + cls = NSObject.class; + cls = nsobject.class; + cls = Sub.class; + cls = subobject.class; + [NSObject.class NSObjectClass]; + [nsobject.class NSObjectClass]; + [Sub.class SubClass]; + [subobject.class SubClass]; + + + cls = [NSObject superclass]; + cls = [nsobject superclass]; + cls = [Sub superclass]; + cls = [subobject superclass]; + [[NSObject superclass] NSObjectClass]; + [[nsobject superclass] NSObjectClass]; + [[Sub superclass] NSObjectClass]; + [[subobject superclass] NSObjectClass]; + + cls = NSObject.superclass; + cls = nsobject.superclass; + cls = Sub.superclass; + cls = subobject.superclass; + [NSObject.superclass NSObjectClass]; + [nsobject.superclass NSObjectClass]; + [Sub.superclass NSObjectClass]; + [subobject.superclass NSObjectClass]; + + succeed(__FILE__); } diff --git a/test/objectCopy.m b/test/objectCopy.m new file mode 100644 index 0000000..f757cb5 --- /dev/null +++ b/test/objectCopy.m @@ -0,0 +1,21 @@ +// TEST_CONFIG MEM=mrc,gc + +#include "test.h" +#include + +@interface Test : NSObject { + char bytes[16-sizeof(void*)]; +} +@end +@implementation Test +@end + + +int main() +{ + id o1 = [Test new]; + id o2 = object_copy(o1, 16); + testassert(malloc_size(o1) == 16); + testassert(malloc_size(o2) == 32); + succeed(__FILE__); +} diff --git a/test/protocol.m b/test/protocol.m index 4d0e28f..c68bd33 100644 --- a/test/protocol.m +++ b/test/protocol.m @@ -31,7 +31,6 @@ @property int i; @end - // Force some of Proto5's selectors out of address order rdar://10582325 SEL fn(int x) { if (x) return @selector(m12:); else return @selector(m22:); } @@ -61,6 +60,17 @@ SEL fn(int x) { if (x) return @selector(m12:); else return @selector(m22:); } @protocol ProtoEmpty @end +#if __OBJC2__ +#define TEST_SWIFT 1 +#define SwiftV1MangledName "_TtP6Module15SwiftV1Protocol_" +#endif + +#if TEST_SWIFT +__attribute__((objc_runtime_name(SwiftV1MangledName))) +@protocol SwiftV1Protocol +@end +#endif + @interface Super : TestRoot @end @implementation Super +(id)proto1ClassMethod { return self; } @@ -280,12 +290,7 @@ int main() sel_registerName("m42:") < sel_registerName("m41:") ); if (!_protocol_getMethodTypeEncoding(@protocol(Proto5), @selector(m11:), true, true)) { -#if __clang__ - testwarn("rdar://10492418 extended type encodings not present (is compiler old?)"); -#else - // extended type encodings quietly not supported - testwarn("rdar://10492418 extended type encodings not present (compiler is not clang?)"); -#endif + fail("rdar://10492418 extended type encodings not present (is compiler old?)"); } else { testassert(0 == strcmp(_protocol_getMethodTypeEncoding(@protocol(Proto5), @selector(m11:), true, true), types11)); testassert(0 == strcmp(_protocol_getMethodTypeEncoding(@protocol(Proto5), @selector(m12:), true, true), types12)); @@ -304,5 +309,12 @@ int main() 
testassert(0 == strcmp(_protocol_getMethodTypeEncoding(@protocol(Proto6), @selector(m41:), false, false), types41)); } +#if TEST_SWIFT + testassert(@protocol(SwiftV1Protocol) == objc_getProtocol("Module.SwiftV1Protocol")); + testassert(@protocol(SwiftV1Protocol) == objc_getProtocol(SwiftV1MangledName)); + testassert(0 == strcmp(protocol_getName(@protocol(SwiftV1Protocol)), "Module.SwiftV1Protocol")); + testassert(!objc_getProtocol("SwiftV1Protocol")); +#endif + succeed(__FILE__); } diff --git a/test/readClassPair.m b/test/readClassPair.m new file mode 100644 index 0000000..6f26331 --- /dev/null +++ b/test/readClassPair.m @@ -0,0 +1,77 @@ +// TEST_CONFIG + +#include "test.h" + +#if !__OBJC2__ + +int main() +{ + succeed(__FILE__) +} + +#else + +#include + +// Reuse evil-class-def.m as a non-evil class definition. + +#define EVIL_SUPER 0 +#define EVIL_SUPER_META 0 +#define EVIL_SUB 0 +#define EVIL_SUB_META 0 + +#define OMIT_SUPER 1 +#define OMIT_NL_SUPER 1 +#define OMIT_SUB 1 +#define OMIT_NL_SUB 1 + +#include "evil-class-def.m" + +int main() +{ + // This definition is ABI and is never allowed to change. + testassert(OBJC_MAX_CLASS_SIZE == 32*sizeof(void*)); + + struct objc_image_info ii = { 0, 0 }; + + // Read a root class. + testassert(!objc_getClass("Super")); + + extern intptr_t OBJC_CLASS_$_Super[OBJC_MAX_CLASS_SIZE/sizeof(void*)]; + Class Super = objc_readClassPair((__bridge Class)(void*)&OBJC_CLASS_$_Super, &ii); + testassert(Super); + + testassert(objc_getClass("Super") == Super); + testassert(0 == strcmp(class_getName(Super), "Super")); + testassert(class_getSuperclass(Super) == nil); + testassert(class_getClassMethod(Super, @selector(load))); + testassert(class_getInstanceMethod(Super, @selector(load))); + testassert(class_getInstanceVariable(Super, "super_ivar")); + testassert(class_getInstanceSize(Super) == sizeof(void*)); + [Super load]; + + // Read a non-root class. + testassert(!objc_getClass("Sub")); + + extern intptr_t OBJC_CLASS_$_Sub[OBJC_MAX_CLASS_SIZE/sizeof(void*)]; + intptr_t Sub2_buf[OBJC_MAX_CLASS_SIZE/sizeof(void*)]; + memcpy(Sub2_buf, &OBJC_CLASS_$_Sub, sizeof(Sub2_buf)); + Class Sub = objc_readClassPair((__bridge Class)(void*)&OBJC_CLASS_$_Sub, &ii); + testassert(Sub); + + testassert(0 == strcmp(class_getName(Sub), "Sub")); + testassert(objc_getClass("Sub") == Sub); + testassert(class_getSuperclass(Sub) == Super); + testassert(class_getClassMethod(Sub, @selector(load))); + testassert(class_getInstanceMethod(Sub, @selector(load))); + testassert(class_getInstanceVariable(Sub, "sub_ivar")); + testassert(class_getInstanceSize(Sub) == 2*sizeof(void*)); + [Sub load]; + + // Reading a class whose name already exists fails. + testassert(! 
objc_readClassPair((__bridge Class)(void*)Sub2_buf, &ii)); + + succeed(__FILE__); +} + +#endif diff --git a/test/rr-autorelease-fast.m b/test/rr-autorelease-fast.m index 6558b4d..ed81949 100644 --- a/test/rr-autorelease-fast.m +++ b/test/rr-autorelease-fast.m @@ -59,6 +59,12 @@ main() tmp = _objc_rootAutorelease(obj); #ifdef __arm__ asm volatile("mov r7, r7"); +#elif __arm64__ + asm volatile("mov fp, fp"); +#elif __x86_64__ + // nothing to do +#else +#error unknown architecture #endif tmp = objc_retainAutoreleasedReturnValue(tmp); testassert(!did_dealloc); @@ -82,8 +88,12 @@ main() tmp = _objc_rootAutorelease(obj); #ifdef __arm__ asm volatile("mov r6, r6"); +#elif __arm64__ + asm volatile("mov x6, x6"); #elif __x86_64__ asm volatile("mov %rdi, %rdi"); +#else +#error unknown architecture #endif tmp = objc_retainAutoreleasedReturnValue(tmp); testassert(!did_dealloc); diff --git a/test/rr-autorelease2.m b/test/rr-autorelease2.m index cb5c541..fd00441 100644 --- a/test/rr-autorelease2.m +++ b/test/rr-autorelease2.m @@ -9,12 +9,14 @@ # define RR_RETAIN(o) [o retain] # define RR_RELEASE(o) [o release] # define RR_AUTORELEASE(o) [o autorelease] +# define RR_RETAINCOUNT(o) [o retainCount] #else # define RR_PUSH() _objc_autoreleasePoolPush() # define RR_POP(p) _objc_autoreleasePoolPop(p) # define RR_RETAIN(o) _objc_rootRetain((id)o) # define RR_RELEASE(o) _objc_rootRelease((id)o) # define RR_AUTORELEASE(o) _objc_rootAutorelease((id)o) +# define RR_RETAINCOUNT(o) _objc_rootRetainCount((id)o) #endif #include @@ -97,6 +99,9 @@ void *autorelease_lots_fn(void *singlePool) id obj = RR_AUTORELEASE([[Deallocator alloc] init]); + // last pool has only 1 autorelease in it + pools[p++] = RR_PUSH(); + for (int i = 0; i < COUNT; i++) { if (rand() % 1000 == 0 && !singlePool) { pools[p++] = RR_PUSH(); @@ -110,6 +115,7 @@ void *autorelease_lots_fn(void *singlePool) RR_POP(pools[p]); } testassert(state == 0); + testassert(RR_RETAINCOUNT(obj) == 1); RR_POP(pools[0]); testassert(state == 1); free(pools); diff --git a/test/runtime.m b/test/runtime.m index 22f8e92..5700333 100644 --- a/test/runtime.m +++ b/test/runtime.m @@ -1,5 +1,6 @@ /* TEST_RUN_OUTPUT +objc\[\d+\]: class `SwiftV1Class\' not linked into application objc\[\d+\]: class `DoesNotExist\' not linked into application OK: runtime.m OR @@ -30,6 +31,21 @@ int main() @interface Sub : TestRoot @end @implementation Sub @end +#if __OBJC2__ +# define TEST_SWIFT 1 +#else +# define TEST_SWIFT 0 +#endif + +#define SwiftV1MangledName "_TtC6Module12SwiftV1Class" + +#if TEST_SWIFT +__attribute__((objc_runtime_name(SwiftV1MangledName))) +@interface SwiftV1Class : TestRoot @end +@implementation SwiftV1Class @end +#endif + + int main() { Class list[100]; @@ -38,6 +54,7 @@ int main() unsigned int i; int foundTestRoot; int foundSub; + int foundSwiftV1; const char **names; Dl_info info; @@ -47,54 +64,99 @@ int main() dladdr(&_mh_execute_header, &info); names = objc_copyClassNamesForImage(info.dli_fname, &count); testassert(names); +#if TEST_SWIFT + testassert(count == 3); +#else testassert(count == 2); +#endif testassert(names[count] == NULL); foundTestRoot = 0; foundSub = 0; + foundSwiftV1 = 0; for (i = 0; i < count; i++) { if (0 == strcmp(names[i], "TestRoot")) foundTestRoot++; if (0 == strcmp(names[i], "Sub")) foundSub++; + if (0 == strcmp(names[i], "Module.SwiftV1Class")) foundSwiftV1++; } testassert(foundTestRoot == 1); - testassert(foundSub == 1); - - + testassert(foundSub == 1); +#if TEST_SWIFT + testassert(foundSwiftV1 == 1); +#endif + + // class Sub hasn't been 
touched - make sure it's in the class list too count0 = objc_getClassList(NULL, 0); testassert(count0 >= 2 && count0 < 100); - + list[count0-1] = NULL; count = objc_getClassList(list, count0-1); testassert(list[count0-1] == NULL); testassert(count == count0); - + count = objc_getClassList(list, count0); testassert(count == count0); + + for (i = 0; i < count; i++) { + testprintf("%s\n", class_getName(list[i])); + } + foundTestRoot = 0; foundSub = 0; + foundSwiftV1 = 0; for (i = 0; i < count; i++) { if (0 == strcmp(class_getName(list[i]), "TestRoot")) foundTestRoot++; if (0 == strcmp(class_getName(list[i]), "Sub")) foundSub++; + if (0 == strcmp(class_getName(list[i]), "Module.SwiftV1Class")) foundSwiftV1++; // list should be non-meta classes only testassert(!class_isMetaClass(list[i])); } testassert(foundTestRoot == 1); testassert(foundSub == 1); +#if TEST_SWIFT + testassert(foundSwiftV1 == 1); +#endif // fixme check class handler testassert(objc_getClass("TestRoot") == [TestRoot class]); +#if TEST_SWIFT + testassert(objc_getClass("Module.SwiftV1Class") == [SwiftV1Class class]); + testassert(objc_getClass(SwiftV1MangledName) == [SwiftV1Class class]); +#endif + testassert(objc_getClass("SwiftV1Class") == nil); testassert(objc_getClass("DoesNotExist") == nil); testassert(objc_getClass(NULL) == nil); testassert(objc_getMetaClass("TestRoot") == object_getClass([TestRoot class])); +#if TEST_SWIFT + testassert(objc_getMetaClass("Module.SwiftV1Class") == object_getClass([SwiftV1Class class])); + testassert(objc_getMetaClass(SwiftV1MangledName) == object_getClass([SwiftV1Class class])); +#endif + testassert(objc_getMetaClass("SwiftV1Class") == nil); testassert(objc_getMetaClass("DoesNotExist") == nil); testassert(objc_getMetaClass(NULL) == nil); // fixme check class no handler testassert(objc_lookUpClass("TestRoot") == [TestRoot class]); +#if TEST_SWIFT + testassert(objc_lookUpClass("Module.SwiftV1Class") == [SwiftV1Class class]); + testassert(objc_lookUpClass(SwiftV1MangledName) == [SwiftV1Class class]); +#endif + testassert(objc_lookUpClass("SwiftV1Class") == nil); testassert(objc_lookUpClass("DoesNotExist") == nil); testassert(objc_lookUpClass(NULL) == nil); + testassert(! object_isClass(nil)); + testassert(! 
object_isClass([TestRoot new])); + testassert(object_isClass([TestRoot class])); + testassert(object_isClass(object_getClass([TestRoot class]))); + testassert(object_isClass([Sub class])); + testassert(object_isClass(object_getClass([Sub class]))); +#if TEST_SWIFT + testassert(object_isClass([SwiftV1Class class])); + testassert(object_isClass(object_getClass([SwiftV1Class class]))); +#endif + list2 = objc_copyClassList(&count2); testassert(count2 == count); testassert(list2); diff --git a/test/synchronized.m b/test/synchronized.m index baa8c3f..11922be 100644 --- a/test/synchronized.m +++ b/test/synchronized.m @@ -87,6 +87,7 @@ int main() testassert(err == OBJC_SYNC_SUCCESS); // should have waited more than WAIT_SEC but less than WAIT_SEC+1 // fixme hack: sleep(1) is ending 500 usec too early on x86_64 buildbot + // (rdar://6456975) testassert(end.tv_sec*1000000LL+end.tv_usec >= start.tv_sec*1000000LL+start.tv_usec + WAIT_SEC*1000000LL - 3*500 /*hack*/); diff --git a/test/taggedNSPointers.m b/test/taggedNSPointers.m index 13cc857..1d6570b 100644 --- a/test/taggedNSPointers.m +++ b/test/taggedNSPointers.m @@ -32,15 +32,15 @@ void testTaggedNumber() [dict setObject: @"bob" forKey: taggedNS]; testassert([@"bob" isEqualToString: [dict objectForKey: taggedNS]]); - NSNumber *i12345 = [NSNumber numberWithInt: 12345]; + NSNumber *iM88 = [NSNumber numberWithInt:-88]; NSNumber *i12346 = [NSNumber numberWithInt: 12346]; NSNumber *i12347 = [NSNumber numberWithInt: 12347]; - NSArray *anArray = [NSArray arrayWithObjects: i12345, i12346, i12347, nil]; + NSArray *anArray = [NSArray arrayWithObjects: iM88, i12346, i12347, nil]; testassert([anArray count] == 3); testassert([anArray indexOfObject: i12346] == 1); - NSSet *aSet = [NSSet setWithObjects: i12345, i12346, i12347, nil]; + NSSet *aSet = [NSSet setWithObjects: iM88, i12346, i12347, nil]; testassert([aSet count] == 3); testassert([aSet containsObject: i12346]); diff --git a/test/taggedPointers.m b/test/taggedPointers.m index d5cd4c8..a6d7be9 100644 --- a/test/taggedPointers.m +++ b/test/taggedPointers.m @@ -9,7 +9,7 @@ #if OBJC_HAVE_TAGGED_POINTERS -#if !__OBJC2__ || !__x86_64__ +#if !__OBJC2__ || (!__x86_64__ && !__arm64__) #error wrong architecture for tagged pointers #endif @@ -41,6 +41,8 @@ OBJC_ROOT_CLASS @end @implementation TaggedBaseClass +-(id) self { return self; } + + (void) initialize { } @@ -142,42 +144,64 @@ retaincount_fn(void *self, SEL _cmd __unused) { } @end -void testGenericTaggedPointer(objc_tag_index_t tag, const char *classname) +void testTaggedPointerValue(Class cls, objc_tag_index_t tag, uintptr_t value) { - testprintf("%s\n", classname); + void *taggedAddress = _objc_makeTaggedPointer(tag, value); + testprintf("obj %p, tag %p, value %p\n", + taggedAddress, (void*)tag, (void*)value); - Class cls = objc_getClass(classname); - testassert(cls); + // _objc_makeTaggedPointer must quietly mask out of range values for now + value = (value << 4) >> 4; - void *taggedAddress = _objc_makeTaggedPointer(tag, 1234); testassert(_objc_isTaggedPointer(taggedAddress)); testassert(_objc_getTaggedPointerTag(taggedAddress) == tag); - testassert(_objc_getTaggedPointerValue(taggedAddress) == 1234); + testassert(_objc_getTaggedPointerValue(taggedAddress) == value); testassert((uintptr_t)taggedAddress & objc_debug_taggedpointer_mask); uintptr_t slot = ((uintptr_t)taggedAddress >> objc_debug_taggedpointer_slot_shift) & objc_debug_taggedpointer_slot_mask; testassert(objc_debug_taggedpointer_classes[slot] == cls); - testassert((((uintptr_t)taggedAddress 
<< objc_debug_taggedpointer_payload_lshift) >> objc_debug_taggedpointer_payload_rshift) == 1234); - + testassert((((uintptr_t)taggedAddress << objc_debug_taggedpointer_payload_lshift) >> objc_debug_taggedpointer_payload_rshift) == value); + id taggedPointer = objc_unretainedObject(taggedAddress); + testassert(!object_isClass(taggedPointer)); testassert(object_getClass(taggedPointer) == cls); - testassert([taggedPointer taggedValue] == 1234); + testassert([taggedPointer taggedValue] == value); didIt = NO; [taggedPointer instanceMethod]; - testassert(didIt); + testassert(didIt); struct stret orig = STRET_RESULT; testassert(stret_equal(orig, [taggedPointer stret: orig])); - long double value = 3.14156789; - testassert(value == [taggedPointer fpret: value]); + long double dblvalue = 3.14156789; + testassert(dblvalue == [taggedPointer fpret: dblvalue]); + + objc_setAssociatedObject(taggedPointer, (__bridge void *)taggedPointer, taggedPointer, OBJC_ASSOCIATION_RETAIN); + testassert(objc_getAssociatedObject(taggedPointer, (__bridge void *)taggedPointer) == taggedPointer); + objc_setAssociatedObject(taggedPointer, (__bridge void *)taggedPointer, nil, OBJC_ASSOCIATION_RETAIN); + testassert(objc_getAssociatedObject(taggedPointer, (__bridge void *)taggedPointer) == nil); +} + +void testGenericTaggedPointer(objc_tag_index_t tag, Class cls) +{ + testassert(cls); + testprintf("%s\n", class_getName(cls)); + + testTaggedPointerValue(cls, tag, 0); + testTaggedPointerValue(cls, tag, 1UL << 0); + testTaggedPointerValue(cls, tag, 1UL << 1); + testTaggedPointerValue(cls, tag, 1UL << 58); + testTaggedPointerValue(cls, tag, 1UL << 59); + testTaggedPointerValue(cls, tag, ~0UL >> 4); + testTaggedPointerValue(cls, tag, ~0UL); // Tagged pointers should bypass refcount tables and autorelease pools // and weak reference tables WeakContainer *w = [WeakContainer new]; #if !__has_feature(objc_arc) // prime method caches before leak checking + id taggedPointer = (id)_objc_makeTaggedPointer(tag, 1234); [taggedPointer retain]; [taggedPointer release]; [taggedPointer autorelease]; @@ -257,17 +281,17 @@ int main() _objc_registerTaggedPointerClass(OBJC_TAG_1, objc_getClass("TaggedBaseClass")); testGenericTaggedPointer(OBJC_TAG_1, - "TaggedBaseClass"); + objc_getClass("TaggedBaseClass")); _objc_registerTaggedPointerClass(OBJC_TAG_7, objc_getClass("TaggedSubclass")); testGenericTaggedPointer(OBJC_TAG_7, - "TaggedSubclass"); + objc_getClass("TaggedSubclass")); _objc_registerTaggedPointerClass(OBJC_TAG_NSManagedObjectID, objc_getClass("TaggedNSObjectSubclass")); testGenericTaggedPointer(OBJC_TAG_NSManagedObjectID, - "TaggedNSObjectSubclass"); + objc_getClass("TaggedNSObjectSubclass")); } POP_POOL; succeed(__FILE__); @@ -283,7 +307,6 @@ int main() { #if __OBJC2__ testassert(objc_debug_taggedpointer_mask == 0); - testassert(!_objc_taggedPointersEnabled()); #else testassert(!dlsym(RTLD_DEFAULT, "objc_debug_taggedpointer_mask")); #endif diff --git a/test/tbi.c b/test/tbi.c new file mode 100644 index 0000000..2e16fef --- /dev/null +++ b/test/tbi.c @@ -0,0 +1,14 @@ +// TEST_CONFIG SDK=iphoneos ARCH=arm64 + +#include "test.h" + +#ifndef __arm64__ +#error wrong architecture for TBI hardware feature +#endif + +volatile int x = 123456; + +int main(void) { + testassert(*(int *)((unsigned long)&x | 0xFF00000000000000ul) == 123456); + succeed(__FILE__); +} diff --git a/test/test.h b/test/test.h index e5d52e2..c3abfb0 100644 --- a/test/test.h +++ b/test/test.h @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ 
-24,6 +25,21 @@ #include #include +// Configuration macros + +#if !__LP64__ || TARGET_OS_WIN32 || __OBJC_GC__ || TARGET_IPHONE_SIMULATOR || (TARGET_OS_MAC && !TARGET_OS_IPHONE) +# define SUPPORT_NONPOINTER_ISA 0 +#elif __x86_64__ +# define SUPPORT_NONPOINTER_ISA 1 +#elif __arm64__ +# define SUPPORT_NONPOINTER_ISA 1 +#else +# error unknown architecture +#endif + + +// Test output + static inline void succeed(const char *name) __attribute__((noreturn)); static inline void succeed(const char *name) { @@ -176,6 +192,8 @@ static inline void testonthread(__unsafe_unretained testblock_t code) `#define TEST_CALLS_OPERATOR_NEW` before including test.h. */ #if __cplusplus && !defined(TEST_CALLS_OPERATOR_NEW) +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Winline-new-delete" #import inline void* operator new(std::size_t) throw (std::bad_alloc) { fail("called global operator new"); } inline void* operator new[](std::size_t) throw (std::bad_alloc) { fail("called global operator new[]"); } @@ -185,6 +203,7 @@ inline void operator delete(void*) throw() { fail("called global operator delete inline void operator delete[](void*) throw() { fail("called global operator delete[]"); } inline void operator delete(void*, const std::nothrow_t&) throw() { fail("called global operator delete(nothrow)"); } inline void operator delete[](void*, const std::nothrow_t&) throw() { fail("called global operator delete[](nothrow)"); } +#pragma clang diagnostic pop #endif @@ -212,7 +231,12 @@ static inline size_t leak_inuse(void) malloc_zone_t *zone = (malloc_zone_t *)zones[i]; if (!zone->introspect || !zone->introspect->enumerator) continue; + // skip DispatchContinuations because it sometimes claims to be + // using lots of memory that then goes away later + if (0 == strcmp(zone->zone_name, "DispatchContinuations")) continue; + zone->introspect->enumerator(mach_task_self(), &inuse, MALLOC_PTR_IN_USE_RANGE_TYPE, (vm_address_t)zone, NULL, leak_recorder); + // fprintf(stderr, "%zu in use for zone %s\n", inuse, zone->zone_name); total += inuse; } diff --git a/test/test.pl b/test/test.pl index 50bb5a9..d8fb704 100755 --- a/test/test.pl +++ b/test/test.pl @@ -44,6 +44,7 @@ options: CC= + LANGUAGE=c,c++,objective-c,objective-c++,swift MEM=mrc,arc,gc STDLIB=libc++,libstdc++ GUARDMALLOC=0|1 @@ -82,8 +83,8 @@ my %ALL_TESTS; # things you can multiplex on the command line # ARCH=i386,x86_64,armv6,armv7 -# SDK=system,macosx,iphoneos,iphonesimulator -# LANGUAGE=c,c++,objective-c,objective-c++ +# SDK=macosx,iphoneos,iphonesimulator +# LANGUAGE=c,c++,objective-c,objective-c++,swift # CC=clang,gcc-4.2,llvm-gcc-4.2 # MEM=mrc,arc,gc # STDLIB=libc++,libstdc++ @@ -162,9 +163,10 @@ my %extensions_for_language = ( "c" => ["c"], "objective-c" => ["c", "m"], "c++" => ["c", "cc", "cp", "cpp", "cxx", "c++"], - "objective-c++" => ["c", "m", "cc", "cp", "cpp", "cxx", "c++", "mm"], + "objective-c++" => ["c", "m", "cc", "cp", "cpp", "cxx", "c++", "mm"], + "swift" => ["swift"], - "any" => ["c", "m", "cc", "cp", "cpp", "cxx", "c++", "mm"], + "any" => ["c", "m", "cc", "cp", "cpp", "cxx", "c++", "mm", "swift"], ); # map extension to languages @@ -177,6 +179,7 @@ my %languages_for_extension = ( "cpp" => ["c++", "objective-c++"], "cxx" => ["c++", "objective-c++"], "c++" => ["c++", "objective-c++"], + "swift" => ["swift"], ); # Run some newline-separated commands like `make` would, stopping if any fail @@ -250,11 +253,18 @@ sub cplusplus { return $c . "++"; # e.g. 
clang => clang++ } +# Turn a C compiler name into a Swift compiler name +sub swift { + my ($c) = @_; + $c =~ s#[^/]*$#swift#; + return $c; +} + # Returns an array of all sdks from `xcodebuild -showsdks` my @sdks_memo; sub getsdks { if (!@sdks_memo) { - @sdks_memo = ("system", `xcodebuild -showsdks` =~ /-sdk (.+)$/mg); + @sdks_memo = (`xcodebuild -showsdks` =~ /-sdk (.+)$/mg); } return @sdks_memo; } @@ -262,7 +272,6 @@ sub getsdks { # Returns whether the given sdk supports -lauto sub supportslibauto { my ($sdk) = @_; - return 1 if $sdk eq "system"; return 1 if $sdk =~ /^macosx/; return 0 if $sdk =~ /^iphone/; die; @@ -338,6 +347,7 @@ sub check_output { my $bad = ""; my $warn = ""; my $runerror = $T{TEST_RUN_OUTPUT}; + filter_hax(\@output); filter_verbose(\@output); $warn = filter_warn(\@output); $bad |= filter_guardmalloc(\@output) if ($C{GUARDMALLOC}); @@ -439,6 +449,20 @@ sub filter_verbose @$outputref = @new_output; } +sub filter_hax +{ + my $outputref = shift; + + my @new_output; + for my $line (@$outputref) { + if ($line !~ /Class OS_tcp_/) { + push @new_output, $line; + } + } + + @$outputref = @new_output; +} + sub filter_valgrind { my $outputref = shift; @@ -540,6 +564,40 @@ sub filter_guardmalloc return $bad; } +# TEST_SOMETHING +# text +# text +# END +sub extract_multiline { + my ($flag, $contents, $name) = @_; + if ($contents =~ /$flag\n/) { + my ($output) = ($contents =~ /$flag\n(.*?\n)END[ *\/]*\n/s); + die "$name used $flag without END\n" if !defined($output); + return $output; + } + return undef; +} + + +# TEST_SOMETHING +# text +# OR +# text +# END +sub extract_multiple_multiline { + my ($flag, $contents, $name) = @_; + if ($contents =~ /$flag\n/) { + my ($output) = ($contents =~ /$flag\n(.*?\n)END[ *\/]*\n/s); + die "$name used $flag without END\n" if !defined($output); + + $output =~ s/\nOR\n/\n|/sg; + $output = "^(" . $output . 
")\$"; + return $output; + } + return undef; +} + + sub gather_simple { my $CREF = shift; my %C = %{$CREF}; @@ -567,9 +625,9 @@ sub gather_simple { my ($conditionstring) = ($contents =~ /\bTEST_CONFIG\b(.*)$/m); my ($envstring) = ($contents =~ /\bTEST_ENV\b(.*)$/m); my ($cflags) = ($contents =~ /\bTEST_CFLAGS\b(.*)$/m); - my ($buildcmd) = ($contents =~ /TEST_BUILD\n(.*?\n)END[ *\/]*\n/s); - my ($builderror) = ($contents =~ /TEST_BUILD_OUTPUT\n(.*?\n)END[ *\/]*\n/s); - my ($runerror) = ($contents =~ /TEST_RUN_OUTPUT\n(.*?\n)END[ *\/]*\n/s); + my ($buildcmd) = extract_multiline("TEST_BUILD", $contents, $name); + my ($builderror) = extract_multiple_multiline("TEST_BUILD_OUTPUT", $contents, $name); + my ($runerror) = extract_multiple_multiline("TEST_RUN_OUTPUT", $contents, $name); return 0 if !$test_h && !$disabled && !$crashes && !defined($conditionstring) && !defined($envstring) && !defined($cflags) && !defined($buildcmd) && !defined($builderror) && !defined($runerror); @@ -599,11 +657,6 @@ sub gather_simple { next if !defined($testvalue); # testvalue is the configuration being run now # condvalues are the allowed values for this test - - # special case: look up the name of SDK "system" - if ($condkey eq "SDK" && $testvalue eq "system") { - $testvalue = systemsdkname(); - } my $ok = 0; for my $condvalue (@condvalues) { @@ -616,10 +669,9 @@ sub gather_simple { $ok = 1 if ($testvalue eq $condvalue); - # special case: SDK allows prefixes, and "system" is "macosx" + # special case: SDK allows prefixes if ($condkey eq "SDK") { $ok = 1 if ($testvalue =~ /^$condvalue/); - $ok = 1 if ($testvalue eq "system" && "macosx" =~ /^$condvalue/); } # special case: CC and CXX allow substring matches @@ -637,17 +689,6 @@ sub gather_simple { } } - # builderror is multiple REs separated by OR - if (defined $builderror) { - $builderror =~ s/\nOR\n/\n|/sg; - $builderror = "^(" . $builderror . ")\$"; - } - # runerror is multiple REs separated by OR - if (defined $runerror) { - $runerror =~ s/\nOR\n/\n|/sg; - $runerror = "^(" . $runerror . ")\$"; - } - # save some results for build and run phases $$CREF{"TEST_$name"} = { TEST_BUILD => $buildcmd, @@ -682,6 +723,9 @@ sub build_simple { my $output = make($cmd); + # rdar://10163155 + $output =~ s/ld: warning: could not create compact unwind for [^\n]+: does not use standard frame\n//g; + my $ok; if (my $builderror = $T{TEST_BUILD_OUTPUT}) { # check for expected output and ignore $? @@ -745,7 +789,8 @@ sub run_simple { # run on iOS device my $remotedir = "/var/root/test/" . basename($C{DIR}) . 
"/$name.build"; - my $remotedyld = " DYLD_LIBRARY_PATH=$remotedir"; + my $remotedyld = ""; + $remotedyld .= " DYLD_LIBRARY_PATH=$remotedir"; $remotedyld .= ":/var/root/test/" if ($C{TESTLIB} ne $TESTLIBPATH); # elide host-specific paths @@ -777,11 +822,7 @@ sub find_compiler { my $result = $compiler_memo{$key}; return $result if defined $result; - if ($sdk eq "system") { - $result = `xcrun -find $cc 2>/dev/null`; - } else { - $result = `xcrun -sdk $sdk -find $cc 2>/dev/null`; - } + $result = `xcrun -sdk $sdk -find $cc 2>/dev/null`; chomp $result; $compiler_memo{$key} = $result; @@ -829,10 +870,7 @@ sub make_one_config { (my $configdir = $configname) =~ s#/##g; $C{DIR} = "$BUILDDIR/$configdir"; - $C{SDK_PATH} = "/"; - if ($C{SDK} ne "system") { - ($C{SDK_PATH}) = (`xcodebuild -version -sdk $C{SDK} Path` =~ /^\s*(.+?)\s*$/); - } + ($C{SDK_PATH}) = (`xcodebuild -version -sdk $C{SDK} Path` =~ /^\s*(.+?)\s*$/); # Look up test library (possible in root or SDK_PATH) @@ -878,15 +916,19 @@ sub make_one_config { # Look up compilers my $cc = $C{CC}; my $cxx = cplusplus($C{CC}); + my $swift = swift($C{CC}); if (! $BUILD) { $C{CC} = $cc; $C{CXX} = $cxx; + $C{SWIFT} = $swift } else { $C{CC} = find_compiler($cc, $C{SDK}, $C{SDK_PATH}); $C{CXX} = find_compiler($cxx, $C{SDK}, $C{SDK_PATH}); + $C{SWIFT} = find_compiler($swift, $C{SDK}, $C{SDK_PATH}); die "No compiler '$cc' ('$C{CC}') in SDK '$C{SDK}'\n" if !-e $C{CC}; die "No compiler '$cxx' ('$C{CXX}') in SDK '$C{SDK}'\n" if !-e $C{CXX}; + die "No compiler '$swift' ('$C{SWIFT}') in SDK '$C{SDK}'\n" if !-e $C{SWIFT}; } # Populate cflags @@ -894,21 +936,31 @@ sub make_one_config { # save-temps so dsymutil works so debug info works my $cflags = "-I$DIR -W -Wall -Wno-deprecated-declarations -Wshorten-64-to-32 -g -save-temps -Os -arch $C{ARCH} "; my $objcflags = ""; + my $swiftflags = "-g "; - if ($C{SDK} ne "system") { - $cflags .= " -isysroot '$C{SDK_PATH}'"; - $cflags .= " '-Wl,-syslibroot,$C{SDK_PATH}'"; - } + $cflags .= " -isysroot '$C{SDK_PATH}'"; + $cflags .= " '-Wl,-syslibroot,$C{SDK_PATH}'"; + $swiftflags .= " -sdk '$C{SDK_PATH}'"; + my $target = ""; if ($C{SDK} =~ /^iphoneos[0-9]/ && $cflags !~ /-mios-version-min/) { - my ($vers) = ($C{SDK} =~ /^iphoneos([0-9]+\.[0-9+])/); + my ($vers) = ($C{SDK} =~ /^iphoneos([0-9]+\.[0-9]+)/); $cflags .= " -mios-version-min=$vers"; + $target = "$C{ARCH}-apple-ios$vers"; } - if ($C{SDK} =~ /^iphonesimulator[0-9]/ && $cflags !~ /-mios-simulator-version-min/) { - my ($vers) = ($C{SDK} =~ /^iphonesimulator([0-9]+\.[0-9+])/); + elsif ($C{SDK} =~ /^iphonesimulator[0-9]/ && $cflags !~ /-mios-simulator-version-min/) { + my ($vers) = ($C{SDK} =~ /^iphonesimulator([0-9]+\.[0-9]+)/); $cflags .= " -mios-simulator-version-min=$vers"; + $target = "$C{ARCH}-apple-ios$vers"; + } + else { + my ($vers) = ($C{SDK} =~ /^macosx([0-9]+\.[0-9]+)/); + $vers = "" if !defined($vers); + $target = "$C{ARCH}-apple-macosx$vers"; } - if ($C{SDK} =~ /^iphonesimulator/) { + $swiftflags .= " -target $target"; + + if ($C{SDK} =~ /^iphonesimulator/ && $C{ARCH} eq "i386") { $objcflags .= " -fobjc-abi-version=2 -fobjc-legacy-dispatch"; } @@ -926,7 +978,8 @@ sub make_one_config { if ($C{CC} =~ /clang/) { $cflags .= " -Qunused-arguments -fno-caret-diagnostics"; - $cflags .= " -stdlib=$C{STDLIB} -fno-objc-link-runtime"; + $cflags .= " -stdlib=$C{STDLIB}"; # fixme -fno-objc-link-runtime" + $cflags .= " -Wl,-segalign,0x4000 "; } @@ -980,11 +1033,13 @@ sub make_one_config { $C{COMPILE_CXX} = "env LANG=C '$C{CXX}' $cflags -x c++"; $C{COMPILE_M} = "env LANG=C 
'$C{CC}' $cflags $objcflags -x objective-c -std=gnu99"; $C{COMPILE_MM} = "env LANG=C '$C{CXX}' $cflags $objcflags -x objective-c++"; + $C{COMPILE_SWIFT} = "env LANG=C '$C{SWIFT}' $swiftflags"; - $C{COMPILE} = $C{COMPILE_C} if $C{LANGUAGE} eq "c"; - $C{COMPILE} = $C{COMPILE_CXX} if $C{LANGUAGE} eq "c++"; - $C{COMPILE} = $C{COMPILE_M} if $C{LANGUAGE} eq "objective-c"; - $C{COMPILE} = $C{COMPILE_MM} if $C{LANGUAGE} eq "objective-c++"; + $C{COMPILE} = $C{COMPILE_C} if $C{LANGUAGE} eq "c"; + $C{COMPILE} = $C{COMPILE_CXX} if $C{LANGUAGE} eq "c++"; + $C{COMPILE} = $C{COMPILE_M} if $C{LANGUAGE} eq "objective-c"; + $C{COMPILE} = $C{COMPILE_MM} if $C{LANGUAGE} eq "objective-c++"; + $C{COMPILE} = $C{COMPILE_SWIFT} if $C{LANGUAGE} eq "swift"; die "unknown language '$C{LANGUAGE}'\n" if !defined $C{COMPILE}; ($C{COMPILE_NOMEM} = $C{COMPILE}) =~ s/ -fobjc-(?:gc|arc)\S*//g; @@ -1019,6 +1074,13 @@ sub make_one_config { return 0; } + # fixme + if ($C{LANGUAGE} eq "swift" && $C{ARCH} =~ /^arm/) { + print "note: skipping configuration $C{NAME}\n"; + print "note: because ARCH=$C{ARCH} does not support LANGAUGE=SWIFT\n"; + return 0; + } + %$configref = %C; } @@ -1122,11 +1184,7 @@ sub run_one_config { # upload library to iOS device if ($C{TESTLIB} ne $TESTLIBPATH) { - # hack - send thin library because device may use lib=armv7 - # even though app=armv6, and we want to set the lib's arch - make("xcrun -sdk $C{SDK} lipo -output /tmp/$TESTLIBNAME -thin $C{ARCH} $C{TESTLIB} || cp $C{TESTLIB} /tmp/$TESTLIBNAME"); - die "Couldn't thin $C{TESTLIB} to $C{ARCH}\n" if ($?); - make("RSYNC_PASSWORD=alpine rsync -av /tmp/$TESTLIBNAME rsync://root\@localhost:10873/root/var/root/test/"); + make("RSYNC_PASSWORD=alpine rsync -av $C{TESTLIB} rsync://root\@localhost:10873/root/var/root/test/"); die "Couldn't rsync $C{TESTLIB} to device\n" if ($?); make("RSYNC_PASSWORD=alpine rsync -av $C{TESTDSYM} rsync://root\@localhost:10873/root/var/root/test/"); } @@ -1161,7 +1219,7 @@ sub getargs { return [split ',', $value] if defined $value; } - return [$default]; + return [split ',', $default]; } # Return 1 or 0 if set by "$argname=1" or "$argname=0" on the @@ -1196,10 +1254,10 @@ my $default_arch = (`/usr/sbin/sysctl hw.optional.x86_64` eq "hw.optional.x86_64 $args{ARCH} = getargs("ARCH", 0); $args{ARCH} = getargs("ARCHS", $default_arch) if !@{$args{ARCH}}[0]; -$args{SDK} = getargs("SDK", "system"); +$args{SDK} = getargs("SDK", "macosx"); $args{MEM} = getargs("MEM", "mrc"); -$args{LANGUAGE} = [ map { lc($_) } @{getargs("LANGUAGE", "objective-c")} ]; +$args{LANGUAGE} = [ map { lc($_) } @{getargs("LANGUAGE", "objective-c,swift")} ]; $args{STDLIB} = getargs("STDLIB", "libstdc++"); $args{CC} = getargs("CC", "clang"); diff --git a/test/unload.m b/test/unload.m index 81ce9ad..4296b3a 100644 --- a/test/unload.m +++ b/test/unload.m @@ -116,7 +116,7 @@ int main() objc_setForwardHandler((void*)&forward_handler, (void*)&forward_handler); -#if defined(__arm__) +#if defined(__arm__) || defined(__arm64__) int count = 10; #else int count = is_guardmalloc() ? 
10 : 100; diff --git a/test/unwind.m b/test/unwind.m index 3216880..7fa1159 100644 --- a/test/unwind.m +++ b/test/unwind.m @@ -56,10 +56,6 @@ static void handler(id unused __unused, void *ctx __unused) int main() { -#if TARGET_IPHONE_SIMULATOR - testwarn(" Simulator: cannot throw exceptions across objc_msgSend"); - succeed(__FILE__); -#else int i; // unwind exception and alt handler through objc_msgSend() @@ -92,7 +88,6 @@ int main() } POP_POOL; succeed(__FILE__); -#endif } #endif diff --git a/test/zone.m b/test/zone.m index c151276..46ec5ea 100644 --- a/test/zone.m +++ b/test/zone.m @@ -28,7 +28,7 @@ int main() for (i = 0; i < count; i++) { const char *name = malloc_get_zone_name((malloc_zone_t *)zones[i]); if (name) { - BOOL is_objc = (0 == strcmp(name, "ObjC")) ? YES : NO; + BOOL is_objc = (0 == strcmp(name, "ObjC_Internal")) ? YES : NO; if (is_objc) has_objc = YES; testprintf("zone %s\n", name); } diff --git a/unexported_symbols b/unexported_symbols index e0856c9..73f77a8 100644 --- a/unexported_symbols +++ b/unexported_symbols @@ -9,5 +9,9 @@ __ZdlPv __ZdlPvRKSt9nothrow_t __ZTISt9bad_alloc __ZTISt9exception +__ZTISt11logic_error +__ZTISt12length_error __ZTSSt9bad_alloc __ZTSSt9exception +__ZTSSt11logic_error +__ZTSSt12length_error -- 2.47.2
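The taggedPointers.m hunks above rework the test to parameterize the payload: testTaggedPointerValue() now asserts that an arbitrary value survives the ((uintptr_t)ptr << objc_debug_taggedpointer_payload_lshift) >> objc_debug_taggedpointer_payload_rshift round trip, that the tag registered with _objc_registerTaggedPointerClass() resolves back to the expected class, and that associated objects work on tagged pointers. The sketch below restates the core check as a standalone program. It is illustrative context only, not part of the patch; it assumes a 64-bit runtime with tagged pointers enabled, the private SPI headers the test itself uses (<objc/objc-internal.h>, <objc/objc-gdb.h>), and a purely hypothetical pairing of OBJC_TAG_1 with NSObject.

/* tagged-sketch.c -- tagged pointer payload round trip (illustrative only) */
#include <objc/objc-internal.h>  /* _objc_makeTaggedPointer, _objc_registerTaggedPointerClass (SPI) */
#include <objc/objc-gdb.h>       /* objc_debug_taggedpointer_* debug globals (SPI) */
#include <objc/runtime.h>
#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* Hypothetical tag/class pairing, chosen only for illustration. */
    objc_tag_index_t tag = OBJC_TAG_1;
    Class cls = objc_getClass("NSObject");
    _objc_registerTaggedPointerClass(tag, cls);

    uintptr_t value = 1UL << 59;
    uintptr_t addr  = (uintptr_t)_objc_makeTaggedPointer(tag, value);

    /* The payload comes back out by shifting off the tag bits, as the test asserts. */
    assert(((addr << objc_debug_taggedpointer_payload_lshift)
                  >> objc_debug_taggedpointer_payload_rshift) == value);

    /* The tag index maps back to the class registered above. */
    assert(object_getClass((id)addr) == cls);
    return 0;
}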
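Similarly, the new extract_multiline() and extract_multiple_multiline() helpers in test.pl centralize the parsing of TEST_BUILD, TEST_BUILD_OUTPUT, and TEST_RUN_OUTPUT blocks: every line between the flag and END is captured, OR lines separate alternative expected outputs, and the alternatives are folded into a single anchored pattern of the form ^(...)$. A hypothetical test source could therefore declare its expected run output as follows; the markers sit inside an ordinary comment, each line is treated as a regular expression, and the file name and messages shown here are invented for illustration.

/* Hypothetical expected-output block in the form extract_multiple_multiline() parses.
TEST_RUN_OUTPUT
OK: example\.m
OR
skipped: example\.m
END
*/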