#define OBJC_IMAGE_SUPPORTS_GC (1<<1)
#define OBJC_IMAGE_REQUIRES_GC (1<<2)
#define OBJC_IMAGE_OPTIMIZED_BY_DYLD (1<<3)
-#define OBJC_IMAGE_SUPPORTS_COMPACTION (1<<4)
bool debug;
bool verbose;
ii->flags = OSSwapInt32(ii->flags);
}
if (debug) printf("flags->%x, nitems %lu\n", ii->flags, size/sizeof(struct imageInfo));
- uint32_t support_mask = (OBJC_IMAGE_SUPPORTS_GC | OBJC_IMAGE_SUPPORTS_COMPACTION);
+ uint32_t support_mask = OBJC_IMAGE_SUPPORTS_GC;
uint32_t flags = ii->flags;
if (patch && (flags & support_mask) != support_mask) {
//printf("will patch %s at offset %p\n", FileName, (char*)(&ii->flags) - FileBase);
831C85D50E10CF850066E64C /* objc-os.h in Headers */ = {isa = PBXBuildFile; fileRef = 831C85D30E10CF850066E64C /* objc-os.h */; };
831C85D60E10CF850066E64C /* objc-os.mm in Sources */ = {isa = PBXBuildFile; fileRef = 831C85D40E10CF850066E64C /* objc-os.mm */; };
834266D80E665A8B002E4DA2 /* objc-gdb.h in Headers */ = {isa = PBXBuildFile; fileRef = 834266D70E665A8B002E4DA2 /* objc-gdb.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 834DF8B715993EE1002F2BC9 /* objc-sel-old.mm in Sources */ = {isa = PBXBuildFile; fileRef = 834DF8B615993EE1002F2BC9 /* objc-sel-old.mm */; };
834EC0A411614167009B2563 /* objc-abi.h in Headers */ = {isa = PBXBuildFile; fileRef = 834EC0A311614167009B2563 /* objc-abi.h */; settings = {ATTRIBUTES = (Private, ); }; };
83725F4A14CA5BFA0014370E /* objc-opt.mm in Sources */ = {isa = PBXBuildFile; fileRef = 83725F4914CA5BFA0014370E /* objc-opt.mm */; };
83725F4C14CA5C210014370E /* objc-opt.mm in Sources */ = {isa = PBXBuildFile; fileRef = 83725F4914CA5BFA0014370E /* objc-opt.mm */; };
8383A3AE122600FB009290B8 /* hashtable2.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485B80D6D687300CEA253 /* hashtable2.mm */; };
8383A3AF122600FB009290B8 /* maptable.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485BC0D6D687300CEA253 /* maptable.mm */; };
8383A3B0122600FB009290B8 /* objc-accessors.mm in Sources */ = {isa = PBXBuildFile; fileRef = 830F2A930D73876100392440 /* objc-accessors.mm */; };
- 8383A3B1122600FB009290B8 /* objc-auto.m in Sources */ = {isa = PBXBuildFile; fileRef = 838485CA0D6D68A200CEA253 /* objc-auto.m */; };
- 8383A3B2122600FB009290B8 /* objc-auto-dump.m in Sources */ = {isa = PBXBuildFile; fileRef = BC07A0100EF72D9C0014EC61 /* objc-auto-dump.m */; };
+ 8383A3B1122600FB009290B8 /* objc-auto.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485CA0D6D68A200CEA253 /* objc-auto.mm */; };
+ 8383A3B2122600FB009290B8 /* objc-auto-dump.mm in Sources */ = {isa = PBXBuildFile; fileRef = BC07A0100EF72D9C0014EC61 /* objc-auto-dump.mm */; };
8383A3B3122600FB009290B8 /* objc-block-trampolines.mm in Sources */ = {isa = PBXBuildFile; fileRef = E8923DA0116AB2820071B552 /* objc-block-trampolines.mm */; };
8383A3B4122600FB009290B8 /* objc-cache.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485CB0D6D68A200CEA253 /* objc-cache.mm */; };
- 8383A3B5122600FB009290B8 /* objc-class-old.m in Sources */ = {isa = PBXBuildFile; fileRef = 838485CC0D6D68A200CEA253 /* objc-class-old.m */; };
+ 8383A3B5122600FB009290B8 /* objc-class-old.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485CC0D6D68A200CEA253 /* objc-class-old.mm */; };
8383A3B6122600FB009290B8 /* objc-class.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485CE0D6D68A200CEA253 /* objc-class.mm */; };
8383A3B7122600FB009290B8 /* objc-errors.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485D00D6D68A200CEA253 /* objc-errors.mm */; };
8383A3B8122600FB009290B8 /* objc-exception.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485D20D6D68A200CEA253 /* objc-exception.mm */; settings = {COMPILER_FLAGS = "-fexceptions"; }; };
8383A3B9122600FB009290B8 /* objc-file.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485D30D6D68A200CEA253 /* objc-file.mm */; };
- 8383A3BA122600FB009290B8 /* objc-file-old.m in Sources */ = {isa = PBXBuildFile; fileRef = 83BE02E30FCCB23400661494 /* objc-file-old.m */; };
+ 8383A3BA122600FB009290B8 /* objc-file-old.mm in Sources */ = {isa = PBXBuildFile; fileRef = 83BE02E30FCCB23400661494 /* objc-file-old.mm */; };
8383A3BB122600FB009290B8 /* objc-initialize.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485D50D6D68A200CEA253 /* objc-initialize.mm */; };
8383A3BC122600FB009290B8 /* objc-layout.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485D60D6D68A200CEA253 /* objc-layout.mm */; };
8383A3BD122600FB009290B8 /* objc-load.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485D80D6D68A200CEA253 /* objc-load.mm */; };
8383A3C0122600FB009290B8 /* objc-os.mm in Sources */ = {isa = PBXBuildFile; fileRef = 831C85D40E10CF850066E64C /* objc-os.mm */; };
8383A3C1122600FB009290B8 /* objc-references.mm in Sources */ = {isa = PBXBuildFile; fileRef = 393CEABF0DC69E3E000B69DE /* objc-references.mm */; };
8383A3C3122600FB009290B8 /* objc-runtime-new.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485E10D6D68A200CEA253 /* objc-runtime-new.mm */; };
- 8383A3C4122600FB009290B8 /* objc-runtime-old.m in Sources */ = {isa = PBXBuildFile; fileRef = 838485E20D6D68A200CEA253 /* objc-runtime-old.m */; };
+ 8383A3C4122600FB009290B8 /* objc-runtime-old.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485E20D6D68A200CEA253 /* objc-runtime-old.mm */; };
8383A3C5122600FB009290B8 /* objc-runtime.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485E40D6D68A200CEA253 /* objc-runtime.mm */; };
8383A3C6122600FB009290B8 /* objc-sel-set.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485E60D6D68A200CEA253 /* objc-sel-set.mm */; };
8383A3C7122600FB009290B8 /* objc-sel-table.s in Sources */ = {isa = PBXBuildFile; fileRef = 83EB007A121C9EC200B92C16 /* objc-sel-table.s */; settings = {COMPILER_FLAGS = " -Qunused-arguments"; }; };
838485C40D6D687300CEA253 /* maptable.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485BC0D6D687300CEA253 /* maptable.mm */; };
838485EF0D6D68A200CEA253 /* objc-api.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485C80D6D68A200CEA253 /* objc-api.h */; settings = {ATTRIBUTES = (Public, ); }; };
838485F00D6D68A200CEA253 /* objc-auto.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485C90D6D68A200CEA253 /* objc-auto.h */; settings = {ATTRIBUTES = (Public, ); }; };
- 838485F10D6D68A200CEA253 /* objc-auto.m in Sources */ = {isa = PBXBuildFile; fileRef = 838485CA0D6D68A200CEA253 /* objc-auto.m */; };
+ 838485F10D6D68A200CEA253 /* objc-auto.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485CA0D6D68A200CEA253 /* objc-auto.mm */; };
838485F20D6D68A200CEA253 /* objc-cache.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485CB0D6D68A200CEA253 /* objc-cache.mm */; };
- 838485F30D6D68A200CEA253 /* objc-class-old.m in Sources */ = {isa = PBXBuildFile; fileRef = 838485CC0D6D68A200CEA253 /* objc-class-old.m */; };
+ 838485F30D6D68A200CEA253 /* objc-class-old.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485CC0D6D68A200CEA253 /* objc-class-old.mm */; };
838485F40D6D68A200CEA253 /* objc-class.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485CD0D6D68A200CEA253 /* objc-class.h */; settings = {ATTRIBUTES = (Public, ); }; };
838485F50D6D68A200CEA253 /* objc-class.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485CE0D6D68A200CEA253 /* objc-class.mm */; };
838485F60D6D68A200CEA253 /* objc-config.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485CF0D6D68A200CEA253 /* objc-config.h */; };
838486030D6D68A200CEA253 /* objc-private.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485DC0D6D68A200CEA253 /* objc-private.h */; };
838486070D6D68A200CEA253 /* objc-runtime-new.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485E00D6D68A200CEA253 /* objc-runtime-new.h */; };
838486080D6D68A200CEA253 /* objc-runtime-new.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485E10D6D68A200CEA253 /* objc-runtime-new.mm */; };
- 838486090D6D68A200CEA253 /* objc-runtime-old.m in Sources */ = {isa = PBXBuildFile; fileRef = 838485E20D6D68A200CEA253 /* objc-runtime-old.m */; };
+ 838486090D6D68A200CEA253 /* objc-runtime-old.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485E20D6D68A200CEA253 /* objc-runtime-old.mm */; };
8384860A0D6D68A200CEA253 /* objc-runtime.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485E30D6D68A200CEA253 /* objc-runtime.h */; settings = {ATTRIBUTES = (Public, ); }; };
8384860B0D6D68A200CEA253 /* objc-runtime.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485E40D6D68A200CEA253 /* objc-runtime.mm */; };
8384860C0D6D68A200CEA253 /* objc-sel-set.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485E50D6D68A200CEA253 /* objc-sel-set.h */; };
838486120D6D68A200CEA253 /* objc-typeencoding.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485EB0D6D68A200CEA253 /* objc-typeencoding.mm */; };
838486130D6D68A200CEA253 /* objc.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485EC0D6D68A200CEA253 /* objc.h */; settings = {ATTRIBUTES = (Public, ); }; };
838486140D6D68A200CEA253 /* Object.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485ED0D6D68A200CEA253 /* Object.h */; settings = {ATTRIBUTES = (Public, ); }; };
- 838486150D6D68A200CEA253 /* Object.m in Sources */ = {isa = PBXBuildFile; fileRef = 838485EE0D6D68A200CEA253 /* Object.m */; };
+ 838486150D6D68A200CEA253 /* Object.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485EE0D6D68A200CEA253 /* Object.mm */; };
8384861E0D6D68A800CEA253 /* Protocol.h in Headers */ = {isa = PBXBuildFile; fileRef = 838486180D6D68A800CEA253 /* Protocol.h */; settings = {ATTRIBUTES = (Public, ); }; };
8384861F0D6D68A800CEA253 /* Protocol.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838486190D6D68A800CEA253 /* Protocol.mm */; };
838486200D6D68A800CEA253 /* runtime.h in Headers */ = {isa = PBXBuildFile; fileRef = 8384861A0D6D68A800CEA253 /* runtime.h */; settings = {ATTRIBUTES = (Public, ); }; };
838486260D6D68F000CEA253 /* List.h in Headers */ = {isa = PBXBuildFile; fileRef = 838486240D6D68F000CEA253 /* List.h */; settings = {ATTRIBUTES = (Public, ); }; };
838486280D6D6A2400CEA253 /* message.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485BD0D6D687300CEA253 /* message.h */; settings = {ATTRIBUTES = (Public, ); }; };
83B1A8BE0FF1AC0D0019EA5B /* objc-msg-simulator-i386.s in Sources */ = {isa = PBXBuildFile; fileRef = 83B1A8BC0FF1AC0D0019EA5B /* objc-msg-simulator-i386.s */; };
- 83BE02E40FCCB23400661494 /* objc-file-old.m in Sources */ = {isa = PBXBuildFile; fileRef = 83BE02E30FCCB23400661494 /* objc-file-old.m */; };
+ 83BE02E40FCCB23400661494 /* objc-file-old.mm in Sources */ = {isa = PBXBuildFile; fileRef = 83BE02E30FCCB23400661494 /* objc-file-old.mm */; };
83BE02E80FCCB24D00661494 /* objc-file-old.h in Headers */ = {isa = PBXBuildFile; fileRef = 83BE02E50FCCB24D00661494 /* objc-file-old.h */; };
83BE02E90FCCB24D00661494 /* objc-file.h in Headers */ = {isa = PBXBuildFile; fileRef = 83BE02E60FCCB24D00661494 /* objc-file.h */; };
83BE02EA0FCCB24D00661494 /* objc-runtime-old.h in Headers */ = {isa = PBXBuildFile; fileRef = 83BE02E70FCCB24D00661494 /* objc-runtime-old.h */; };
83E50CF60FF19E8200D74C19 /* objc-os.h in Headers */ = {isa = PBXBuildFile; fileRef = 831C85D30E10CF850066E64C /* objc-os.h */; };
83E50CF70FF19E8200D74C19 /* objc-gdb.h in Headers */ = {isa = PBXBuildFile; fileRef = 834266D70E665A8B002E4DA2 /* objc-gdb.h */; settings = {ATTRIBUTES = (Private, ); }; };
83E50CF80FF19E8200D74C19 /* objc-internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 83112ED30F00599600A5FBAF /* objc-internal.h */; settings = {ATTRIBUTES = (Private, ); }; };
- 83E50D130FF19E8200D74C19 /* Object.m in Sources */ = {isa = PBXBuildFile; fileRef = 838485EE0D6D68A200CEA253 /* Object.m */; };
+ 83E50D130FF19E8200D74C19 /* Object.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485EE0D6D68A200CEA253 /* Object.mm */; };
83E50D140FF19E8200D74C19 /* Protocol.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838486190D6D68A800CEA253 /* Protocol.mm */; };
83E50D150FF19E8200D74C19 /* List.m in Sources */ = {isa = PBXBuildFile; fileRef = 838486230D6D68F000CEA253 /* List.m */; };
83E57595121E892100295464 /* objc-abi.h in Headers */ = {isa = PBXBuildFile; fileRef = 834EC0A311614167009B2563 /* objc-abi.h */; settings = {ATTRIBUTES = (Private, ); }; };
83E57597121E8A0A00295464 /* objc-runtime-old.h in Headers */ = {isa = PBXBuildFile; fileRef = 83BE02E70FCCB24D00661494 /* objc-runtime-old.h */; };
83E57598121E8A1600295464 /* objc-file.h in Headers */ = {isa = PBXBuildFile; fileRef = 83BE02E60FCCB24D00661494 /* objc-file.h */; };
83EB007B121C9EC200B92C16 /* objc-sel-table.s in Sources */ = {isa = PBXBuildFile; fileRef = 83EB007A121C9EC200B92C16 /* objc-sel-table.s */; };
+ 83F4B52815E843B100E0926F /* NSObjCRuntime.h in Headers */ = {isa = PBXBuildFile; fileRef = 83F4B52615E843B100E0926F /* NSObjCRuntime.h */; settings = {ATTRIBUTES = (Public, ); }; };
+ 83F4B52915E843B100E0926F /* NSObject.h in Headers */ = {isa = PBXBuildFile; fileRef = 83F4B52715E843B100E0926F /* NSObject.h */; settings = {ATTRIBUTES = (Public, ); }; };
+ 83F4B52B15E843C300E0926F /* NSObject.h in Headers */ = {isa = PBXBuildFile; fileRef = 83F4B52715E843B100E0926F /* NSObject.h */; settings = {ATTRIBUTES = (Public, ); }; };
+ 83F4B52C15E843C800E0926F /* NSObjCRuntime.h in Headers */ = {isa = PBXBuildFile; fileRef = 83F4B52615E843B100E0926F /* NSObjCRuntime.h */; settings = {ATTRIBUTES = (Public, ); }; };
+ 83F550E0155E030800E95D3B /* objc-cache-old.mm in Sources */ = {isa = PBXBuildFile; fileRef = 83F550DF155E030800E95D3B /* objc-cache-old.mm */; };
87BB4EA70EC39854005D08E1 /* objc-probes.d in Sources */ = {isa = PBXBuildFile; fileRef = 87BB4E900EC39633005D08E1 /* objc-probes.d */; };
9672F7EE14D5F488007CEC96 /* NSObject.mm in Sources */ = {isa = PBXBuildFile; fileRef = 9672F7ED14D5F488007CEC96 /* NSObject.mm */; };
9672F7EF14D5F488007CEC96 /* NSObject.mm in Sources */ = {isa = PBXBuildFile; fileRef = 9672F7ED14D5F488007CEC96 /* NSObject.mm */; };
BC07A00C0EF72D360014EC61 /* objc-auto-dump.h in Headers */ = {isa = PBXBuildFile; fileRef = BC07A00B0EF72D360014EC61 /* objc-auto-dump.h */; settings = {ATTRIBUTES = (Private, ); }; };
- BC07A0110EF72D9C0014EC61 /* objc-auto-dump.m in Sources */ = {isa = PBXBuildFile; fileRef = BC07A0100EF72D9C0014EC61 /* objc-auto-dump.m */; };
+ BC07A0110EF72D9C0014EC61 /* objc-auto-dump.mm in Sources */ = {isa = PBXBuildFile; fileRef = BC07A0100EF72D9C0014EC61 /* objc-auto-dump.mm */; };
E8923DA1116AB2820071B552 /* a1a2-blocktramps-i386.s in Sources */ = {isa = PBXBuildFile; fileRef = E8923D9C116AB2820071B552 /* a1a2-blocktramps-i386.s */; };
E8923DA2116AB2820071B552 /* a1a2-blocktramps-x86_64.s in Sources */ = {isa = PBXBuildFile; fileRef = E8923D9D116AB2820071B552 /* a1a2-blocktramps-x86_64.s */; };
E8923DA3116AB2820071B552 /* a2a3-blocktramps-i386.s in Sources */ = {isa = PBXBuildFile; fileRef = E8923D9E116AB2820071B552 /* a2a3-blocktramps-i386.s */; };
831C85D30E10CF850066E64C /* objc-os.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-os.h"; path = "runtime/objc-os.h"; sourceTree = "<group>"; };
831C85D40E10CF850066E64C /* objc-os.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-os.mm"; path = "runtime/objc-os.mm"; sourceTree = "<group>"; };
834266D70E665A8B002E4DA2 /* objc-gdb.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-gdb.h"; path = "runtime/objc-gdb.h"; sourceTree = "<group>"; };
+ 834DF8B615993EE1002F2BC9 /* objc-sel-old.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-sel-old.mm"; path = "runtime/objc-sel-old.mm"; sourceTree = "<group>"; };
834EC0A311614167009B2563 /* objc-abi.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-abi.h"; path = "runtime/objc-abi.h"; sourceTree = "<group>"; };
83725F4914CA5BFA0014370E /* objc-opt.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-opt.mm"; path = "runtime/objc-opt.mm"; sourceTree = "<group>"; };
8383A3A1122600E9009290B8 /* a1a2-blocktramps-arm.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "a1a2-blocktramps-arm.s"; path = "runtime/a1a2-blocktramps-arm.s"; sourceTree = "<group>"; };
838485BD0D6D687300CEA253 /* message.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = message.h; path = runtime/message.h; sourceTree = "<group>"; };
838485C80D6D68A200CEA253 /* objc-api.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-api.h"; path = "runtime/objc-api.h"; sourceTree = "<group>"; };
838485C90D6D68A200CEA253 /* objc-auto.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-auto.h"; path = "runtime/objc-auto.h"; sourceTree = "<group>"; };
- 838485CA0D6D68A200CEA253 /* objc-auto.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = "objc-auto.m"; path = "runtime/objc-auto.m"; sourceTree = "<group>"; };
+ 838485CA0D6D68A200CEA253 /* objc-auto.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-auto.mm"; path = "runtime/objc-auto.mm"; sourceTree = "<group>"; };
838485CB0D6D68A200CEA253 /* objc-cache.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-cache.mm"; path = "runtime/objc-cache.mm"; sourceTree = "<group>"; };
- 838485CC0D6D68A200CEA253 /* objc-class-old.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = "objc-class-old.m"; path = "runtime/objc-class-old.m"; sourceTree = "<group>"; };
+ 838485CC0D6D68A200CEA253 /* objc-class-old.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-class-old.mm"; path = "runtime/objc-class-old.mm"; sourceTree = "<group>"; };
838485CD0D6D68A200CEA253 /* objc-class.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-class.h"; path = "runtime/objc-class.h"; sourceTree = "<group>"; };
838485CE0D6D68A200CEA253 /* objc-class.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-class.mm"; path = "runtime/objc-class.mm"; sourceTree = "<group>"; };
838485CF0D6D68A200CEA253 /* objc-config.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-config.h"; path = "runtime/objc-config.h"; sourceTree = "<group>"; };
838485DC0D6D68A200CEA253 /* objc-private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-private.h"; path = "runtime/objc-private.h"; sourceTree = "<group>"; };
838485E00D6D68A200CEA253 /* objc-runtime-new.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-runtime-new.h"; path = "runtime/objc-runtime-new.h"; sourceTree = "<group>"; };
838485E10D6D68A200CEA253 /* objc-runtime-new.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-runtime-new.mm"; path = "runtime/objc-runtime-new.mm"; sourceTree = "<group>"; };
- 838485E20D6D68A200CEA253 /* objc-runtime-old.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = "objc-runtime-old.m"; path = "runtime/objc-runtime-old.m"; sourceTree = "<group>"; };
+ 838485E20D6D68A200CEA253 /* objc-runtime-old.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-runtime-old.mm"; path = "runtime/objc-runtime-old.mm"; sourceTree = "<group>"; };
838485E30D6D68A200CEA253 /* objc-runtime.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-runtime.h"; path = "runtime/objc-runtime.h"; sourceTree = "<group>"; };
838485E40D6D68A200CEA253 /* objc-runtime.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-runtime.mm"; path = "runtime/objc-runtime.mm"; sourceTree = "<group>"; };
838485E50D6D68A200CEA253 /* objc-sel-set.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-sel-set.h"; path = "runtime/objc-sel-set.h"; sourceTree = "<group>"; };
838485EB0D6D68A200CEA253 /* objc-typeencoding.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-typeencoding.mm"; path = "runtime/objc-typeencoding.mm"; sourceTree = "<group>"; };
838485EC0D6D68A200CEA253 /* objc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = objc.h; path = runtime/objc.h; sourceTree = "<group>"; };
838485ED0D6D68A200CEA253 /* Object.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = Object.h; path = runtime/Object.h; sourceTree = "<group>"; };
- 838485EE0D6D68A200CEA253 /* Object.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = Object.m; path = runtime/Object.m; sourceTree = "<group>"; };
+ 838485EE0D6D68A200CEA253 /* Object.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = Object.mm; path = runtime/Object.mm; sourceTree = "<group>"; };
838486180D6D68A800CEA253 /* Protocol.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = Protocol.h; path = runtime/Protocol.h; sourceTree = "<group>"; };
838486190D6D68A800CEA253 /* Protocol.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = Protocol.mm; path = runtime/Protocol.mm; sourceTree = "<group>"; };
8384861A0D6D68A800CEA253 /* runtime.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = runtime.h; path = runtime/runtime.h; sourceTree = "<group>"; };
838486230D6D68F000CEA253 /* List.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = List.m; path = runtime/OldClasses.subproj/List.m; sourceTree = "<group>"; };
838486240D6D68F000CEA253 /* List.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = List.h; path = runtime/OldClasses.subproj/List.h; sourceTree = "<group>"; };
83B1A8BC0FF1AC0D0019EA5B /* objc-msg-simulator-i386.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "objc-msg-simulator-i386.s"; path = "runtime/Messengers.subproj/objc-msg-simulator-i386.s"; sourceTree = "<group>"; };
- 83BE02E30FCCB23400661494 /* objc-file-old.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = "objc-file-old.m"; path = "runtime/objc-file-old.m"; sourceTree = "<group>"; };
+ 83BE02E30FCCB23400661494 /* objc-file-old.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-file-old.mm"; path = "runtime/objc-file-old.mm"; sourceTree = "<group>"; };
83BE02E50FCCB24D00661494 /* objc-file-old.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-file-old.h"; path = "runtime/objc-file-old.h"; sourceTree = "<group>"; };
83BE02E60FCCB24D00661494 /* objc-file.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-file.h"; path = "runtime/objc-file.h"; sourceTree = "<group>"; };
83BE02E70FCCB24D00661494 /* objc-runtime-old.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-runtime-old.h"; path = "runtime/objc-runtime-old.h"; sourceTree = "<group>"; };
83E50D2A0FF19E8200D74C19 /* libobjc.A.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libobjc.A.dylib; sourceTree = BUILT_PRODUCTS_DIR; };
83E50D2B0FF19E9E00D74C19 /* IndigoSDK.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; name = IndigoSDK.xcconfig; path = AppleInternal/XcodeConfig/IndigoSDK.xcconfig; sourceTree = DEVELOPER_DIR; };
83EB007A121C9EC200B92C16 /* objc-sel-table.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "objc-sel-table.s"; path = "runtime/objc-sel-table.s"; sourceTree = "<group>"; };
+ 83F4B52615E843B100E0926F /* NSObjCRuntime.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = NSObjCRuntime.h; path = runtime/NSObjCRuntime.h; sourceTree = "<group>"; };
+ 83F4B52715E843B100E0926F /* NSObject.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = NSObject.h; path = runtime/NSObject.h; sourceTree = "<group>"; };
+ 83F550DF155E030800E95D3B /* objc-cache-old.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-cache-old.mm"; path = "runtime/objc-cache-old.mm"; sourceTree = "<group>"; };
87BB4E900EC39633005D08E1 /* objc-probes.d */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.dtrace; name = "objc-probes.d"; path = "runtime/objc-probes.d"; sourceTree = "<group>"; };
9672F7ED14D5F488007CEC96 /* NSObject.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = NSObject.mm; path = runtime/NSObject.mm; sourceTree = "<group>"; };
BC07A00B0EF72D360014EC61 /* objc-auto-dump.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-auto-dump.h"; path = "runtime/objc-auto-dump.h"; sourceTree = "<group>"; };
- BC07A0100EF72D9C0014EC61 /* objc-auto-dump.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = "objc-auto-dump.m"; path = "runtime/objc-auto-dump.m"; sourceTree = "<group>"; };
+ BC07A0100EF72D9C0014EC61 /* objc-auto-dump.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-auto-dump.mm"; path = "runtime/objc-auto-dump.mm"; sourceTree = "<group>"; };
BC8B5D1212D3D48100C78A5B /* libauto.dylib */ = {isa = PBXFileReference; lastKnownFileType = "compiled.mach-o.dylib"; name = libauto.dylib; path = /usr/lib/libauto.dylib; sourceTree = "<absolute>"; };
D2AAC0630554660B00DB518D /* libobjc.A.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libobjc.A.dylib; sourceTree = BUILT_PRODUCTS_DIR; };
E8923D9C116AB2820071B552 /* a1a2-blocktramps-i386.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "a1a2-blocktramps-i386.s"; path = "runtime/a1a2-blocktramps-i386.s"; sourceTree = "<group>"; };
9672F7ED14D5F488007CEC96 /* NSObject.mm */,
838486190D6D68A800CEA253 /* Protocol.mm */,
830F2A930D73876100392440 /* objc-accessors.mm */,
- 838485CA0D6D68A200CEA253 /* objc-auto.m */,
- BC07A0100EF72D9C0014EC61 /* objc-auto-dump.m */,
+ 838485CA0D6D68A200CEA253 /* objc-auto.mm */,
+ BC07A0100EF72D9C0014EC61 /* objc-auto-dump.mm */,
39ABD72012F0B61800D1054C /* objc-weak.mm */,
E8923DA0116AB2820071B552 /* objc-block-trampolines.mm */,
838485CB0D6D68A200CEA253 /* objc-cache.mm */,
- 838485CC0D6D68A200CEA253 /* objc-class-old.m */,
+ 83F550DF155E030800E95D3B /* objc-cache-old.mm */,
+ 838485CC0D6D68A200CEA253 /* objc-class-old.mm */,
838485CE0D6D68A200CEA253 /* objc-class.mm */,
838485D00D6D68A200CEA253 /* objc-errors.mm */,
838485D20D6D68A200CEA253 /* objc-exception.mm */,
399BC72D1224831B007FBDF0 /* objc-externalref.mm */,
838485D30D6D68A200CEA253 /* objc-file.mm */,
- 83BE02E30FCCB23400661494 /* objc-file-old.m */,
+ 83BE02E30FCCB23400661494 /* objc-file-old.mm */,
838485D50D6D68A200CEA253 /* objc-initialize.mm */,
838485D60D6D68A200CEA253 /* objc-layout.mm */,
838485D80D6D68A200CEA253 /* objc-load.mm */,
831C85D40E10CF850066E64C /* objc-os.mm */,
393CEABF0DC69E3E000B69DE /* objc-references.mm */,
838485E10D6D68A200CEA253 /* objc-runtime-new.mm */,
- 838485E20D6D68A200CEA253 /* objc-runtime-old.m */,
+ 838485E20D6D68A200CEA253 /* objc-runtime-old.mm */,
838485E40D6D68A200CEA253 /* objc-runtime.mm */,
838485E60D6D68A200CEA253 /* objc-sel-set.mm */,
83EB007A121C9EC200B92C16 /* objc-sel-table.s */,
838485E80D6D68A200CEA253 /* objc-sel.mm */,
+ 834DF8B615993EE1002F2BC9 /* objc-sel-old.mm */,
838485EA0D6D68A200CEA253 /* objc-sync.mm */,
838485EB0D6D68A200CEA253 /* objc-typeencoding.mm */,
E8923D9C116AB2820071B552 /* a1a2-blocktramps-i386.s */,
838485C60D6D687700CEA253 /* Public Headers */ = {
isa = PBXGroup;
children = (
+ 83F4B52615E843B100E0926F /* NSObjCRuntime.h */,
+ 83F4B52715E843B100E0926F /* NSObject.h */,
838485BD0D6D687300CEA253 /* message.h */,
838485C80D6D68A200CEA253 /* objc-api.h */,
838485C90D6D68A200CEA253 /* objc-auto.h */,
isa = PBXGroup;
children = (
838486230D6D68F000CEA253 /* List.m */,
- 838485EE0D6D68A200CEA253 /* Object.m */,
+ 838485EE0D6D68A200CEA253 /* Object.mm */,
);
name = "Obsolete Source";
sourceTree = "<group>";
83E50CEF0FF19E8200D74C19 /* Protocol.h in Headers */,
83E50CF00FF19E8200D74C19 /* runtime.h in Headers */,
39ABD72512F0B61800D1054C /* objc-weak.h in Headers */,
+ 83F4B52B15E843C300E0926F /* NSObject.h in Headers */,
+ 83F4B52C15E843C800E0926F /* NSObjCRuntime.h in Headers */,
);
runOnlyForDeploymentPostprocessing = 0;
};
8384861E0D6D68A800CEA253 /* Protocol.h in Headers */,
838486200D6D68A800CEA253 /* runtime.h in Headers */,
39ABD72312F0B61800D1054C /* objc-weak.h in Headers */,
+ 83F4B52815E843B100E0926F /* NSObjCRuntime.h in Headers */,
+ 83F4B52915E843B100E0926F /* NSObject.h in Headers */,
);
runOnlyForDeploymentPostprocessing = 0;
};
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
files = (
- 83E50D130FF19E8200D74C19 /* Object.m in Sources */,
+ 83E50D130FF19E8200D74C19 /* Object.mm in Sources */,
83E50D140FF19E8200D74C19 /* Protocol.mm in Sources */,
83E50D150FF19E8200D74C19 /* List.m in Sources */,
8383A3AC122600FB009290B8 /* a1a2-blocktramps-arm.s in Sources */,
8383A3AE122600FB009290B8 /* hashtable2.mm in Sources */,
8383A3AF122600FB009290B8 /* maptable.mm in Sources */,
8383A3B0122600FB009290B8 /* objc-accessors.mm in Sources */,
- 8383A3B1122600FB009290B8 /* objc-auto.m in Sources */,
- 8383A3B2122600FB009290B8 /* objc-auto-dump.m in Sources */,
+ 8383A3B1122600FB009290B8 /* objc-auto.mm in Sources */,
+ 8383A3B2122600FB009290B8 /* objc-auto-dump.mm in Sources */,
8383A3B3122600FB009290B8 /* objc-block-trampolines.mm in Sources */,
8383A3B4122600FB009290B8 /* objc-cache.mm in Sources */,
- 8383A3B5122600FB009290B8 /* objc-class-old.m in Sources */,
+ 8383A3B5122600FB009290B8 /* objc-class-old.mm in Sources */,
8383A3B6122600FB009290B8 /* objc-class.mm in Sources */,
8383A3B7122600FB009290B8 /* objc-errors.mm in Sources */,
8383A3B8122600FB009290B8 /* objc-exception.mm in Sources */,
8383A3B9122600FB009290B8 /* objc-file.mm in Sources */,
- 8383A3BA122600FB009290B8 /* objc-file-old.m in Sources */,
+ 8383A3BA122600FB009290B8 /* objc-file-old.mm in Sources */,
8383A3BB122600FB009290B8 /* objc-initialize.mm in Sources */,
8383A3BC122600FB009290B8 /* objc-layout.mm in Sources */,
8383A3BD122600FB009290B8 /* objc-load.mm in Sources */,
8383A3C0122600FB009290B8 /* objc-os.mm in Sources */,
8383A3C1122600FB009290B8 /* objc-references.mm in Sources */,
8383A3C3122600FB009290B8 /* objc-runtime-new.mm in Sources */,
- 8383A3C4122600FB009290B8 /* objc-runtime-old.m in Sources */,
+ 8383A3C4122600FB009290B8 /* objc-runtime-old.mm in Sources */,
8383A3C5122600FB009290B8 /* objc-runtime.mm in Sources */,
8383A3C6122600FB009290B8 /* objc-sel-set.mm in Sources */,
8383A3C7122600FB009290B8 /* objc-sel-table.s in Sources */,
files = (
838485C00D6D687300CEA253 /* hashtable2.mm in Sources */,
838485C40D6D687300CEA253 /* maptable.mm in Sources */,
- 838485F10D6D68A200CEA253 /* objc-auto.m in Sources */,
+ 838485F10D6D68A200CEA253 /* objc-auto.mm in Sources */,
838485F20D6D68A200CEA253 /* objc-cache.mm in Sources */,
- 838485F30D6D68A200CEA253 /* objc-class-old.m in Sources */,
+ 838485F30D6D68A200CEA253 /* objc-class-old.mm in Sources */,
838485F50D6D68A200CEA253 /* objc-class.mm in Sources */,
838485F70D6D68A200CEA253 /* objc-errors.mm in Sources */,
838485F90D6D68A200CEA253 /* objc-exception.mm in Sources */,
838486010D6D68A200CEA253 /* objc-loadmethod.mm in Sources */,
838486020D6D68A200CEA253 /* objc-lockdebug.mm in Sources */,
838486080D6D68A200CEA253 /* objc-runtime-new.mm in Sources */,
- 838486090D6D68A200CEA253 /* objc-runtime-old.m in Sources */,
+ 838486090D6D68A200CEA253 /* objc-runtime-old.mm in Sources */,
8384860B0D6D68A200CEA253 /* objc-runtime.mm in Sources */,
8384860D0D6D68A200CEA253 /* objc-sel-set.mm in Sources */,
8384860F0D6D68A200CEA253 /* objc-sel.mm in Sources */,
838486110D6D68A200CEA253 /* objc-sync.mm in Sources */,
838486120D6D68A200CEA253 /* objc-typeencoding.mm in Sources */,
- 838486150D6D68A200CEA253 /* Object.m in Sources */,
+ 838486150D6D68A200CEA253 /* Object.mm in Sources */,
8384861F0D6D68A800CEA253 /* Protocol.mm in Sources */,
838486250D6D68F000CEA253 /* List.m in Sources */,
830F2A740D737FB800392440 /* objc-msg-arm.s in Sources */,
393CEAC00DC69E3E000B69DE /* objc-references.mm in Sources */,
831C85D60E10CF850066E64C /* objc-os.mm in Sources */,
87BB4EA70EC39854005D08E1 /* objc-probes.d in Sources */,
- BC07A0110EF72D9C0014EC61 /* objc-auto-dump.m in Sources */,
- 83BE02E40FCCB23400661494 /* objc-file-old.m in Sources */,
+ BC07A0110EF72D9C0014EC61 /* objc-auto-dump.mm in Sources */,
+ 83BE02E40FCCB23400661494 /* objc-file-old.mm in Sources */,
E8923DA1116AB2820071B552 /* a1a2-blocktramps-i386.s in Sources */,
E8923DA2116AB2820071B552 /* a1a2-blocktramps-x86_64.s in Sources */,
E8923DA3116AB2820071B552 /* a2a3-blocktramps-i386.s in Sources */,
39ABD72412F0B61800D1054C /* objc-weak.mm in Sources */,
9672F7EE14D5F488007CEC96 /* NSObject.mm in Sources */,
83725F4A14CA5BFA0014370E /* objc-opt.mm in Sources */,
+ 83F550E0155E030800E95D3B /* objc-cache-old.mm in Sources */,
+ 834DF8B715993EE1002F2BC9 /* objc-sel-old.mm in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
1DEB914F08733D8E0010E9CD /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
+ CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
CLANG_CXX_LIBRARY = "libc++";
- "CLANG_CXX_LIBRARY[sdk=iphoneos*]" = "libstdc++";
- "CLANG_CXX_LIBRARY[sdk=iphonesimulator*]" = "libstdc++";
- CLANG_OBJC_RUNTIME = NO;
CLANG_LINK_OBJC_RUNTIME = NO;
+ CLANG_OBJC_RUNTIME = NO;
DEBUG_INFORMATION_FORMAT = dwarf;
GCC_ENABLE_CPP_EXCEPTIONS = NO;
GCC_ENABLE_CPP_RTTI = NO;
GCC_SYMBOLS_PRIVATE_EXTERN = YES;
GCC_VERSION = com.apple.compilers.llvm.clang.1_0;
GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
+ GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO = NO;
GCC_WARN_ABOUT_MISSING_NEWLINE = YES;
GCC_WARN_ABOUT_RETURN_TYPE = YES;
GCC_WARN_SHADOW = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
+ OTHER_CFLAGS = "";
+ "OTHER_CFLAGS[arch=x86_64]" = "-fobjc-legacy-dispatch";
+ OTHER_CPLUSPLUSFLAGS = (
+ "$(OTHER_CFLAGS)",
+ "-D_LIBCPP_VISIBLE=\"\"",
+ );
STANDARD_C_PLUS_PLUS_LIBRARY_TYPE = dynamic;
WARNING_CFLAGS = (
"-Wall",
"-Wstrict-overflow=4",
"-Wno-unused-parameter",
"-Wno-deprecated-objc-isa-usage",
+ "-Wno-cast-of-sel-type",
);
};
name = Debug;
1DEB915008733D8E0010E9CD /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
+ CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
CLANG_CXX_LIBRARY = "libc++";
- "CLANG_CXX_LIBRARY[sdk=iphoneos*]" = "libstdc++";
- "CLANG_CXX_LIBRARY[sdk=iphonesimulator*]" = "libstdc++";
- CLANG_OBJC_RUNTIME = NO;
CLANG_LINK_OBJC_RUNTIME = NO;
+ CLANG_OBJC_RUNTIME = NO;
DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
GCC_ENABLE_CPP_EXCEPTIONS = NO;
GCC_ENABLE_CPP_RTTI = NO;
GCC_SYMBOLS_PRIVATE_EXTERN = YES;
GCC_VERSION = com.apple.compilers.llvm.clang.1_0;
GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
+ GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO = NO;
GCC_WARN_ABOUT_MISSING_NEWLINE = YES;
GCC_WARN_ABOUT_RETURN_TYPE = YES;
GCC_WARN_SHADOW = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
"OTHER_CFLAGS[arch=i386]" = "-momit-leaf-frame-pointer";
- "OTHER_CFLAGS[arch=x86_64]" = "-momit-leaf-frame-pointer";
+ "OTHER_CFLAGS[arch=x86_64]" = (
+ "-momit-leaf-frame-pointer",
+ "-fobjc-legacy-dispatch",
+ );
+ OTHER_CPLUSPLUSFLAGS = (
+ "$(OTHER_CFLAGS)",
+ "-D_LIBCPP_VISIBLE=\"\"",
+ );
STANDARD_C_PLUS_PLUS_LIBRARY_TYPE = dynamic;
WARNING_CFLAGS = (
"-Wall",
"-Wstrict-overflow=4",
"-Wno-unused-parameter",
"-Wno-deprecated-objc-isa-usage",
+ "-Wno-cast-of-sel-type",
);
};
name = Release;
__BEGIN_DECLS
-// GC-specific accessors.
-extern void objc_setProperty_gc(id self, SEL _cmd, ptrdiff_t offset, id newValue, BOOL atomic, signed char shouldCopy);
-extern id objc_getProperty_gc(id self, SEL _cmd, ptrdiff_t offset, BOOL atomic);
+#if SUPPORT_GC
-// Non-GC accessors.
extern void objc_setProperty_non_gc(id self, SEL _cmd, ptrdiff_t offset, id newValue, BOOL atomic, signed char shouldCopy);
extern id objc_getProperty_non_gc(id self, SEL _cmd, ptrdiff_t offset, BOOL atomic);
+extern void objc_setProperty_gc(id self, SEL _cmd, ptrdiff_t offset, id newValue, BOOL atomic, signed char shouldCopy);
+extern id objc_getProperty_gc(id self, SEL _cmd, ptrdiff_t offset, BOOL atomic);
+
+#endif
+
__END_DECLS
#endif
#define MUTABLE_COPY 2
-id objc_getProperty_gc(id self, SEL _cmd, ptrdiff_t offset, BOOL atomic) {
- return *(id*) ((char*)self + offset);
-}
-
id objc_getProperty_non_gc(id self, SEL _cmd, ptrdiff_t offset, BOOL atomic) {
// Retain release world
id *slot = (id*) ((char*)self + offset);
return objc_autoreleaseReturnValue(value);
}
-id objc_getProperty(id self, SEL _cmd, ptrdiff_t offset, BOOL atomic) {
- return
-#if SUPPORT_GC
- (UseGC ? objc_getProperty_gc : objc_getProperty_non_gc)
-#else
- objc_getProperty_non_gc
-#endif
- (self, _cmd, offset, atomic);
-}
-
-#if SUPPORT_GC
-void objc_setProperty_gc(id self, SEL _cmd, ptrdiff_t offset, id newValue, BOOL atomic, signed char shouldCopy) {
- if (shouldCopy) {
- newValue = (shouldCopy == MUTABLE_COPY ? [newValue mutableCopyWithZone:NULL] : [newValue copyWithZone:NULL]);
- }
- objc_assign_ivar_gc(newValue, self, offset);
-}
-#endif
static inline void reallySetProperty(id self, SEL _cmd, id newValue, ptrdiff_t offset, bool atomic, bool copy, bool mutableCopy) __attribute__((always_inline));
}
-void objc_setProperty(id self, SEL _cmd, ptrdiff_t offset, id newValue, BOOL atomic, signed char shouldCopy) {
#if SUPPORT_GC
- (UseGC ? objc_setProperty_gc : objc_setProperty_non_gc)
+
+id objc_getProperty_gc(id self, SEL _cmd, ptrdiff_t offset, BOOL atomic) {
+ return *(id*) ((char*)self + offset);
+}
+
+void objc_setProperty_gc(id self, SEL _cmd, ptrdiff_t offset, id newValue, BOOL atomic, signed char shouldCopy) {
+ if (shouldCopy) {
+ newValue = (shouldCopy == MUTABLE_COPY ? [newValue mutableCopyWithZone:NULL] : [newValue copyWithZone:NULL]);
+ }
+ objc_assign_ivar(newValue, self, offset);
+}
+
+// objc_getProperty and objc_setProperty are resolver functions in objc-auto.mm
+
#else
- objc_setProperty_non_gc
-#endif
- (self, _cmd, offset, newValue, atomic, shouldCopy);
+
+id
+objc_getProperty(id self, SEL _cmd, ptrdiff_t offset, BOOL atomic)
+{
+ return objc_getProperty_non_gc(self, _cmd, offset, atomic);
}
+void
+objc_setProperty(id self, SEL _cmd, ptrdiff_t offset, id newValue,
+ BOOL atomic, signed char shouldCopy)
+{
+ objc_setProperty_non_gc(self, _cmd, offset, newValue, atomic, shouldCopy);
+}
+
+#endif
+
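
For the non-GC path declared and defined above, a hedged sketch of how a synthesized copy setter ends up in objc_setProperty; the ivar offset, property name, and function name are invented for illustration:

#include <stddef.h>
#include <objc/objc.h>

// Signature as declared in the accessor header shown earlier.
extern void objc_setProperty(id self, SEL _cmd, ptrdiff_t offset,
                             id newValue, BOOL atomic, signed char shouldCopy);

// Hypothetical hand-written equivalent of a synthesized setter for
//   @property (nonatomic, copy) NSString *title;
// assuming (purely for illustration) that the ivar sits at byte offset 8.
void MyObject_setTitle(id self, SEL _cmd, id newValue)
{
    // shouldCopy: 0 = plain assign/retain, 1 = -copy, MUTABLE_COPY (2) = -mutableCopy
    objc_setProperty(self, _cmd, 8, newValue, NO, 1);
}
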
// This entry point was designed wrong. When used as a getter, src needs to be locked so that,
// if it is simultaneously used for a setter, there would be contention on src.
#include <arm/arch.h>
-#ifdef ARM11
-#define MOVE cpy
-#define MOVEEQ cpyeq
-#define MOVENE cpyne
-#else
-#define MOVE mov
-#define MOVEEQ moveq
-#define MOVENE movne
-#endif
-
-#ifdef _ARM_ARCH_7
-#define THUMB 1
+#ifndef _ARM_ARCH_7
+# error requires armv7
#endif
.syntax unified
-#if defined(__DYNAMIC__)
#define MI_EXTERN(var) \
.non_lazy_symbol_pointer ;\
-L ## var ## __non_lazy_ptr: ;\
+L ## var ## $$non_lazy_ptr: ;\
.indirect_symbol var ;\
.long 0
-#else
-#define MI_EXTERN(var) \
- .globl var
-#endif
+#define MI_GET_EXTERN(reg,var) \
+ movw reg, :lower16:(L##var##$$non_lazy_ptr-4f-4) ;\
+ movt reg, :upper16:(L##var##$$non_lazy_ptr-4f-4) ;\
+4: add reg, pc ;\
+ ldr reg, [reg]
-#if defined(__DYNAMIC__) && defined(THUMB)
-#define MI_GET_ADDRESS(reg,var) \
- ldr reg, 4f ;\
-3: add reg, pc ;\
- ldr reg, [reg] ;\
- b 5f ;\
- .align 2 ;\
-4: .long L ## var ## __non_lazy_ptr - (3b + 4) ;\
-5:
-#elif defined(__DYNAMIC__)
-#define MI_GET_ADDRESS(reg,var) \
- ldr reg, 4f ;\
-3: ldr reg, [pc, reg] ;\
- b 5f ;\
- .align 2 ;\
-4: .long L ## var ## __non_lazy_ptr - (3b + 8) ;\
-5:
-#else
-#define MI_GET_ADDRESS(reg,var) \
- ldr reg, 3f ;\
- b 4f ;\
- .align 2 ;\
-3: .long var ;\
-4:
-#endif
-
-
-#if defined(__DYNAMIC__)
-#define MI_BRANCH_EXTERNAL(var) \
- MI_GET_ADDRESS(ip, var) ;\
- bx ip
-#else
-#define MI_BRANCH_EXTERNAL(var) \
- b var
-#endif
-
-#if defined(__DYNAMIC__) && defined(THUMB)
#define MI_CALL_EXTERNAL(var) \
- MI_GET_ADDRESS(ip,var) ;\
- blx ip
-#elif defined(__DYNAMIC__)
-#define MI_CALL_EXTERNAL(var) \
- MI_GET_ADDRESS(ip,var) ;\
- MOVE lr, pc ;\
- bx ip
-#else
-#define MI_CALL_EXTERNAL(var) \
- bl var
-#endif
+ MI_GET_EXTERN(r12,var) ;\
+ blx r12
+
+
+#define MI_GET_ADDRESS(reg,var) \
+ movw reg, :lower16:(var-4f-4) ;\
+ movt reg, :upper16:(var-4f-4) ;\
+4: add reg, pc ;\
MI_EXTERN(__class_lookupMethodAndLoadCache3)
-MI_EXTERN(_FwdSel)
MI_EXTERN(___objc_error)
-MI_EXTERN(__objc_forward_handler)
-MI_EXTERN(__objc_forward_stret_handler)
-
-#if 0
-// Special section containing a function pointer that dyld will call
-// when it loads new images.
-MI_EXTERN(__objc_notify_images)
-.text
-.align 2
-L__objc_notify_images:
- MI_BRANCH_EXTERNAL(__objc_notify_images)
-
-.section __DATA,__image_notify
-.long L__objc_notify_images
-#endif
-# _objc_entryPoints and _objc_exitPoints are used by method dispatch
-# caching code to figure out whether any threads are actively
-# in the cache for dispatching. The labels surround the asm code
-# that do cache lookups. The tables are zero-terminated.
+// _objc_entryPoints and _objc_exitPoints are used by method dispatch
+// caching code to figure out whether any threads are actively
+// in the cache for dispatching. The labels surround the asm code
+// that does cache lookups. The tables are zero-terminated.
.data
.private_extern _objc_entryPoints
_objc_entryPoints:
- .long __cache_getImp
- .long __cache_getMethod
+ .long _cache_getImp
.long _objc_msgSend
- .long _objc_msgSend_noarg
.long _objc_msgSend_stret
.long _objc_msgSendSuper
.long _objc_msgSendSuper_stret
.private_extern _objc_exitPoints
_objc_exitPoints:
.long LGetImpExit
- .long LGetMethodExit
.long LMsgSendExit
- .long LMsgSendNoArgExit
.long LMsgSendStretExit
.long LMsgSendSuperExit
.long LMsgSendSuperStretExit
.long 0
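
The tables above let the cache-freeing code decide whether any thread is still inside a dispatcher. A hedged sketch of that check against the two zero-terminated lists (the helper name is illustrative; the real consumer is the runtime's cache code):

#include <stdbool.h>
#include <stdint.h>

// The assembly labels above; each list ends with a zero word.
extern uintptr_t objc_entryPoints[];
extern uintptr_t objc_exitPoints[];

// True if a thread whose program counter is `pc` may still be reading a
// method cache, i.e. pc falls inside some [entry, exit) dispatcher range.
static bool pc_in_cache_reader(uintptr_t pc)
{
    for (unsigned i = 0; objc_entryPoints[i] != 0; i++) {
        if (pc >= objc_entryPoints[i] && pc < objc_exitPoints[i]) return true;
    }
    return false;
}
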
+/********************************************************************
+* List every exit insn from every messenger for debugger use.
+* Format:
+* (
+* 1 word instruction's address
+* 1 word type (ENTER or FAST_EXIT or SLOW_EXIT or NIL_EXIT)
+* )
+* 1 word zero
+*
+* ENTER is the start of a dispatcher
+* FAST_EXIT is method dispatch
+* SLOW_EXIT is uncached method lookup
+* NIL_EXIT is returning zero from a message sent to nil
+* These must match objc-gdb.h.
+********************************************************************/
+
+#define ENTER 1
+#define FAST_EXIT 2
+#define SLOW_EXIT 3
+#define NIL_EXIT 4
+
+.section __DATA,__objc_msg_break
+.globl _gdb_objc_messenger_breakpoints
+_gdb_objc_messenger_breakpoints:
+// contents populated by the macros below
+
+.macro MESSENGER_START
+4:
+ .section __DATA,__objc_msg_break
+ .long 4b
+ .long ENTER
+ .text
+.endmacro
+.macro MESSENGER_END_FAST
+4:
+ .section __DATA,__objc_msg_break
+ .long 4b
+ .long FAST_EXIT
+ .text
+.endmacro
+.macro MESSENGER_END_SLOW
+4:
+ .section __DATA,__objc_msg_break
+ .long 4b
+ .long SLOW_EXIT
+ .text
+.endmacro
+.macro MESSENGER_END_NIL
+4:
+ .section __DATA,__objc_msg_break
+ .long 4b
+ .long NIL_EXIT
+ .text
+.endmacro
+
+
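
A hedged reader for the breakpoint table format documented above, roughly as a debugger might implement it; it assumes only the (address, type) pairs plus zero terminator described in the comment, and a real debugger would read the __objc_msg_break section out of the loaded image rather than take a pointer:

#include <stdint.h>
#include <stdio.h>

#define ENTER     1
#define FAST_EXIT 2
#define SLOW_EXIT 3
#define NIL_EXIT  4

// Walk the (address, type) word pairs emitted by the MESSENGER_* macros;
// a zero address word terminates the list.
static void dump_messenger_breakpoints(const uint32_t *table)
{
    for (const uint32_t *p = table; p[0] != 0; p += 2) {
        printf("insn 0x%08x  type %u\n", (unsigned)p[0], (unsigned)p[1]);
    }
}
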
+/********************************************************************
+ * Names for relative labels
+ * DO NOT USE THESE LABELS ELSEWHERE
+ * Reserved labels: 8: 9:
+ ********************************************************************/
+#define LCacheMiss 8
+#define LCacheMiss_f 8f
+#define LCacheMiss_b 8b
+#define LNilReceiver 9
+#define LNilReceiver_f 9f
+#define LNilReceiver_b 9b
+
+
+/********************************************************************
+ * Macro parameters
+ ********************************************************************/
+
+#define NORMAL 0
+#define FPRET 1
+#define FP2RET 2
+#define GETIMP 3
+#define STRET 4
+#define SUPER 5
+#define SUPER2 6
+#define SUPER_STRET 7
+#define SUPER2_STRET 8
+
+
+/********************************************************************
+ *
+ * Structure definitions.
+ *
+ ********************************************************************/
+
/* objc_super parameter to sendSuper */
-.set RECEIVER, 0
-.set CLASS, 4
+#define RECEIVER 0
+#define CLASS 4
/* Selected field offsets in class structure */
-.set ISA, 0
-.set SUPERCLASS, 4
-.set CACHE, 8
-
-/* Method descriptor */
-.set METHOD_NAME, 0
-.set METHOD_IMP, 8
-
-/* Cache header */
-.set MASK, 0
-.set NEGMASK, -8
-.set OCCUPIED, 4
-.set BUCKETS, 8 /* variable length array */
-
-
-#####################################################################
-#
-# ENTRY functionName
-#
-# Assembly directives to begin an exported function.
-# We align on cache boundaries for these few functions.
-#
-# Takes: functionName - name of the exported function
-#####################################################################
+#define ISA 0
+#define SUPERCLASS 4
+#define CACHE 8
+#define CACHE_MASK 12
+
+/* Selected field offsets in method structure */
+#define METHOD_NAME 0
+#define METHOD_TYPES 4
+#define METHOD_IMP 8
+
+
+//////////////////////////////////////////////////////////////////////
+//
+// ENTRY functionName
+//
+// Assembly directives to begin an exported function.
+//
+// Takes: functionName - name of the exported function
+//////////////////////////////////////////////////////////////////////
.macro ENTRY /* name */
.text
-#ifdef THUMB
.thumb
-#endif
.align 5
.globl _$0
-#ifdef THUMB
.thumb_func
-#endif
_$0:
.endmacro
.macro STATIC_ENTRY /*name*/
.text
-#ifdef THUMB
.thumb
-#endif
.align 5
.private_extern _$0
-#ifdef THUMB
.thumb_func
-#endif
_$0:
.endmacro
-#####################################################################
-#
-# END_ENTRY functionName
-#
-# Assembly directives to end an exported function. Just a placeholder,
-# a close-parenthesis for ENTRY, until it is needed for something.
-#
-# Takes: functionName - name of the exported function
-#####################################################################
+//////////////////////////////////////////////////////////////////////
+//
+// END_ENTRY functionName
+//
+// Assembly directives to end an exported function. Just a placeholder,
+// a close-parenthesis for ENTRY, until it is needed for something.
+//
+// Takes: functionName - name of the exported function
+//////////////////////////////////////////////////////////////////////
.macro END_ENTRY /* name */
.endmacro
-#####################################################################
-#
-# CacheLookup selectorRegister, classReg, cacheMissLabel
-#
-# Locate the implementation for a selector in a class method cache.
-#
-# Takes:
-# $0 = register containing selector (a2 or a3 ONLY)
-# $1 = class whose cache is to be searched
-# cacheMissLabel = label to branch to iff method is not cached
-#
-# Kills:
-# a4, $1, r9, ip
-#
-# On exit: (found) method triplet in $1, imp in ip
-# (not found) jumps to cacheMissLabel
-#
-#####################################################################
-
-.macro CacheLookup /* selReg, classReg, missLabel */
+/////////////////////////////////////////////////////////////////////
+//
+// CacheLookup return-type
+//
+// Locate the implementation for a selector in a class's method cache.
+//
+// Takes:
+// $0 = NORMAL, STRET, SUPER, SUPER_STRET, SUPER2, SUPER2_STRET, GETIMP
+// r0 or r1 (STRET) = receiver
+// r1 or r2 (STRET) = selector
+// r9 = class to search in
+//
+// On exit: r9 and r12 clobbered
+// (found) calls or returns IMP, eq/ne/r9 set for forwarding
+// (not found) jumps to LCacheMiss
+//
+/////////////////////////////////////////////////////////////////////
- MOVE r9, $0, LSR #2 /* index = (sel >> 2) */
- ldr a4, [$1, #CACHE] /* cache = class->cache */
- add a4, a4, #BUCKETS /* buckets = &cache->buckets */
-
-/* search the cache */
-/* a1=receiver, a2 or a3=sel, r9=index, a4=buckets, $1=method */
-1:
- ldr ip, [a4, #NEGMASK] /* mask = cache->mask */
- and r9, r9, ip /* index &= mask */
- ldr $1, [a4, r9, LSL #2] /* method = buckets[index] */
- teq $1, #0 /* if (method == NULL) */
- add r9, r9, #1 /* index++ */
- beq $2 /* goto cacheMissLabel */
-
- ldr ip, [$1, #METHOD_NAME] /* load method->method_name */
- teq $0, ip /* if (method->method_name != sel) */
- bne 1b /* retry */
-
-/* cache hit, $1 == method triplet address */
-/* Return triplet in $1 and imp in ip */
- ldr ip, [$1, #METHOD_IMP] /* imp = method->method_imp */
-
-.endmacro
+.macro CacheHit
-
-/********************************************************************
- * Method _cache_getMethod(Class cls, SEL sel, IMP msgForward_internal_imp)
- *
- * On entry: a1 = class whose cache is to be searched
- * a2 = selector to search for
- * a3 = _objc_msgForward_internal IMP
- *
- * If found, returns method triplet pointer.
- * If not found, returns NULL.
- *
- * NOTE: _cache_getMethod never returns any cache entry whose implementation
- * is _objc_msgForward_internal. It returns NULL instead. This prevents thread-
- * safety and memory management bugs in _class_lookupMethodAndLoadCache.
- * See _class_lookupMethodAndLoadCache for details.
- *
- * _objc_msgForward_internal is passed as a parameter because it's more
- * efficient to do the (PIC) lookup once in the caller than repeatedly here.
- ********************************************************************/
-
- STATIC_ENTRY _cache_getMethod
-
-# search the cache
- CacheLookup a2, a1, LGetMethodMiss
-
-# cache hit, method triplet in a1 and imp in ip
- teq ip, a3 /* check for _objc_msgForward_internal */
+.if $0 == GETIMP
+ ldr r0, [r9, #4] // r0 = bucket->imp
+ MI_GET_ADDRESS(r1, __objc_msgSend_uncached_impcache)
+ teq r0, r1
it eq
- MOVEEQ a1, #1 /* return (Method)1 if forward */
- /* else return triplet (already in a1) */
- bx lr
+ moveq r0, #0 // don't return msgSend_uncached
+ bx lr // return imp
+.elseif $0 == NORMAL
+ ldr r12, [r9, #4] // r12 = bucket->imp
+ // eq already set for nonstret forward
+ MESSENGER_END_FAST
+ bx r12 // call imp
+.elseif $0 == STRET
+ ldr r12, [r9, #4] // r12 = bucket->imp
+ movs r9, #1 // r9=1, Z=0 for stret forwarding
+ MESSENGER_END_FAST
+ bx r12 // call imp
+.elseif $0 == SUPER
+ ldr r12, [r9, #4] // r12 = bucket->imp
+ ldr r9, [r0, #CLASS] // r9 = class to search for forwarding
+ ldr r0, [r0, #RECEIVER] // fetch real receiver
+ tst r12, r12 // set ne for forwarding (r12!=0)
+ MESSENGER_END_FAST
+ bx r12 // call imp
+.elseif $0 == SUPER2
+ ldr r12, [r9, #4] // r12 = bucket->imp
+ ldr r9, [r0, #CLASS]
+ ldr r9, [r9, #SUPERCLASS] // r9 = class to search for forwarding
+ ldr r0, [r0, #RECEIVER] // fetch real receiver
+ tst r12, r12 // set ne for forwarding (r12!=0)
+ MESSENGER_END_FAST
+ bx r12 // call imp
+.elseif $0 == SUPER_STRET
+ ldr r12, [r9, #4] // r12 = bucket->imp
+ ldr r9, [r1, #CLASS] // r9 = class to search for forwarding
+ orr r9, r9, #1 // r9 = class|1 for super_stret forward
+ ldr r1, [r1, #RECEIVER] // fetch real receiver
+ tst r12, r12 // set ne for forwarding (r12!=0)
+ MESSENGER_END_FAST
+ bx r12 // call imp
+.elseif $0 == SUPER2_STRET
+ ldr r12, [r9, #4] // r12 = bucket->imp
+ ldr r9, [r1, #CLASS] // r9 = class to search for forwarding
+ ldr r9, [r9, #SUPERCLASS] // r9 = class to search for forwarding
+ orr r9, r9, #1 // r9 = class|1 for super_stret forward
+ ldr r1, [r1, #RECEIVER] // fetch real receiver
+ tst r12, r12 // set ne for forwarding (r12!=0)
+ MESSENGER_END_FAST
+ bx r12 // call imp
+.else
+.abort oops
+.endif
+
+.endmacro
-LGetMethodMiss:
- MOVE a1, #0 /* return nil if cache miss */
- bx lr
+.macro CacheLookup
+
+ ldrh r12, [r9, #CACHE_MASK] // r12 = mask
+ ldr r9, [r9, #CACHE] // r9 = buckets
+.if $0 == STRET || $0 == SUPER_STRET
+ and r12, r12, r2 // r12 = index = SEL & mask
+.else
+ and r12, r12, r1 // r12 = index = SEL & mask
+.endif
+ add r9, r9, r12, LSL #3 // r9 = bucket = buckets+index*8
+ ldr r12, [r9] // r12 = bucket->sel
+2:
+.if $0 == STRET || $0 == SUPER_STRET
+ teq r12, r2
+.else
+ teq r12, r1
+.endif
+ bne 1f
+ CacheHit $0
+1:
+ cmp r12, #1
+ blo LCacheMiss_f // if (bucket->sel == 0) cache miss
+ it eq // if (bucket->sel == 1) cache wrap
+ ldreq r9, [r9, #4] // bucket->imp is before first bucket
+ ldr r12, [r9, #8]! // r12 = (++bucket)->sel
+ b 2b
-LGetMethodExit:
- END_ENTRY _cache_getMethod
+.endmacro
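
A hedged C rendering of the probe loop that CacheLookup implements above; the type and function names are illustrative, and only the bucket layout the assembly assumes (a sel word and an imp word per bucket, sel 0 meaning empty, sel 1 meaning wrap-around) is taken from it:

#include <stdint.h>

typedef struct {
    uintptr_t sel;   // 0 = empty slot (miss), 1 = wrap-around marker
    uintptr_t imp;   // for the wrap marker: the slot just before bucket 0
} cache_bucket;      // 8 bytes on the armv7 target above

typedef struct {
    cache_bucket *buckets;  // class field at offset CACHE
    uint16_t mask;          // class field at offset CACHE_MASK (capacity - 1)
} method_cache;

// Returns the cached imp for `sel`, or 0 on a cache miss.
static uintptr_t cache_probe(const method_cache *cache, uintptr_t sel)
{
    cache_bucket *b = &cache->buckets[sel & cache->mask];
    for (;;) {
        if (b->sel == sel) return b->imp;                    // CacheHit
        if (b->sel == 0)   return 0;                         // empty: LCacheMiss
        if (b->sel == 1)   b = (cache_bucket *)b->imp;       // wrap before bucket 0
        b++;                                                 // ldr r12, [r9, #8]!
    }
}
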
/********************************************************************
- * IMP _cache_getImp(Class cls, SEL sel)
+ * IMP cache_getImp(Class cls, SEL sel)
*
- * On entry: a1 = class whose cache is to be searched
- * a2 = selector to search for
+ * On entry: r0 = class whose cache is to be searched
+ * r1 = selector to search for
*
* If found, returns method implementation.
* If not found, returns NULL.
********************************************************************/
- STATIC_ENTRY _cache_getImp
-
-# save registers and load class for CacheLookup
+ STATIC_ENTRY cache_getImp
-# search the cache
- CacheLookup a2, a1, LGetImpMiss
-
-# cache hit, method triplet in a1 and imp in ip
- MOVE a1, ip @ return imp
- bx lr
+ mov r9, r0
+ CacheLookup GETIMP // returns IMP on success
-LGetImpMiss:
- MOVE a1, #0 @ return nil if cache miss
+LCacheMiss:
+ mov r0, #0 // return nil if cache miss
bx lr
LGetImpExit:
- END_ENTRY _cache_getImp
+ END_ENTRY cache_getImp
/********************************************************************
- * id objc_msgSend(id self,
- * SEL op,
- * ...)
*
- * On entry: a1 is the message receiver,
- * a2 is the selector
+ * id objc_msgSend(id self, SEL _cmd,...);
+ *
********************************************************************/
ENTRY objc_msgSend
-# check whether receiver is nil
- teq a1, #0
- beq LMsgSendNilReceiver
+ MESSENGER_START
-# save registers and load receiver's class for CacheLookup
- stmfd sp!, {a4,v1}
- ldr v1, [a1, #ISA]
+ cbz r0, LNilReceiver_f
-# receiver is non-nil: search the cache
- CacheLookup a2, v1, LMsgSendCacheMiss
+ ldr r9, [r0] // r9 = self->isa
+ CacheLookup NORMAL
+ // calls IMP or LCacheMiss
-# cache hit (imp in ip) and CacheLookup returns with nonstret (eq) set, restore registers and call
- ldmfd sp!, {a4,v1}
- bx ip
+LCacheMiss:
+ MESSENGER_END_SLOW
+ ldr r9, [r0, #ISA] // class = receiver->isa
+ b __objc_msgSend_uncached
-# cache miss: go search the method lists
-LMsgSendCacheMiss:
- ldmfd sp!, {a4,v1}
- b _objc_msgSend_uncached
-
-LMsgSendNilReceiver:
- mov a2, #0
- bx lr
+LNilReceiver:
+ mov r1, #0
+ MESSENGER_END_NIL
+ bx lr
LMsgSendExit:
END_ENTRY objc_msgSend
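
A hedged C outline of the control flow objc_msgSend implements above, using public runtime calls as stand-ins for the inline cache probe and the __objc_msgSend_uncached slow path; it illustrates the flow, not the actual implementation:

#include <objc/runtime.h>

static id msgSend_outline(id self, SEL _cmd)
{
    if (self == nil) return nil;                          // LNilReceiver: nil returns zero
    Class cls = object_getClass(self);                    // ldr r9, [r0]   (self->isa)
    IMP imp = class_getMethodImplementation(cls, _cmd);   // cache hit, or uncached lookup
    return ((id (*)(id, SEL))imp)(self, _cmd);            // bx r12         (jump to the IMP)
}
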
- STATIC_ENTRY objc_msgSend_uncached
-
-# Push stack frame
- stmfd sp!, {a1-a4,r7,lr}
- add r7, sp, #16
-
-# Load class and selector
- ldr a3, [a1, #ISA] /* class = receiver->isa */
- /* selector already in a2 */
- /* receiver already in a1 */
-
-# Do the lookup
- MI_CALL_EXTERNAL(__class_lookupMethodAndLoadCache3)
- MOVE ip, a1
-
-# Prep for forwarding, Pop stack frame and call imp
- teq v1, v1 /* set nonstret (eq) */
- ldmfd sp!, {a1-a4,r7,lr}
- bx ip
-
/********************************************************************
* id objc_msgSend_noarg(id self, SEL op)
*
- * On entry: a1 is the message receiver,
- * a2 is the selector
+ * On entry: r0 is the message receiver,
+ * r1 is the selector
********************************************************************/
ENTRY objc_msgSend_noarg
-# check whether receiver is nil
- teq a1, #0
- beq LMsgSendNilReceiver
-
-# load receiver's class for CacheLookup
- ldr a3, [a1, #ISA]
-
-# receiver is non-nil: search the cache
- CacheLookup a2, a3, LMsgSendNoArgCacheMiss
-
-# cache hit (imp in ip) and CacheLookup returns with nonstret (eq) set
- bx ip
-
-# cache miss: go search the method lists
-LMsgSendNoArgCacheMiss:
- b _objc_msgSend_uncached
-
-LMsgSendNoArgExit:
+ b _objc_msgSend
END_ENTRY objc_msgSend_noarg
/********************************************************************
- * struct_type objc_msgSend_stret(id self,
- * SEL op,
- * ...);
+ * void objc_msgSend_stret(void *st_addr, id self, SEL op, ...);
*
* objc_msgSend_stret is the struct-return form of msgSend.
- * The ABI calls for a1 to be used as the address of the structure
+ * The ABI calls for r0 to be used as the address of the structure
* being returned, with the parameters in the succeeding registers.
*
- * On entry: a1 is the address where the structure is returned,
- * a2 is the message receiver,
- * a3 is the selector
+ * On entry: r0 is the address where the structure is returned,
+ * r1 is the message receiver,
+ * r2 is the selector
********************************************************************/
ENTRY objc_msgSend_stret
-# check whether receiver is nil
- teq a2, #0
- it eq
- bxeq lr
-
-# save registers and load receiver's class for CacheLookup
- stmfd sp!, {a4,v1}
- ldr v1, [a2, #ISA]
+ MESSENGER_START
+
+ cbz r1, LNilReceiver_f
-# receiver is non-nil: search the cache
- CacheLookup a3, v1, LMsgSendStretCacheMiss
+ ldr r9, [r1] // r9 = self->isa
+ CacheLookup STRET
+ // calls IMP or LCacheMiss
-# cache hit (imp in ip) - prep for forwarding, restore registers and call
- tst v1, v1 /* set stret (ne); v1 is nonzero (triplet) */
- ldmfd sp!, {a4,v1}
- bx ip
+LCacheMiss:
+ MESSENGER_END_SLOW
+ ldr r9, [r1] // r9 = self->isa
+ b __objc_msgSend_stret_uncached
-# cache miss: go search the method lists
-LMsgSendStretCacheMiss:
- ldmfd sp!, {a4,v1}
- b _objc_msgSend_stret_uncached
+LNilReceiver:
+ MESSENGER_END_NIL
+ bx lr
LMsgSendStretExit:
END_ENTRY objc_msgSend_stret
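// ----------------------------------------------------------------------
// Editor's illustration of why a separate _stret entry exists: for a
// method whose struct result does not fit in registers, the ABI passes a
// hidden pointer to the result buffer as the first argument, which shifts
// self to r1 and _cmd to r2. BigStruct and -bigValue are hypothetical.
//
//   typedef struct { int a, b, c, d; } BigStruct;
//   // compiler-generated call, conceptually:
//   //   BigStruct tmp;
//   //   objc_msgSend_stret(&tmp, receiver, @selector(bigValue));
//   // versus the ordinary form:
//   //   id r = objc_msgSend(receiver, @selector(description));
// ----------------------------------------------------------------------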
- STATIC_ENTRY objc_msgSend_stret_uncached
-
-# Push stack frame
- stmfd sp!, {a1-a4,r7,lr}
- add r7, sp, #16
-
-# Load class and selector
- MOVE a1, a2 /* receiver */
- MOVE a2, a3 /* selector */
- ldr a3, [a1, #ISA] /* class = receiver->isa */
-
-# Do the lookup
- MI_CALL_EXTERNAL(__class_lookupMethodAndLoadCache3)
- MOVE ip, a1
-
-# Prep for forwarding, pop stack frame and call imp
- tst a1, a1 /* set stret (ne); a1 is nonzero (imp) */
-
- ldmfd sp!, {a1-a4,r7,lr}
- bx ip
-
-
/********************************************************************
- * id objc_msgSendSuper(struct objc_super *super,
- * SEL op,
- * ...)
+ * id objc_msgSendSuper(struct objc_super *super, SEL op, ...)
*
* struct objc_super {
- * id receiver
- * Class class
+ * id receiver;
+ * Class cls; // the class to search
* }
********************************************************************/
ENTRY objc_msgSendSuper
-
-# save registers and load super class for CacheLookup
- stmfd sp!, {a4,v1}
- ldr v1, [a1, #CLASS]
-
-# search the cache
- CacheLookup a2, v1, LMsgSendSuperCacheMiss
-
-# cache hit (imp in ip) and CacheLookup returns with nonstret (eq) set, restore registers and call
- ldmfd sp!, {a4,v1}
- ldr a1, [a1, #RECEIVER] @ fetch real receiver
- bx ip
-
-# cache miss: go search the method lists
-LMsgSendSuperCacheMiss:
- ldmfd sp!, {a4,v1}
- b _objc_msgSendSuper_uncached
-
+ MESSENGER_START
+
+ ldr r9, [r0, #CLASS] // r9 = struct super->class
+ CacheLookup SUPER
+ // calls IMP or LCacheMiss
+
+LCacheMiss:
+ MESSENGER_END_SLOW
+ ldr r9, [r0, #CLASS] // r9 = struct super->class
+ ldr r0, [r0, #RECEIVER] // load real receiver
+ b __objc_msgSend_uncached
+
LMsgSendSuperExit:
END_ENTRY objc_msgSendSuper
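// ----------------------------------------------------------------------
// Editor's sketch of the objc_super block a [super message] send passes
// in r0 (field layout matches the RECEIVER/CLASS offsets used above):
//
//   struct objc_super_sketch {
//       id receiver;   // the real receiver; becomes self for the IMP
//       Class cls;     // the class whose cache/method lists are searched
//   };
//   // conceptually:
//   //   struct objc_super_sketch s = { self, class_to_search };
//   //   objc_msgSendSuper(&s, _cmd, ...);
//   // The dispatcher looks up in s.cls but calls the IMP with s.receiver.
// ----------------------------------------------------------------------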
- STATIC_ENTRY objc_msgSendSuper_uncached
-
-# Push stack frame
- stmfd sp!, {a1-a4,r7,lr}
- add r7, sp, #16
-
-# Load class and selector
- ldr a3, [a1, #CLASS] /* class = super->class */
- /* selector already in a2 */
- ldr a1, [a1, #RECEIVER] /* receiver = super->receiver */
-
-# Do the lookup
- MI_CALL_EXTERNAL(__class_lookupMethodAndLoadCache3)
- MOVE ip, a1
-
-# Prep for forwarding, pop stack frame and call imp
- teq v1, v1 /* set nonstret (eq) */
- ldmfd sp!, {a1-a4,r7,lr}
- ldr a1, [a1, #RECEIVER] @ fetch real receiver
- bx ip
-
-
/********************************************************************
- * objc_msgSendSuper2
+ * id objc_msgSendSuper2(struct objc_super *super, SEL op, ...)
+ *
+ * struct objc_super {
+ * id receiver;
+ * Class cls; // SUBCLASS of the class to search
+ * }
********************************************************************/
ENTRY objc_msgSendSuper2
-
-# save registers and load super class for CacheLookup
- stmfd sp!, {a4,v1}
- ldr v1, [a1, #CLASS]
- ldr v1, [v1, #SUPERCLASS]
-
-# search the cache
- CacheLookup a2, v1, LMsgSendSuper2CacheMiss
-
-# cache hit (imp in ip) and CacheLookup returns with nonstret (eq) set, restore registers and call
- ldmfd sp!, {a4,v1}
- ldr a1, [a1, #RECEIVER] @ fetch real receiver
- bx ip
-
-# cache miss: go search the method lists
-LMsgSendSuper2CacheMiss:
- ldmfd sp!, {a4,v1}
- b _objc_msgSendSuper2_uncached
-
+ MESSENGER_START
+
+ ldr r9, [r0, #CLASS] // class = struct super->class
+ ldr r9, [r9, #SUPERCLASS] // class = class->superclass
+ CacheLookup SUPER2
+ // calls IMP or LCacheMiss
+
+LCacheMiss:
+ MESSENGER_END_SLOW
+ ldr r9, [r0, #CLASS] // class = struct super->class
+ ldr r9, [r9, #SUPERCLASS] // class = class->superclass
+ ldr r0, [r0, #RECEIVER] // load real receiver
+ b __objc_msgSend_uncached
+
LMsgSendSuper2Exit:
END_ENTRY objc_msgSendSuper2
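// ----------------------------------------------------------------------
// Editor's note: objc_msgSendSuper2 differs only in what the cls field
// holds. The caller stores the *current* class and the dispatcher chases
// one superclass link itself (ldr r9, [r9, #SUPERCLASS]), roughly:
//
//   // Class search = class_getSuperclass(sup->cls);   // done in asm above
//   // imp = lookup(search, _cmd);
//   // imp(sup->receiver, _cmd, ...);
//
// (Illustrative summary; `sup` and `lookup` are hypothetical names.)
// ----------------------------------------------------------------------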
- STATIC_ENTRY objc_msgSendSuper2_uncached
-
-# Push stack frame
- stmfd sp!, {a1-a4,r7,lr}
- add r7, sp, #16
-
-# Load class and selector
- ldr a3, [a1, #CLASS] /* class = super->class */
- ldr a3, [a3, #SUPERCLASS] /* class = class->superclass */
- /* selector already in a2 */
- ldr a1, [a1, #RECEIVER] /* receiver = super->receiver */
-
-# Do the lookup
- MI_CALL_EXTERNAL(__class_lookupMethodAndLoadCache3)
- MOVE ip, a1
-
-# Prep for forwarding, pop stack frame and call imp
- teq v1, v1 /* set nonstret (eq) */
- ldmfd sp!, {a1-a4,r7,lr}
- ldr a1, [a1, #RECEIVER] @ fetch real receiver
- bx ip
-
-
/********************************************************************
- * struct_type objc_msgSendSuper_stret(objc_super *super,
- * SEL op,
- * ...)
- *
- * struct objc_super {
- * id receiver
- * Class class
- * }
- *
+ * void objc_msgSendSuper_stret(void *st_addr, struct objc_super *super, SEL op, ...);
*
* objc_msgSendSuper_stret is the struct-return form of msgSendSuper.
- * The ABI calls for a1 to be used as the address of the structure
+ * The ABI calls for r0 to be used as the address of the structure
* being returned, with the parameters in the succeeding registers.
*
- * On entry: a1 is the address to which to copy the returned structure,
- * a2 is the address of the objc_super structure,
- * a3 is the selector
+ * On entry: r0 is the address where the structure is returned,
+ * r1 is the address of the objc_super structure,
+ * r2 is the selector
********************************************************************/
ENTRY objc_msgSendSuper_stret
+ MESSENGER_START
+
+ ldr r9, [r1, #CLASS] // r9 = struct super->class
+ CacheLookup SUPER_STRET
+ // calls IMP or LCacheMiss
-# save registers and load super class for CacheLookup
- stmfd sp!, {a4,v1}
- ldr v1, [a2, #CLASS]
-
-# search the cache
- CacheLookup a3, v1, LMsgSendSuperStretCacheMiss
-
-# cache hit (imp in ip) - prep for forwarding, restore registers and call
- tst v1, v1 /* set stret (ne); v1 is nonzero (triplet) */
- ldmfd sp!, {a4,v1}
- ldr a2, [a2, #RECEIVER] @ fetch real receiver
- bx ip
-
-# cache miss: go search the method lists
-LMsgSendSuperStretCacheMiss:
- ldmfd sp!, {a4,v1}
- b _objc_msgSendSuper_stret_uncached
+LCacheMiss:
+ MESSENGER_END_SLOW
+ ldr r9, [r1, #CLASS] // r9 = struct super->class
+ ldr r1, [r1, #RECEIVER] // load real receiver
+ b __objc_msgSend_stret_uncached
LMsgSendSuperStretExit:
END_ENTRY objc_msgSendSuper_stret
- STATIC_ENTRY objc_msgSendSuper_stret_uncached
-
-# Push stack frame
- stmfd sp!, {a1-a4,r7,lr}
- add r7, sp, #16
+/********************************************************************
+ * id objc_msgSendSuper2_stret
+ ********************************************************************/
-# Load class and selector
- MOVE a1, a2 /* struct super */
- MOVE a2, a3 /* selector */
- ldr a3, [a1, #CLASS] /* class = super->class */
- ldr a1, [a1, #RECEIVER] /* receiver = super->receiver */
+ ENTRY objc_msgSendSuper2_stret
+ MESSENGER_START
+
+ ldr r9, [r1, #CLASS] // class = struct super->class
+ ldr r9, [r9, #SUPERCLASS] // class = class->superclass
+ CacheLookup SUPER2_STRET
+
+LCacheMiss:
+ MESSENGER_END_SLOW
+ ldr r9, [r1, #CLASS] // class = struct super->class
+ ldr r9, [r9, #SUPERCLASS] // class = class->superclass
+ ldr r1, [r1, #RECEIVER] // load real receiver
+ b __objc_msgSend_stret_uncached
+
+LMsgSendSuper2StretExit:
+ END_ENTRY objc_msgSendSuper2_stret
-# Do the lookup
- MI_CALL_EXTERNAL(__class_lookupMethodAndLoadCache3)
- MOVE ip, a1
-# Prep for forwarding, pop stack frame and call imp
- tst v1, v1 /* set stret (ne); v1 is nonzero (triplet) */
+/********************************************************************
+ *
+ * _objc_msgSend_uncached_impcache
+ * Used to erase method cache entries in-place by
+ * bouncing them to the uncached lookup.
+ *
+ * _objc_msgSend_uncached
+ * _objc_msgSend_stret_uncached
+ * The uncached lookup.
+ *
+ ********************************************************************/
- ldmfd sp!, {a1-a4,r7,lr}
- ldr a2, [a2, #RECEIVER] @ fetch real receiver
- bx ip
+ STATIC_ENTRY _objc_msgSend_uncached_impcache
+ // Method cache version
+
+ // THIS IS NOT A CALLABLE C FUNCTION
+ // Out-of-band Z is 0 (EQ) for normal, 1 (NE) for stret and/or super
+ // Out-of-band r9 is 1 for stret, cls for super, cls|1 for super_stret
+ // Note objc_msgForward_impcache uses the same parameters
+ MESSENGER_START
+ nop
+ MESSENGER_END_SLOW
-/********************************************************************
- * id objc_msgSendSuper2_stret
- ********************************************************************/
+ ite eq
+ ldreq r9, [r0] // normal: r9 = class = self->isa
+ tstne r9, #1 // low bit clear?
+ beq __objc_msgSend_uncached // super: r9 is already the class
+ // stret or super_stret
+ eors r9, r9, #1 // clear low bit
+ it eq // r9 now zero?
+ ldreq r9, [r1] // stret: r9 = class = self->isa
+ // super_stret: r9 is already the class
+ b __objc_msgSend_stret_uncached
- ENTRY objc_msgSendSuper2_stret
+ END_ENTRY _objc_msgSend_uncached_impcache
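// ----------------------------------------------------------------------
// Editor's sketch of the out-of-band decoding performed above, written
// as pseudo-C (Z stands for the condition flag left by the caller):
//
//   // if (Z == EQ)            { r9 = self->isa;   goto uncached; }      // normal
//   // else if ((r9 & 1) == 0) { /* r9 is the class */ goto uncached; }  // super
//   // else {
//   //     r9 ^= 1;                                // eors: clear low bit
//   //     if (r9 == 0) r9 = stret_self->isa;      // stret (r9 was 1)
//   //     /* else super_stret: r9 is now the class */
//   //     goto stret_uncached;
//   // }
// ----------------------------------------------------------------------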
-# save registers and load super class for CacheLookup
- stmfd sp!, {a4,v1}
- ldr v1, [a2, #CLASS]
- ldr v1, [v1, #SUPERCLASS]
-# search the cache
- CacheLookup a3, v1, LMsgSendSuper2StretCacheMiss
+ STATIC_ENTRY _objc_msgSend_uncached
-# cache hit (imp in ip) - prep for forwarding, restore registers and call
- tst v1, v1 /* set stret (ne); v1 is nonzero (triplet) */
- ldmfd sp!, {a4,v1}
- ldr a2, [a2, #RECEIVER] @ fetch real receiver
- bx ip
+ // THIS IS NOT A CALLABLE C FUNCTION
+ // Out-of-band r9 is the class to search
-# cache miss: go search the method lists
-LMsgSendSuper2StretCacheMiss:
- ldmfd sp!, {a4,v1}
- b _objc_msgSendSuper2_stret_uncached
+ stmfd sp!, {r0-r3,r7,lr}
+ add r7, sp, #16
-LMsgSendSuper2StretExit:
- END_ENTRY objc_msgSendSuper2_stret
+ // receiver already in r0
+ // selector already in r1
+ mov r2, r9 // class to search
+ MI_CALL_EXTERNAL(__class_lookupMethodAndLoadCache3)
+ mov r12, r0 // r12 = IMP
- STATIC_ENTRY objc_msgSendSuper2_stret_uncached
+ teq r12, r12 // set eq for nonstret forwarding
+ ldmfd sp!, {r0-r3,r7,lr}
+ bx r12
-# Push stack frame
- stmfd sp!, {a1-a4,r7,lr}
+ END_ENTRY _objc_msgSend_uncached
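// ----------------------------------------------------------------------
// Editor's note: the C prototype below is assumed from the register
// setup above (r0 = receiver, r1 = selector, r2 = class); it is an
// internal runtime routine, not public API.
//
//   // IMP _class_lookupMethodAndLoadCache3(id receiver, SEL sel, Class cls);
//
// The trampoline preserves r0-r3 around the call, then tail-calls the
// returned IMP with the original message arguments intact.
// ----------------------------------------------------------------------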
+
+
+ STATIC_ENTRY _objc_msgSend_stret_uncached
+
+ // THIS IS NOT A CALLABLE C FUNCTION
+ // Out-of-band r9 is the class to search
+
+ stmfd sp!, {r0-r3,r7,lr}
add r7, sp, #16
-# Load class and selector
- MOVE a1, a2 /* struct super */
- MOVE a2, a3 /* selector */
- ldr a3, [a1, #CLASS] /* class = super->class */
- ldr a3, [a3, #SUPERCLASS] /* class = class->superclass */
- ldr a1, [a1, #RECEIVER] /* receiver = super->receiver */
+ mov r0, r1 // receiver
+ mov r1, r2 // selector
+ mov r2, r9 // class to search
-# Do the lookup
MI_CALL_EXTERNAL(__class_lookupMethodAndLoadCache3)
- MOVE ip, a1
+ mov r12, r0 // r12 = IMP
-# Prep for forwarding, pop stack frame and call imp
- tst v1, v1 /* set stret (ne); v1 is nonzero (triplet) */
+ tst r12, r12 // set ne for stret forwarding (r12!=0)
+ ldmfd sp!, {r0-r3,r7,lr}
+ bx r12
- ldmfd sp!, {a1-a4,r7,lr}
- ldr a2, [a2, #RECEIVER] @ fetch real receiver
- bx ip
-
+ END_ENTRY _objc_msgSend_stret_uncached
+
/********************************************************************
*
* id _objc_msgForward(id self,
* d5
* d6
* d7
- * a1
- * a2
- * a3
- * a4
+ * r0
+ * r1
+ * r2
+ * r3
* stack args...
*
* typedef struct objc_sendv_margs {
*
********************************************************************/
-.data
+
+ .cstring
+LUnkSelStr:
+ .ascii "Does not recognize selector %s\0"
+
.private_extern _FwdSel
+ .data
+ .align 2
_FwdSel:
.long 0
.private_extern __objc_forward_handler
+ .data
+ .align 2
__objc_forward_handler:
.long 0
.private_extern __objc_forward_stret_handler
+ .data
+ .align 2
__objc_forward_stret_handler:
.long 0
- STATIC_ENTRY _objc_msgForward_internal
+ STATIC_ENTRY _objc_msgForward_impcache
// Method cache version
// THIS IS NOT A CALLABLE C FUNCTION
- // Out-of-band condition register is NE for stret, EQ otherwise.
+ // Out-of-band Z is 0 (EQ) for normal, 1 (NE) for stret and/or super
+ // Out-of-band r9 is 1 for stret, cls for super, cls|1 for super_stret
+ // Note _objc_msgSend_uncached_impcache uses the same parameters
+
+ MESSENGER_START
+ nop
+ MESSENGER_END_SLOW
- bne __objc_msgForward_stret
- b __objc_msgForward
+ it ne
+ tstne r9, #1
+ beq __objc_msgForward
+ b __objc_msgForward_stret
- END_ENTRY _objc_msgForward_internal
+ END_ENTRY _objc_msgForward_impcache
ENTRY _objc_msgForward
// Non-stret version
-# check for user-installed forwarding handler
- MI_GET_ADDRESS(ip, __objc_forward_handler)
- ldr ip, [ip]
- teq ip, #0
+// check for user-installed forwarding handler
+ MI_GET_ADDRESS(r12, __objc_forward_handler)
+ ldr r12, [r12]
+ teq r12, #0
it ne
- bxne ip
+ bxne r12
-# build marg_list
- stmfd sp!, {a1-a4} @ push args to marg_list
+// build marg_list
+ stmfd sp!, {r0-r3} // push args to marg_list
-# build forward::'s parameter list (self, forward::, original sel, marg_list)
- # a1 already is self
- MOVE a3, a2 @ original sel
- MI_GET_ADDRESS(a2, _FwdSel) @ "forward::"
- ldr a2, [a2]
- MOVE a4, sp @ marg_list
+// build forward::'s parameter list (self, forward::, original sel, marg_list)
+ // r0 already is self
+ mov r2, r1 // original sel
+ MI_GET_ADDRESS(r1, _FwdSel) // "forward::"
+ ldr r1, [r1]
+ mov r3, sp // marg_list
-# check for forwarding of forward:: itself
- teq a2, a3
- beq LMsgForwardError @ original sel == forward:: - give up
+// check for forwarding of forward:: itself
+ teq r1, r2
+ beq LMsgForwardError // original sel == forward:: - give up
-# push stack frame
- str lr, [sp, #-(2*4)]! @ save lr and align stack
+// push stack frame
+ str lr, [sp, #-(2*4)]! // save lr and align stack
-# send it
+// send it
bl _objc_msgSend
-# pop stack frame and return
+// pop stack frame and return
ldr lr, [sp]
- add sp, sp, #(4 + 4 + 4*4) @ skip lr, pad, a1..a4
+ add sp, sp, #(4 + 4 + 4*4) // skip lr, pad, r0..r3
bx lr
+LMsgForwardError:
+ // currently r0=self, r1=forward::, r2 = original sel, r3 = marg_list
+ // call __objc_error(self, format, original sel)
+ MI_GET_ADDRESS(r1, LUnkSelStr)
+ MI_CALL_EXTERNAL(___objc_error)
+
END_ENTRY _objc_msgForward
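// ----------------------------------------------------------------------
// Editor's sketch of the legacy forwarding path above, in rough C.
// marg_list_sketch and the helper shapes are assumptions; FwdSel is the
// "forward::" selector loaded from _FwdSel.
//
//   // typedef struct { intptr_t regs[4]; } marg_list_sketch;   // r0-r3 copy
//   // id msgForward_sketch(id self, SEL sel, marg_list_sketch *margs)
//   // {
//   //     if (forward_handler)                       // __objc_forward_handler
//   //         /* tail-jump to the handler with the original arguments */ ;
//   //     if (sel == FwdSel)
//   //         __objc_error(self, "Does not recognize selector %s", sel);
//   //     return objc_msgSend(self, FwdSel, sel, margs);  // [self forward:sel :margs]
//   // }
// ----------------------------------------------------------------------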
ENTRY _objc_msgForward_stret
// Struct-return version
-# check for user-installed forwarding handler
- MI_GET_ADDRESS(ip, __objc_forward_stret_handler)
- ldr ip, [ip]
- teq ip, #0
+// check for user-installed forwarding handler
+ MI_GET_ADDRESS(r12, __objc_forward_stret_handler)
+ ldr r12, [r12]
+ teq r12, #0
it ne
- bxne ip
+ bxne r12
-# build marg_list
- stmfd sp!, {a1-a4} @ push args to marg_list
+// build marg_list
+ stmfd sp!, {r0-r3} // push args to marg_list
-# build forward::'s parameter list (self, forward::, original sel, marg_list)
- MOVE a1, a2 @ self
- MI_GET_ADDRESS(a2, _FwdSel) @ "forward::"
- ldr a2, [a2]
- # a3 is already original sel
- MOVE a4, sp @ marg_list
+// build forward::'s parameter list (self, forward::, original sel, marg_list)
+ mov r0, r1 // self
+ MI_GET_ADDRESS(r1, _FwdSel) // "forward::"
+ ldr r1, [r1]
+ // r2 is already original sel
+ mov r3, sp // marg_list
-# check for forwarding of forward:: itself
- teq a2, a3
- beq LMsgForwardError @ original sel == forward:: - give up
+// check for forwarding of forward:: itself
+ teq r1, r2
+ beq LMsgForwardStretError // original sel == forward:: - give up
-# push stack frame
- str lr, [sp, #-(2*4)]! @ save lr and align stack
+// push stack frame
+ str lr, [sp, #-(2*4)]! // save lr and align stack
-# send it
+// send it
bl _objc_msgSend
-# pop stack frame and return
+// pop stack frame and return
ldr lr, [sp]
- add sp, sp, #(4 + 4 + 4*4) @ skip lr, pad, a1..a4
+ add sp, sp, #(4 + 4 + 4*4) // skip lr, pad, r0..r3
bx lr
-
- END_ENTRY _objc_msgForward_stret
-LMsgForwardError:
- # currently a1=self, a2=forward::, a3 = original sel, a4 = marg_list
- # call __objc_error(self, format, original sel)
- add a2, pc, #4 @ pc bias is 8 bytes
+LMsgForwardStretError:
+ // currently r0=self, r1=forward::, r2 = original sel, r3 = marg_list
+ // call __objc_error(self, format, original sel)
+ MI_GET_ADDRESS(r1, LUnkSelStr)
MI_CALL_EXTERNAL(___objc_error)
- .ascii "Does not recognize selector %s\0"
+
+ END_ENTRY _objc_msgForward_stret
ENTRY objc_msgSend_debug
ENTRY method_invoke
- # a2 is method triplet instead of SEL
- ldr ip, [a2, #METHOD_IMP]
- ldr a2, [a2, #METHOD_NAME]
- bx ip
+ // r1 is method triplet instead of SEL
+ ldr r12, [r1, #METHOD_IMP]
+ ldr r1, [r1, #METHOD_NAME]
+ bx r12
END_ENTRY method_invoke
ENTRY method_invoke_stret
- # a3 is method triplet instead of SEL
- ldr ip, [a3, #METHOD_IMP]
- ldr a3, [a3, #METHOD_NAME]
- bx ip
+ // r2 is method triplet instead of SEL
+ ldr r12, [r2, #METHOD_IMP]
+ ldr r2, [r2, #METHOD_NAME]
+ bx r12
END_ENTRY method_invoke_stret
STATIC_ENTRY _objc_ignored_method
- # self is already in a0
+ // self is already in r0
bx lr
END_ENTRY _objc_ignored_method
+
+.section __DATA,__objc_msg_break
+.long 0
+.long 0
+
#endif
.long 0
+/********************************************************************
+* List every exit insn from every messenger for debugger use.
+* Format:
+* (
+* 1 word instruction's address
+* 1 word type (ENTER or FAST_EXIT or SLOW_EXIT or NIL_EXIT)
+* )
+* 1 word zero
+*
+* ENTER is the start of a dispatcher
+* FAST_EXIT is method dispatch
+* SLOW_EXIT is uncached method lookup
+* NIL_EXIT is returning zero from a message sent to nil
+* These must match objc-gdb.h.
+********************************************************************/
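// ----------------------------------------------------------------------
// Editor's sketch: one entry of the table described above, as a debugger
// might model it (struct and field names are assumptions; the real
// declarations live in objc-gdb.h):
//
//   // struct messenger_breakpoint_sketch {
//   //     uintptr_t insn_addr;   // address of the messenger instruction
//   //     uintptr_t kind;        // ENTER, FAST_EXIT, SLOW_EXIT or NIL_EXIT
//   // };
//   // The array is terminated by a single zero word.
// ----------------------------------------------------------------------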
+
+#define ENTER 1
+#define FAST_EXIT 2
+#define SLOW_EXIT 3
+#define NIL_EXIT 4
+
+.section __DATA,__objc_msg_break
+.globl _gdb_objc_messenger_breakpoints
+_gdb_objc_messenger_breakpoints:
+// contents populated by the macros below
+
+.macro MESSENGER_START
+4:
+ .section __DATA,__objc_msg_break
+ .long 4b
+ .long ENTER
+ .text
+.endmacro
+.macro MESSENGER_END_FAST
+4:
+ .section __DATA,__objc_msg_break
+ .long 4b
+ .long FAST_EXIT
+ .text
+.endmacro
+.macro MESSENGER_END_SLOW
+4:
+ .section __DATA,__objc_msg_break
+ .long 4b
+ .long SLOW_EXIT
+ .text
+.endmacro
+.macro MESSENGER_END_NIL
+4:
+ .section __DATA,__objc_msg_break
+ .long 4b
+ .long NIL_EXIT
+ .text
+.endmacro
+
+
/********************************************************************
*
* Common offsets.
/////////////////////////////////////////////////////////////////////
.macro MethodTableLookup
+ MESSENGER_END_SLOW
// stack is already aligned
pushl %eax // class
pushl %ecx // selector
* If not found, returns NULL.
*
* NOTE: _cache_getMethod never returns any cache entry whose implementation
- * is _objc_msgForward_internal. It returns 1 instead. This prevents thread-
+ * is _objc_msgForward_impcache. It returns 1 instead. This prevents thread-
* safety and memory management bugs in _class_lookupMethodAndLoadCache.
* See _class_lookupMethodAndLoadCache for details.
*
- * _objc_msgForward_internal is passed as a parameter because it's more
+ * _objc_msgForward_impcache is passed as a parameter because it's more
* efficient to do the (PIC) lookup once in the caller than repeatedly here.
********************************************************************/
CacheLookup WORD_RETURN, CACHE_GET, LGetMethodMiss
// cache hit, method triplet in %eax
- movl first_arg(%esp), %ecx // check for _objc_msgForward_internal
- cmpl method_imp(%eax), %ecx // if (imp==_objc_msgForward_internal)
+ movl first_arg(%esp), %ecx // check for _objc_msgForward_impcache
+ cmpl method_imp(%eax), %ecx // if (imp==_objc_msgForward_impcache)
je 1f // return (Method)1
ret // else return method triplet address
1: movl $1, %eax
********************************************************************/
ENTRY _objc_msgSend
+ MESSENGER_START
CALL_MCOUNTER
// load receiver and selector
movl isa(%eax), %edx // class = self->isa
CacheLookup WORD_RETURN, MSG_SEND, LMsgSendCacheMiss
xor %edx, %edx // set nonstret for msgForward_internal
+ MESSENGER_END_FAST
jmp *%eax
// cache miss: go search the method lists
// %eax is already zero
movl $0,%edx
LMsgSendDone:
+ MESSENGER_END_NIL
ret
// guaranteed non-nil entry point (disabled for now)
********************************************************************/
ENTRY _objc_msgSendSuper
+ MESSENGER_START
CALL_MCOUNTER
// load selector and class to search
// search the cache (class in %edx)
CacheLookup WORD_RETURN, MSG_SENDSUPER, LMsgSendSuperCacheMiss
xor %edx, %edx // set nonstret for msgForward_internal
+ MESSENGER_END_FAST
jmp *%eax // goto *imp
// cache miss: go search the method lists
LMsgSendSuperIgnored:
movl super(%esp), %eax
movl receiver(%eax), %eax
+ MESSENGER_END_NIL
ret
LMsgSendSuperExit:
********************************************************************/
ENTRY _objc_msgSend_fpret
+ MESSENGER_START
CALL_MCOUNTER
// load receiver and selector
movl isa(%eax), %edx // class = self->isa
CacheLookup WORD_RETURN, MSG_SEND, LMsgSendFpretCacheMiss
xor %edx, %edx // set nonstret for msgForward_internal
+ MESSENGER_END_FAST
jmp *%eax // goto *imp
// cache miss: go search the method lists
LMsgSendFpretReturnZero:
fldz
LMsgSendFpretDone:
+ MESSENGER_END_NIL
ret
LMsgSendFpretExit:
********************************************************************/
ENTRY _objc_msgSend_stret
+ MESSENGER_START
CALL_MCOUNTER
// load receiver and selector
movl isa(%eax), %edx // class = self->isa
CacheLookup STRUCT_RETURN, MSG_SEND, LMsgSendStretCacheMiss
movl $1, %edx // set stret for objc_msgForward
+ MESSENGER_END_FAST
jmp *%eax // goto *imp
// cache miss: go search the method lists
movl %eax, self_stret(%esp) // send to new receiver
jmp LMsgSendStretReceiverOk // receiver must be in %eax
LMsgSendStretDone:
+ MESSENGER_END_NIL
ret $4 // pop struct return address (#2995932)
// guaranteed non-nil entry point (disabled for now)
********************************************************************/
ENTRY _objc_msgSendSuper_stret
+ MESSENGER_START
CALL_MCOUNTER
// load selector and class to search
// search the cache (class in %edx)
CacheLookup STRUCT_RETURN, MSG_SENDSUPER, LMsgSendSuperStretCacheMiss
movl $1, %edx // set stret for objc_msgForward
+ MESSENGER_END_FAST
jmp *%eax // goto *imp
// cache miss: go search the method lists
.private_extern __objc_forward_stret_handler
__objc_forward_stret_handler: .long 0
- STATIC_ENTRY __objc_msgForward_internal
+ STATIC_ENTRY __objc_msgForward_impcache
// Method cache version
// THIS IS NOT A CALLABLE C FUNCTION
// Out-of-band register %edx is nonzero for stret, zero otherwise
+
+ MESSENGER_START
+ nop
+ MESSENGER_END_SLOW
// Check return type (stret or not)
testl %edx, %edx
jnz __objc_msgForward_stret
jmp __objc_msgForward
- END_ENTRY _objc_msgForward_internal
+ END_ENTRY _objc_msgForward_impcache
ENTRY __objc_msgForward
END_ENTRY __objc_ignored_method
+
+.section __DATA,__objc_msg_break
+.long 0
+.long 0
+
#endif
#include <TargetConditionals.h>
#if defined(__i386__) && TARGET_IPHONE_SIMULATOR
-#define __OBJC2__ 1
-
#include "objc-config.h"
.data
-// Substitute receiver for messages sent to nil (usually also nil)
-// id _objc_nilReceiver
-.align 4
-.private_extern __objc_nilReceiver
-__objc_nilReceiver:
- .long 0
-
// _objc_entryPoints and _objc_exitPoints are used by objc
// to get the critical regions for which method caches
// cannot be garbage collected.
.private_extern _objc_entryPoints
_objc_entryPoints:
- .long __cache_getImp
- .long __cache_getMethod
+ .long _cache_getImp
.long _objc_msgSend
.long _objc_msgSend_fpret
.long _objc_msgSend_stret
.private_extern _objc_exitPoints
_objc_exitPoints:
.long LGetImpExit
- .long LGetMethodExit
.long LMsgSendExit
.long LMsgSendFpretExit
.long LMsgSendStretExit
.long 0
+/********************************************************************
+* List every exit insn from every messenger for debugger use.
+* Format:
+* (
+* 1 word instruction's address
+* 1 word type (ENTER or FAST_EXIT or SLOW_EXIT or NIL_EXIT)
+* )
+* 1 word zero
+*
+* ENTER is the start of a dispatcher
+* FAST_EXIT is method dispatch
+* SLOW_EXIT is uncached method lookup
+* NIL_EXIT is returning zero from a message sent to nil
+* These must match objc-gdb.h.
+********************************************************************/
+
+#define ENTER 1
+#define FAST_EXIT 2
+#define SLOW_EXIT 3
+#define NIL_EXIT 4
+
+.section __DATA,__objc_msg_break
+.globl _gdb_objc_messenger_breakpoints
+_gdb_objc_messenger_breakpoints:
+// contents populated by the macros below
+
+.macro MESSENGER_START
+4:
+ .section __DATA,__objc_msg_break
+ .long 4b
+ .long ENTER
+ .text
+.endmacro
+.macro MESSENGER_END_FAST
+4:
+ .section __DATA,__objc_msg_break
+ .long 4b
+ .long FAST_EXIT
+ .text
+.endmacro
+.macro MESSENGER_END_SLOW
+4:
+ .section __DATA,__objc_msg_break
+ .long 4b
+ .long SLOW_EXIT
+ .text
+.endmacro
+.macro MESSENGER_END_NIL
+4:
+ .section __DATA,__objc_msg_break
+ .long 4b
+ .long NIL_EXIT
+ .text
+.endmacro
+
+
+/********************************************************************
+ * Names for relative labels
+ * DO NOT USE THESE LABELS ELSEWHERE
+ * Reserved labels: 5: 6: 7: 8: 9:
+ ********************************************************************/
+#define LCacheMiss 5
+#define LCacheMiss_f 5f
+#define LCacheMiss_b 5b
+#define LNilTestDone 6
+#define LNilTestDone_f 6f
+#define LNilTestDone_b 6b
+#define LNilTestSlow 7
+#define LNilTestSlow_f 7f
+#define LNilTestSlow_b 7b
+#define LGetIsaDone 8
+#define LGetIsaDone_f 8f
+#define LGetIsaDone_b 8b
+#define LGetIsaSlow 9
+#define LGetIsaSlow_f 9f
+#define LGetIsaSlow_b 9b
+
+/********************************************************************
+ * Macro parameters
+ ********************************************************************/
+
+#define NORMAL 0
+#define FPRET 1
+#define GETIMP 3
+#define STRET 4
+#define SUPER 5
+#define SUPER_STRET 6
+
+
/********************************************************************
*
* Structure definitions.
********************************************************************/
// Offsets from %esp
- self = 4
- super = 4
- selector = 8
- marg_size = 12
- marg_list = 16
- first_arg = 12
+#define self 4
+#define super 4
+#define selector 8
+#define marg_size 12
+#define marg_list 16
+#define first_arg 12
- struct_addr = 4
+#define struct_addr 4
- self_stret = 8
- super_stret = 8
- selector_stret = 12
- marg_size_stret = 16
- marg_list_stret = 20
+#define self_stret 8
+#define super_stret 8
+#define selector_stret 12
+#define marg_size_stret 16
+#define marg_list_stret 20
// objc_super parameter to sendSuper
- receiver = 0
- class = 4
+#define receiver 0
+#define class 4
// Selected field offsets in class structure
- isa = 0
- superclass = 4
-#if __OBJC2__
- cache = 8
-#else
- cache = 32
-#endif
+#define isa 0
+#define superclass 4
// Method descriptor
- method_name = 0
- method_imp = 8
-
-// Cache header
- mask = 0
- occupied = 4
- buckets = 8 // variable length array
+#define method_name 0
+#define method_imp 8
//////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////
//
-//
-// CacheLookup WORD_RETURN | STRUCT_RETURN, MSG_SEND | MSG_SENDSUPER | MSG_SENDSUPER2 | CACHE_GET, cacheMissLabel
+// CacheLookup return-type
//
// Locate the implementation for a selector in a class method cache.
//
-// Takes: WORD_RETURN (first parameter is at sp+4)
-// STRUCT_RETURN (struct address is at sp+4, first parameter at sp+8)
-// MSG_SEND (first parameter is receiver)
-// MSG_SENDSUPER[2] (first parameter is address of objc_super structure)
-// CACHE_GET (first parameter is class; return method triplet)
-// selector in %ecx
-// class to search in %edx
+// Takes:
+// $0 = NORMAL, FPRET, STRET, SUPER, SUPER_STRET, GETIMP
+// ecx = selector to search for
+// edx = class to search
//
-// cacheMissLabel = label to branch to iff method is not cached
+// On exit: ecx clobbered
+// (found) calls or returns IMP in eax, eq/ne set for forwarding
+// (not found) jumps to LCacheMiss, class still in edx
//
-// On exit: (found) MSG_SEND and MSG_SENDSUPER[2]: return imp in eax
-// (found) CACHE_GET: return method triplet in eax
-// (not found) jumps to cacheMissLabel
-//
/////////////////////////////////////////////////////////////////////
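// ----------------------------------------------------------------------
// Editor's sketch of the probe loop CacheLookup performs below. The
// bucket layout (sel at +0, imp at +4, 8-byte stride) is inferred from
// the offsets used in the macro; names are assumptions.
//
//   // typedef struct { SEL sel; IMP imp; } bucket_sketch;
//   // IMP probe(bucket_sketch *buckets, unsigned mask, SEL sel) {
//   //     bucket_sketch *b = &buckets[(uintptr_t)sel & mask];
//   //     for (;;) {
//   //         if (b->sel == sel) return b->imp;           // cache hit
//   //         if (b->sel == 0)   return NULL;             // end: cache miss
//   //         if ((uintptr_t)b->sel == 1)                 // wrap marker:
//   //             b = (bucket_sketch *)b->imp;            //   imp holds &buckets[0]
//   //         else
//   //             b++;                                    // next bucket
//   //     }
//   // }
// ----------------------------------------------------------------------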
-
-// Values to specify to method lookup macros whether the return type of
-// the method is word or structure.
-WORD_RETURN = 0
-STRUCT_RETURN = 1
-
-// Values to specify to method lookup macros whether the first argument
-// is an object/class reference or a 'objc_super' structure.
-MSG_SEND = 0 // first argument is receiver, search the isa
-MSG_SENDSUPER = 1 // first argument is objc_super, search the class
-MSG_SENDSUPER2 = 2 // first argument is objc_super, search the class
-CACHE_GET = 3 // first argument is class, search that class
-
-.macro CacheLookup
-
-// load variables and save caller registers.
-
- pushl %edi // save scratch register
- movl cache(%edx), %edi // cache = class->cache
- pushl %esi // save scratch register
-
- movl mask(%edi), %esi // mask = cache->mask
- movl %ecx, %edx // index = selector
- shrl $$2, %edx // index = selector >> 2
-
-// search the receiver's cache
-// ecx = selector
-// edi = cache
-// esi = mask
-// edx = index
-// eax = method (soon)
-LMsgSendProbeCache_$0_$1_$2:
- andl %esi, %edx // index &= mask
- movl buckets(%edi, %edx, 4), %eax // meth = cache->buckets[index]
-
- testl %eax, %eax // check for end of bucket
- je LMsgSendCacheMiss_$0_$1_$2 // go to cache miss code
- cmpl method_name(%eax), %ecx // check for method name match
- je LMsgSendCacheHit_$0_$1_$2 // go handle cache hit
- addl $$1, %edx // bump index ...
- jmp LMsgSendProbeCache_$0_$1_$2 // ... and loop
-
-// not found in cache: restore state and go to callers handler
-LMsgSendCacheMiss_$0_$1_$2:
-
-.if $0 == WORD_RETURN // Regular word return
-.if $1 == MSG_SEND // MSG_SEND
- popl %esi // restore callers register
- popl %edi // restore callers register
- movl self(%esp), %edx // get messaged object
- movl isa(%edx), %eax // get objects class
-.elseif $1 == MSG_SENDSUPER || $1 == MSG_SENDSUPER2 // MSG_SENDSUPER[2]
+.macro CacheHit
+
+ // CacheHit must always be preceded by a not-taken `jne` instruction
+ // in case the imp is _objc_msgForward_impcache.
+
+.if $0 == GETIMP
+ movl 4(%eax), %eax // return imp
+ call 4f
+4: pop %edx
+ leal __objc_msgSend_uncached_impcache-4b(%edx), %edx
+ cmpl %edx, %eax
+ jne 4f
+ xor %eax, %eax // don't return msgSend_uncached
+4: ret
+.elseif $0 == NORMAL || $0 == FPRET
+ // eq already set for forwarding by `jne`
+ MESSENGER_END_FAST
+ jmp *4(%eax) // call imp
+.elseif $0 == STRET
+ test %eax, %eax // set ne for stret forwarding
+ MESSENGER_END_FAST
+ jmp *4(%eax) // call imp
+.elseif $0 == SUPER
// replace "super" arg with "receiver"
- movl super+8(%esp), %edi // get super structure
- movl receiver(%edi), %edx // get messaged object
- movl %edx, super+8(%esp) // make it the first argument
- movl class(%edi), %eax // get messaged class
- .if $1 == MSG_SENDSUPER2
- movl superclass(%eax), %eax // get messaged class
- .endif
- popl %esi // restore callers register
- popl %edi // restore callers register
-.else // CACHE_GET
- popl %esi // restore callers register
- popl %edi // restore callers register
-.endif
-.else // Struct return
-.if $1 == MSG_SEND // MSG_SEND (stret)
- popl %esi // restore callers register
- popl %edi // restore callers register
- movl self_stret(%esp), %edx // get messaged object
- movl isa(%edx), %eax // get objects class
-.elseif $1 == MSG_SENDSUPER || $1 == MSG_SENDSUPER2 // MSG_SENDSUPER[2] (stret)
+ movl super(%esp), %ecx // get super structure
+ movl receiver(%ecx), %ecx // get messaged object
+ movl %ecx, super(%esp) // make it the first argument
+ cmp %eax, %eax // set eq for non-stret forwarding
+ MESSENGER_END_FAST
+ jmp *4(%eax) // call imp
+.elseif $0 == SUPER_STRET
// replace "super" arg with "receiver"
- movl super_stret+8(%esp), %edi// get super structure
- movl receiver(%edi), %edx // get messaged object
- movl %edx, super_stret+8(%esp)// make it the first argument
- movl class(%edi), %eax // get messaged class
- .if $1 == MSG_SENDSUPER2
- movl superclass(%eax), %eax // get messaged class
- .endif
- popl %esi // restore callers register
- popl %edi // restore callers register
-.else // CACHE_GET
- !! This should not happen.
-.endif
+ movl super_stret(%esp), %ecx // get super structure
+ movl receiver(%ecx), %ecx // get messaged object
+ movl %ecx, super_stret(%esp) // make it the first argument
+ test %eax, %eax // set ne for stret forwarding
+ MESSENGER_END_FAST
+ jmp *4(%eax) // call imp
+.else
+.abort oops
.endif
- // edx = receiver
- // ecx = selector
- // eax = class
- jmp $2 // go to callers handler
+.endmacro
-// eax points to matching cache entry
- .align 4, 0x90
-LMsgSendCacheHit_$0_$1_$2:
-// load implementation address, restore state, and we're done
-.if $1 == CACHE_GET
- // method triplet is already in eax
-.else
- movl method_imp(%eax), %eax // imp = method->method_imp
-.endif
+.macro CacheLookup
-.if $0 == WORD_RETURN // Regular word return
-.if $1 == MSG_SENDSUPER || $1 == MSG_SENDSUPER2
- // replace "super" arg with "self"
- movl super+8(%esp), %edi
- movl receiver(%edi), %esi
- movl %esi, super+8(%esp)
-.endif
-.else // Struct return
-.if $1 == MSG_SENDSUPER || $1 == MSG_SENDSUPER2
- // replace "super" arg with "self"
- movl super_stret+8(%esp), %edi
- movl receiver(%edi), %esi
- movl %esi, super_stret+8(%esp)
-.endif
-.endif
+ movzwl 12(%edx), %eax // eax = mask
+ andl %ecx, %eax // eax = SEL & mask
+ shll $$3, %eax // eax = offset = (SEL & mask) * 8
+ addl 8(%edx), %eax // eax = bucket = cache->buckets+offset
+ cmpl (%eax), %ecx // if (bucket->sel != SEL)
+ jne 1f // scan more
+ // The `jne` above sets flags for CacheHit
+ CacheHit $0 // call or return imp
+
+1:
+ // loop
+ cmpl $$1, (%eax)
+ je 3f // if (bucket->sel == 1) cache wrap
+ jb LCacheMiss_f // if (bucket->sel == 0) cache miss
+
+ addl $$8, %eax // bucket++
+2:
+ cmpl (%eax), %ecx // if (bucket->sel != sel)
+ jne 1b // scan more
+ // The `jne` above sets flags for CacheHit
+ CacheHit $0 // call or return imp
+
+3:
+ // wrap
+ // eax is last bucket, bucket->imp is first bucket
+ movl 4(%eax), %eax
+ jmp 2b
- // restore caller registers
- popl %esi
- popl %edi
.endmacro
/////////////////////////////////////////////////////////////////////
//
-// MethodTableLookup WORD_RETURN | STRUCT_RETURN, MSG_SEND | MSG_SENDSUPER
+// MethodTableLookup
//
-// Takes: WORD_RETURN (first parameter is at sp+4)
-// STRUCT_RETURN (struct address is at sp+4, first parameter at sp+8)
-// MSG_SEND (first parameter is receiver)
-// MSG_SENDSUPER (first parameter is address of objc_super structure)
-//
-// edx = receiver
+// Takes:
+// $0 = NORMAL, FPRET, STRET, SUPER, SUPER_STRET
+// eax = receiver
// ecx = selector
-// eax = class
-// (all set by CacheLookup's miss case)
-//
-// Stack must be at 0xXXXXXXXc on entrance.
+// edx = class to search
//
-// On exit: esp unchanged
-// imp in eax
+// On exit: calls IMP, eq/ne set for forwarding
//
/////////////////////////////////////////////////////////////////////
.macro MethodTableLookup
- // stack is already aligned
- pushl %eax // class
+ MESSENGER_END_SLOW
+ pushl %ebp
+ movl %esp, %ebp
+ sub $$12, %esp // align stack
+
+ pushl %edx // class
pushl %ecx // selector
- pushl %edx // receiver
+ pushl %eax // receiver
call __class_lookupMethodAndLoadCache3
- addl $$12, %esp // pop parameters
+
+ // imp in eax
+
+ leave
+
+.if $0 == SUPER
+ // replace "super" arg with "receiver"
+ movl super(%esp), %ecx // get super structure
+ movl receiver(%ecx), %ecx // get messaged object
+ movl %ecx, super(%esp) // make it the first argument
+.elseif $0 == SUPER_STRET
+ // replace "super" arg with "receiver"
+ movl super_stret(%esp), %ecx // get super structure
+ movl receiver(%ecx), %ecx // get messaged object
+ movl %ecx, super_stret(%esp) // make it the first argument
+.endif
+
+.if $0 == STRET || $0 == SUPER_STRET
+ // set ne (stret) for forwarding; eax != 0
+ test %eax, %eax
+ jmp *%eax // call imp
+.else
+ // set eq (non-stret) for forwarding
+ cmp %eax, %eax
+ jmp *%eax // call imp
+.endif
+
.endmacro
-/********************************************************************
- * Method _cache_getMethod(Class cls, SEL sel, IMP msgForward_internal_imp)
- *
- * If found, returns method triplet pointer.
- * If not found, returns NULL.
- *
- * NOTE: _cache_getMethod never returns any cache entry whose implementation
- * is _objc_msgForward_internal. It returns 1 instead. This prevents thread-
- * safety and memory management bugs in _class_lookupMethodAndLoadCache.
- * See _class_lookupMethodAndLoadCache for details.
- *
- * _objc_msgForward_internal is passed as a parameter because it's more
- * efficient to do the (PIC) lookup once in the caller than repeatedly here.
- ********************************************************************/
-
- .private_extern __cache_getMethod
- ENTRY __cache_getMethod
+/////////////////////////////////////////////////////////////////////
+//
+// NilTest return-type
+//
+// Takes: $0 = NORMAL or FPRET or STRET
+// eax = receiver
+//
+// On exit: Loads non-nil receiver in eax and self(esp) or self_stret(esp),
+// or returns zero.
+//
+// NilTestSupport return-type
+//
+// Takes: $0 = NORMAL or FPRET or STRET
+// eax = receiver
+//
+// On exit: Loads non-nil receiver in eax and self(esp) or self_stret(esp),
+// or returns zero.
+//
+/////////////////////////////////////////////////////////////////////
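// ----------------------------------------------------------------------
// Editor's illustration of the nil-receiver results NilTestSupport
// produces (receiver types and selectors below are hypothetical):
//
//   // id     a = [(id)nil description];   // NORMAL: eax, edx, xmm0, xmm1 zeroed
//   // double d = [(id)nil doubleValue];   // FPRET:  fldz pushes 0.0
//   // Big    s = [(id)nil bigValue];      // STRET:  returns via `ret $4`
//   //                                     //         without writing the buffer
// ----------------------------------------------------------------------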
-// load the class and selector
- movl selector(%esp), %ecx
- movl self(%esp), %edx
+.macro NilTest
+ testl %eax, %eax
+ jz LNilTestSlow_f
+LNilTestDone:
+.endmacro
-// do lookup
- CacheLookup WORD_RETURN, CACHE_GET, LGetMethodMiss
+.macro NilTestSupport
+ .align 3
+LNilTestSlow:
-// cache hit, method triplet in %eax
- movl first_arg(%esp), %ecx // check for _objc_msgForward_internal
- cmpl method_imp(%eax), %ecx // if (imp==_objc_msgForward_internal)
- je 1f // return (Method)1
- ret // else return method triplet address
-1: movl $1, %eax
+.if $0 == FPRET
+ fldz
+ MESSENGER_END_NIL
ret
-
-LGetMethodMiss:
-// cache miss, return nil
- xorl %eax, %eax // zero %eax
+.elseif $0 == STRET
+ MESSENGER_END_NIL
+ ret $4
+.elseif $0 == NORMAL
+ // eax is already zero
+ xorl %edx, %edx
+ xorps %xmm0, %xmm0
+ xorps %xmm1, %xmm1
+ MESSENGER_END_NIL
ret
-
-LGetMethodExit:
- END_ENTRY __cache_getMethod
+.endif
+.endmacro
/********************************************************************
* If not found, returns NULL.
********************************************************************/
- .private_extern __cache_getImp
- ENTRY __cache_getImp
+ .private_extern _cache_getImp
+ ENTRY _cache_getImp
// load the class and selector
movl selector(%esp), %ecx
movl self(%esp), %edx
-// do lookup
- CacheLookup WORD_RETURN, CACHE_GET, LGetImpMiss
+ CacheLookup GETIMP // returns IMP on success
-// cache hit, method triplet in %eax
- movl method_imp(%eax), %eax // return method imp
- ret
-
-LGetImpMiss:
+LCacheMiss:
// cache miss, return nil
- xorl %eax, %eax // zero %eax
+ xorl %eax, %eax
ret
LGetImpExit:
- END_ENTRY __cache_getImp
+ END_ENTRY _cache_getImp
/********************************************************************
********************************************************************/
ENTRY _objc_msgSend
-
-// load receiver and selector
+ MESSENGER_START
+
movl selector(%esp), %ecx
movl self(%esp), %eax
-#if SUPPORT_IGNORED_SELECTOR_CONSTANT
-// check whether selector is ignored
- cmpl $ kIgnore, %ecx
- je LMsgSendDone // return self from %eax
-#endif
+ NilTest NORMAL
-// check whether receiver is nil
- testl %eax, %eax
- je LMsgSendNilSelf
-
-// receiver (in %eax) is non-nil: search the cache
-LMsgSendReceiverOk:
movl isa(%eax), %edx // class = self->isa
- CacheLookup WORD_RETURN, MSG_SEND, LMsgSendCacheMiss
- xor %edx, %edx // set nonstret for msgForward_internal
- jmp *%eax
+ CacheLookup NORMAL // calls IMP on success
+
+ NilTestSupport NORMAL
+
+LCacheMiss:
+ // isa still in edx
+ movl selector(%esp), %ecx
+ movl self(%esp), %eax
+ MethodTableLookup NORMAL // calls IMP
-// cache miss: go search the method lists
-LMsgSendCacheMiss:
- MethodTableLookup WORD_RETURN, MSG_SEND
- xor %edx, %edx // set nonstret for msgForward_internal
- jmp *%eax // goto *imp
-
-// message sent to nil: redirect to nil receiver, if any
-LMsgSendNilSelf:
- call 1f // load new receiver
-1: popl %edx
- movl __objc_nilReceiver-1b(%edx),%eax
- testl %eax, %eax // return nil if no new receiver
- je LMsgSendReturnZero
- movl %eax, self(%esp) // send to new receiver
- jmp LMsgSendReceiverOk // receiver must be in %eax
-LMsgSendReturnZero:
- // %eax is already zero
- movl $0,%edx
-LMsgSendDone:
- ret
LMsgSendExit:
END_ENTRY _objc_msgSend
+
/********************************************************************
*
* id objc_msgSendSuper(struct objc_super *super, SEL _cmd,...);
********************************************************************/
ENTRY _objc_msgSendSuper
+ MESSENGER_START
-// load selector and class to search
- movl super(%esp), %eax // struct objc_super
movl selector(%esp), %ecx
+ movl super(%esp), %eax // struct objc_super
movl class(%eax), %edx // struct objc_super->class
+ CacheLookup SUPER // calls IMP on success
-#if SUPPORT_IGNORED_SELECTOR_CONSTANT
-// check whether selector is ignored
- cmpl $ kIgnore, %ecx
- je LMsgSendSuperIgnored // return self from %eax
-#endif
-
-// search the cache (class in %edx)
- CacheLookup WORD_RETURN, MSG_SENDSUPER, LMsgSendSuperCacheMiss
- xor %edx, %edx // set nonstret for msgForward_internal
- jmp *%eax // goto *imp
-
-// cache miss: go search the method lists
-LMsgSendSuperCacheMiss:
- MethodTableLookup WORD_RETURN, MSG_SENDSUPER
- xor %edx, %edx // set nonstret for msgForward_internal
- jmp *%eax // goto *imp
-
-// ignored selector: return self
-LMsgSendSuperIgnored:
+LCacheMiss:
+ // class still in edx
+ movl selector(%esp), %ecx
movl super(%esp), %eax
- movl receiver(%eax), %eax
- ret
+ movl receiver(%eax), %eax
+ MethodTableLookup SUPER // calls IMP
LMsgSendSuperExit:
END_ENTRY _objc_msgSendSuper
ENTRY _objc_msgSendSuper2
+ MESSENGER_START
-// load selector and class to search
- movl super(%esp), %eax // struct objc_super
movl selector(%esp), %ecx
+ movl super(%esp), %eax // struct objc_super
movl class(%eax), %eax // struct objc_super->class
mov superclass(%eax), %edx // edx = objc_super->class->super_class
+ CacheLookup SUPER // calls IMP on success
-#if SUPPORT_IGNORED_SELECTOR_CONSTANT
-// check whether selector is ignored
- cmpl $ kIgnore, %ecx
- je LMsgSendSuperIgnored // return self from %eax
-#endif
-
-// search the cache (class in %edx)
- CacheLookup WORD_RETURN, MSG_SENDSUPER2, LMsgSendSuper2CacheMiss
- xor %edx, %edx // set nonstret for msgForward_internal
- jmp *%eax // goto *imp
-
-// cache miss: go search the method lists
-LMsgSendSuper2CacheMiss:
- MethodTableLookup WORD_RETURN, MSG_SENDSUPER2
- xor %edx, %edx // set nonstret for msgForward_internal
- jmp *%eax // goto *imp
-
-// ignored selector: return self
-LMsgSendSuper2Ignored:
+LCacheMiss:
+ // class still in edx
+ movl selector(%esp), %ecx
movl super(%esp), %eax
- movl receiver(%eax), %eax
- ret
-
+ movl receiver(%eax), %eax
+ MethodTableLookup SUPER // calls IMP
+
LMsgSendSuper2Exit:
END_ENTRY _objc_msgSendSuper2
********************************************************************/
ENTRY _objc_msgSend_fpret
+ MESSENGER_START
-// load receiver and selector
movl selector(%esp), %ecx
movl self(%esp), %eax
-#if SUPPORT_IGNORED_SELECTOR_CONSTANT
-// check whether selector is ignored
- cmpl $ kIgnore, %ecx
- je LMsgSendFpretDone // return self from %eax
-#endif
+ NilTest FPRET
-// check whether receiver is nil
- testl %eax, %eax
- je LMsgSendFpretNilSelf
-
-// receiver (in %eax) is non-nil: search the cache
-LMsgSendFpretReceiverOk:
movl isa(%eax), %edx // class = self->isa
- CacheLookup WORD_RETURN, MSG_SEND, LMsgSendFpretCacheMiss
- xor %edx, %edx // set nonstret for msgForward_internal
- jmp *%eax // goto *imp
+ CacheLookup FPRET // calls IMP on success
-// cache miss: go search the method lists
-LMsgSendFpretCacheMiss:
- MethodTableLookup WORD_RETURN, MSG_SEND
- xor %edx, %edx // set nonstret for msgForward_internal
- jmp *%eax // goto *imp
-
-// message sent to nil: redirect to nil receiver, if any
-LMsgSendFpretNilSelf:
- call 1f // load new receiver
-1: popl %edx
- movl __objc_nilReceiver-1b(%edx),%eax
- testl %eax, %eax // return zero if no new receiver
- je LMsgSendFpretReturnZero
- movl %eax, self(%esp) // send to new receiver
- jmp LMsgSendFpretReceiverOk // receiver must be in %eax
-LMsgSendFpretReturnZero:
- fldz
-LMsgSendFpretDone:
- ret
+ NilTestSupport FPRET
+
+LCacheMiss:
+ // class still in edx
+ movl selector(%esp), %ecx
+ movl self(%esp), %eax
+ MethodTableLookup FPRET // calls IMP
LMsgSendFpretExit:
END_ENTRY _objc_msgSend_fpret
********************************************************************/
ENTRY _objc_msgSend_stret
+ MESSENGER_START
-// load receiver and selector
+ movl selector_stret(%esp), %ecx
movl self_stret(%esp), %eax
- movl (selector_stret)(%esp), %ecx
-// check whether receiver is nil
- testl %eax, %eax
- je LMsgSendStretNilSelf
+ NilTest STRET
-// receiver (in %eax) is non-nil: search the cache
-LMsgSendStretReceiverOk:
- movl isa(%eax), %edx // class = self->isa
- CacheLookup STRUCT_RETURN, MSG_SEND, LMsgSendStretCacheMiss
- movl $1, %edx // set stret for objc_msgForward
- jmp *%eax // goto *imp
+ movl isa(%eax), %edx // class = self->isa
+ CacheLookup STRET // calls IMP on success
+
+ NilTestSupport STRET
+
+LCacheMiss:
+ // class still in edx
+ movl selector_stret(%esp), %ecx
+ movl self_stret(%esp), %eax
+ MethodTableLookup STRET // calls IMP
-// cache miss: go search the method lists
-LMsgSendStretCacheMiss:
- MethodTableLookup STRUCT_RETURN, MSG_SEND
- movl $1, %edx // set stret for objc_msgForward
- jmp *%eax // goto *imp
-
-// message sent to nil: redirect to nil receiver, if any
-LMsgSendStretNilSelf:
- call 1f // load new receiver
-1: popl %edx
- movl __objc_nilReceiver-1b(%edx),%eax
- testl %eax, %eax // return nil if no new receiver
- je LMsgSendStretDone
- movl %eax, self_stret(%esp) // send to new receiver
- jmp LMsgSendStretReceiverOk // receiver must be in %eax
-LMsgSendStretDone:
- ret $4 // pop struct return address (#2995932)
LMsgSendStretExit:
END_ENTRY _objc_msgSend_stret
+
/********************************************************************
*
* void objc_msgSendSuper_stret(void *st_addr, struct objc_super *super, SEL _cmd, ...);
********************************************************************/
ENTRY _objc_msgSendSuper_stret
+ MESSENGER_START
-// load selector and class to search
+ movl selector_stret(%esp), %ecx
movl super_stret(%esp), %eax // struct objc_super
- movl (selector_stret)(%esp), %ecx // get selector
movl class(%eax), %edx // struct objc_super->class
+ CacheLookup SUPER_STRET // calls IMP on success
-// search the cache (class in %edx)
- CacheLookup STRUCT_RETURN, MSG_SENDSUPER, LMsgSendSuperStretCacheMiss
- movl $1, %edx // set stret for objc_msgForward
- jmp *%eax // goto *imp
-
-// cache miss: go search the method lists
-LMsgSendSuperStretCacheMiss:
- MethodTableLookup STRUCT_RETURN, MSG_SENDSUPER
- movl $1, %edx // set stret for objc_msgForward
- jmp *%eax // goto *imp
+LCacheMiss:
+ // class still in edx
+ movl selector_stret(%esp), %ecx
+ movl super_stret(%esp), %eax
+ movl receiver(%eax), %eax
+ MethodTableLookup SUPER_STRET // calls IMP
LMsgSendSuperStretExit:
END_ENTRY _objc_msgSendSuper_stret
ENTRY _objc_msgSendSuper2_stret
+ MESSENGER_START
-// load selector and class to search
+ movl selector_stret(%esp), %ecx
movl super_stret(%esp), %eax // struct objc_super
- movl (selector_stret)(%esp), %ecx // get selector
movl class(%eax), %eax // struct objc_super->class
mov superclass(%eax), %edx // edx = objc_super->class->super_class
-
-// search the cache (class in %edx)
- CacheLookup STRUCT_RETURN, MSG_SENDSUPER2, LMsgSendSuper2StretCacheMiss
- movl $1, %edx // set stret for objc_msgForward
- jmp *%eax // goto *imp
+ CacheLookup SUPER_STRET // calls IMP on success
// cache miss: go search the method lists
-LMsgSendSuper2StretCacheMiss:
- MethodTableLookup STRUCT_RETURN, MSG_SENDSUPER2
- movl $1, %edx // set stret for objc_msgForward
- jmp *%eax // goto *imp
+LCacheMiss:
+ // class still in edx
+ movl selector_stret(%esp), %ecx
+ movl super_stret(%esp), %eax
+ movl receiver(%eax), %eax
+ MethodTableLookup SUPER_STRET // calls IMP
LMsgSendSuper2StretExit:
END_ENTRY _objc_msgSendSuper2_stret
+/********************************************************************
+ *
+ * _objc_msgSend_uncached_impcache
+ * _objc_msgSend_uncached
+ * _objc_msgSend_stret_uncached
+ *
+ * Used to erase method cache entries in-place by
+ * bouncing them to the uncached lookup.
+ *
+ ********************************************************************/
+
+ STATIC_ENTRY __objc_msgSend_uncached_impcache
+ // Method cache version
+
+ // THIS IS NOT A CALLABLE C FUNCTION
+ // Out-of-band condition register is NE for stret, EQ otherwise.
+ // Out-of-band edx is the searched class
+
+ MESSENGER_START
+ nop
+ MESSENGER_END_SLOW
+
+ jne __objc_msgSend_stret_uncached
+ jmp __objc_msgSend_uncached
+
+ END_ENTRY __objc_msgSend_uncached_impcache
+
+
+ STATIC_ENTRY __objc_msgSend_uncached
+
+ // THIS IS NOT A CALLABLE C FUNCTION
+ // Out-of-band edx is the searched class
+
+ // edx is already the class to search
+ movl selector(%esp), %ecx
+ MethodTableLookup NORMAL // calls IMP
+
+ END_ENTRY __objc_msgSend_uncached
+
+
+ STATIC_ENTRY __objc_msgSend_stret_uncached
+
+ // THIS IS NOT A CALLABLE C FUNCTION
+ // Out-of-band edx is the searched class
+
+ // edx is already the class to search
+ movl selector_stret(%esp), %ecx
+ MethodTableLookup STRET // calls IMP
+
+ END_ENTRY __objc_msgSend_stret_uncached
+
+
/********************************************************************
*
* id _objc_msgForward(id self, SEL _cmd,...);
.private_extern __objc_forward_stret_handler
__objc_forward_stret_handler: .long 0
- ENTRY __objc_msgForward_internal
- .private_extern __objc_msgForward_internal
+ ENTRY __objc_msgForward_impcache
+ .private_extern __objc_msgForward_impcache
// Method cache version
// THIS IS NOT A CALLABLE C FUNCTION
- // Out-of-band register %edx is nonzero for stret, zero otherwise
-
- // Check return type (stret or not)
- testl %edx, %edx
- jnz __objc_msgForward_stret
+ // Out-of-band condition register is NE for stret, EQ otherwise.
+
+ MESSENGER_START
+ nop
+ MESSENGER_END_SLOW
+
+ jne __objc_msgForward_stret
jmp __objc_msgForward
- END_ENTRY _objc_msgForward_internal
+ END_ENTRY _objc_msgForward_impcache
ENTRY __objc_msgForward
movl %esp, %ebp
// Die if forwarding "forward::"
- movl (selector_stret+4)(%ebp), %eax
+ movl selector_stret+4(%ebp), %eax
movl _FwdSel-L__objc_msgForwardStret$pic_base(%edx), %ecx
cmpl %ecx, %eax
je LMsgForwardStretError
END_ENTRY __objc_ignored_method
#endif
+
+
+.section __DATA,__objc_msg_break
+.long 0
+.long 0
#endif
}\r
\r
\r
-__declspec(naked) id _objc_msgForward_internal(id a, SEL b, ...)\r
+__declspec(naked) id _objc_msgForward_cached(id a, SEL b, ...)\r
{\r
__asm {\r
cmp edx, kFwdMsgSendStret\r
********************************************************************
********************************************************************/
-#define __OBJC2__ 1
-
/********************************************************************
* Data used by the ObjC runtime.
*
.private_extern _objc_entryPoints
_objc_entryPoints:
- .quad __cache_getImp
- .quad __cache_getMethod
+ .quad _cache_getImp
.quad _objc_msgSend
.quad _objc_msgSend_fpret
.quad _objc_msgSend_fp2ret
.private_extern _objc_exitPoints
_objc_exitPoints:
- .quad LExit__cache_getImp
- .quad LExit__cache_getMethod
+ .quad LExit_cache_getImp
.quad LExit_objc_msgSend
.quad LExit_objc_msgSend_fpret
.quad LExit_objc_msgSend_fp2ret
.quad 0
+/********************************************************************
+* List every exit insn from every messenger for debugger use.
+* Format:
+* (
+* 1 word instruction's address
+* 1 word type (ENTER or FAST_EXIT or SLOW_EXIT or NIL_EXIT)
+* )
+* 1 word zero
+*
+* ENTER is the start of a dispatcher
+* FAST_EXIT is method dispatch
+* SLOW_EXIT is uncached method lookup
+* NIL_EXIT is returning zero from a message sent to nil
+* These must match objc-gdb.h.
+********************************************************************/
+
+#define ENTER 1
+#define FAST_EXIT 2
+#define SLOW_EXIT 3
+#define NIL_EXIT 4
+
+.section __DATA,__objc_msg_break
+.globl _gdb_objc_messenger_breakpoints
+_gdb_objc_messenger_breakpoints:
+// contents populated by the macros below
+
+.macro MESSENGER_START
+4:
+ .section __DATA,__objc_msg_break
+ .quad 4b
+ .quad ENTER
+ .text
+.endmacro
+.macro MESSENGER_END_FAST
+4:
+ .section __DATA,__objc_msg_break
+ .quad 4b
+ .quad FAST_EXIT
+ .text
+.endmacro
+.macro MESSENGER_END_SLOW
+4:
+ .section __DATA,__objc_msg_break
+ .quad 4b
+ .quad SLOW_EXIT
+ .text
+.endmacro
+.macro MESSENGER_END_NIL
+4:
+ .section __DATA,__objc_msg_break
+ .quad 4b
+ .quad NIL_EXIT
+ .text
+.endmacro
+
+
/********************************************************************
* Recommended multi-byte NOP instructions
* (Intel 64 and IA-32 Architectures Software Developer's Manual Volume 2B)
#define nop9 .byte 0x66,0x0F,0x1F,0x84,0x00,0x00,0x00,0x00,0x00
+/********************************************************************
+ * Harmless branch prefix hint for instruction alignment
+ ********************************************************************/
+
+#define PN .byte 0x2e
+
+
/********************************************************************
* Names for parameter registers.
********************************************************************/
* Macro parameters
********************************************************************/
-#define STRET -1
#define NORMAL 0
#define FPRET 1
#define FP2RET 2
-
+#define GETIMP 3
+#define STRET 4
+#define SUPER 5
+#define SUPER_STRET 6
+#define SUPER2 7
+#define SUPER2_STRET 8
+
/********************************************************************
*
// Selected field offsets in class structure
// #define isa 0 USE GetIsa INSTEAD
-#define cache 16
// Method descriptor
#define method_name 0
#define method_imp 16
-// Cache header
-#define mask 0
-#define occupied 8
-#define buckets 16
-
// typedef struct {
// uint128_t floatingPointArgs[8]; // xmm0..xmm7
// long linkageArea[4]; // r10, rax, ebp, ret
// DW_START: set by CIE
.if $1 == 1
- // CacheLookup
-
- // push
- .byte DW_CFA_advance_loc4
- .long L_dw_push_$0 - L_dw_start_$0
- .byte DW_CFA_def_cfa_offset // CFA = rsp+16
- .byte 16
-
- // pop
- .byte DW_CFA_advance_loc4
- .long L_dw_pop_$0 - L_dw_push_$0
- .byte DW_CFA_def_cfa_offset // CFA = rsp+8
- .byte 8
-
- // cache miss: push is back in effect
- .byte DW_CFA_advance_loc4
- .long L_dw_miss_$0 - L_dw_pop_$0
- .byte DW_CFA_def_cfa_offset // CFA = rsp+16
- .byte 16
-
- // pop during cache miss
- .byte DW_CFA_advance_loc4
- .long L_dw_miss_pop_$0 - L_dw_miss_$0
- .byte DW_CFA_def_cfa_offset // CFA = rsp+8
- .byte 8
-
-.endif
-
-.if $2 == 1
// Save/RestoreRegisters or MethodTableLookup
// enter
.byte DW_CFA_advance_loc4
-.if $1 == 1
- .long L_dw_enter_$0 - L_dw_miss_pop_$0
-.else
.long L_dw_enter_$0 - L_dw_start_$0
-.endif
.byte DW_CFA_def_cfa_offset
.byte 16
.byte DW_CFA_offset | DW_rbp // rbp => 2*-8(CFA)
.macro DW_START
L_dw_start_$0:
.endmacro
-
-// After `push` in CacheLookup
-.macro DW_PUSH
-L_dw_push_$0:
-.endmacro
-
-// After `pop` in CacheLookup
-.macro DW_POP
-L_dw_pop_$0:
-.endmacro
-
-// After cache miss label
-.macro DW_MISS
-L_dw_miss_$0:
-.endmacro
-
-// After pop in MethodTableLookup
-.macro DW_MISS_POP
-L_dw_miss_pop_$0:
-.endmacro
// After `enter` in SaveRegisters
.macro DW_ENTER
.endmacro
// End of function
-// $1 == 1 iff you called CacheLookup
-// $2 == 1 iff you called MethodTableLookup or Save/RestoreRegsters
+// $1 == 1 iff you called MethodTableLookup or Save/RestoreRegisters
.macro DW_END
.set L_dw_len_$0, . - L_dw_start_$0
- EMIT_FDE $0, $1, $2
+ EMIT_FDE $0, $1
.endmacro
/////////////////////////////////////////////////////////////////////
//
-//
// CacheLookup return-type
//
-// Locate the implementation for a selector in a class method cache.
+// Locate the implementation for a class in a selector's method cache.
//
// Takes:
-// $0 = NORMAL, FPRET, FP2RET, STRET
-// $1 = caller's symbol name for DWARF
-// a2 or a3 (STRET) = selector
-// %r11 = class whose cache is to be searched
+// $0 = NORMAL, FPRET, FP2RET, STRET, SUPER, SUPER_STRET, SUPER2, SUPER2_STRET, GETIMP
+// a2 or a3 (STRET) = selector a.k.a. cache
+// r11 = class to search
//
-// On exit: (found) method in %r11, stack unchanged, eq/ne set for forwarding
-// (not found) jumps to LCacheMiss, %rax on stack
+// On exit: r10 clobbered
+// (found) calls or returns IMP, eq/ne/r11 set for forwarding
+// (not found) jumps to LCacheMiss, class still in r11
//
/////////////////////////////////////////////////////////////////////
-.macro CacheLookup
+.macro CacheHit
- push %rax
- DW_PUSH $1
+ // CacheHit must always be preceded by a not-taken `jne` instruction
+ // in order to set the correct flags for _objc_msgForward_impcache.
+
+ // r10 = found bucket
- movq cache(%r11), %r10 // cache = class->cache
-.if $0 != STRET
- mov %a2d, %eax // index = sel
+.if $0 == GETIMP
+ movq 8(%r10), %rax // return imp
+ leaq __objc_msgSend_uncached_impcache(%rip), %r11
+ cmpq %rax, %r11
+ jne 4f
+ xorl %eax, %eax // don't return msgSend_uncached
+4: ret
+.elseif $0 == NORMAL || $0 == FPRET || $0 == FP2RET
+ // eq already set for forwarding by `jne`
+ MESSENGER_END_FAST
+ jmp *8(%r10) // call imp
+
+.elseif $0 == SUPER
+ movq receiver(%a1), %a1 // load real receiver
+ cmp %r10, %r10 // set eq for non-stret forwarding
+ MESSENGER_END_FAST
+ jmp *8(%r10) // call imp
+
+.elseif $0 == SUPER2
+ movq receiver(%a1), %a1 // load real receiver
+ cmp %r10, %r10 // set eq for non-stret forwarding
+ MESSENGER_END_FAST
+ jmp *8(%r10) // call imp
+
+.elseif $0 == STRET
+ test %r10, %r10 // set ne for stret forwarding
+ MESSENGER_END_FAST
+ jmp *8(%r10) // call imp
+
+.elseif $0 == SUPER_STRET
+ movq receiver(%a2), %a2 // load real receiver
+ test %r10, %r10 // set ne for stret forwarding
+ MESSENGER_END_FAST
+ jmp *8(%r10) // call imp
+
+.elseif $0 == SUPER2_STRET
+ movq receiver(%a2), %a2 // load real receiver
+ test %r10, %r10 // set ne for stret forwarding
+ MESSENGER_END_FAST
+ jmp *8(%r10) // call imp
.else
- mov %a3d, %eax // index = sel
+.abort oops
.endif
-// search the receiver's cache
-// r11 = method (soon)
-// eax = index
-// r10 = cache
-// a2 or a3 = sel
+.endmacro
+
+
+.macro CacheLookup
+.if $0 != STRET && $0 != SUPER_STRET && $0 != SUPER2_STRET
+ movq %a2, %r10 // r10 = _cmd
+.else
+ movq %a3, %r10 // r10 = _cmd
+.endif
+ andl 24(%r11), %r10d // r10 = _cmd & class->cache.mask
+ shlq $$4, %r10 // r10 = offset = (_cmd & mask)<<4
+ addq 16(%r11), %r10 // r10 = class->cache.buckets + offset
+
+.if $0 != STRET && $0 != SUPER_STRET && $0 != SUPER2_STRET
+ cmpq (%r10), %a2 // if (bucket->sel != _cmd)
+.else
+ cmpq (%r10), %a3 // if (bucket->sel != _cmd)
+.endif
+ jne 1f // scan more
+ // CacheHit must always be preceded by a not-taken `jne` instruction
+ CacheHit $0 // call or return imp
+
1:
- andl mask(%r10), %eax // index &= mask
- movq buckets(%r10, %rax, 8), %r11 // method = cache->buckets[index]
- incl %eax // index++
- testq %r11, %r11 // if (method == NULL)
- je LCacheMiss_f // goto cacheMissLabel
-.if $0 != STRET
- cmpq method_name(%r11), %a2 // if (method_name != sel)
+ // loop
+ cmpq $$0, (%r10)
+ je LCacheMiss_f // if (bucket->sel == 0) cache miss
+ cmpq 16(%r11), %r10
+ je 3f // if (bucket == cache->buckets) wrap
+
+ subq $$16, %r10 // bucket--
+.if $0 != STRET && $0 != SUPER_STRET && $0 != SUPER2_STRET
+ cmpq (%r10), %a2 // if (bucket->sel != _cmd)
.else
- cmpq method_name(%r11), %a3 // if (method_name != sel)
+ cmpq (%r10), %a3 // if (bucket->sel != _cmd)
.endif
- jne 1b // goto loop
+ jne 1b // scan more
+ // CacheHit must always be preceded by a not-taken `jne` instruction
+ CacheHit $0 // call or return imp
- // cache hit, r11 = method triplet
- // restore saved registers
- pop %rax
- DW_POP $1
+3:
+ // wrap
+ movl 24(%r11), %r10d // r10 = mask a.k.a. last bucket index
+ shlq $$4, %r10 // r10 = offset = mask<<4
+ addq 16(%r11), %r10 // r10 = &cache->buckets[mask]
+ jmp 2f
-.if $0 != STRET
- // eq (non-stret) flag already set above
+ // clone scanning loop to crash instead of hang when cache is corrupt
+
+1:
+ // loop
+ cmpq $$0, (%r10)
+ je LCacheMiss_f // if (bucket->sel == 0) cache miss
+ cmpq 16(%r11), %r10
+ je 3f // if (bucket == cache->buckets) wrap
+
+ subq $$16, %r10 // bucket--
+2:
+.if $0 != STRET && $0 != SUPER_STRET && $0 != SUPER2_STRET
+ cmpq (%r10), %a2 // if (bucket->sel != _cmd)
.else
- // set ne (stret) for forwarding; r11 != 0
- test %r11, %r11
+ cmpq (%r10), %a3 // if (bucket->sel != _cmd)
.endif
+ jne 1b // scan more
+ // CacheHit must always be preceded by a not-taken `jne` instruction
+ CacheHit $0 // call or return imp
+3:
+ // double wrap - busted
+.if $0 == STRET || $0 == SUPER_STRET || $0 == SUPER2_STRET
+ movq %a2, %a1
+ movq %a3, %a2
+.elseif $0 == GETIMP
+ movq $$0, %a1
+.endif
+ // a1 = receiver
+ // a2 = SEL
+ movq %r11, %a3 // a3 = isa
+ movq %r10, %a4 // a4 = bucket
+.if $0 == GETIMP
+ jmp _cache_getImp_corrupt_cache_error
+.else
+ jmp _objc_msgSend_corrupt_cache_error
+.endif
.endmacro
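// Illustrative sketch (not part of this patch): the CacheLookup scan above is
// roughly the following C, assuming the layout the assembly uses (buckets
// pointer at class+16, mask at class+24, 16-byte buckets of { SEL, IMP }).
// The type and function names are hypothetical.

#include <stdint.h>

typedef struct { uintptr_t sel; uintptr_t imp; } bucket_sketch_t;  // 16 bytes

typedef struct {
    bucket_sketch_t *buckets;   // corresponds to 16(%r11)
    uint32_t         mask;      // corresponds to 24(%r11): capacity - 1
} cache_sketch_t;

// Returns the cached imp, or 0 on a cache miss; sets *corrupt instead of
// hanging if the scan wraps twice (the assembly jumps to a corrupt-cache
// handler at that point).
uintptr_t cache_lookup_sketch(const cache_sketch_t *cache, uintptr_t cmd,
                              int *corrupt)
{
    *corrupt = 0;
    bucket_sketch_t *b = cache->buckets + (cmd & cache->mask);  // (_cmd & mask)<<4
    for (int wraps = 0; wraps < 2; ) {
        if (b->sel == cmd) return b->imp;       // CacheHit
        if (b->sel == 0)   return 0;            // LCacheMiss
        if (b == cache->buckets) {              // reached the first bucket: wrap
            b = cache->buckets + cache->mask;   // continue from the last bucket
            wraps++;
        } else {
            b--;                                // bucket--
        }
    }
    *corrupt = 1;                               // double wrap - cache is corrupt
    return 0;
}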
// $2 = caller's symbol name for DWARF
// r11 = class to search
//
-// Stack: ret, rax (pushed by CacheLookup)
-//
-// On exit: pops registers pushed by CacheLookup
-// imp in %r11
+// On exit: imp in %r11
//
/////////////////////////////////////////////////////////////////////
.macro MethodTableLookup
-
- pop %rax // saved by CacheLookup
- DW_MISS_POP $2
+
+ MESSENGER_END_SLOW
SaveRegisters $2
/////////////////////////////////////////////////////////////////////
//
-// GetIsa return-type
// GetIsaFast return-type
// GetIsaSupport return-type
//
// r10 is clobbered
//
/////////////////////////////////////////////////////////////////////
-
-.macro GetIsa
-
-.if $0 != STRET
- testb $$1, %a1b
- jnz 1f
- movq (%a1), %r11
- jmp 2f
-1: movl %a1d, %r10d
-.else
- testb $$1, %a2b
- jnz 1f
- movq (%a2), %r11
- jmp 2f
-1: movl %a2d, %r10d
-.endif
- andl $$0xF, %r10d
- leaq __objc_tagged_isa_table(%rip), %r11
- movq (%r11, %r10, 8), %r11 // read isa from table
-2:
-.endmacro
.macro GetIsaFast
.if $0 != STRET
testb $$1, %a1b
- .byte 0x2e // harmless branch hint prefix to align IFETCH blocks
+ PN
jnz LGetIsaSlow_f
movq (%a1), %r11
.else
testb $$1, %a2b
- .byte 0x2e // harmless branch hint prefix to align IFETCH blocks
+ PN
jnz LGetIsaSlow_f
movq (%a2), %r11
.endif
LGetIsaDone:
.endmacro
-.macro GetIsaSupport
+.macro GetIsaSupport2
LGetIsaSlow:
- leaq __objc_tagged_isa_table(%rip), %r11
+ leaq _objc_debug_taggedpointer_classes(%rip), %r11
.if $0 != STRET
movl %a1d, %r10d
.else
.endif
andl $$0xF, %r10d
movq (%r11, %r10, 8), %r11 // read isa from table
+.endmacro
+
+.macro GetIsaSupport
+ GetIsaSupport2 $0
jmp LGetIsaDone_b
.endmacro
+
+.macro GetIsa
+ GetIsaFast $0
+ jmp LGetIsaDone_f
+ GetIsaSupport2 $0
+LGetIsaDone:
+.endmacro
+
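// Illustrative sketch (not part of this patch): GetIsaFast/GetIsaSupport above
// read the isa of an ordinary object directly, and for a tagged pointer (low
// bit set) they index _objc_debug_taggedpointer_classes with the low four bits
// of the pointer value. Roughly, in C, with void* standing in for Class and a
// hypothetical function name:

#include <stdint.h>

extern void *_objc_debug_taggedpointer_classes[16];  // the table defined below

void *get_isa_sketch(void *obj)
{
    uintptr_t bits = (uintptr_t)obj;
    if (bits & 1) {
        // tagged pointer: the tag in the low 4 bits selects the class
        return _objc_debug_taggedpointer_classes[bits & 0xF];
    }
    // ordinary object: isa is the first pointer-sized field
    return *(void **)obj;
}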
/////////////////////////////////////////////////////////////////////
//
/////////////////////////////////////////////////////////////////////
.macro NilTest
+.if $0 == SUPER || $0 == SUPER_STRET
+ error super dispatch does not test for nil
+.endif
+
.if $0 != STRET
testq %a1, %a1
.else
testq %a2, %a2
.endif
+ PN
jz LNilTestSlow_f
LNilTestDone:
.endmacro
fldz
fldz
.endif
-.if $0 != STRET
+.if $0 == STRET
+ movq %rdi, %rax
+.else
xorl %eax, %eax
xorl %edx, %edx
xorps %xmm0, %xmm0
xorps %xmm1, %xmm1
.endif
+ MESSENGER_END_NIL
ret
.endmacro
-
-
-/********************************************************************
- * Method _cache_getMethod(Class cls, SEL sel, IMP msgForward_internal_imp)
- *
- * On entry: a1 = class whose cache is to be searched
- * a2 = selector to search for
- * a3 = _objc_msgForward_internal IMP
- *
- * If found, returns method triplet pointer.
- * If not found, returns NULL.
- *
- * NOTE: _cache_getMethod never returns any cache entry whose implementation
- * is _objc_msgForward_internal. It returns 1 instead. This prevents thread-
- * thread-safety and memory management bugs in _class_lookupMethodAndLoadCache.
- * See _class_lookupMethodAndLoadCache for details.
- *
- * _objc_msgForward_internal is passed as a parameter because it's more
- * efficient to do the (PIC) lookup once in the caller than repeatedly here.
- ********************************************************************/
-
- STATIC_ENTRY __cache_getMethod
- DW_START __cache_getMethod
-
-// do lookup
- movq %a1, %r11 // move class to r11 for CacheLookup
- CacheLookup NORMAL, __cache_getMethod
-
-// cache hit, method triplet in %r11
- cmpq method_imp(%r11), %a3 // if (imp==_objc_msgForward_internal)
- je 1f // return (Method)1
- movq %r11, %rax // return method triplet address
- ret
-1: movl $1, %eax
- ret
-
-LCacheMiss:
-// cache miss, return nil
- DW_MISS __cache_getMethod
- pop %rax // pushed by CacheLookup
- DW_MISS_POP __cache_getMethod
- xorl %eax, %eax
- ret
-
-LGetMethodExit:
- DW_END __cache_getMethod, 1, 0
- END_ENTRY __cache_getMethod
/********************************************************************
- * IMP _cache_getImp(Class cls, SEL sel)
+ * IMP cache_getImp(Class cls, SEL sel)
*
* On entry: a1 = class whose cache is to be searched
* a2 = selector to search for
* If not found, returns NULL.
********************************************************************/
- STATIC_ENTRY __cache_getImp
- DW_START __cache_getImp
+ STATIC_ENTRY _cache_getImp
+ DW_START _cache_getImp
// do lookup
movq %a1, %r11 // move class to r11 for CacheLookup
- CacheLookup NORMAL, __cache_getImp
-
-// cache hit, method triplet in %r11
- movq method_imp(%r11), %rax // return method imp address
- ret
+ CacheLookup GETIMP // returns IMP on success
LCacheMiss:
// cache miss, return nil
- DW_MISS __cache_getImp
- pop %rax // pushed by CacheLookup
- DW_MISS_POP __cache_getImp
xorl %eax, %eax
ret
LGetImpExit:
- DW_END __cache_getImp, 1, 0
- END_ENTRY __cache_getImp
+ DW_END _cache_getImp, 0
+ END_ENTRY _cache_getImp
/********************************************************************
.data
.align 3
- .private_extern __objc_tagged_isa_table
-__objc_tagged_isa_table:
+ .globl _objc_debug_taggedpointer_classes
+_objc_debug_taggedpointer_classes:
.fill 16, 8, 0
ENTRY _objc_msgSend
DW_START _objc_msgSend
+ MESSENGER_START
NilTest NORMAL
GetIsaFast NORMAL // r11 = self->isa
- CacheLookup NORMAL, _objc_msgSend // r11=method, eq set (nonstret fwd)
- jmp *method_imp(%r11) // goto *imp
+ CacheLookup NORMAL // calls IMP on success
NilTestSupport NORMAL
// cache miss: go search the method lists
LCacheMiss:
- DW_MISS _objc_msgSend
- GetIsa NORMAL // r11 = self->isa
+ // isa still in r11
MethodTableLookup %a1, %a2, _objc_msgSend // r11 = IMP
cmp %r11, %r11 // set eq (nonstret) for forwarding
jmp *%r11 // goto *imp
- DW_END _objc_msgSend, 1, 1
+ DW_END _objc_msgSend, 1
END_ENTRY _objc_msgSend
-#if __OBJC2__
- ENTRY _objc_msgSend_fixup
- DW_START _objc_msgSend_fixup
-
- NilTest NORMAL
-
- SaveRegisters _objc_msgSend_fixup
- // Dereference obj/isa/cache to crash before _objc_fixupMessageRef
- movq 8(%a2), %a6 // selector
- GetIsa NORMAL // r11 = isa = *receiver
- movq cache(%r11), %a5 // cache = *isa
- movq mask(%a5), %a4 // *cache
-
- // a1 = receiver
- // a2 = address of message ref
- movq %a2, %a3
- xorl %a2d, %a2d
- // __objc_fixupMessageRef(receiver, 0, ref)
- call __objc_fixupMessageRef
- movq %rax, %r11
-
- RestoreRegisters _objc_msgSend_fixup
-
- // imp is in r11
- // Load _cmd from the message_ref
- movq 8(%a2), %a2
- cmp %r11, %r11 // set nonstret (eq) for forwarding
- jmp *%r11
+ ENTRY _objc_msgSend_fixup
+ int3
+ END_ENTRY _objc_msgSend_fixup
- NilTestSupport NORMAL
- DW_END _objc_msgSend_fixup, 0, 1
- END_ENTRY _objc_msgSend_fixup
-
-
STATIC_ENTRY _objc_msgSend_fixedup
// Load _cmd from the message_ref
movq 8(%a2), %a2
jmp _objc_msgSend
END_ENTRY _objc_msgSend_fixedup
-#endif
/********************************************************************
ENTRY _objc_msgSendSuper
DW_START _objc_msgSendSuper
-
+ MESSENGER_START
+
// search the cache (objc_super in %a1)
movq class(%a1), %r11 // class = objc_super->class
- CacheLookup NORMAL, _objc_msgSendSuper // r11 = method, eq set (nonstret fwd)
- movq receiver(%a1), %a1 // load real receiver
- jmp *method_imp(%r11) // goto *imp
+ CacheLookup SUPER // calls IMP on success
// cache miss: go search the method lists
LCacheMiss:
- DW_MISS _objc_msgSendSuper
+ // class still in r11
movq receiver(%a1), %r10
- movq class(%a1), %r11
MethodTableLookup %r10, %a2, _objc_msgSendSuper // r11 = IMP
movq receiver(%a1), %a1 // load real receiver
cmp %r11, %r11 // set eq (nonstret) for forwarding
jmp *%r11 // goto *imp
- DW_END _objc_msgSendSuper, 1, 1
+ DW_END _objc_msgSendSuper, 1
END_ENTRY _objc_msgSendSuper
* id objc_msgSendSuper2
********************************************************************/
-#if __OBJC2__
- ENTRY _objc_msgSendSuper2_fixup
- DW_START _objc_msgSendSuper2_fixup
-
- SaveRegisters _objc_msgSendSuper2_fixup
- // a1 = address of objc_super2
- // a2 = address of message ref
- movq %a2, %a3
- movq %a1, %a2
- movq receiver(%a1), %a1
- // __objc_fixupMessageRef(receiver, objc_super, ref)
- call __objc_fixupMessageRef
- movq %rax, %r11
- RestoreRegisters _objc_msgSendSuper2_fixup
-
- // imp is in r11
- // Load _cmd from the message_ref
- movq 8(%a2), %a2
- // Load receiver from objc_super2
- movq receiver(%a1), %a1
- cmp %r11, %r11 // set nonstret (eq) for forwarding
- jmp *%r11
-
- DW_END _objc_msgSendSuper2_fixup, 0, 1
- END_ENTRY _objc_msgSendSuper2_fixup
-
-
- STATIC_ENTRY _objc_msgSendSuper2_fixedup
- movq 8(%a2), %a2 // load _cmd from message_ref
- jmp _objc_msgSendSuper2
- END_ENTRY _objc_msgSendSuper2_fixedup
-
-
ENTRY _objc_msgSendSuper2
DW_START _objc_msgSendSuper2
+ MESSENGER_START
+
// objc_super->class is superclass of class to search
// search the cache (objc_super in %a1)
movq class(%a1), %r11 // cls = objc_super->class
movq 8(%r11), %r11 // cls = class->superclass
- CacheLookup NORMAL, _objc_msgSendSuper2 // r11 = method, eq set (nonstret fwd)
- movq receiver(%a1), %a1 // load real receiver
- jmp *method_imp(%r11) // goto *imp
+ CacheLookup SUPER2 // calls IMP on success
// cache miss: go search the method lists
LCacheMiss:
- DW_MISS _objc_msgSendSuper2
+ // superclass still in r11
movq receiver(%a1), %r10
- movq class(%a1), %r11
- movq 8(%r11), %r11
MethodTableLookup %r10, %a2, _objc_msgSendSuper2 // r11 = IMP
movq receiver(%a1), %a1 // load real receiver
cmp %r11, %r11 // set eq (nonstret) for forwarding
jmp *%r11 // goto *imp
- DW_END _objc_msgSendSuper2, 1, 1
+ DW_END _objc_msgSendSuper2, 1
END_ENTRY _objc_msgSendSuper2
-#endif
+
+
+ ENTRY _objc_msgSendSuper2_fixup
+ int3
+ END_ENTRY _objc_msgSendSuper2_fixup
+
+
+ STATIC_ENTRY _objc_msgSendSuper2_fixedup
+ // Load _cmd from the message_ref
+ movq 8(%a2), %a2
+ jmp _objc_msgSendSuper2
+ END_ENTRY _objc_msgSendSuper2_fixedup
/********************************************************************
ENTRY _objc_msgSend_fpret
DW_START _objc_msgSend_fpret
-
+ MESSENGER_START
+
NilTest FPRET
GetIsaFast FPRET // r11 = self->isa
- CacheLookup FPRET, _objc_msgSend_fpret // r11 = method, eq set (nonstret fwd)
- jmp *method_imp(%r11) // goto *imp
+ CacheLookup FPRET // calls IMP on success
NilTestSupport FPRET
// cache miss: go search the method lists
LCacheMiss:
- DW_MISS _objc_msgSend_fpret
- GetIsa FPRET // r11 = self->isa
+ // isa still in r11
MethodTableLookup %a1, %a2, _objc_msgSend_fpret // r11 = IMP
cmp %r11, %r11 // set eq (nonstret) for forwarding
jmp *%r11 // goto *imp
- DW_END _objc_msgSend_fpret, 1, 1
+ DW_END _objc_msgSend_fpret, 1
END_ENTRY _objc_msgSend_fpret
+
-#if __OBJC2__
ENTRY _objc_msgSend_fpret_fixup
- DW_START _objc_msgSend_fpret_fixup
-
- NilTest FPRET
+ int3
+ END_ENTRY _objc_msgSend_fpret_fixup
- SaveRegisters _objc_msgSend_fpret_fixup
-
- // Dereference obj/isa/cache to crash before _objc_fixupMessageRef
- movq 8(%a2), %a6 // selector
- GetIsa FPRET // r11 = isa = *receiver
- movq cache(%r11), %a5 // cache = *isa
- movq mask(%a5), %a4 // *cache
-
- // a1 = receiver
- // a2 = address of message ref
- movq %a2, %a3
- xorl %a2d, %a2d
- // __objc_fixupMessageRef(receiver, 0, ref)
- call __objc_fixupMessageRef
- movq %rax, %r11
-
- RestoreRegisters _objc_msgSend_fpret_fixup
-
- // imp is in r11
- // Load _cmd from the message_ref
- movq 8(%a2), %a2
- cmp %r11, %r11 // set nonstret (eq) for forwarding
- jmp *%r11
-
- NilTestSupport FPRET
- DW_END _objc_msgSend_fpret_fixup, 0, 1
- END_ENTRY _objc_msgSend_fpret_fixup
-
-
STATIC_ENTRY _objc_msgSend_fpret_fixedup
// Load _cmd from the message_ref
movq 8(%a2), %a2
jmp _objc_msgSend_fpret
END_ENTRY _objc_msgSend_fpret_fixedup
-#endif
/********************************************************************
ENTRY _objc_msgSend_fp2ret
DW_START _objc_msgSend_fp2ret
-
+ MESSENGER_START
+
NilTest FP2RET
GetIsaFast FP2RET // r11 = self->isa
- CacheLookup FP2RET, _objc_msgSend_fp2ret // r11 = method, eq set (nonstret fwd)
- jmp *method_imp(%r11) // goto *imp
+ CacheLookup FP2RET // calls IMP on success
NilTestSupport FP2RET
// cache miss: go search the method lists
LCacheMiss:
- DW_MISS _objc_msgSend_fp2ret
- GetIsa FP2RET // r11 = self->isa
+ // isa still in r11
MethodTableLookup %a1, %a2, _objc_msgSend_fp2ret // r11 = IMP
cmp %r11, %r11 // set eq (nonstret) for forwarding
jmp *%r11 // goto *imp
- DW_END _objc_msgSend_fp2ret, 1, 1
+ DW_END _objc_msgSend_fp2ret, 1
END_ENTRY _objc_msgSend_fp2ret
-#if __OBJC2__
- ENTRY _objc_msgSend_fp2ret_fixup
- DW_START _objc_msgSend_fp2ret_fixup
-
- NilTest FP2RET
-
- SaveRegisters _objc_msgSend_fp2ret_fixup
-
- // Dereference obj/isa/cache to crash before _objc_fixupMessageRef
- movq 8(%a2), %a6 // selector
- GetIsa FP2RET // r11 = isa = *receiver
- movq cache(%r11), %a5 // cache = *isa
- movq mask(%a5), %a4 // *cache
-
- // a1 = receiver
- // a2 = address of message ref
- movq %a2, %a3
- xorl %a2d, %a2d
- // __objc_fixupMessageRef(receiver, 0, ref)
- call __objc_fixupMessageRef
- movq %rax, %r11
-
- RestoreRegisters _objc_msgSend_fp2ret_fixup
- // imp is in r11
- // Load _cmd from the message_ref
- movq 8(%a2), %a2
- cmp %r11, %r11 // set nonstret (eq) for forwarding
- jmp *%r11
+ ENTRY _objc_msgSend_fp2ret_fixup
+ int3
+ END_ENTRY _objc_msgSend_fp2ret_fixup
- NilTestSupport FP2RET
- DW_END _objc_msgSend_fp2ret_fixup, 0, 1
- END_ENTRY _objc_msgSend_fp2ret_fixup
-
-
STATIC_ENTRY _objc_msgSend_fp2ret_fixedup
// Load _cmd from the message_ref
movq 8(%a2), %a2
jmp _objc_msgSend_fp2ret
END_ENTRY _objc_msgSend_fp2ret_fixedup
-#endif
/********************************************************************
ENTRY _objc_msgSend_stret
DW_START _objc_msgSend_stret
-
+ MESSENGER_START
+
NilTest STRET
GetIsaFast STRET // r11 = self->isa
- CacheLookup STRET, _objc_msgSend_stret // r11 = method, ne set (stret fwd)
- jmp *method_imp(%r11) // goto *imp
+ CacheLookup STRET // calls IMP on success
NilTestSupport STRET
// cache miss: go search the method lists
LCacheMiss:
- DW_MISS _objc_msgSend_stret
- GetIsa STRET // r11 = self->isa
+ // isa still in r11
MethodTableLookup %a2, %a3, _objc_msgSend_stret // r11 = IMP
test %r11, %r11 // set ne (stret) for forward; r11!=0
jmp *%r11 // goto *imp
- DW_END _objc_msgSend_stret, 1, 1
+ DW_END _objc_msgSend_stret, 1
END_ENTRY _objc_msgSend_stret
-#if __OBJC2__
- ENTRY _objc_msgSend_stret_fixup
- DW_START _objc_msgSend_stret_fixup
- NilTest STRET
-
- SaveRegisters _objc_msgSend_stret_fixup
-
- // Dereference obj/isa/cache to crash before _objc_fixupMessageRef
- movq 8(%a3), %a6 // selector
- GetIsa STRET // r11 = isa = *receiver
- movq cache(%r11), %a5 // cache = *isa
- movq mask(%a5), %a4 // *cache
-
- // a2 = receiver
- // a3 = address of message ref
- movq %a2, %a1
- xorl %a2d, %a2d
- // __objc_fixupMessageRef(receiver, 0, ref)
- call __objc_fixupMessageRef
- movq %rax, %r11
-
- RestoreRegisters _objc_msgSend_stret_fixup
-
- // imp is in r11
- // Load _cmd from the message_ref
- movq 8(%a3), %a3
- test %r11, %r11 // set stret (ne) for forward; r11!=0
- jmp *%r11 // goto *imp
-
- NilTestSupport STRET
-
- DW_END _objc_msgSend_stret_fixup, 0, 1
- END_ENTRY _objc_msgSend_stret_fixup
+ ENTRY _objc_msgSend_stret_fixup
+ int3
+ END_ENTRY _objc_msgSend_stret_fixup
STATIC_ENTRY _objc_msgSend_stret_fixedup
movq 8(%a3), %a3
jmp _objc_msgSend_stret
END_ENTRY _objc_msgSend_stret_fixedup
-#endif
/********************************************************************
ENTRY _objc_msgSendSuper_stret
DW_START _objc_msgSendSuper_stret
-
+ MESSENGER_START
+
// search the cache (objc_super in %a2)
movq class(%a2), %r11 // class = objc_super->class
- CacheLookup STRET, _objc_msgSendSuper_stret // r11 = method, ne set (stret fwd)
- movq receiver(%a2), %a2 // load real receiver
- jmp *method_imp(%r11) // goto *imp
+ CacheLookup SUPER_STRET // calls IMP on success
// cache miss: go search the method lists
LCacheMiss:
- DW_MISS _objc_msgSendSuper_stret
+ // class still in r11
movq receiver(%a2), %r10
- movq class(%a2), %r11
MethodTableLookup %r10, %a3, _objc_msgSendSuper_stret // r11 = IMP
movq receiver(%a2), %a2 // load real receiver
test %r11, %r11 // set ne (stret) for forward; r11!=0
jmp *%r11 // goto *imp
- DW_END _objc_msgSendSuper_stret, 1, 1
+ DW_END _objc_msgSendSuper_stret, 1
END_ENTRY _objc_msgSendSuper_stret
* id objc_msgSendSuper2_stret
********************************************************************/
-#if __OBJC2__
- ENTRY _objc_msgSendSuper2_stret_fixup
- DW_START _objc_msgSendSuper2_stret_fixup
-
- SaveRegisters _objc_msgSendSuper2_stret_fixup
- // a2 = address of objc_super2
- // a3 = address of message ref
- movq receiver(%a2), %a1
- // __objc_fixupMessageRef(receiver, objc_super, ref)
- call __objc_fixupMessageRef
- movq %rax, %r11
- RestoreRegisters _objc_msgSendSuper2_stret_fixup
-
- // imp is in r11
- // Load _cmd from the message_ref
- movq 8(%a3), %a3
- // Load receiver from objc_super2
- movq receiver(%a2), %a2
- test %r11, %r11 // set stret (ne) for forward; r11!=0
- jmp *%r11 // goto *imp
-
- DW_END _objc_msgSendSuper2_stret_fixup, 0, 1
- END_ENTRY _objc_msgSendSuper2_stret_fixup
-
-
- STATIC_ENTRY _objc_msgSendSuper2_stret_fixedup
- movq 8(%a3), %a3 // load _cmd from message_ref
- jmp _objc_msgSendSuper2_stret
- END_ENTRY _objc_msgSendSuper2_stret_fixedup
-
-
ENTRY _objc_msgSendSuper2_stret
DW_START _objc_msgSendSuper2_stret
-
+ MESSENGER_START
+
// search the cache (objc_super in %a2)
movq class(%a2), %r11 // class = objc_super->class
- movq 8(%r11), %r11 // class = class->super_class
- CacheLookup STRET, _objc_msgSendSuper2_stret // r11 = method, ne set (stret fwd)
- movq receiver(%a2), %a2 // load real receiver
- jmp *method_imp(%r11) // goto *imp
+ movq 8(%r11), %r11 // class = class->superclass
+ CacheLookup SUPER2_STRET // calls IMP on success
// cache miss: go search the method lists
LCacheMiss:
- DW_MISS _objc_msgSendSuper2_stret
+ // superclass still in r11
movq receiver(%a2), %r10
- movq class(%a2), %r11
- movq 8(%r11), %r11
MethodTableLookup %r10, %a3, _objc_msgSendSuper2_stret // r11 = IMP
movq receiver(%a2), %a2 // load real receiver
test %r11, %r11 // set ne (stret) for forward; r11!=0
jmp *%r11 // goto *imp
- DW_END _objc_msgSendSuper2_stret, 1, 1
+ DW_END _objc_msgSendSuper2_stret, 1
END_ENTRY _objc_msgSendSuper2_stret
-#endif
+
+
+ ENTRY _objc_msgSendSuper2_stret_fixup
+ int3
+ END_ENTRY _objc_msgSendSuper2_stret_fixup
+
+
+ STATIC_ENTRY _objc_msgSendSuper2_stret_fixedup
+ // Load _cmd from the message_ref
+ movq 8(%a3), %a3
+ jmp _objc_msgSendSuper2_stret
+ END_ENTRY _objc_msgSendSuper2_stret_fixedup
+/********************************************************************
+ *
+ * _objc_msgSend_uncached_impcache
+ * _objc_msgSend_uncached
+ * _objc_msgSend_stret_uncached
+ *
+ * Used to erase method cache entries in-place by
+ * bouncing them to the uncached lookup.
+ *
+ ********************************************************************/
+
+ STATIC_ENTRY __objc_msgSend_uncached_impcache
+ // Method cache version
+
+ // THIS IS NOT A CALLABLE C FUNCTION
+ // Out-of-band condition register is NE for stret, EQ otherwise.
+ // Out-of-band r11 is the searched class
+
+ MESSENGER_START
+ nop
+ MESSENGER_END_SLOW
+
+ jne __objc_msgSend_stret_uncached
+ jmp __objc_msgSend_uncached
+
+ END_ENTRY __objc_msgSend_uncached_impcache
+
+
+ STATIC_ENTRY __objc_msgSend_uncached
+ DW_START __objc_msgSend_uncached
+
+ // THIS IS NOT A CALLABLE C FUNCTION
+ // Out-of-band r11 is the searched class
+
+ // r11 is already the class to search
+ MethodTableLookup %a1, %a2, __objc_msgSend_uncached // r11 = IMP
+ cmp %r11, %r11 // set eq (nonstret) for forwarding
+ jmp *%r11 // goto *imp
+
+ DW_END __objc_msgSend_uncached, 1
+ END_ENTRY __objc_msgSend_uncached
+
+
+ STATIC_ENTRY __objc_msgSend_stret_uncached
+ DW_START __objc_msgSend_stret_uncached
+ // THIS IS NOT A CALLABLE C FUNCTION
+ // Out-of-band r11 is the searched class
+
+ // r11 is already the class to search
+ MethodTableLookup %a2, %a3, __objc_msgSend_stret_uncached // r11 = IMP
+ test %r11, %r11 // set ne (stret) for forward; r11!=0
+ jmp *%r11 // goto *imp
+
+ DW_END __objc_msgSend_stret_uncached, 1
+ END_ENTRY __objc_msgSend_stret_uncached
+
+
/********************************************************************
*
* id _objc_msgForward(id self, SEL _cmd,...);
__objc_forward_stret_handler: .quad 0
- STATIC_ENTRY __objc_msgForward_internal
+ STATIC_ENTRY __objc_msgForward_impcache
// Method cache version
// THIS IS NOT A CALLABLE C FUNCTION
// Out-of-band condition register is NE for stret, EQ otherwise.
+ MESSENGER_START
+ nop
+ MESSENGER_END_SLOW
+
jne __objc_msgForward_stret
jmp __objc_msgForward
- END_ENTRY __objc_msgForward_internal
+ END_ENTRY __objc_msgForward_impcache
ENTRY __objc_msgForward
movq %r11, 16+LINK_AREA(%rsp)
// Save parameter registers
- movq %a1, 0+REG_AREA(%rsp)
+ movq %a1, 0+REG_AREA(%rsp) // note: used again below
movq %a2, 8+REG_AREA(%rsp)
movq %a3, 16+REG_AREA(%rsp)
movq %a4, 24+REG_AREA(%rsp)
call _objc_msgSend // forward:: is NOT struct-return
+ // Set return value register to the passed-in struct address
+ movq 0+REG_AREA(%rsp), %rax
// Retrieve return address from linkage area
movq 16+LINK_AREA(%rsp), %r11
// Pop stack frame
END_ENTRY __objc_ignored_method
-
-/********************************************************************
- *
- * id vtable_prototype(id self, message_ref *msg, ...)
- *
- * This code is copied to create vtable trampolines.
- * The instruction following LvtableIndex is modified to
- * insert each vtable index.
- * The instructions following LvtableTagTable are modified to
- * load the tagged isa table.
- *
- * This code is placed in its own section to prevent dtrace from
- * instrumenting it. Otherwise, dtrace would insert an INT3, the
- * code would be copied, and the copied INT3 would cause a crash.
- *
- * ABI WARNING ABI WARNING ABI WARNING ABI WARNING ABI WARNING
- * vtable_prototype steals %rax and does not clear %rdx on return
- * in order to precisely pack instructions into ifetch and cache lines
- * This means vtable dispatch must never be used for vararg calls
- * or very large return values.
- * ABI WARNING ABI WARNING ABI WARNING ABI WARNING ABI WARNING
- *
- ********************************************************************/
-
-.macro VTABLE /* byte-offset, name */
-
- .align 6
- .private_extern _$1
-_$1:
- test %a1, %a1
- je LvtableReturnZero_$1 // nil check
- testl $$1, %a1d
- jne LvtableTaggedPointer_$1 // tag check
-
- movq (%a1), %rax // load isa (see ABI WARNING)
- movq 24(%rax), %rax // load vtable
- movq 8(%a2), %a2 // load _cmd
-LvtableIndex_$1:
- jmpq * $0 (%rax) // load imp (DO NOT CHANGE)
-
-LvtableReturnZero_$1:
- // integer registers only; not used for fpret / stret / etc
- xorl %eax, %eax
- // xorl %edx, %edx (see ABI WARNING)
- ret
-
- nop
-LvtableTaggedPointer_$1:
- // extract isa (bits 1-2-3) from %a1, bit 0 is kept around for the heck of it
- movl %a1d, %eax
- andl $$0xF, %eax
-LvtableTagTable_$1:
-.if $0 == 0x7fff
- movq $$0x1122334455667788, %r10 // vtable_prototype (DO NOT CHANGE)
-.else
- leaq __objc_tagged_isa_table(%rip), %r10
-.endif
-LvtableTagTableEnd_$1:
- movq (%r10, %rax, 8), %r10 // load isa from table (see ABI WARNING
- movq 24(%r10), %rax // load vtable
- movq 8(%a2), %a2 // load _cmd
-LvtableIndex2_$1:
- jmpq * $0 (%rax) // load imp (DO NOT CHANGE)
-
-LvtableEnd_$1:
-
-.endmacro
-
- .section __TEXT,__objc_codegen,regular
- VTABLE 0x7fff, vtable_prototype
-
- .data
- .align 2
- .private_extern _vtable_prototype_size
-_vtable_prototype_size:
- .long LvtableEnd_vtable_prototype - _vtable_prototype
-
- .private_extern _vtable_prototype_index_offset
-_vtable_prototype_index_offset:
- .long LvtableIndex_vtable_prototype - _vtable_prototype
-
- .private_extern _vtable_prototype_index2_offset
-_vtable_prototype_index2_offset:
- .long LvtableIndex2_vtable_prototype - _vtable_prototype
- .private_extern _vtable_prototype_tagtable_offset
-_vtable_prototype_tagtable_offset:
- .long LvtableTagTable_vtable_prototype - _vtable_prototype
-
- .private_extern _vtable_prototype_tagtable_size
-_vtable_prototype_tagtable_size:
- .long LvtableTagTableEnd_vtable_prototype - LvtableTagTable_vtable_prototype
-
-/********************************************************************
- *
- * id vtable_ignored(id self, message_ref *msg, ...)
- *
- * Vtable trampoline for GC-ignored selectors. Immediately returns self.
- *
- ********************************************************************/
-
- STATIC_ENTRY _vtable_ignored
- movq %a1, %rax
- ret
-
-
-/********************************************************************
- *
- * id objc_msgSend_vtable<n>(id self, message_ref *msg, ...)
- *
- * Built-in expansions of vtable_prototype for the default vtable.
- *
- ********************************************************************/
-
- .text
-
- .align 4
- .private_extern _defaultVtableTrampolineDescriptors
-_defaultVtableTrampolineDescriptors:
- // objc_trampoline_header
- .short 16 // headerSize
- .short 8 // descSize
- .long 16 // descCount
- .quad 0 // next
-
- // objc_trampoline_descriptor[16]
-.macro TDESC /* n */
-L_tdesc$0:
- .long _objc_msgSend_vtable$0 - L_tdesc$0
- .long (1<<0) + (1<<2) // MESSAGE and VTABLE
-.endmacro
-
- TDESC 0
- TDESC 1
- TDESC 2
- TDESC 3
- TDESC 4
- TDESC 5
- TDESC 6
- TDESC 7
- TDESC 8
- TDESC 9
- TDESC 10
- TDESC 11
- TDESC 12
- TDESC 13
- TDESC 14
- TDESC 15
-
- // trampoline code
- .align 4
- VTABLE 0*8, objc_msgSend_vtable0
- VTABLE 1*8, objc_msgSend_vtable1
- VTABLE 2*8, objc_msgSend_vtable2
- VTABLE 3*8, objc_msgSend_vtable3
- VTABLE 4*8, objc_msgSend_vtable4
- VTABLE 5*8, objc_msgSend_vtable5
- VTABLE 6*8, objc_msgSend_vtable6
- VTABLE 7*8, objc_msgSend_vtable7
- VTABLE 8*8, objc_msgSend_vtable8
- VTABLE 9*8, objc_msgSend_vtable9
- VTABLE 10*8, objc_msgSend_vtable10
- VTABLE 11*8, objc_msgSend_vtable11
- VTABLE 12*8, objc_msgSend_vtable12
- VTABLE 13*8, objc_msgSend_vtable13
- VTABLE 14*8, objc_msgSend_vtable14
- VTABLE 15*8, objc_msgSend_vtable15
+.section __DATA,__objc_msg_break
+.quad 0
+.quad 0
#endif
--- /dev/null
+/* NSObjCRuntime.h
+ Copyright (c) 1994-2012, Apple Inc. All rights reserved.
+*/
+
+#ifndef _OBJC_NSOBJCRUNTIME_H_
+#define _OBJC_NSOBJCRUNTIME_H_
+
+#include <TargetConditionals.h>
+#include <objc/objc.h>
+
+#if __LP64__ || (TARGET_OS_EMBEDDED && !TARGET_OS_IPHONE) || TARGET_OS_WIN32 || NS_BUILD_32_LIKE_64
+typedef long NSInteger;
+typedef unsigned long NSUInteger;
+#else
+typedef int NSInteger;
+typedef unsigned int NSUInteger;
+#endif
+
+#define NSIntegerMax LONG_MAX
+#define NSIntegerMin LONG_MIN
+#define NSUIntegerMax ULONG_MAX
+
+#define NSINTEGER_DEFINED 1
+
+
+#endif
--- /dev/null
+/* NSObject.h
+ Copyright (c) 1994-2012, Apple Inc. All rights reserved.
+*/
+
+#ifndef _OBJC_NSOBJECT_H_
+#define _OBJC_NSOBJECT_H_
+
+#include <objc/objc.h>
+#include <objc/NSObjCRuntime.h>
+
+@class NSString, NSMethodSignature, NSInvocation;
+
+@protocol NSObject
+
+- (BOOL)isEqual:(id)object;
+- (NSUInteger)hash;
+
+- (Class)superclass;
+- (Class)class;
+- (id)self;
+- (struct _NSZone *)zone OBJC_ARC_UNAVAILABLE;
+
+- (id)performSelector:(SEL)aSelector;
+- (id)performSelector:(SEL)aSelector withObject:(id)object;
+- (id)performSelector:(SEL)aSelector withObject:(id)object1 withObject:(id)object2;
+
+- (BOOL)isProxy;
+
+- (BOOL)isKindOfClass:(Class)aClass;
+- (BOOL)isMemberOfClass:(Class)aClass;
+- (BOOL)conformsToProtocol:(Protocol *)aProtocol;
+
+- (BOOL)respondsToSelector:(SEL)aSelector;
+
+- (id)retain OBJC_ARC_UNAVAILABLE;
+- (oneway void)release OBJC_ARC_UNAVAILABLE;
+- (id)autorelease OBJC_ARC_UNAVAILABLE;
+- (NSUInteger)retainCount OBJC_ARC_UNAVAILABLE;
+
+- (NSString *)description;
+@optional
+- (NSString *)debugDescription;
+
+@end
+
+
+__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0)
+OBJC_ROOT_CLASS
+OBJC_EXPORT
+@interface NSObject <NSObject> {
+ Class isa OBJC_ISA_AVAILABILITY;
+}
+
++ (void)load;
+
++ (void)initialize;
+- (id)init;
+
++ (id)new;
++ (id)allocWithZone:(struct _NSZone *)zone;
++ (id)alloc;
+- (void)dealloc;
+
+- (void)finalize;
+
+- (id)copy;
+- (id)mutableCopy;
+
++ (id)copyWithZone:(struct _NSZone *)zone OBJC_ARC_UNAVAILABLE;
++ (id)mutableCopyWithZone:(struct _NSZone *)zone OBJC_ARC_UNAVAILABLE;
+
++ (Class)superclass;
++ (Class)class;
++ (BOOL)instancesRespondToSelector:(SEL)aSelector;
++ (BOOL)conformsToProtocol:(Protocol *)protocol;
+- (IMP)methodForSelector:(SEL)aSelector;
++ (IMP)instanceMethodForSelector:(SEL)aSelector;
+- (void)doesNotRecognizeSelector:(SEL)aSelector;
+
+- (id)forwardingTargetForSelector:(SEL)aSelector __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+- (void)forwardInvocation:(NSInvocation *)anInvocation;
+- (NSMethodSignature *)methodSignatureForSelector:(SEL)aSelector;
+
++ (NSMethodSignature *)instanceMethodSignatureForSelector:(SEL)aSelector;
+
+- (BOOL)allowsWeakReference UNAVAILABLE_ATTRIBUTE;
+- (BOOL)retainWeakReference UNAVAILABLE_ATTRIBUTE;
+
++ (NSString *)description;
+
++ (BOOL)isSubclassOfClass:(Class)aClass;
+
++ (BOOL)resolveClassMethod:(SEL)sel __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
++ (BOOL)resolveInstanceMethod:(SEL)sel __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+@end
+
+#endif
* @APPLE_LICENSE_HEADER_END@
*/
-#include "objc-weak.h"
#include "objc-private.h"
-#include "objc-internal.h"
-#include "objc-os.h"
-#if __OBJC2__
-#include "objc-runtime-new.h"
-#endif
-#include "runtime.h"
+#include "NSObject.h"
+
+#include "objc-weak.h"
#include "llvm-DenseMap.h"
+#include "NSObject.h"
#include <malloc/malloc.h>
#include <stdint.h>
- (SEL)selector;
@end
-// better to not rely on Foundation to build
-@class NSString;
-@class NSMethodSignature;
-#ifdef __LP64__
-typedef unsigned long NSUInteger;
-#else
-typedef unsigned int NSUInteger;
-#endif
-typedef struct _NSZone NSZone;
-
-@protocol NSObject
-
-- (BOOL)isEqual:(id)object;
-- (NSUInteger)hash;
-
-- (Class)superclass;
-- (Class)class;
-- (id)self;
-- (NSZone *)zone;
-
-- (id)performSelector:(SEL)aSelector;
-- (id)performSelector:(SEL)aSelector withObject:(id)object;
-- (id)performSelector:(SEL)aSelector withObject:(id)object1 withObject:(id)object2;
-
-- (BOOL)isProxy;
-
-- (BOOL)isKindOfClass:(Class)aClass;
-- (BOOL)isMemberOfClass:(Class)aClass;
-- (BOOL)conformsToProtocol:(Protocol *)aProtocol;
-
-- (BOOL)respondsToSelector:(SEL)aSelector;
-
-- (id)retain;
-- (oneway void)release;
-- (id)autorelease;
-- (NSUInteger)retainCount;
-
-- (NSString *)description;
-- (NSString *)debugDescription;
-
-@end
-
-OBJC_EXPORT
-@interface NSObject <NSObject>
-{
- Class isa;
-}
-@end
-
// HACK -- the use of these functions must be after the @implementation
id bypass_msgSend_retain(NSObject *obj) asm("-[NSObject retain]");
void bypass_msgSend_release(NSObject *obj) asm("-[NSObject release]");
* Weak ivar support
**********************************************************************/
-static bool seen_weak_refs;
-
static id defaultBadAllocHandler(Class cls)
{
_objc_fatal("attempt to allocate object of class '%s' failed",
}
-#define ARR_LOGGING 0
-
-#if ARR_LOGGING
-struct {
- int retains;
- int releases;
- int autoreleases;
- int blockCopies;
-} CompilerGenerated, ExplicitlyCoded;
-
-void (^objc_arr_log)(const char *, id param) =
- ^(const char *str, id param) { printf("%s %p\n", str, param); };
-#endif
-
-
namespace {
#if TARGET_OS_EMBEDDED
#endif
// should be a multiple of cache line size (64)
-#define SIDE_TABLE_SIZE 64
+#define SIDE_TABLE_SIZE 128
+
+// The order of these bits is important.
+#define SIDE_TABLE_WEAKLY_REFERENCED (1<<0)
+#define SIDE_TABLE_DEALLOCATING (1<<1) // MSB-ward of weak bit
+#define SIDE_TABLE_RC_ONE (1<<2) // MSB-ward of deallocating bit
+
+#define SIDE_TABLE_RC_SHIFT 2
+
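// Illustrative sketch (not part of this patch): the per-object value stored in
// the RefcountMap below packs the flag bits above together with the extra
// retain count. Decoding a raw side-table word looks roughly like this, using
// the macros just defined (the helper name is hypothetical):

#include <stdbool.h>
#include <stddef.h>

void decode_refcnt_word_sketch(size_t word, bool *weakly_referenced,
                               bool *deallocating, size_t *extra_rc)
{
    *weakly_referenced = word & SIDE_TABLE_WEAKLY_REFERENCED;
    *deallocating      = word & SIDE_TABLE_DEALLOCATING;
    // retainCount as reported to callers is this extra count plus one
    *extra_rc          = word >> SIDE_TABLE_RC_SHIFT;
}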
typedef objc::DenseMap<id,size_t,true> RefcountMap;
static uint8_t table_buf[SIDE_TABLE_STRIPE * SIDE_TABLE_SIZE];
public:
- OSSpinLock slock;
+ spinlock_t slock;
RefcountMap refcnts;
weak_table_t weak_table;
- SideTable() : slock(OS_SPINLOCK_INIT)
+ SideTable() : slock(SPINLOCK_INITIALIZER)
{
memset(&weak_table, 0, sizeof(weak_table));
}
new (&table_buf[i * SIDE_TABLE_SIZE]) SideTable;
}
}
-
- static bool noLocksHeld(void) {
- bool gotAll = true;
- for (int i = 0; i < SIDE_TABLE_STRIPE && gotAll; i++) {
- SideTable *s = (SideTable *)(&table_buf[i * SIDE_TABLE_SIZE]);
- if (OSSpinLockTry(&s->slock)) {
- OSSpinLockUnlock(&s->slock);
- } else {
- gotAll = false;
- }
- }
- return gotAll;
- }
};
STATIC_ASSERT(sizeof(SideTable) <= SIDE_TABLE_SIZE);
// anonymous namespace
};
-bool noSideTableLocksHeld(void)
-{
- return SideTable::noLocksHeld();
-}
//
// The -fobjc-arc flag causes the compiler to issue calls to objc_{retain/release/autorelease/retain_block}
//
id objc_retainBlock(id x) {
-#if ARR_LOGGING
- objc_arr_log("objc_retain_block", x);
- ++CompilerGenerated.blockCopies;
-#endif
return (id)_Block_copy(x);
}
return objc_autorelease(objc_retain(obj));
}
+/**
+ * This function stores a new value into a __weak variable. It would
+ * be used anywhere a __weak variable is the target of an assignment.
+ *
+ * @param location The address of the weak pointer itself
+ * @param newObj The new object this weak ptr should now point to
+ *
+ * @return \e newObj
+ */
id
objc_storeWeak(id *location, id newObj)
{
id oldObj;
SideTable *oldTable;
SideTable *newTable;
- OSSpinLock *lock1;
+ spinlock_t *lock1;
#if SIDE_TABLE_STRIPE > 1
- OSSpinLock *lock2;
+ spinlock_t *lock2;
#endif
- if (!seen_weak_refs) {
- seen_weak_refs = true;
- }
-
// Acquire locks for old and new values.
// Order by lock address to prevent lock ordering problems.
// Retry if the old value changes underneath us.
#if SIDE_TABLE_STRIPE > 1
lock2 = &oldTable->slock;
if (lock1 > lock2) {
- OSSpinLock *temp = lock1;
+ spinlock_t *temp = lock1;
lock1 = lock2;
lock2 = temp;
}
- if (lock1 != lock2) OSSpinLockLock(lock2);
+ if (lock1 != lock2) spinlock_lock(lock2);
#endif
- OSSpinLockLock(lock1);
+ spinlock_lock(lock1);
if (*location != oldObj) {
- OSSpinLockUnlock(lock1);
+ spinlock_unlock(lock1);
#if SIDE_TABLE_STRIPE > 1
- if (lock1 != lock2) OSSpinLockUnlock(lock2);
+ if (lock1 != lock2) spinlock_unlock(lock2);
#endif
goto retry;
}
- if (oldObj) {
- weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
- }
- if (newObj) {
- newObj = weak_register_no_lock(&newTable->weak_table, newObj,location);
- // weak_register_no_lock returns NULL if weak store should be rejected
+ weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
+ newObj = weak_register_no_lock(&newTable->weak_table, newObj, location);
+ // weak_register_no_lock returns nil if weak store should be rejected
+
+ // Set is-weakly-referenced bit in refcount table.
+ if (newObj && !newObj->isTaggedPointer()) {
+ newTable->refcnts[DISGUISE(newObj)] |= SIDE_TABLE_WEAKLY_REFERENCED;
}
+
// Do not set *location anywhere else. That would introduce a race.
*location = newObj;
- OSSpinLockUnlock(lock1);
+ spinlock_unlock(lock1);
#if SIDE_TABLE_STRIPE > 1
- if (lock1 != lock2) OSSpinLockUnlock(lock2);
+ if (lock1 != lock2) spinlock_unlock(lock2);
#endif
return newObj;
id result;
SideTable *table;
- OSSpinLock *lock;
+ spinlock_t *lock;
retry:
result = *location;
- if (!result) return NULL;
+ if (!result) return nil;
table = SideTable::tableForPointer(result);
lock = &table->slock;
- OSSpinLockLock(lock);
+ spinlock_lock(lock);
if (*location != result) {
- OSSpinLockUnlock(lock);
+ spinlock_unlock(lock);
goto retry;
}
- result = arr_read_weak_reference(&table->weak_table, location);
+ result = weak_read_no_lock(&table->weak_table, location);
- OSSpinLockUnlock(lock);
+ spinlock_unlock(lock);
return result;
}
+/**
+ * This loads the object referenced by a weak pointer and returns it, after
+ * retaining and autoreleasing the object to ensure that it stays alive
+ * long enough for the caller to use it. This function would be used
+ * anywhere a __weak variable is used in an expression.
+ *
+ * @param location The weak pointer address
+ *
+ * @return The object pointed to by \e location, or \c nil if \e location is \c nil.
+ */
id
objc_loadWeak(id *location)
{
+ if (!*location) return nil;
return objc_autorelease(objc_loadWeakRetained(location));
}
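// Illustrative sketch (not part of this patch): the weak entry points defined
// in this file are what an ARC compiler emits around a __weak local variable.
// Compiling "__weak id w = obj; use(w);" produces code roughly equivalent to
// the following; the extern declarations restate functions defined here, and
// weak_local_sketch()/use()/obj are made-up names.

#include <objc/objc.h>

extern id   objc_initWeak(id *addr, id val);
extern id   objc_loadWeakRetained(id *location);
extern void objc_destroyWeak(id *addr);
extern void objc_release(id obj);

void weak_local_sketch(id obj, void (*use)(id))
{
    id w;
    objc_initWeak(&w, obj);              // __weak id w = obj;
    id tmp = objc_loadWeakRetained(&w);  // read of w, retained for the call
    use(tmp);
    objc_release(tmp);                   // balance the loadWeakRetained
    objc_destroyWeak(&w);                // w goes out of scope
}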
+/**
+ * Initialize a fresh weak pointer to some object location.
+ * It would be used for code like:
+ *
+ * (The nil case)
+ * __weak id weakPtr;
+ * (The non-nil case)
+ * NSObject *o = ...;
+ * __weak id weakPtr = o;
+ *
+ * @param addr Address of __weak ptr.
+ * @param val Object ptr.
+ */
id
objc_initWeak(id *addr, id val)
{
*addr = 0;
+ if (!val) return nil;
return objc_storeWeak(addr, val);
}
+__attribute__((noinline, used)) void
+objc_destroyWeak_slow(id *addr)
+{
+ SideTable *oldTable;
+ spinlock_t *lock;
+ id oldObj;
+
+ // No need to see weak refs, we are destroying
+
+ // Acquire lock for old value only
+ // retry if the old value changes underneath us
+ retry:
+ oldObj = *addr;
+ oldTable = SideTable::tableForPointer(oldObj);
+
+ lock = &oldTable->slock;
+ spinlock_lock(lock);
+
+ if (*addr != oldObj) {
+ spinlock_unlock(lock);
+ goto retry;
+ }
+
+ weak_unregister_no_lock(&oldTable->weak_table, oldObj, addr);
+
+ spinlock_unlock(lock);
+}
+
+/**
+ * Destroys the relationship between a weak pointer
+ * and the object it is referencing in the internal weak
+ * table. If the weak pointer is not referencing anything,
+ * there is no need to edit the weak table.
+ *
+ * @param addr The weak pointer address.
+ */
void
objc_destroyWeak(id *addr)
{
- objc_storeWeak(addr, 0);
+ if (!*addr) return;
+ return objc_destroyWeak_slow(addr);
}
+/**
+ * This function copies a weak pointer from one location to another,
+ * when the destination doesn't already contain a weak pointer. It
+ * would be used for code like:
+ *
+ * __weak id weakPtr1 = ...;
+ * __weak id weakPtr2 = weakPtr1;
+ *
+ * @param to weakPtr2 in the example above
+ * @param from weakPtr1
+ */
void
objc_copyWeak(id *to, id *from)
{
objc_release(val);
}
+/**
+ * Move a weak pointer from one location to another.
+ * Before the move, the destination must be uninitialized.
+ * After the move, the source is nil.
+ */
void
objc_moveWeak(id *to, id *from)
{
objc_copyWeak(to, from);
- objc_destroyWeak(from);
+ objc_storeWeak(from, 0);
}
objects are stored.
*/
-extern "C" BREAKPOINT_FUNCTION(void objc_autoreleaseNoPool(id obj));
+BREAKPOINT_FUNCTION(void objc_autoreleaseNoPool(id obj));
namespace {
class AutoreleasePoolPage
{
-#define POOL_SENTINEL 0
+#define POOL_SENTINEL nil
static pthread_key_t const key = AUTORELEASE_POOL_KEY;
static uint8_t const SCRIBBLE = 0xA3; // 0xA3A3A3A3 after releasing
static size_t const SIZE =
#if PROTECT_AUTORELEASEPOOL
- 4096; // must be multiple of vm page size
+ PAGE_SIZE; // must be multiple of vm page size
#else
- 4096; // size and alignment, power of 2
+ PAGE_SIZE; // size and alignment, power of 2
#endif
static size_t const COUNT = SIZE / sizeof(id);
AutoreleasePoolPage(AutoreleasePoolPage *newParent)
: magic(), next(begin()), thread(pthread_self()),
- parent(newParent), child(NULL),
+ parent(newParent), child(nil),
depth(parent ? 1+parent->depth : 0),
hiwat(parent ? parent->hiwat : 0)
{
page = page->parent;
if (page) {
page->unprotect();
- page->child = NULL;
+ page->child = nil;
page->protect();
}
delete deathptr;
// reinstate TLS value while we work
setHotPage((AutoreleasePoolPage *)p);
pop(0);
- setHotPage(NULL);
+ setHotPage(nil);
}
static AutoreleasePoolPage *pageForPointer(const void *p)
assert(!page || page->full());
if (!page) {
+ // No pool. Silently push one.
assert(obj != POOL_SENTINEL);
- _objc_inform("Object %p of class %s autoreleased "
- "with no pool in place - just leaking - "
- "break on objc_autoreleaseNoPool() to debug",
- obj, object_getClassName(obj));
- objc_autoreleaseNoPool(obj);
- return NULL;
+
+ if (DebugMissingPools) {
+ _objc_inform("MISSING POOLS: Object %p of class %s "
+ "autoreleased with no pool in place - "
+ "just leaking - break on "
+ "objc_autoreleaseNoPool() to debug",
+ (void*)obj, object_getClassName(obj));
+ objc_autoreleaseNoPool(obj);
+ return nil;
+ }
+
+ push();
+ page = hotPage();
}
do {
static inline id autorelease(id obj)
{
assert(obj);
- assert(!OBJC_IS_TAGGED_PTR(obj));
+ assert(!obj->isTaggedPointer());
id *dest __unused = autoreleaseFast(obj);
assert(!dest || *dest == obj);
return obj;
static inline void *push()
{
if (!hotPage()) {
- setHotPage(new AutoreleasePoolPage(NULL));
+ setHotPage(new AutoreleasePoolPage(nil));
}
id *dest = autoreleaseFast(POOL_SENTINEL);
assert(*dest == POOL_SENTINEL);
// memory: delete empty children
// hysteresis: keep one empty child if this page is more than half full
// special case: delete everything for pop(0)
- if (!token) {
+ // special case: delete everything for pop(top) with DebugMissingPools
+ if (!token ||
+ (DebugMissingPools && page->empty() && !page->parent))
+ {
page->kill();
- setHotPage(NULL);
+ setHotPage(nil);
} else if (page->child) {
if (page->lessThanHalfFull()) {
page->child->kill();
extern "C" {
__attribute__((used,noinline,nothrow))
-static id _objc_rootRetain_slow(id obj);
+static id _objc_rootRetain_slow(id obj, SideTable *table);
__attribute__((used,noinline,nothrow))
-static bool _objc_rootReleaseWasZero_slow(id obj);
+static bool _objc_rootReleaseWasZero_slow(id obj, SideTable *table);
};
id
-_objc_rootRetain_slow(id obj)
+_objc_rootRetain_slow(id obj, SideTable *table)
{
- SideTable *table = SideTable::tableForPointer(obj);
- OSSpinLockLock(&table->slock);
- table->refcnts[DISGUISE(obj)] += 2;
- OSSpinLockUnlock(&table->slock);
+ spinlock_lock(&table->slock);
+ table->refcnts[DISGUISE(obj)] += SIDE_TABLE_RC_ONE;
+ spinlock_unlock(&table->slock);
return obj;
}
assert(obj);
assert(!UseGC);
- if (OBJC_IS_TAGGED_PTR(obj)) return true;
+ if (obj->isTaggedPointer()) return true;
SideTable *table = SideTable::tableForPointer(obj);
// NO SPINLOCK HERE
// _objc_rootTryRetain() is called exclusively by _objc_loadWeak(),
// which already acquired the lock on our behalf.
- if (table->slock == 0) {
- _objc_fatal("Do not call -_tryRetain.");
- }
+
+ // fixme can't do this efficiently with os_lock_handoff_s
+ // if (table->slock == 0) {
+ // _objc_fatal("Do not call -_tryRetain.");
+ // }
bool result = true;
RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj));
if (it == table->refcnts.end()) {
- table->refcnts[DISGUISE(obj)] = 2;
- } else if (it->second & 1) {
+ table->refcnts[DISGUISE(obj)] = SIDE_TABLE_RC_ONE;
+ } else if (it->second & SIDE_TABLE_DEALLOCATING) {
result = false;
} else {
- it->second += 2;
+ it->second += SIDE_TABLE_RC_ONE;
}
return result;
assert(obj);
assert(!UseGC);
- if (OBJC_IS_TAGGED_PTR(obj)) return false;
+ if (obj->isTaggedPointer()) return false;
SideTable *table = SideTable::tableForPointer(obj);
// NO SPINLOCK HERE
// _objc_rootIsDeallocating() is called exclusively by _objc_storeWeak(),
// which already acquired the lock on our behalf.
- if (table->slock == 0) {
- _objc_fatal("Do not call -_isDeallocating.");
- }
+
+
+ // fixme can't do this efficiently with os_lock_handoff_s
+ // if (table->slock == 0) {
+ // _objc_fatal("Do not call -_isDeallocating.");
+ // }
RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj));
- return (it != table->refcnts.end()) && ((it->second & 1) == 1);
+ return (it != table->refcnts.end()) && (it->second & SIDE_TABLE_DEALLOCATING);
}
// clear any weak table items
// clear extra retain count and deallocating bit
// (fixme warn or abort if extra retain count == 0 ?)
- OSSpinLockLock(&table->slock);
- if (seen_weak_refs) {
- arr_clear_deallocating(&table->weak_table, obj);
+ spinlock_lock(&table->slock);
+ RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj));
+ if (it != table->refcnts.end()) {
+ if (it->second & SIDE_TABLE_WEAKLY_REFERENCED) {
+ weak_clear_no_lock(&table->weak_table, obj);
+ }
+ table->refcnts.erase(it);
}
- table->refcnts.erase(DISGUISE(obj));
- OSSpinLockUnlock(&table->slock);
+ spinlock_unlock(&table->slock);
}
bool
-_objc_rootReleaseWasZero_slow(id obj)
+_objc_rootReleaseWasZero_slow(id obj, SideTable *table)
{
- SideTable *table = SideTable::tableForPointer(obj);
-
bool do_dealloc = false;
- OSSpinLockLock(&table->slock);
+ spinlock_lock(&table->slock);
RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj));
if (it == table->refcnts.end()) {
do_dealloc = true;
- table->refcnts[DISGUISE(obj)] = 1;
- } else if (it->second == 0) {
+ table->refcnts[DISGUISE(obj)] = SIDE_TABLE_DEALLOCATING;
+ } else if (it->second < SIDE_TABLE_DEALLOCATING) {
+ // SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it.
do_dealloc = true;
- it->second = 1;
+ it->second |= SIDE_TABLE_DEALLOCATING;
} else {
- it->second -= 2;
+ it->second -= SIDE_TABLE_RC_ONE;
}
- OSSpinLockUnlock(&table->slock);
+ spinlock_unlock(&table->slock);
return do_dealloc;
}
assert(obj);
assert(!UseGC);
- if (OBJC_IS_TAGGED_PTR(obj)) return false;
+ if (obj->isTaggedPointer()) return false;
SideTable *table = SideTable::tableForPointer(obj);
bool do_dealloc = false;
- if (OSSpinLockTry(&table->slock)) {
+ if (spinlock_trylock(&table->slock)) {
RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj));
if (it == table->refcnts.end()) {
do_dealloc = true;
- table->refcnts[DISGUISE(obj)] = 1;
- } else if (it->second == 0) {
+ table->refcnts[DISGUISE(obj)] = SIDE_TABLE_DEALLOCATING;
+ } else if (it->second < SIDE_TABLE_DEALLOCATING) {
+ // SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it.
do_dealloc = true;
- it->second = 1;
+ it->second |= SIDE_TABLE_DEALLOCATING;
} else {
- it->second -= 2;
+ it->second -= SIDE_TABLE_RC_ONE;
}
- OSSpinLockUnlock(&table->slock);
+ spinlock_unlock(&table->slock);
return do_dealloc;
}
- return _objc_rootReleaseWasZero_slow(obj);
+
+ return _objc_rootReleaseWasZero_slow(obj, table);
}
__attribute__((noinline,used))
static id _objc_rootAutorelease2(id obj)
{
- if (OBJC_IS_TAGGED_PTR(obj)) return obj;
+ if (obj->isTaggedPointer()) return obj;
return AutoreleasePoolPage::autorelease(obj);
}
// threaded environment because the result is immediately stale by the
// time the caller receives it.
- if (OBJC_IS_TAGGED_PTR(obj)) return (uintptr_t)obj;
+ if (obj->isTaggedPointer()) return (uintptr_t)obj;
SideTable *table = SideTable::tableForPointer(obj);
size_t refcnt_result = 1;
- OSSpinLockLock(&table->slock);
+ spinlock_lock(&table->slock);
RefcountMap::iterator it = table->refcnts.find(DISGUISE(obj));
if (it != table->refcnts.end()) {
- refcnt_result = (it->second >> 1) + 1;
+ refcnt_result += it->second >> SIDE_TABLE_RC_SHIFT;
}
- OSSpinLockUnlock(&table->slock);
+ spinlock_unlock(&table->slock);
return refcnt_result;
}
id
_objc_rootAlloc(Class cls)
{
-#if 0 && __OBJC2__
+#if __OBJC2__
// Skip over the +allocWithZone: call if the class doesn't override it.
- // fixme not - this breaks ObjectAlloc
- if (! ((class_t *)cls)->isa->hasCustomAWZ()) {
- return class_createInstance(cls, 0);
+ if (! cls->ISA()->hasCustomAWZ()) {
+ id obj = class_createInstance(cls, 0);
+ if (!obj) obj = callBadAllocHandler(cls);
+ return obj;
+ }
+#endif
+ return [cls allocWithZone: nil];
+}
+
+id
+objc_alloc(Class cls)
+{
+#if __OBJC2__
+ // Skip over +alloc and +allocWithZone: if the class doesn't override them.
+ if (cls &&
+ cls->ISA()->isInitialized_meta() &&
+ ! cls->ISA()->hasCustomAWZ())
+ {
+ id obj = class_createInstance(cls, 0);
+ if (!obj) obj = callBadAllocHandler(cls);
+ return obj;
+ }
+#endif
+ return [cls alloc];
+}
+
+id
+objc_allocWithZone(Class cls)
+{
+#if __OBJC2__
+ // Skip over the +allocWithZone: call if the class doesn't override it.
+ if (cls &&
+ cls->ISA()->isInitialized_meta() &&
+ ! cls->ISA()->hasCustomAWZ())
+ {
+ id obj = class_createInstance(cls, 0);
+ if (!obj) obj = callBadAllocHandler(cls);
+ return obj;
}
#endif
return [cls allocWithZone: nil];
assert(obj);
assert(!UseGC);
- if (OBJC_IS_TAGGED_PTR(obj)) return;
+ if (obj->isTaggedPointer()) return;
object_dispose(obj);
}
void *
objc_autoreleasePoolPush(void)
{
- if (UseGC) return NULL;
+ if (UseGC) return nil;
return AutoreleasePoolPage::push();
}
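// Illustrative sketch (not part of this patch): objc_autoreleasePoolPush() and
// objc_autoreleasePoolPop() are the pair that an @autoreleasepool block lowers
// to. The token returned by push marks the pool boundary (a POOL_SENTINEL slot
// in the page) and is handed back to pop, which releases everything pushed
// after it. The wrapper and body() names below are made up.

extern void *objc_autoreleasePoolPush(void);
extern void  objc_autoreleasePoolPop(void *token);

void autoreleasepool_sketch(void (*body)(void))
{
    void *token = objc_autoreleasePoolPush();  // @autoreleasepool {
    body();                                    //   autoreleased objects accumulate
    objc_autoreleasePoolPop(token);            // } release back to the sentinel
}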
objc_autoreleaseReturnValue(id obj)
{
#if SUPPORT_RETURN_AUTORELEASE
- assert(tls_get_direct(AUTORELEASE_POOL_RECLAIM_KEY) == NULL);
+ assert(tls_get_direct(AUTORELEASE_POOL_RECLAIM_KEY) == nil);
if (callerAcceptsFastAutorelease(__builtin_return_address(0))) {
tls_set_direct(AUTORELEASE_POOL_RECLAIM_KEY, obj);
}
+ (Class)superclass {
- return class_getSuperclass(self);
+ return self->superclass;
}
- (Class)superclass {
- return class_getSuperclass([self class]);
+ return [self class]->superclass;
}
+ (BOOL)isMemberOfClass:(Class)cls {
}
+ (BOOL)isKindOfClass:(Class)cls {
- for (Class tcls = object_getClass((id)self); tcls; tcls = class_getSuperclass(tcls)) {
+ for (Class tcls = object_getClass((id)self); tcls; tcls = tcls->superclass) {
if (tcls == cls) return YES;
}
return NO;
}
- (BOOL)isKindOfClass:(Class)cls {
- for (Class tcls = [self class]; tcls; tcls = class_getSuperclass(tcls)) {
+ for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
if (tcls == cls) return YES;
}
return NO;
}
+ (BOOL)isSubclassOfClass:(Class)cls {
- for (Class tcls = self; tcls; tcls = class_getSuperclass(tcls)) {
+ for (Class tcls = self; tcls; tcls = tcls->superclass) {
if (tcls == cls) return YES;
}
return NO;
}
+ (BOOL)isAncestorOfObject:(NSObject *)obj {
- for (Class tcls = [obj class]; tcls; tcls = class_getSuperclass(tcls)) {
+ for (Class tcls = [obj class]; tcls; tcls = tcls->superclass) {
if (tcls == self) return YES;
}
return NO;
+ (BOOL)conformsToProtocol:(Protocol *)protocol {
if (!protocol) return NO;
- for (Class tcls = self; tcls; tcls = class_getSuperclass(tcls)) {
+ for (Class tcls = self; tcls; tcls = tcls->superclass) {
if (class_conformsToProtocol(tcls, protocol)) return YES;
}
return NO;
- (BOOL)conformsToProtocol:(Protocol *)protocol {
if (!protocol) return NO;
- for (Class tcls = [self class]; tcls; tcls = class_getSuperclass(tcls)) {
+ for (Class tcls = [self class]; tcls; tcls = tcls->superclass) {
if (class_conformsToProtocol(tcls, protocol)) return YES;
}
return NO;
return NO;
}
-+ (BOOL)isBlock {
- return NO;
-}
-
-- (BOOL)isBlock {
- return NO;
-}
-
+ (IMP)instanceMethodForSelector:(SEL)sel {
if (!sel) [self doesNotRecognizeSelector:sel];
+ (IMP)methodForSelector:(SEL)sel {
if (!sel) [self doesNotRecognizeSelector:sel];
- return class_getMethodImplementation(object_getClass((id)self), sel);
+ return object_getMethodImplementation((id)self, sel);
}
- (IMP)methodForSelector:(SEL)sel {
if (!sel) [self doesNotRecognizeSelector:sel];
- return class_getMethodImplementation([self class], sel);
+ return object_getMethodImplementation(self, sel);
}
+ (BOOL)resolveClassMethod:(SEL)sel {
}
// Replaced by ObjectAlloc
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wmismatched-method-attributes"
- (id)retain
__attribute__((aligned(16)))
{
- if (OBJC_IS_TAGGED_PTR(self)) return self;
+ if (((id)self)->isTaggedPointer()) return self;
SideTable *table = SideTable::tableForPointer(self);
- if (OSSpinLockTry(&table->slock)) {
- table->refcnts[DISGUISE(self)] += 2;
- OSSpinLockUnlock(&table->slock);
+ if (spinlock_trylock(&table->slock)) {
+ table->refcnts[DISGUISE(self)] += SIDE_TABLE_RC_ONE;
+ spinlock_unlock(&table->slock);
return self;
}
- return _objc_rootRetain_slow(self);
+ return _objc_rootRetain_slow(self, table);
}
+#pragma clang diagnostic pop
+ (BOOL)_tryRetain {
}
// Replaced by ObjectAlloc
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wmismatched-method-attributes"
- (oneway void)release
__attribute__((aligned(16)))
{
}
[self dealloc];
}
+#pragma clang diagnostic pop
+ (id)autorelease {
return (id)self;
}
// Replaced by ObjectAlloc
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wmismatched-method-attributes"
- (id)autorelease
__attribute__((aligned(16)))
{
// no tag check here: tagged pointers DO use fast autoreleasing
#if SUPPORT_RETURN_AUTORELEASE
- assert(tls_get_direct(AUTORELEASE_POOL_RECLAIM_KEY) == NULL);
+ assert(tls_get_direct(AUTORELEASE_POOL_RECLAIM_KEY) == nil);
if (callerAcceptsFastAutorelease(__builtin_return_address(0))) {
tls_set_direct(AUTORELEASE_POOL_RECLAIM_KEY, self);
#endif
return _objc_rootAutorelease2(self);
}
+#pragma clang diagnostic pop
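The assertion and tls_set_direct call above implement a thread-local handoff: when the caller is known to cooperate, the object is parked in a per-thread slot instead of being pushed onto an autorelease pool, and the caller reclaims it immediately. A hedged sketch of that handoff, with an illustrative thread_local slot standing in for the runtime's direct TLS key:

    #include <cassert>

    static thread_local void *reclaim_slot = nullptr;  // stand-in for AUTORELEASE_POOL_RECLAIM_KEY

    // Callee side: park the object for a cooperating caller instead of autoreleasing it.
    static void stash_for_caller(void *obj) {
        assert(reclaim_slot == nullptr);   // slot must be empty, as asserted above
        reclaim_slot = obj;
    }

    // Caller side: take ownership back out of the slot.
    static void *reclaim_from_callee() {
        void *obj = reclaim_slot;
        reclaim_slot = nullptr;
        return obj;
    }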
+ (NSUInteger)retainCount {
return ULONG_MAX;
}
// Replaced by ObjectAlloc
-+ (id)allocWithZone:(NSZone *)zone {
++ (id)allocWithZone:(struct _NSZone *)zone {
return _objc_rootAllocWithZone(self, (malloc_zone_t *)zone);
}
_objc_rootFinalize(self);
}
-+ (NSZone *)zone {
- return (NSZone *)_objc_rootZone(self);
++ (struct _NSZone *)zone {
+ return (struct _NSZone *)_objc_rootZone(self);
}
-- (NSZone *)zone {
- return (NSZone *)_objc_rootZone(self);
+- (struct _NSZone *)zone {
+ return (struct _NSZone *)_objc_rootZone(self);
}
+ (id)copy {
return (id)self;
}
-+ (id)copyWithZone:(NSZone *)zone {
++ (id)copyWithZone:(struct _NSZone *)zone {
return (id)self;
}
- (id)copy {
- return [(id)self copyWithZone:NULL];
+ return [(id)self copyWithZone:nil];
}
+ (id)mutableCopy {
return (id)self;
}
-+ (id)mutableCopyWithZone:(NSZone *)zone {
++ (id)mutableCopyWithZone:(struct _NSZone *)zone {
return (id)self;
}
- (id)mutableCopy {
- return [(id)self mutableCopyWithZone:NULL];
+ return [(id)self mutableCopyWithZone:nil];
}
@end
id
objc_retain(id obj)
{
- if (!obj || OBJC_IS_TAGGED_PTR(obj)) {
+ if (!obj || obj->isTaggedPointer()) {
goto out_slow;
}
#if __OBJC2__
- if (((class_t *)obj->isa)->hasCustomRR()) {
+ if (((Class)obj->isa)->hasCustomRR()) {
return [obj retain];
}
return bypass_msgSend_retain(obj);
void
objc_release(id obj)
{
- if (!obj || OBJC_IS_TAGGED_PTR(obj)) {
+ if (!obj || obj->isTaggedPointer()) {
return;
}
#if __OBJC2__
- if (((class_t *)obj->isa)->hasCustomRR()) {
+ if (((Class)obj->isa)->hasCustomRR()) {
return (void)[obj release];
}
return bypass_msgSend_release(obj);
id
objc_autorelease(id obj)
{
- if (!obj || OBJC_IS_TAGGED_PTR(obj)) {
+ if (!obj || obj->isTaggedPointer()) {
goto out_slow;
}
#if __OBJC2__
- if (((class_t *)obj->isa)->hasCustomRR()) {
+ if (((Class)obj->isa)->hasCustomRR()) {
return [obj autorelease];
}
return bypass_msgSend_autorelease(obj);
assert(obj);
assert(!UseGC);
- if (OBJC_IS_TAGGED_PTR(obj)) return obj;
+ if (obj->isTaggedPointer()) return obj;
return bypass_msgSend_retain(obj);
}
assert(obj);
assert(!UseGC);
- if (OBJC_IS_TAGGED_PTR(obj)) return;
+ if (obj->isTaggedPointer()) return;
bypass_msgSend_release(obj);
}
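The isTaggedPointer() calls that replace OBJC_IS_TAGGED_PTR reduce to a bit test on the pointer value itself, so tagged objects never touch the side tables. A hedged sketch of that kind of predicate; the mask is illustrative, not the runtime's actual per-platform tag layout:

    #include <cstdint>

    // Illustrative only: treat a set low-order bit as the tag marker.
    static inline bool is_tagged_pointer(const void *p) {
        return ((uintptr_t)p & 0x1) != 0;
    }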
#if ! __OBJC2__
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_NA)
+OBJC_ROOT_CLASS
@interface Object
{
Class isa; /* A pointer to the instance's class structure */
+++ /dev/null
-/*
- * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-/*
- Object.m
- Copyright 1988-1996 NeXT Software, Inc.
-*/
-
-#if __OBJC2__
-
-#include "objc-private.h"
-
-__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_NA)
-@interface Object {
- Class isa;
-}
-@end
-
-@implementation Object
-
-+ (id)initialize
-{
- return self;
-}
-
-+ (id)class
-{
- return self;
-}
-
--(id) retain
-{
- return _objc_rootRetain(self);
-}
-
--(void) release
-{
- _objc_rootRelease(self);
-}
-
--(id) autorelease
-{
- return _objc_rootAutorelease(self);
-}
-
-+(id) retain
-{
- return self;
-}
-
-+(void) release
-{
-}
-
-+(id) autorelease
-{
- return self;
-}
-
-
-@end
-
-
-// __OBJC2__
-#else
-// not __OBJC2__
-
-#include <stdlib.h>
-#include <stdarg.h>
-#include <string.h>
-#include <malloc/malloc.h>
-
-#include "Object.h"
-#include "Protocol.h"
-#include "objc-runtime.h"
-#include "objc-auto.h"
-
-// hack
-extern void _objc_error(id, const char *, va_list);
-
-
-// Error Messages
-static const char
- _errShouldHaveImp[] = "should have implemented the '%s' method.",
- _errShouldNotImp[] = "should NOT have implemented the '%s' method.",
- _errLeftUndone[] = "method '%s' not implemented",
- _errBadSel[] = "method %s given invalid selector %s",
- _errDoesntRecognize[] = "does not recognize selector %c%s";
-
-
-@implementation Object
-
-
-+ (id)initialize
-{
- return self;
-}
-
-- (id)awake
-{
- return self;
-}
-
-+ (id)poseAs: aFactory
-{
- return class_poseAs(self, aFactory);
-}
-
-+ (id)new
-{
- id newObject = (*_alloc)((Class)self, 0);
- Class metaClass = self->isa;
- if (class_getVersion(metaClass) > 1)
- return [newObject init];
- else
- return newObject;
-}
-
-+ (id)alloc
-{
- return (*_zoneAlloc)((Class)self, 0, malloc_default_zone());
-}
-
-+ (id)allocFromZone:(void *) z
-{
- return (*_zoneAlloc)((Class)self, 0, z);
-}
-
-- (id)init
-{
- return self;
-}
-
-- (const char *)name
-{
- return class_getName(isa);
-}
-
-+ (const char *)name
-{
- return class_getName((Class)self);
-}
-
-- (unsigned)hash
-{
- return (unsigned)(((uintptr_t)self) >> 2);
-}
-
-- (BOOL)isEqual:anObject
-{
- return anObject == self;
-}
-
-- (id)free
-{
- return (*_dealloc)(self);
-}
-
-+ (id)free
-{
- return nil;
-}
-
-- (id)self
-{
- return self;
-}
-
-
--(id)class
-{
- return (id)isa;
-}
-
-+ (id)class
-{
- return self;
-}
-
-- (void *)zone
-{
- void *z = malloc_zone_from_ptr(self);
- return z ? z : malloc_default_zone();
-}
-
-+ (id)superclass
-{
- return class_getSuperclass((Class)self);
-}
-
-- (id)superclass
-{
- return class_getSuperclass(isa);
-}
-
-+ (int) version
-{
- return class_getVersion((Class)self);
-}
-
-+ (id)setVersion: (int) aVersion
-{
- class_setVersion((Class)self, aVersion);
- return self;
-}
-
-- (BOOL)isKindOf:aClass
-{
- register Class cls;
- for (cls = isa; cls; cls = class_getSuperclass(cls))
- if (cls == (Class)aClass)
- return YES;
- return NO;
-}
-
-- (BOOL)isMemberOf:aClass
-{
- return isa == (Class)aClass;
-}
-
-- (BOOL)isKindOfClassNamed:(const char *)aClassName
-{
- register Class cls;
- for (cls = isa; cls; cls = class_getSuperclass(cls))
- if (strcmp(aClassName, class_getName(cls)) == 0)
- return YES;
- return NO;
-}
-
-- (BOOL)isMemberOfClassNamed:(const char *)aClassName
-{
- return strcmp(aClassName, class_getName(isa)) == 0;
-}
-
-+ (BOOL)instancesRespondTo:(SEL)aSelector
-{
- return class_respondsToMethod((Class)self, aSelector);
-}
-
-- (BOOL)respondsTo:(SEL)aSelector
-{
- return class_respondsToMethod(isa, aSelector);
-}
-
-- (id)copy
-{
- return [self copyFromZone: [self zone]];
-}
-
-- (id)copyFromZone:(void *)z
-{
- return (*_zoneCopy)(self, 0, z);
-}
-
-- (IMP)methodFor:(SEL)aSelector
-{
- return class_lookupMethod(isa, aSelector);
-}
-
-+ (IMP)instanceMethodFor:(SEL)aSelector
-{
- return class_lookupMethod(self, aSelector);
-}
-
-- (id)perform:(SEL)aSelector
-{
- if (aSelector)
- return ((id(*)(id, SEL))objc_msgSend)(self, aSelector);
- else
- return [self error:_errBadSel, sel_getName(_cmd), aSelector];
-}
-
-- (id)perform:(SEL)aSelector with:anObject
-{
- if (aSelector)
- return ((id(*)(id, SEL, id))objc_msgSend)(self, aSelector, anObject);
- else
- return [self error:_errBadSel, sel_getName(_cmd), aSelector];
-}
-
-- (id)perform:(SEL)aSelector with:obj1 with:obj2
-{
- if (aSelector)
- return ((id(*)(id, SEL, id, id))objc_msgSend)(self, aSelector, obj1, obj2);
- else
- return [self error:_errBadSel, sel_getName(_cmd), aSelector];
-}
-
-- (id)subclassResponsibility:(SEL)aSelector
-{
- return [self error:_errShouldHaveImp, sel_getName(aSelector)];
-}
-
-- (id)notImplemented:(SEL)aSelector
-{
- return [self error:_errLeftUndone, sel_getName(aSelector)];
-}
-
-- (id)doesNotRecognize:(SEL)aMessage
-{
- return [self error:_errDoesntRecognize,
- class_isMetaClass(isa) ? '+' : '-', sel_getName(aMessage)];
-}
-
-- (id)error:(const char *)aCStr, ...
-{
- va_list ap;
- va_start(ap,aCStr);
- (*_error)(self, aCStr, ap);
- _objc_error (self, aCStr, ap); /* In case (*_error)() returns. */
- va_end(ap);
- return nil;
-}
-
-- (void) printForDebugger:(void *)stream
-{
-}
-
-- (id)write:(void *) stream
-{
- return self;
-}
-
-- (id)read:(void *) stream
-{
- return self;
-}
-
-- (id)forward: (SEL) sel : (marg_list) args
-{
- return [self doesNotRecognize: sel];
-}
-
-/* this method is not part of the published API */
-
-- (unsigned)methodArgSize:(SEL)sel
-{
- Method method = class_getInstanceMethod((Class)isa, sel);
- if (! method) return 0;
- return method_getSizeOfArguments(method);
-}
-
-- (id)performv: (SEL) sel : (marg_list) args
-{
- unsigned size;
-
- // Messages to nil object always return nil
- if (! self) return nil;
-
- // Calculate size of the marg_list from the method's
- // signature. This looks for the method in self
- // and its superclasses.
- size = [self methodArgSize: sel];
-
- // If neither self nor its superclasses implement
- // it, forward the message because self might know
- // someone who does. This is a "chained" forward...
- if (! size) return [self forward: sel: args];
-
- // Message self with the specified selector and arguments
- return objc_msgSendv (self, sel, size, args);
-}
-
-/* Testing protocol conformance */
-
-- (BOOL) conformsTo: (Protocol *)aProtocolObj
-{
- return [(id)isa conformsTo:aProtocolObj];
-}
-
-+ (BOOL) conformsTo: (Protocol *)aProtocolObj
-{
- Class class;
- for (class = self; class; class = class_getSuperclass(class))
- {
- if (class_conformsToProtocol(class, aProtocolObj)) return YES;
- }
- return NO;
-}
-
-
-/* Looking up information for a method */
-
-- (struct objc_method_description *) descriptionForMethod:(SEL)aSelector
-{
- Class cls;
- struct objc_method_description *m;
-
- /* Look in the protocols first. */
- for (cls = isa; cls; cls = cls->super_class)
- {
- if (cls->isa->version >= 3)
- {
- struct objc_protocol_list *protocols = cls->protocols;
-
- while (protocols)
- {
- int i;
-
- for (i = 0; i < protocols->count; i++)
- {
- Protocol *p = protocols->list[i];
-
- if (class_isMetaClass(cls))
- m = [p descriptionForClassMethod:aSelector];
- else
- m = [p descriptionForInstanceMethod:aSelector];
-
- if (m) {
- return m;
- }
- }
-
- if (cls->isa->version <= 4)
- break;
-
- protocols = protocols->next;
- }
- }
- }
-
- /* Then try the class implementations. */
- for (cls = isa; cls; cls = cls->super_class) {
- void *iterator = 0;
- int i;
- struct objc_method_list *mlist;
- while ( (mlist = class_nextMethodList( cls, &iterator )) ) {
- for (i = 0; i < mlist->method_count; i++)
- if (mlist->method_list[i].method_name == aSelector) {
- m = (struct objc_method_description *)&mlist->method_list[i];
- return m;
- }
- }
- }
- return 0;
-}
-
-+ (struct objc_method_description *) descriptionForInstanceMethod:(SEL)aSelector
-{
- Class cls;
-
- /* Look in the protocols first. */
- for (cls = self; cls; cls = cls->super_class)
- {
- if (cls->isa->version >= 3)
- {
- struct objc_protocol_list *protocols = cls->protocols;
-
- while (protocols)
- {
- int i;
-
- for (i = 0; i < protocols->count; i++)
- {
- Protocol *p = protocols->list[i];
- struct objc_method_description *m;
-
- if ((m = [p descriptionForInstanceMethod:aSelector]))
- return m;
- }
-
- if (cls->isa->version <= 4)
- break;
-
- protocols = protocols->next;
- }
- }
- }
-
- /* Then try the class implementations. */
- for (cls = self; cls; cls = cls->super_class) {
- void *iterator = 0;
- int i;
- struct objc_method_list *mlist;
- while ( (mlist = class_nextMethodList( cls, &iterator )) ) {
- for (i = 0; i < mlist->method_count; i++)
- if (mlist->method_list[i].method_name == aSelector) {
- struct objc_method_description *m;
- m = (struct objc_method_description *)&mlist->method_list[i];
- return m;
- }
- }
- }
- return 0;
-}
-
-
-/* Obsolete methods (for binary compatibility only). */
-
-+ (id)superClass
-{
- return [self superclass];
-}
-
-- (id)superClass
-{
- return [self superclass];
-}
-
-- (BOOL)isKindOfGivenName:(const char *)aClassName
-{
- return [self isKindOfClassNamed: aClassName];
-}
-
-- (BOOL)isMemberOfGivenName:(const char *)aClassName
-{
- return [self isMemberOfClassNamed: aClassName];
-}
-
-- (struct objc_method_description *) methodDescFor:(SEL)aSelector
-{
- return [self descriptionForMethod: aSelector];
-}
-
-+ (struct objc_method_description *) instanceMethodDescFor:(SEL)aSelector
-{
- return [self descriptionForInstanceMethod: aSelector];
-}
-
-- (id)findClass:(const char *)aClassName
-{
- return objc_lookUpClass(aClassName);
-}
-
-- (id)shouldNotImplement:(SEL)aSelector
-{
- return [self error:_errShouldNotImp, sel_getName(aSelector)];
-}
-
-
-@end
-
-#endif
--- /dev/null
+/*
+ * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/*
+ Object.m
+ Copyright 1988-1996 NeXT Software, Inc.
+*/
+
+#include "objc-private.h"
+
+#undef id
+#undef Class
+
+typedef struct objc_class *Class;
+typedef struct objc_object *id;
+
+#if __OBJC2__
+
+__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_NA)
+OBJC_ROOT_CLASS
+@interface Object {
+ Class isa;
+}
+@end
+
+@implementation Object
+
++ (id)initialize
+{
+ return self;
+}
+
++ (id)class
+{
+ return self;
+}
+
+-(id) retain
+{
+ return _objc_rootRetain(self);
+}
+
+-(void) release
+{
+ _objc_rootRelease(self);
+}
+
+-(id) autorelease
+{
+ return _objc_rootAutorelease(self);
+}
+
++(id) retain
+{
+ return self;
+}
+
++(void) release
+{
+}
+
++(id) autorelease
+{
+ return self;
+}
+
+
+@end
+
+
+// __OBJC2__
+#else
+// not __OBJC2__
+
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <malloc/malloc.h>
+
+#include "Object.h"
+#include "Protocol.h"
+#include "objc-runtime.h"
+#include "objc-auto.h"
+
+
+// Error Messages
+static const char
+ _errShouldHaveImp[] = "should have implemented the '%s' method.",
+ _errShouldNotImp[] = "should NOT have implemented the '%s' method.",
+ _errLeftUndone[] = "method '%s' not implemented",
+ _errBadSel[] = "method %s given invalid selector %s",
+ _errDoesntRecognize[] = "does not recognize selector %c%s";
+
+
+@implementation Object
+
+
++ (id)initialize
+{
+ return self;
+}
+
+- (id)awake
+{
+ return self;
+}
+
++ (id)poseAs: aFactory
+{
+ return class_poseAs(self, aFactory);
+}
+
++ (id)new
+{
+ id newObject = (*_alloc)((Class)self, 0);
+ Class metaClass = self->ISA();
+ if (class_getVersion(metaClass) > 1)
+ return [newObject init];
+ else
+ return newObject;
+}
+
++ (id)alloc
+{
+ return (*_zoneAlloc)((Class)self, 0, malloc_default_zone());
+}
+
++ (id)allocFromZone:(void *) z
+{
+ return (*_zoneAlloc)((Class)self, 0, z);
+}
+
+- (id)init
+{
+ return self;
+}
+
+- (const char *)name
+{
+ return class_getName(isa);
+}
+
++ (const char *)name
+{
+ return class_getName((Class)self);
+}
+
+- (unsigned)hash
+{
+ return (unsigned)(((uintptr_t)self) >> 2);
+}
+
+- (BOOL)isEqual:anObject
+{
+ return anObject == self;
+}
+
+- (id)free
+{
+ return (*_dealloc)(self);
+}
+
++ (id)free
+{
+ return nil;
+}
+
+- (id)self
+{
+ return self;
+}
+
+
+-(id)class
+{
+ return (id)isa;
+}
+
++ (id)class
+{
+ return self;
+}
+
+- (void *)zone
+{
+ void *z = malloc_zone_from_ptr(self);
+ return z ? z : malloc_default_zone();
+}
+
++ (id)superclass
+{
+ return self->superclass;
+}
+
+- (id)superclass
+{
+ return isa->superclass;
+}
+
++ (int) version
+{
+ return class_getVersion((Class)self);
+}
+
++ (id)setVersion: (int) aVersion
+{
+ class_setVersion((Class)self, aVersion);
+ return self;
+}
+
+- (BOOL)isKindOf:aClass
+{
+ register Class cls;
+ for (cls = isa; cls; cls = cls->superclass)
+ if (cls == (Class)aClass)
+ return YES;
+ return NO;
+}
+
+- (BOOL)isMemberOf:aClass
+{
+ return isa == (Class)aClass;
+}
+
+- (BOOL)isKindOfClassNamed:(const char *)aClassName
+{
+ register Class cls;
+ for (cls = isa; cls; cls = cls->superclass)
+ if (strcmp(aClassName, class_getName(cls)) == 0)
+ return YES;
+ return NO;
+}
+
+- (BOOL)isMemberOfClassNamed:(const char *)aClassName
+{
+ return strcmp(aClassName, class_getName(isa)) == 0;
+}
+
++ (BOOL)instancesRespondTo:(SEL)aSelector
+{
+ return class_respondsToMethod((Class)self, aSelector);
+}
+
+- (BOOL)respondsTo:(SEL)aSelector
+{
+ return class_respondsToMethod(isa, aSelector);
+}
+
+- (id)copy
+{
+ return [self copyFromZone: [self zone]];
+}
+
+- (id)copyFromZone:(void *)z
+{
+ return (*_zoneCopy)(self, 0, z);
+}
+
+- (IMP)methodFor:(SEL)aSelector
+{
+ return class_lookupMethod(isa, aSelector);
+}
+
++ (IMP)instanceMethodFor:(SEL)aSelector
+{
+ return class_lookupMethod(self, aSelector);
+}
+
+- (id)perform:(SEL)aSelector
+{
+ if (aSelector)
+ return ((id(*)(id, SEL))objc_msgSend)(self, aSelector);
+ else
+ return [self error:_errBadSel, sel_getName(_cmd), aSelector];
+}
+
+- (id)perform:(SEL)aSelector with:anObject
+{
+ if (aSelector)
+ return ((id(*)(id, SEL, id))objc_msgSend)(self, aSelector, anObject);
+ else
+ return [self error:_errBadSel, sel_getName(_cmd), aSelector];
+}
+
+- (id)perform:(SEL)aSelector with:obj1 with:obj2
+{
+ if (aSelector)
+ return ((id(*)(id, SEL, id, id))objc_msgSend)(self, aSelector, obj1, obj2);
+ else
+ return [self error:_errBadSel, sel_getName(_cmd), aSelector];
+}
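The perform: family shows the standard idiom for calling objc_msgSend directly: cast it to a function pointer with the exact argument and return types before invoking it. A minimal usage sketch; the selector and receiver are placeholders:

    #include <objc/message.h>
    #include <objc/runtime.h>

    // Hedged sketch: send -description by casting objc_msgSend to the right
    // signature first, exactly as the perform: methods above do.
    static id callDescription(id receiver) {
        SEL sel = sel_registerName("description");
        return ((id (*)(id, SEL))objc_msgSend)(receiver, sel);
    }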
+
+- (id)subclassResponsibility:(SEL)aSelector
+{
+ return [self error:_errShouldHaveImp, sel_getName(aSelector)];
+}
+
+- (id)notImplemented:(SEL)aSelector
+{
+ return [self error:_errLeftUndone, sel_getName(aSelector)];
+}
+
+- (id)doesNotRecognize:(SEL)aMessage
+{
+ return [self error:_errDoesntRecognize,
+ class_isMetaClass(isa) ? '+' : '-', sel_getName(aMessage)];
+}
+
+- (id)error:(const char *)aCStr, ...
+{
+ va_list ap;
+ va_start(ap,aCStr);
+ (*_error)(self, aCStr, ap);
+ _objc_error (self, aCStr, ap); /* In case (*_error)() returns. */
+ va_end(ap);
+ return nil;
+}
+
+- (void) printForDebugger:(void *)stream
+{
+}
+
+- (id)write:(void *) stream
+{
+ return self;
+}
+
+- (id)read:(void *) stream
+{
+ return self;
+}
+
+- (id)forward: (SEL) sel : (marg_list) args
+{
+ return [self doesNotRecognize: sel];
+}
+
+/* this method is not part of the published API */
+
+- (unsigned)methodArgSize:(SEL)sel
+{
+ Method method = class_getInstanceMethod((Class)isa, sel);
+ if (! method) return 0;
+ return method_getSizeOfArguments(method);
+}
+
+- (id)performv: (SEL) sel : (marg_list) args
+{
+ unsigned size;
+
+ // Messages to nil object always return nil
+ if (! self) return nil;
+
+ // Calculate size of the marg_list from the method's
+ // signature. This looks for the method in self
+ // and its superclasses.
+ size = [self methodArgSize: sel];
+
+ // If neither self nor its superclasses implement
+ // it, forward the message because self might know
+ // someone who does. This is a "chained" forward...
+ if (! size) return [self forward: sel: args];
+
+ // Message self with the specified selector and arguments
+ return objc_msgSendv (self, sel, size, args);
+}
+
+/* Testing protocol conformance */
+
+- (BOOL) conformsTo: (Protocol *)aProtocolObj
+{
+ return [(id)isa conformsTo:aProtocolObj];
+}
+
++ (BOOL) conformsTo: (Protocol *)aProtocolObj
+{
+ Class cls;
+ for (cls = self; cls; cls = cls->superclass)
+ {
+ if (class_conformsToProtocol(cls, aProtocolObj)) return YES;
+ }
+ return NO;
+}
+
+
+/* Looking up information for a method */
+
+- (struct objc_method_description *) descriptionForMethod:(SEL)aSelector
+{
+ Class cls;
+ struct objc_method_description *m;
+
+ /* Look in the protocols first. */
+ for (cls = isa; cls; cls = cls->superclass)
+ {
+ if (cls->ISA()->version >= 3)
+ {
+ struct objc_protocol_list *protocols =
+ (struct objc_protocol_list *)cls->protocols;
+
+ while (protocols)
+ {
+ int i;
+
+ for (i = 0; i < protocols->count; i++)
+ {
+ Protocol *p = protocols->list[i];
+
+ if (class_isMetaClass(cls))
+ m = [p descriptionForClassMethod:aSelector];
+ else
+ m = [p descriptionForInstanceMethod:aSelector];
+
+ if (m) {
+ return m;
+ }
+ }
+
+ if (cls->ISA()->version <= 4)
+ break;
+
+ protocols = protocols->next;
+ }
+ }
+ }
+
+ /* Then try the class implementations. */
+ for (cls = isa; cls; cls = cls->superclass) {
+ void *iterator = 0;
+ int i;
+ struct objc_method_list *mlist;
+ while ( (mlist = class_nextMethodList( cls, &iterator )) ) {
+ for (i = 0; i < mlist->method_count; i++)
+ if (mlist->method_list[i].method_name == aSelector) {
+ m = (struct objc_method_description *)&mlist->method_list[i];
+ return m;
+ }
+ }
+ }
+ return 0;
+}
+
++ (struct objc_method_description *) descriptionForInstanceMethod:(SEL)aSelector
+{
+ Class cls;
+
+ /* Look in the protocols first. */
+ for (cls = self; cls; cls = cls->superclass)
+ {
+ if (cls->ISA()->version >= 3)
+ {
+ struct objc_protocol_list *protocols =
+ (struct objc_protocol_list *)cls->protocols;
+
+ while (protocols)
+ {
+ int i;
+
+ for (i = 0; i < protocols->count; i++)
+ {
+ Protocol *p = protocols->list[i];
+ struct objc_method_description *m;
+
+ if ((m = [p descriptionForInstanceMethod:aSelector]))
+ return m;
+ }
+
+ if (cls->ISA()->version <= 4)
+ break;
+
+ protocols = protocols->next;
+ }
+ }
+ }
+
+ /* Then try the class implementations. */
+ for (cls = self; cls; cls = cls->superclass) {
+ void *iterator = 0;
+ int i;
+ struct objc_method_list *mlist;
+ while ( (mlist = class_nextMethodList( cls, &iterator )) ) {
+ for (i = 0; i < mlist->method_count; i++)
+ if (mlist->method_list[i].method_name == aSelector) {
+ struct objc_method_description *m;
+ m = (struct objc_method_description *)&mlist->method_list[i];
+ return m;
+ }
+ }
+ }
+ return 0;
+}
+
+
+/* Obsolete methods (for binary compatibility only). */
+
++ (id)superClass
+{
+ return [self superclass];
+}
+
+- (id)superClass
+{
+ return [self superclass];
+}
+
+- (BOOL)isKindOfGivenName:(const char *)aClassName
+{
+ return [self isKindOfClassNamed: aClassName];
+}
+
+- (BOOL)isMemberOfGivenName:(const char *)aClassName
+{
+ return [self isMemberOfClassNamed: aClassName];
+}
+
+- (struct objc_method_description *) methodDescFor:(SEL)aSelector
+{
+ return [self descriptionForMethod: aSelector];
+}
+
++ (struct objc_method_description *) instanceMethodDescFor:(SEL)aSelector
+{
+ return [self descriptionForInstanceMethod: aSelector];
+}
+
+- (id)findClass:(const char *)aClassName
+{
+ return objc_lookUpClass(aClassName);
+}
+
+- (id)shouldNotImplement:(SEL)aSelector
+{
+ return [self error:_errShouldNotImp, sel_getName(aSelector)];
+}
+
+
+@end
+
+#endif
#include <objc/Object.h>
#include <Availability.h>
-AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED
+DEPRECATED_ATTRIBUTE
@interface List : Object
{
@public
- id *dataPtr AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED; /* data of the List object */
- unsigned numElements AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED; /* Actual number of elements */
- unsigned maxElements AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED; /* Total allocated elements */
+ id *dataPtr DEPRECATED_ATTRIBUTE; /* data of the List object */
+ unsigned numElements DEPRECATED_ATTRIBUTE; /* Actual number of elements */
+ unsigned maxElements DEPRECATED_ATTRIBUTE; /* Total allocated elements */
}
/* Creating, freeing */
-- (id)free AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
-- (id)freeObjects AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
-- (id)copyFromZone:(void *)z AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
+- (id)free DEPRECATED_ATTRIBUTE;
+- (id)freeObjects DEPRECATED_ATTRIBUTE;
+- (id)copyFromZone:(void *)z DEPRECATED_ATTRIBUTE;
/* Initializing */
-- (id)init AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
-- (id)initCount:(unsigned)numSlots AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
+- (id)init DEPRECATED_ATTRIBUTE;
+- (id)initCount:(unsigned)numSlots DEPRECATED_ATTRIBUTE;
/* Comparing two lists */
-- (BOOL)isEqual: anObject AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
+- (BOOL)isEqual: anObject DEPRECATED_ATTRIBUTE;
/* Managing the storage capacity */
-- (unsigned)capacity AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
-- (id)setAvailableCapacity:(unsigned)numSlots AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
+- (unsigned)capacity DEPRECATED_ATTRIBUTE;
+- (id)setAvailableCapacity:(unsigned)numSlots DEPRECATED_ATTRIBUTE;
/* Manipulating objects by index */
-- (unsigned)count AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
-- (id)objectAt:(unsigned)index AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
-- (id)lastObject AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
-- (id)addObject:anObject AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
-- (id)insertObject:anObject at:(unsigned)index AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
-- (id)removeObjectAt:(unsigned)index AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
-- (id)removeLastObject AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
-- (id)replaceObjectAt:(unsigned)index with:newObject AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
-- (id)appendList: (List *)otherList AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
+- (unsigned)count DEPRECATED_ATTRIBUTE;
+- (id)objectAt:(unsigned)index DEPRECATED_ATTRIBUTE;
+- (id)lastObject DEPRECATED_ATTRIBUTE;
+- (id)addObject:anObject DEPRECATED_ATTRIBUTE;
+- (id)insertObject:anObject at:(unsigned)index DEPRECATED_ATTRIBUTE;
+- (id)removeObjectAt:(unsigned)index DEPRECATED_ATTRIBUTE;
+- (id)removeLastObject DEPRECATED_ATTRIBUTE;
+- (id)replaceObjectAt:(unsigned)index with:newObject DEPRECATED_ATTRIBUTE;
+- (id)appendList: (List *)otherList DEPRECATED_ATTRIBUTE;
/* Manipulating objects by id */
-- (unsigned)indexOf:anObject AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
-- (id)addObjectIfAbsent:anObject AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
-- (id)removeObject:anObject AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
-- (id)replaceObject:anObject with:newObject AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
+- (unsigned)indexOf:anObject DEPRECATED_ATTRIBUTE;
+- (id)addObjectIfAbsent:anObject DEPRECATED_ATTRIBUTE;
+- (id)removeObject:anObject DEPRECATED_ATTRIBUTE;
+- (id)replaceObject:anObject with:newObject DEPRECATED_ATTRIBUTE;
/* Emptying the list */
-- (id)empty AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
+- (id)empty DEPRECATED_ATTRIBUTE;
/* Sending messages to elements of the list */
-- (id)makeObjectsPerform:(SEL)aSelector AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
-- (id)makeObjectsPerform:(SEL)aSelector with:anObject AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
+- (id)makeObjectsPerform:(SEL)aSelector DEPRECATED_ATTRIBUTE;
+- (id)makeObjectsPerform:(SEL)aSelector with:anObject DEPRECATED_ATTRIBUTE;
/*
 * The following new... methods are now obsolete. They remain in this
 * interface for backward compatibility only; use the init... methods
 * defined in this class instead.
*/
-+ (id)new AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
-+ (id)newCount:(unsigned)numSlots AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
++ (id)new DEPRECATED_ATTRIBUTE;
++ (id)newCount:(unsigned)numSlots DEPRECATED_ATTRIBUTE;
@end
typedef struct {
@defs(List);
-} NXListId AVAILABLE_MAC_OS_X_VERSION_10_0_AND_LATER_BUT_DEPRECATED;
+} NXListId DEPRECATED_ATTRIBUTE;
#define NX_ADDRESS(x) (((NXListId *)(x))->dataPtr)
#if __OBJC2__
-#include <Foundation/NSObject.h>
+#include <objc/NSObject.h>
// All methods of class Protocol are unavailable.
// Use the functions in objc/runtime.h instead.
/* Looking up information specific to a protocol */
- (struct objc_method_description *) descriptionForInstanceMethod:(SEL)aSel
- DEPRECATED_IN_MAC_OS_X_VERSION_10_5_AND_LATER;
+ __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_0,__MAC_10_5, __IPHONE_2_0,__IPHONE_2_0);
- (struct objc_method_description *) descriptionForClassMethod:(SEL)aSel
- DEPRECATED_IN_MAC_OS_X_VERSION_10_5_AND_LATER;
+ __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_0,__MAC_10_5, __IPHONE_2_0,__IPHONE_2_0);
@end
Copyright 1991-1996 NeXT Software, Inc.
*/
+#include "objc-private.h"
+
+#undef id
+#undef Class
#include <stdlib.h>
#include <string.h>
#include <mach-o/ldsyms.h>
#include "Protocol.h"
-#include "objc-private.h"
-#include "objc-runtime-old.h"
#if __OBJC2__
@interface __IncompleteProtocol : NSObject @end
#endif
-typedef struct {
- uintptr_t count;
- Protocol *list[0];
-} protocol_list_t;
-
- (BOOL) conformsTo: (Protocol *)aProtocolObj
{
return protocol_conformsToProtocol(self, aProtocolObj);
YES/*required*/, YES/*instance*/,
YES/*recursive*/);
#else
- return method_getDescription(_protocol_getMethod(self, aSel,
- YES, YES, YES));
+ return method_getDescription(protocol_getMethod((struct protocol_t *)self,
+ aSel, YES, YES, YES));
#endif
}
YES/*required*/, NO/*instance*/,
YES/*recursive*/);
#else
- return method_getDescription(_protocol_getMethod(self, aSel,
- YES, NO, YES));
+ return method_getDescription(protocol_getMethod((struct protocol_t *)self,
+ aSel, YES, NO, YES));
#endif
}
// check isKindOf:
Class cls;
Class protoClass = objc_getClass("Protocol");
- for (cls = object_getClass(other); cls; cls = class_getSuperclass(cls)) {
+ for (cls = object_getClass(other); cls; cls = cls->superclass) {
if (cls == protoClass) break;
}
if (!cls) return NO;
#if __arm__
#include <arm/arch.h>
+#include <mach/vm_param.h>
.syntax unified
.arm
#endif
-.align 12
+.align PAGE_SHIFT
__a1a2_tramphead_nt:
__a1a2_tramphead:
/*
// load block pointer from trampoline's data
// nt label works around thumb integrated asm bug rdar://11315197
adr r12, __a1a2_tramphead_nt // text page
- sub r12, r12, #4096 // data page precedes text page
+ sub r12, r12, #PAGE_SIZE // data page precedes text page
ldr r12, [r12, r1, LSL #3] // load block pointer from data + index*8
// shuffle parameters
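The comment spells out the trampoline layout this code depends on: the data page sits exactly one page below the text page, and each trampoline owns an 8-byte slot holding its block pointer. The same arithmetic in C, as a hedged sketch with a hypothetical helper (the real code now uses PAGE_SIZE/PAGE_SHIFT from <mach/vm_param.h>):

    #include <cstdint>
    #include <cstddef>

    // Hypothetical helper mirroring the assembly: from the trampoline head's
    // address and a trampoline index, locate the block pointer in the data
    // page that precedes the text page.
    static inline void *trampoline_block_ptr(uintptr_t tramphead, unsigned index,
                                             size_t page_size = 4096) {
        uintptr_t data_page = tramphead - page_size;          // data page precedes text page
        return *(void **)(data_page + (uintptr_t)index * 8);  // 8 bytes per slot
    }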
#ifdef __i386__
+#include <mach/vm_param.h>
+
.text
.private_extern __a1a2_tramphead
.private_extern __a1a2_firsttramp
.private_extern __a1a2_nexttramp
.private_extern __a1a2_trampend
-.align 12
+.align PAGE_SHIFT
__a1a2_tramphead:
popl %eax
andl $0xFFFFFFF8, %eax
- subl $0x1000, %eax
+ subl $ PAGE_SIZE, %eax
movl 4(%esp), %ecx // self -> ecx
movl %ecx, 8(%esp) // ecx -> _cmd
movl (%eax), %ecx // blockPtr -> ecx
#ifdef __x86_64__
+#include <mach/vm_param.h>
+
.text
.private_extern __a1a2_tramphead
.private_extern __a1a2_firsttramp
.private_extern __a1a2_nexttramp
.private_extern __a1a2_trampend
-.align 12
+.align PAGE_SHIFT
__a1a2_tramphead:
popq %r10
andq $0xFFFFFFFFFFFFFFF8, %r10
- subq $0x1000, %r10
+ subq $ PAGE_SIZE, %r10
movq %rdi, %rsi // arg1 -> arg2
movq (%r10), %rdi // block -> arg1
jmp *16(%rdi)
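The closing `jmp *16(%rdi)` leans on the Blocks ABI layout: on LP64 a block literal begins with an 8-byte isa followed by two 32-bit fields, so the invoke pointer lives at offset 16. A sketch of those leading fields; the struct is illustrative, not a runtime header:

    #include <cstddef>

    struct BlockLayoutSketch {            // leading fields of a block literal (LP64)
        void *isa;                        // offset 0
        int   flags;                      // offset 8
        int   reserved;                   // offset 12
        void (*invoke)(void *, ...);      // offset 16 -- target of `jmp *16(%rdi)`
    };

    #if __LP64__
    static_assert(offsetof(BlockLayoutSketch, invoke) == 16,
                  "invoke sits at offset 16 on LP64");
    #endif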
#if __arm__
#include <arm/arch.h>
+#include <mach/vm_param.h>
.syntax unified
.arm
#endif
-.align 12
+.align PAGE_SHIFT
.private_extern __a2a3_tramphead
__a2a3_tramphead_nt:
__a2a3_tramphead:
// load block pointer from trampoline's data
// nt label works around thumb integrated asm bug rdar://11315197
adr r12, __a2a3_tramphead_nt // text page
- sub r12, r12, #4096 // data page precedes text page
+ sub r12, r12, #PAGE_SIZE // data page precedes text page
ldr r12, [r12, r2, LSL #3] // load block pointer from data + index*8
// shuffle parameters
#ifdef __i386__
+#include <mach/vm_param.h>
+
.text
.private_extern __a2a3_tramphead
.private_extern __a2a3_firsttramp
.private_extern __a2a3_nexttramp
.private_extern __a2a3_trampend
-.align 12
+.align PAGE_SHIFT
__a2a3_tramphead:
popl %eax
andl $0xFFFFFFF8, %eax
- subl $0x1000, %eax
+ subl $ PAGE_SIZE, %eax
movl 8(%esp), %ecx // self -> ecx
movl %ecx, 12(%esp) // ecx -> _cmd
movl (%eax), %ecx // blockPtr -> ecx
#ifdef __x86_64__
+#include <mach/vm_param.h>
+
.text
.private_extern __a2a3_tramphead
.private_extern __a2a3_firsttramp
.private_extern __a2a3_nexttramp
.private_extern __a2a3_trampend
-.align 12
+.align PAGE_SHIFT
__a2a3_tramphead:
popq %r10
andq $0xFFFFFFFFFFFFFFF8, %r10
- subq $0x1000, %r10
+ subq $ PAGE_SIZE, %r10
// %rdi -- first arg -- is address of return value's space. Don't mess with it.
movq %rsi, %rdx // arg2 -> arg3
movq (%r10), %rsi // block -> arg2
NXHashTable *newt;
NXHashState state = NXInitHashState (table);
void *data;
- void *z = ZONE_FROM_PTR(table);
+ __unused void *z = ZONE_FROM_PTR(table);
newt = ALLOCTABLE(z);
newt->prototype = table->prototype; newt->count = 0;
NXHashTable *old;
NXHashState state;
void *aux;
- void *z = ZONE_FROM_PTR(table);
+ __unused void *z = ZONE_FROM_PTR(table);
old = ALLOCTABLE(z);
old->prototype = table->prototype; old->count = table->count;
unsigned j = bucket->count;
const void **pairs;
const void **newt;
- void *z = ZONE_FROM_PTR(table);
+ __unused void *z = ZONE_FROM_PTR(table);
if (! j) {
bucket->count++; bucket->elements.one = data;
unsigned j = bucket->count;
const void **pairs;
const void **newt;
- void *z = ZONE_FROM_PTR(table);
+ __unused void *z = ZONE_FROM_PTR(table);
if (! j) {
bucket->count++; bucket->elements.one = data;
unsigned j = bucket->count;
const void **pairs;
const void **newt;
- void *z = ZONE_FROM_PTR(table);
+ __unused void *z = ZONE_FROM_PTR(table);
if (! j) return NULL;
if (j == 1) {
--- /dev/null
+//===--- AlignOf.h - Portable calculation of type alignment -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the AlignOf function that computes alignments for
+// arbitrary types.
+//
+//===----------------------------------------------------------------------===//
+
+// Taken from llvmCore-3425.0.31.
+
+#ifndef LLVM_SUPPORT_ALIGNOF_H
+#define LLVM_SUPPORT_ALIGNOF_H
+
+#include <cstddef>
+
+namespace objc {
+
+template <typename T>
+struct AlignmentCalcImpl {
+ char x;
+ T t;
+private:
+ AlignmentCalcImpl() {} // Never instantiate.
+};
+
+/// AlignOf - A templated class that contains an enum value representing
+/// the alignment of the template argument. For example,
+/// AlignOf<int>::Alignment represents the alignment of type "int". The
+/// alignment calculated is the minimum alignment, and not necessarily
+/// the "desired" alignment returned by GCC's __alignof__ (for example). Note
+/// that because the alignment is an enum value, it can be used as a
+/// compile-time constant (e.g., for template instantiation).
+template <typename T>
+struct AlignOf {
+ enum { Alignment =
+ static_cast<unsigned int>(sizeof(AlignmentCalcImpl<T>) - sizeof(T)) };
+
+ enum { Alignment_GreaterEqual_2Bytes = Alignment >= 2 ? 1 : 0 };
+ enum { Alignment_GreaterEqual_4Bytes = Alignment >= 4 ? 1 : 0 };
+ enum { Alignment_GreaterEqual_8Bytes = Alignment >= 8 ? 1 : 0 };
+ enum { Alignment_GreaterEqual_16Bytes = Alignment >= 16 ? 1 : 0 };
+
+ enum { Alignment_LessEqual_2Bytes = Alignment <= 2 ? 1 : 0 };
+ enum { Alignment_LessEqual_4Bytes = Alignment <= 4 ? 1 : 0 };
+ enum { Alignment_LessEqual_8Bytes = Alignment <= 8 ? 1 : 0 };
+ enum { Alignment_LessEqual_16Bytes = Alignment <= 16 ? 1 : 0 };
+
+};
+
+/// alignOf - A templated function that returns the minimum alignment of
+/// of a type. This provides no extra functionality beyond the AlignOf
+/// class besides some cosmetic cleanliness. Example usage:
+/// alignOf<int>() returns the alignment of an int.
+template <typename T>
+inline unsigned alignOf() { return AlignOf<T>::Alignment; }
+
+
+/// \brief Helper for building an aligned character array type.
+///
+/// This template is used to explicitly build up a collection of aligned
+/// character types. We have to build these up using a macro and explicit
+/// specialization to cope with old versions of MSVC and GCC where only an
+/// integer literal can be used to specify an alignment constraint. Once built
+/// up here, we can then begin to indirect between these using normal C++
+/// template parameters.
+template <size_t Alignment> struct AlignedCharArrayImpl;
+
+// MSVC requires special handling here.
+#ifndef _MSC_VER
+
+#if __has_feature(cxx_alignas)
+#define LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \
+ template <> struct AlignedCharArrayImpl<x> { \
+ char aligned alignas(x); \
+ }
+#elif defined(__GNUC__)
+#define LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \
+ template <> struct AlignedCharArrayImpl<x> { \
+ char aligned __attribute__((aligned(x))); \
+ }
+#else
+# error No supported align as directive.
+#endif
+
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(1);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(2);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(4);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(8);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(16);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(32);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(64);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(128);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(512);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(1024);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(2048);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(4096);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(8192);
+
+#undef LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT
+
+#else // _MSC_VER
+
+// We provide special variations of this template for the most common
+// alignments because __declspec(align(...)) doesn't actually work when it is
+// a member of a by-value function argument in MSVC, even if the alignment
+// request is something reasonably like 8-byte or 16-byte.
+template <> struct AlignedCharArrayImpl<1> { char aligned; };
+template <> struct AlignedCharArrayImpl<2> { short aligned; };
+template <> struct AlignedCharArrayImpl<4> { int aligned; };
+template <> struct AlignedCharArrayImpl<8> { double aligned; };
+
+#define LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \
+ template <> struct AlignedCharArrayImpl<x> { \
+ __declspec(align(x)) char aligned; \
+ }
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(16);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(32);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(64);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(128);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(512);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(1024);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(2048);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(4096);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(8192);
+// Any larger and MSVC complains.
+#undef LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT
+
+#endif // _MSC_VER
+
+/// \brief This union template exposes a suitably aligned and sized character
+/// array member which can hold elements of any of up to four types.
+///
+/// These types may be arrays, structs, or any other types. The goal is to
+/// produce a union type containing a character array which, when used, forms
+/// storage suitable to placement new any of these types over. Support for more
+/// than four types can be added at the cost of more boiler plate.
+template <typename T1,
+ typename T2 = char, typename T3 = char, typename T4 = char>
+union AlignedCharArrayUnion {
+private:
+ class AlignerImpl {
+ T1 t1; T2 t2; T3 t3; T4 t4;
+
+ AlignerImpl(); // Never defined or instantiated.
+ };
+ union SizerImpl {
+ char arr1[sizeof(T1)], arr2[sizeof(T2)], arr3[sizeof(T3)], arr4[sizeof(T4)];
+ };
+
+public:
+ /// \brief The character array buffer for use by clients.
+ ///
+  /// No other member of this union should be referenced. They exist purely to
+ /// constrain the layout of this character array.
+ char buffer[sizeof(SizerImpl)];
+
+private:
+ // Tests seem to indicate that both Clang and GCC will properly register the
+ // alignment of a struct containing an aligned member, and this alignment
+ // should carry over to the character array in the union.
+ AlignedCharArrayImpl<AlignOf<AlignerImpl>::Alignment> nonce_member;
+};
+
+} // end namespace objc
+#endif
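A brief usage sketch for the two facilities this header provides: alignOf<T>() as a compile-time-friendly alignment query and AlignedCharArrayUnion as suitably aligned raw storage for placement new. Widget is a placeholder type:

    #include <new>

    struct Widget { int a; double b; };

    static void alignof_usage_sketch() {
        unsigned a = objc::alignOf<Widget>();            // minimum alignment of Widget
        (void)a;

        objc::AlignedCharArrayUnion<Widget> storage;     // aligned, correctly sized bytes
        Widget *w = new (storage.buffer) Widget();       // placement-new into the buffer
        w->~Widget();                                    // destroy manually when done
    }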
//
//===----------------------------------------------------------------------===//
+// Taken from llvmCore-3425.0.31.
+
#ifndef LLVM_ADT_DENSEMAP_H
#define LLVM_ADT_DENSEMAP_H
#include "llvm-type_traits.h"
+#include "llvm-MathExtras.h"
+#include "llvm-AlignOf.h"
+#include "llvm-DenseMapInfo.h"
#include <algorithm>
#include <iterator>
#include <new>
#include <utility>
#include <cassert>
+#include <climits>
#include <cstddef>
#include <cstring>
#include <TargetConditionals.h>
-namespace objc {
-
-#if TARGET_OS_IPHONE
+#include "objc-private.h"
-// lifted from <MathExtras.h>:
-
-/// CountLeadingZeros_32 - this function performs the platform optimal form of
-/// counting the number of zeros from the most significant bit to the first one
-/// bit. Ex. CountLeadingZeros_32(0x00F000FF) == 8.
-/// Returns 32 if the word is zero.
-inline unsigned CountLeadingZeros_32(uint32_t Value) {
- unsigned Count; // result
-#if __GNUC__ >= 4
- // PowerPC is defined for __builtin_clz(0)
-#if !defined(__ppc__) && !defined(__ppc64__)
- if (!Value) return 32;
-#endif
- Count = __builtin_clz(Value);
-#else
- if (!Value) return 32;
- Count = 0;
- // bisection method for count leading zeros
- for (unsigned Shift = 32 >> 1; Shift; Shift >>= 1) {
- uint32_t Tmp = Value >> Shift;
- if (Tmp) {
- Value = Tmp;
- } else {
- Count |= Shift;
- }
- }
-#endif
- return Count;
-}
-/// CountLeadingOnes_32 - this function performs the operation of
-/// counting the number of ones from the most significant bit to the first zero
-/// bit. Ex. CountLeadingOnes_32(0xFF0FFF00) == 8.
-/// Returns 32 if the word is all ones.
-inline unsigned CountLeadingOnes_32(uint32_t Value) {
- return CountLeadingZeros_32(~Value);
-}
-/// CountLeadingZeros_64 - This function performs the platform optimal form
-/// of counting the number of zeros from the most significant bit to the first
-/// one bit (64 bit edition.)
-/// Returns 64 if the word is zero.
-inline unsigned CountLeadingZeros_64(uint64_t Value) {
- unsigned Count; // result
-#if __GNUC__ >= 4
- // PowerPC is defined for __builtin_clzll(0)
-#if !defined(__ppc__) && !defined(__ppc64__)
- if (!Value) return 64;
-#endif
- Count = __builtin_clzll(Value);
-#else
- if (sizeof(long) == sizeof(int64_t)) {
- if (!Value) return 64;
- Count = 0;
- // bisection method for count leading zeros
- for (unsigned Shift = 64 >> 1; Shift; Shift >>= 1) {
- uint64_t Tmp = Value >> Shift;
- if (Tmp) {
- Value = Tmp;
- } else {
- Count |= Shift;
- }
- }
- } else {
- // get hi portion
- uint32_t Hi = Hi_32(Value);
- // if some bits in hi portion
- if (Hi) {
- // leading zeros in hi portion plus all bits in lo portion
- Count = CountLeadingZeros_32(Hi);
- } else {
- // get lo portion
- uint32_t Lo = Lo_32(Value);
- // same as 32 bit value
- Count = CountLeadingZeros_32(Lo)+32;
- }
- }
-#endif
- return Count;
-}
-/// CountLeadingOnes_64 - This function performs the operation
-/// of counting the number of ones from the most significant bit to the first
-/// zero bit (64 bit edition.)
-/// Returns 64 if the word is all ones.
-inline unsigned CountLeadingOnes_64(uint64_t Value) {
- return CountLeadingZeros_64(~Value);
-}
-/// CountTrailingZeros_32 - this function performs the platform optimal form of
-/// counting the number of zeros from the least significant bit to the first one
-/// bit. Ex. CountTrailingZeros_32(0xFF00FF00) == 8.
-/// Returns 32 if the word is zero.
-inline unsigned CountTrailingZeros_32(uint32_t Value) {
-#if __GNUC__ >= 4
- return Value ? __builtin_ctz(Value) : 32;
-#else
- static const unsigned Mod37BitPosition[] = {
- 32, 0, 1, 26, 2, 23, 27, 0, 3, 16, 24, 30, 28, 11, 0, 13,
- 4, 7, 17, 0, 25, 22, 31, 15, 29, 10, 12, 6, 0, 21, 14, 9,
- 5, 20, 8, 19, 18
- };
- return Mod37BitPosition[(-Value & Value) % 37];
-#endif
-}
-/// CountTrailingOnes_32 - this function performs the operation of
-/// counting the number of ones from the least significant bit to the first zero
-/// bit. Ex. CountTrailingOnes_32(0x00FF00FF) == 8.
-/// Returns 32 if the word is all ones.
-inline unsigned CountTrailingOnes_32(uint32_t Value) {
- return CountTrailingZeros_32(~Value);
-}
-/// CountTrailingZeros_64 - This function performs the platform optimal form
-/// of counting the number of zeros from the least significant bit to the first
-/// one bit (64 bit edition.)
-/// Returns 64 if the word is zero.
-inline unsigned CountTrailingZeros_64(uint64_t Value) {
-#if __GNUC__ >= 4
- return Value ? __builtin_ctzll(Value) : 64;
-#else
- static const unsigned Mod67Position[] = {
- 64, 0, 1, 39, 2, 15, 40, 23, 3, 12, 16, 59, 41, 19, 24, 54,
- 4, 64, 13, 10, 17, 62, 60, 28, 42, 30, 20, 51, 25, 44, 55,
- 47, 5, 32, 65, 38, 14, 22, 11, 58, 18, 53, 63, 9, 61, 27,
- 29, 50, 43, 46, 31, 37, 21, 57, 52, 8, 26, 49, 45, 36, 56,
- 7, 48, 35, 6, 34, 33, 0
- };
- return Mod67Position[(-Value & Value) % 67];
-#endif
-}
-
-/// CountTrailingOnes_64 - This function performs the operation
-/// of counting the number of ones from the least significant bit to the first
-/// zero bit (64 bit edition.)
-/// Returns 64 if the word is all ones.
-inline unsigned CountTrailingOnes_64(uint64_t Value) {
- return CountTrailingZeros_64(~Value);
-}
-/// CountPopulation_32 - this function counts the number of set bits in a value.
-/// Ex. CountPopulation(0xF000F000) = 8
-/// Returns 0 if the word is zero.
-inline unsigned CountPopulation_32(uint32_t Value) {
-#if __GNUC__ >= 4
- return __builtin_popcount(Value);
-#else
- uint32_t v = Value - ((Value >> 1) & 0x55555555);
- v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
- return ((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24;
-#endif
-}
-/// CountPopulation_64 - this function counts the number of set bits in a value,
-/// (64 bit edition.)
-inline unsigned CountPopulation_64(uint64_t Value) {
-#if __GNUC__ >= 4
- return __builtin_popcountll(Value);
-#else
- uint64_t v = Value - ((Value >> 1) & 0x5555555555555555ULL);
- v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
- v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
- return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56);
-#endif
-}
-/// Log2_32 - This function returns the floor log base 2 of the specified value,
-/// -1 if the value is zero. (32 bit edition.)
-/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
-inline unsigned Log2_32(uint32_t Value) {
- return 31 - CountLeadingZeros_32(Value);
-}
-/// Log2_64 - This function returns the floor log base 2 of the specified value,
-/// -1 if the value is zero. (64 bit edition.)
-inline unsigned Log2_64(uint64_t Value) {
- return 63 - CountLeadingZeros_64(Value);
-}
-/// Log2_32_Ceil - This function returns the ceil log base 2 of the specified
-/// value, 32 if the value is zero. (32 bit edition).
-/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
-inline unsigned Log2_32_Ceil(uint32_t Value) {
- return 32-CountLeadingZeros_32(Value-1);
-}
-
-#endif /* TARGET_OS_IPHONE */
-
-template<typename T>
-struct DenseMapInfo {
- //static inline T getEmptyKey();
- //static inline T getTombstoneKey();
- //static unsigned getHashValue(const T &Val);
- //static bool isEqual(const T &LHS, const T &RHS);
-};
-
-// Provide DenseMapInfo for all pointers.
-template<typename T>
-struct DenseMapInfo<T*> {
- static inline T* getEmptyKey() {
- intptr_t Val = -1;
- return reinterpret_cast<T*>(Val);
- }
- static inline T* getTombstoneKey() {
- intptr_t Val = -2;
- return reinterpret_cast<T*>(Val);
- }
- static unsigned getHashValue(const T *PtrVal) {
- return (unsigned((uintptr_t)PtrVal) >> 4) ^
- (unsigned((uintptr_t)PtrVal) >> 9);
- }
- static bool isEqual(const T *LHS, const T *RHS) { return LHS == RHS; }
-};
-
-// Provide DenseMapInfo for chars.
-template<> struct DenseMapInfo<char> {
- static inline char getEmptyKey() { return ~0; }
- static inline char getTombstoneKey() { return ~0 - 1; }
- static unsigned getHashValue(const char& Val) { return Val * 37; }
- static bool isEqual(const char &LHS, const char &RHS) {
- return LHS == RHS;
- }
-};
-
-// Provide DenseMapInfo for unsigned ints.
-template<> struct DenseMapInfo<unsigned> {
- static inline unsigned getEmptyKey() { return ~0; }
- static inline unsigned getTombstoneKey() { return ~0U - 1; }
- static unsigned getHashValue(const unsigned& Val) { return Val * 37; }
- static bool isEqual(const unsigned& LHS, const unsigned& RHS) {
- return LHS == RHS;
- }
-};
-
-// Provide DenseMapInfo for unsigned longs.
-template<> struct DenseMapInfo<unsigned long> {
- static inline unsigned long getEmptyKey() { return ~0UL; }
- static inline unsigned long getTombstoneKey() { return ~0UL - 1L; }
- static unsigned getHashValue(const unsigned long& Val) {
- return (unsigned)(Val * 37UL);
- }
- static bool isEqual(const unsigned long& LHS, const unsigned long& RHS) {
- return LHS == RHS;
- }
-};
-
-// Provide DenseMapInfo for unsigned long longs.
-template<> struct DenseMapInfo<unsigned long long> {
- static inline unsigned long long getEmptyKey() { return ~0ULL; }
- static inline unsigned long long getTombstoneKey() { return ~0ULL - 1ULL; }
- static unsigned getHashValue(const unsigned long long& Val) {
- return (unsigned)(Val * 37ULL);
- }
- static bool isEqual(const unsigned long long& LHS,
- const unsigned long long& RHS) {
- return LHS == RHS;
- }
-};
-
-// Provide DenseMapInfo for ints.
-template<> struct DenseMapInfo<int> {
- static inline int getEmptyKey() { return 0x7fffffff; }
- static inline int getTombstoneKey() { return -0x7fffffff - 1; }
- static unsigned getHashValue(const int& Val) { return (unsigned)(Val * 37); }
- static bool isEqual(const int& LHS, const int& RHS) {
- return LHS == RHS;
- }
-};
-
-// Provide DenseMapInfo for longs.
-template<> struct DenseMapInfo<long> {
- static inline long getEmptyKey() {
- return (1UL << (sizeof(long) * 8 - 1)) - 1L;
- }
- static inline long getTombstoneKey() { return getEmptyKey() - 1L; }
- static unsigned getHashValue(const long& Val) {
- return (unsigned)(Val * 37L);
- }
- static bool isEqual(const long& LHS, const long& RHS) {
- return LHS == RHS;
- }
-};
-
-// Provide DenseMapInfo for long longs.
-template<> struct DenseMapInfo<long long> {
- static inline long long getEmptyKey() { return 0x7fffffffffffffffLL; }
- static inline long long getTombstoneKey() { return -0x7fffffffffffffffLL-1; }
- static unsigned getHashValue(const long long& Val) {
- return (unsigned)(Val * 37LL);
- }
- static bool isEqual(const long long& LHS,
- const long long& RHS) {
- return LHS == RHS;
- }
-};
-
-// Provide DenseMapInfo for all pairs whose members have info.
-template<typename T, typename U>
-struct DenseMapInfo<std::pair<T, U> > {
- typedef std::pair<T, U> Pair;
- typedef DenseMapInfo<T> FirstInfo;
- typedef DenseMapInfo<U> SecondInfo;
-
- static inline Pair getEmptyKey() {
- return std::make_pair(FirstInfo::getEmptyKey(),
- SecondInfo::getEmptyKey());
- }
- static inline Pair getTombstoneKey() {
- return std::make_pair(FirstInfo::getTombstoneKey(),
- SecondInfo::getEmptyKey());
- }
- static unsigned getHashValue(const Pair& PairVal) {
- uint64_t key = (uint64_t)FirstInfo::getHashValue(PairVal.first) << 32
- | (uint64_t)SecondInfo::getHashValue(PairVal.second);
- key += ~(key << 32);
- key ^= (key >> 22);
- key += ~(key << 13);
- key ^= (key >> 8);
- key += (key << 3);
- key ^= (key >> 15);
- key += ~(key << 27);
- key ^= (key >> 31);
- return (unsigned)key;
- }
- static bool isEqual(const Pair& LHS, const Pair& RHS) { return LHS == RHS; }
-};
-
-} // end namespace objc
+// From llvm/Support/Compiler.h
+#define LLVM_USE_RVALUE_REFERENCES 1
+#define llvm_move(value) (::std::move(value))
+#define MIN_BUCKETS 4
+#define MIN_COMPACT 1024
namespace objc {
template<typename KeyT, typename ValueT,
typename KeyInfoT = DenseMapInfo<KeyT>,
- typename ValueInfoT = DenseMapInfo<ValueT>, bool IsConst = false>
+ bool IsConst = false>
class DenseMapIterator;
// ZeroValuesArePurgeable=true is used by the refcount table.
// For memory size, we allow rehashes and table insertions to
// remove a zero value as if it were a tombstone.
-template<typename KeyT, typename ValueT,
- bool ZeroValuesArePurgeable = false,
- typename KeyInfoT = DenseMapInfo<KeyT>,
- typename ValueInfoT = DenseMapInfo<ValueT> >
-class DenseMap {
+template<typename DerivedT,
+ typename KeyT, typename ValueT, typename KeyInfoT,
+ bool ZeroValuesArePurgeable = false>
+class DenseMapBase {
+protected:
typedef std::pair<KeyT, ValueT> BucketT;
- unsigned NumBuckets;
- BucketT *Buckets;
- unsigned NumEntries;
- unsigned NumTombstones;
public:
typedef KeyT key_type;
typedef ValueT mapped_type;
typedef BucketT value_type;
- DenseMap(const DenseMap &other) {
- NumBuckets = 0;
- CopyFrom(other);
- }
-
- explicit DenseMap(unsigned NumInitBuckets = 64) {
- init(NumInitBuckets);
- }
-
- template<typename InputIt>
- DenseMap(const InputIt &I, const InputIt &E) {
- init(64);
- insert(I, E);
- }
-
- ~DenseMap() {
- const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
- for (BucketT *P = Buckets, *E = Buckets+NumBuckets; P != E; ++P) {
- if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
- !KeyInfoT::isEqual(P->first, TombstoneKey))
- P->second.~ValueT();
- P->first.~KeyT();
- }
-#ifndef NDEBUG
- memset(Buckets, 0x5a, sizeof(BucketT)*NumBuckets);
-#endif
- operator delete(Buckets);
- }
-
typedef DenseMapIterator<KeyT, ValueT, KeyInfoT> iterator;
typedef DenseMapIterator<KeyT, ValueT,
- KeyInfoT, ValueInfoT, true> const_iterator;
+ KeyInfoT, true> const_iterator;
inline iterator begin() {
// When the map is empty, avoid the overhead of AdvancePastEmptyBuckets().
- return empty() ? end() : iterator(Buckets, Buckets+NumBuckets);
+ return empty() ? end() : iterator(getBuckets(), getBucketsEnd());
}
inline iterator end() {
- return iterator(Buckets+NumBuckets, Buckets+NumBuckets);
+ return iterator(getBucketsEnd(), getBucketsEnd(), true);
}
inline const_iterator begin() const {
- return empty() ? end() : const_iterator(Buckets, Buckets+NumBuckets);
+ return empty() ? end() : const_iterator(getBuckets(), getBucketsEnd());
}
inline const_iterator end() const {
- return const_iterator(Buckets+NumBuckets, Buckets+NumBuckets);
+ return const_iterator(getBucketsEnd(), getBucketsEnd(), true);
}
- bool empty() const { return NumEntries == 0; }
- unsigned size() const { return NumEntries; }
+ bool empty() const { return getNumEntries() == 0; }
+ unsigned size() const { return getNumEntries(); }
/// Grow the densemap so that it has at least Size buckets. Does not shrink
- void resize(size_t Size) { grow(Size); }
+ void resize(size_t Size) {
+ if (Size > getNumBuckets())
+ grow(Size);
+ }
void clear() {
- if (NumEntries == 0 && NumTombstones == 0) return;
+ if (getNumEntries() == 0 && getNumTombstones() == 0) return;
// If the capacity of the array is huge, and the # elements used is small,
// shrink the array.
- if (NumEntries * 4 < NumBuckets && NumBuckets > 64) {
+ if (getNumEntries() * 4 < getNumBuckets() &&
+ getNumBuckets() > MIN_BUCKETS) {
shrink_and_clear();
return;
}
const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
- for (BucketT *P = Buckets, *E = Buckets+NumBuckets; P != E; ++P) {
+ for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
if (!KeyInfoT::isEqual(P->first, EmptyKey)) {
if (!KeyInfoT::isEqual(P->first, TombstoneKey)) {
P->second.~ValueT();
- --NumEntries;
+ decrementNumEntries();
}
P->first = EmptyKey;
}
}
- assert(NumEntries == 0 && "Node count imbalance!");
- NumTombstones = 0;
+ assert(getNumEntries() == 0 && "Node count imbalance!");
+ setNumTombstones(0);
}
/// count - Return true if the specified key is in the map.
bool count(const KeyT &Val) const {
- BucketT *TheBucket;
+ const BucketT *TheBucket;
return LookupBucketFor(Val, TheBucket);
}
iterator find(const KeyT &Val) {
BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
- return iterator(TheBucket, Buckets+NumBuckets);
+ return iterator(TheBucket, getBucketsEnd(), true);
return end();
}
const_iterator find(const KeyT &Val) const {
+ const BucketT *TheBucket;
+ if (LookupBucketFor(Val, TheBucket))
+ return const_iterator(TheBucket, getBucketsEnd(), true);
+ return end();
+ }
+
+ /// Alternate version of find() which allows a different, and possibly
+ /// less expensive, key type.
+ /// The DenseMapInfo is responsible for supplying methods
+ /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
+ /// type used.
+ template<class LookupKeyT>
+ iterator find_as(const LookupKeyT &Val) {
BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
- return const_iterator(TheBucket, Buckets+NumBuckets);
+ return iterator(TheBucket, getBucketsEnd(), true);
+ return end();
+ }
+ template<class LookupKeyT>
+ const_iterator find_as(const LookupKeyT &Val) const {
+ const BucketT *TheBucket;
+ if (LookupBucketFor(Val, TheBucket))
+ return const_iterator(TheBucket, getBucketsEnd(), true);
return end();
}
/// lookup - Return the entry for the specified key, or a default
/// constructed value if no such entry exists.
ValueT lookup(const KeyT &Val) const {
- BucketT *TheBucket;
+ const BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
return TheBucket->second;
return ValueT();
std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
BucketT *TheBucket;
if (LookupBucketFor(KV.first, TheBucket))
- return std::make_pair(iterator(TheBucket, Buckets+NumBuckets),
+ return std::make_pair(iterator(TheBucket, getBucketsEnd(), true),
false); // Already in map.
// Otherwise, insert the new element.
TheBucket = InsertIntoBucket(KV.first, KV.second, TheBucket);
- return std::make_pair(iterator(TheBucket, Buckets+NumBuckets),
- true);
+ return std::make_pair(iterator(TheBucket, getBucketsEnd(), true), true);
}
/// insert - Range insertion of pairs.
insert(*I);
}
+ // Clear if empty.
+ // Shrink if at least 15/16 empty and larger than MIN_COMPACT.
+ void compact() {
+ if (getNumEntries() == 0) {
+ shrink_and_clear();
+ }
+ else if (getNumBuckets() / 16 > getNumEntries() &&
+ getNumBuckets() > MIN_COMPACT)
+ {
+ grow(getNumEntries() * 2);
+ }
+ }
bool erase(const KeyT &Val) {
BucketT *TheBucket;
TheBucket->second.~ValueT();
TheBucket->first = getTombstoneKey();
- --NumEntries;
- ++NumTombstones;
+ decrementNumEntries();
+ incrementNumTombstones();
+ compact();
return true;
}
void erase(iterator I) {
BucketT *TheBucket = &*I;
TheBucket->second.~ValueT();
TheBucket->first = getTombstoneKey();
- --NumEntries;
- ++NumTombstones;
- }
-
- void swap(DenseMap& RHS) {
- std::swap(NumBuckets, RHS.NumBuckets);
- std::swap(Buckets, RHS.Buckets);
- std::swap(NumEntries, RHS.NumEntries);
- std::swap(NumTombstones, RHS.NumTombstones);
+ decrementNumEntries();
+ incrementNumTombstones();
+ compact();
}
value_type& FindAndConstruct(const KeyT &Key) {
return FindAndConstruct(Key).second;
}
- DenseMap& operator=(const DenseMap& other) {
- CopyFrom(other);
- return *this;
+#if LLVM_USE_RVALUE_REFERENCES
+ value_type& FindAndConstruct(KeyT &&Key) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return *TheBucket;
+
+ return *InsertIntoBucket(Key, ValueT(), TheBucket);
}
+ ValueT &operator[](KeyT &&Key) {
+ return FindAndConstruct(Key).second;
+ }
+#endif
+
/// isPointerIntoBucketsArray - Return true if the specified pointer points
/// somewhere into the DenseMap's array of buckets (i.e. either to a key or
/// value in the DenseMap).
bool isPointerIntoBucketsArray(const void *Ptr) const {
- return Ptr >= Buckets && Ptr < Buckets+NumBuckets;
+ return Ptr >= getBuckets() && Ptr < getBucketsEnd();
}
/// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
/// array. In conjunction with the previous method, this can be used to
/// determine whether an insertion caused the DenseMap to reallocate.
- const void *getPointerIntoBucketsArray() const { return Buckets; }
+ const void *getPointerIntoBucketsArray() const { return getBuckets(); }
-private:
- void CopyFrom(const DenseMap& other) {
- if (NumBuckets != 0 &&
- (!isPodLike<KeyInfoT>::value || !isPodLike<ValueInfoT>::value)) {
- const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
- for (BucketT *P = Buckets, *E = Buckets+NumBuckets; P != E; ++P) {
- if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
- !KeyInfoT::isEqual(P->first, TombstoneKey))
- P->second.~ValueT();
- P->first.~KeyT();
- }
- }
+protected:
+ DenseMapBase() {}
- NumEntries = other.NumEntries;
- NumTombstones = other.NumTombstones;
+ void destroyAll() {
+ if (getNumBuckets() == 0) // Nothing to do.
+ return;
+
+ const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
+ for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
+ if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
+ !KeyInfoT::isEqual(P->first, TombstoneKey))
+ P->second.~ValueT();
+ P->first.~KeyT();
+ }
- if (NumBuckets) {
#ifndef NDEBUG
- memset(Buckets, 0x5a, sizeof(BucketT)*NumBuckets);
+ memset((void*)getBuckets(), 0x5a, sizeof(BucketT)*getNumBuckets());
#endif
- operator delete(Buckets);
}
- Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) *
- other.NumBuckets));
- if (isPodLike<KeyInfoT>::value && isPodLike<ValueInfoT>::value)
- memcpy(Buckets, other.Buckets, other.NumBuckets * sizeof(BucketT));
+ void initEmpty() {
+ setNumEntries(0);
+ setNumTombstones(0);
+
+ assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
+ "# initial buckets must be a power of two!");
+ const KeyT EmptyKey = getEmptyKey();
+ for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
+ new (&B->first) KeyT(EmptyKey);
+ }
+
+ void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
+ initEmpty();
+
+ // Insert all the old elements.
+ const KeyT EmptyKey = getEmptyKey();
+ const KeyT TombstoneKey = getTombstoneKey();
+ for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
+ if (!KeyInfoT::isEqual(B->first, EmptyKey) &&
+ !KeyInfoT::isEqual(B->first, TombstoneKey) &&
+ !(ZeroValuesArePurgeable && B->second == 0)) {
+ // Insert the key/value into the new table.
+ BucketT *DestBucket;
+ bool FoundVal = LookupBucketFor(B->first, DestBucket);
+ (void)FoundVal; // silence warning.
+ assert(!FoundVal && "Key already in new map?");
+ DestBucket->first = llvm_move(B->first);
+ new (&DestBucket->second) ValueT(llvm_move(B->second));
+ incrementNumEntries();
+
+ // Free the value.
+ B->second.~ValueT();
+ }
+ B->first.~KeyT();
+ }
+
+#ifndef NDEBUG
+ if (OldBucketsBegin != OldBucketsEnd)
+ memset((void*)OldBucketsBegin, 0x5a,
+ sizeof(BucketT) * (OldBucketsEnd - OldBucketsBegin));
+#endif
+ }
+
+ template <typename OtherBaseT>
+ void copyFrom(const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT>& other) {
+ assert(getNumBuckets() == other.getNumBuckets());
+
+ setNumEntries(other.getNumEntries());
+ setNumTombstones(other.getNumTombstones());
+
+ if (isPodLike<KeyT>::value && isPodLike<ValueT>::value)
+ memcpy(getBuckets(), other.getBuckets(),
+ getNumBuckets() * sizeof(BucketT));
else
- for (size_t i = 0; i < other.NumBuckets; ++i) {
- new (&Buckets[i].first) KeyT(other.Buckets[i].first);
- if (!KeyInfoT::isEqual(Buckets[i].first, getEmptyKey()) &&
- !KeyInfoT::isEqual(Buckets[i].first, getTombstoneKey()))
- new (&Buckets[i].second) ValueT(other.Buckets[i].second);
+ for (size_t i = 0; i < getNumBuckets(); ++i) {
+ new (&getBuckets()[i].first) KeyT(other.getBuckets()[i].first);
+ if (!KeyInfoT::isEqual(getBuckets()[i].first, getEmptyKey()) &&
+ !KeyInfoT::isEqual(getBuckets()[i].first, getTombstoneKey()))
+ new (&getBuckets()[i].second) ValueT(other.getBuckets()[i].second);
}
- NumBuckets = other.NumBuckets;
}
+ void swap(DenseMapBase& RHS) {
+ std::swap(getNumEntries(), RHS.getNumEntries());
+ std::swap(getNumTombstones(), RHS.getNumTombstones());
+ }
+
+ static unsigned getHashValue(const KeyT &Val) {
+ return KeyInfoT::getHashValue(Val);
+ }
+ template<typename LookupKeyT>
+ static unsigned getHashValue(const LookupKeyT &Val) {
+ return KeyInfoT::getHashValue(Val);
+ }
+ static const KeyT getEmptyKey() {
+ return KeyInfoT::getEmptyKey();
+ }
+ static const KeyT getTombstoneKey() {
+ return KeyInfoT::getTombstoneKey();
+ }
+
+private:
+ unsigned getNumEntries() const {
+ return static_cast<const DerivedT *>(this)->getNumEntries();
+ }
+ void setNumEntries(unsigned Num) {
+ static_cast<DerivedT *>(this)->setNumEntries(Num);
+ }
+ void incrementNumEntries() {
+ setNumEntries(getNumEntries() + 1);
+ }
+ void decrementNumEntries() {
+ setNumEntries(getNumEntries() - 1);
+ }
+ unsigned getNumTombstones() const {
+ return static_cast<const DerivedT *>(this)->getNumTombstones();
+ }
+ void setNumTombstones(unsigned Num) {
+ static_cast<DerivedT *>(this)->setNumTombstones(Num);
+ }
+ void incrementNumTombstones() {
+ setNumTombstones(getNumTombstones() + 1);
+ }
+ void decrementNumTombstones() {
+ setNumTombstones(getNumTombstones() - 1);
+ }
+ const BucketT *getBuckets() const {
+ return static_cast<const DerivedT *>(this)->getBuckets();
+ }
+ BucketT *getBuckets() {
+ return static_cast<DerivedT *>(this)->getBuckets();
+ }
+ unsigned getNumBuckets() const {
+ return static_cast<const DerivedT *>(this)->getNumBuckets();
+ }
+ BucketT *getBucketsEnd() {
+ return getBuckets() + getNumBuckets();
+ }
+ const BucketT *getBucketsEnd() const {
+ return getBuckets() + getNumBuckets();
+ }
+
+ void grow(unsigned AtLeast) {
+ static_cast<DerivedT *>(this)->grow(AtLeast);
+ }
+
+ void shrink_and_clear() {
+ static_cast<DerivedT *>(this)->shrink_and_clear();
+ }
+
+
BucketT *InsertIntoBucket(const KeyT &Key, const ValueT &Value,
BucketT *TheBucket) {
+ TheBucket = InsertIntoBucketImpl(Key, TheBucket);
+
+ TheBucket->first = Key;
+ new (&TheBucket->second) ValueT(Value);
+ return TheBucket;
+ }
+
+#if LLVM_USE_RVALUE_REFERENCES
+ BucketT *InsertIntoBucket(const KeyT &Key, ValueT &&Value,
+ BucketT *TheBucket) {
+ TheBucket = InsertIntoBucketImpl(Key, TheBucket);
+
+ TheBucket->first = Key;
+ new (&TheBucket->second) ValueT(std::move(Value));
+ return TheBucket;
+ }
+
+ BucketT *InsertIntoBucket(KeyT &&Key, ValueT &&Value, BucketT *TheBucket) {
+ TheBucket = InsertIntoBucketImpl(Key, TheBucket);
+
+ TheBucket->first = std::move(Key);
+ new (&TheBucket->second) ValueT(std::move(Value));
+ return TheBucket;
+ }
+#endif
+
+ BucketT *InsertIntoBucketImpl(const KeyT &Key, BucketT *TheBucket) {
// If the load of the hash table is more than 3/4, grow the table.
// If fewer than 1/8 of the buckets are empty (meaning that many are
// filled with tombstones), rehash the table without growing.
// probe almost the entire table until it found the empty bucket. If the
    // table were completely filled with tombstones, no lookup would ever succeed,
// causing infinite loops in lookup.
- ++NumEntries;
- if (NumEntries*4 >= NumBuckets*3) {
+ unsigned NewNumEntries = getNumEntries() + 1;
+ unsigned NumBuckets = getNumBuckets();
+ if (NewNumEntries*4 >= NumBuckets*3) {
this->grow(NumBuckets * 2);
LookupBucketFor(Key, TheBucket);
+ NumBuckets = getNumBuckets();
}
- else if (NumBuckets-(NumEntries+NumTombstones) < NumBuckets/8) {
+ if (NumBuckets-(NewNumEntries+getNumTombstones()) <= NumBuckets/8) {
this->grow(NumBuckets);
LookupBucketFor(Key, TheBucket);
}
+ assert(TheBucket);
+ // Only update the state after we've grown our bucket space appropriately
+ // so that when growing buckets we have self-consistent entry count.
// If we are writing over a tombstone or zero value, remember this.
- if (!KeyInfoT::isEqual(TheBucket->first, getEmptyKey())) {
- if (KeyInfoT::isEqual(TheBucket->first, getTombstoneKey())) {
- --NumTombstones;
- } else {
- assert(ZeroValuesArePurgeable && TheBucket->second == 0);
- TheBucket->second.~ValueT();
- --NumEntries;
- }
+ if (KeyInfoT::isEqual(TheBucket->first, getEmptyKey())) {
+ // Replacing an empty bucket.
+ incrementNumEntries();
+ }
+ else if (KeyInfoT::isEqual(TheBucket->first, getTombstoneKey())) {
+ // Replacing a tombstone.
+ incrementNumEntries();
+ decrementNumTombstones();
+ }
+ else if (ZeroValuesArePurgeable && TheBucket->second == 0) {
+ // Purging a zero. No accounting changes.
+ TheBucket->second.~ValueT();
+ } else {
+ // Updating an existing entry. No accounting changes.
}
- TheBucket->first = Key;
- new (&TheBucket->second) ValueT(Value);
return TheBucket;
}
- static unsigned getHashValue(const KeyT &Val) {
- return KeyInfoT::getHashValue(Val);
- }
- static const KeyT getEmptyKey() {
- return KeyInfoT::getEmptyKey();
- }
- static const KeyT getTombstoneKey() {
- return KeyInfoT::getTombstoneKey();
- }
-
/// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
/// FoundBucket. If the bucket contains the key and a value, this returns
/// true, otherwise it returns a bucket with an empty marker or tombstone
/// or zero value and returns false.
- bool LookupBucketFor(const KeyT &Val, BucketT *&FoundBucket) const {
- unsigned BucketNo = getHashValue(Val);
- unsigned ProbeAmt = 1;
- unsigned ProbeCount = 0;
- BucketT *BucketsPtr = Buckets;
+ template<typename LookupKeyT>
+ bool LookupBucketFor(const LookupKeyT &Val,
+ const BucketT *&FoundBucket) const {
+ const BucketT *BucketsPtr = getBuckets();
+ const unsigned NumBuckets = getNumBuckets();
+
+ if (NumBuckets == 0) {
+ FoundBucket = 0;
+ return false;
+ }
// FoundTombstone - Keep track of whether we find a tombstone or zero value while probing.
- BucketT *FoundTombstone = 0;
+ const BucketT *FoundTombstone = 0;
const KeyT EmptyKey = getEmptyKey();
const KeyT TombstoneKey = getTombstoneKey();
assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
!KeyInfoT::isEqual(Val, TombstoneKey) &&
"Empty/Tombstone value shouldn't be inserted into map!");
- do {
- BucketT *ThisBucket = BucketsPtr + (BucketNo & (NumBuckets-1));
+ unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
+ unsigned ProbeAmt = 1;
+ while (1) {
+ const BucketT *ThisBucket = BucketsPtr + BucketNo;
// Found Val's bucket? If so, return it.
- if (KeyInfoT::isEqual(ThisBucket->first, Val)) {
+ if (KeyInfoT::isEqual(Val, ThisBucket->first)) {
FoundBucket = ThisBucket;
return true;
}
// Otherwise, it's a hash collision or a tombstone, continue quadratic
// probing.
+ if (ProbeAmt > NumBuckets) {
+ // No empty buckets in table. Die.
+ _objc_fatal("Hash table corrupted. This is probably a memory error "
+ "somewhere. (table at %p, buckets at %p (%zu bytes), "
+ "%u buckets, %u entries, %u tombstones, "
+ "data %p %p %p %p)",
+ this, BucketsPtr, malloc_size(BucketsPtr),
+ NumBuckets, getNumEntries(), getNumTombstones(),
+ ((void**)BucketsPtr)[0], ((void**)BucketsPtr)[1],
+ ((void**)BucketsPtr)[2], ((void**)BucketsPtr)[3]);
+ }
BucketNo += ProbeAmt++;
- ProbeCount++;
- } while (ProbeCount < NumBuckets);
- // If we get here then we did not find a bucket. This is a bug. Emit some diagnostics and abort.
- unsigned EmptyCount = 0, TombstoneCount = 0, ZeroCount = 0, ValueCount = 0;
- BucketsPtr = Buckets;
- for (unsigned i=0; i<NumBuckets; i++) {
- if (KeyInfoT::isEqual(BucketsPtr->first, EmptyKey)) EmptyCount++;
- else if (KeyInfoT::isEqual(BucketsPtr->first, TombstoneKey)) TombstoneCount++;
- else if (KeyInfoT::isEqual(BucketsPtr->first, 0)) ZeroCount++;
- else ValueCount++;
- BucketsPtr++;
+      BucketNo &= (NumBuckets-1);
+ }
+ }
+
+ template <typename LookupKeyT>
+ bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
+ const BucketT *ConstFoundBucket;
+ bool Result = const_cast<const DenseMapBase *>(this)
+ ->LookupBucketFor(Val, ConstFoundBucket);
+ FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
+ return Result;
+ }
+
+public:
+ /// Return the approximate size (in bytes) of the actual map.
+ /// This is just the raw memory used by DenseMap.
+ /// If entries are pointers to objects, the size of the referenced objects
+ /// are not included.
+ size_t getMemorySize() const {
+ return getNumBuckets() * sizeof(BucketT);
+ }
+};
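// Illustrative aside, not part of the patch: DenseMapBase above is a CRTP
// base. It never owns buckets itself; every accessor bottoms out in a
// static_cast to DerivedT, which is how DenseMap (heap buckets) and
// SmallDenseMap (inline-then-heap buckets) below share the probing,
// insertion, and rehashing logic without virtual dispatch. A minimal,
// self-contained sketch of the same pattern (names are hypothetical):
template <typename DerivedT>
struct ExampleCounterBase {
  void increment() {
    // Resolved at compile time against the derived class's accessors.
    DerivedT *Self = static_cast<DerivedT *>(this);
    Self->setCount(Self->getCount() + 1);
  }
};
struct ExampleCounter : ExampleCounterBase<ExampleCounter> {
  unsigned N;
  ExampleCounter() : N(0) {}
  unsigned getCount() const { return N; }
  void setCount(unsigned Num) { N = Num; }
};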
+
+template<typename KeyT, typename ValueT,
+ bool ZeroValuesArePurgeable = false,
+ typename KeyInfoT = DenseMapInfo<KeyT> >
+class DenseMap
+ : public DenseMapBase<DenseMap<KeyT, ValueT, ZeroValuesArePurgeable, KeyInfoT>,
+ KeyT, ValueT, KeyInfoT, ZeroValuesArePurgeable> {
+ // Lift some types from the dependent base class into this class for
+ // simplicity of referring to them.
+ typedef DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, ZeroValuesArePurgeable> BaseT;
+ typedef typename BaseT::BucketT BucketT;
+ friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, ZeroValuesArePurgeable>;
+
+ BucketT *Buckets;
+ unsigned NumEntries;
+ unsigned NumTombstones;
+ unsigned NumBuckets;
+
+public:
+ explicit DenseMap(unsigned NumInitBuckets = 0) {
+ init(NumInitBuckets);
+ }
+
+ DenseMap(const DenseMap &other) {
+ init(0);
+ copyFrom(other);
+ }
+
+#if LLVM_USE_RVALUE_REFERENCES
+ DenseMap(DenseMap &&other) {
+ init(0);
+ swap(other);
+ }
+#endif
+
+ template<typename InputIt>
+ DenseMap(const InputIt &I, const InputIt &E) {
+ init(NextPowerOf2(std::distance(I, E)));
+ this->insert(I, E);
+ }
+
+ ~DenseMap() {
+ this->destroyAll();
+ operator delete(Buckets);
+ }
+
+ void swap(DenseMap& RHS) {
+ std::swap(Buckets, RHS.Buckets);
+ std::swap(NumEntries, RHS.NumEntries);
+ std::swap(NumTombstones, RHS.NumTombstones);
+ std::swap(NumBuckets, RHS.NumBuckets);
+ }
+
+ DenseMap& operator=(const DenseMap& other) {
+ copyFrom(other);
+ return *this;
+ }
+
+#if LLVM_USE_RVALUE_REFERENCES
+ DenseMap& operator=(DenseMap &&other) {
+ this->destroyAll();
+ operator delete(Buckets);
+ init(0);
+ swap(other);
+ return *this;
+ }
+#endif
+
+ void copyFrom(const DenseMap& other) {
+ this->destroyAll();
+ operator delete(Buckets);
+ if (allocateBuckets(other.NumBuckets)) {
+ this->BaseT::copyFrom(other);
+ } else {
+ NumEntries = 0;
+ NumTombstones = 0;
}
- _objc_fatal("DenseMap::LookupBucketFor() failed to find available bucket.\nNumBuckets = %d, EmptyCount = %d, TombstoneCount = %d, ZeroCount = %d, ValueCount = %d\n", NumBuckets, EmptyCount, TombstoneCount, ZeroCount, ValueCount);
}
void init(unsigned InitBuckets) {
- NumEntries = 0;
- NumTombstones = 0;
- NumBuckets = InitBuckets;
- assert(InitBuckets && (InitBuckets & (InitBuckets-1)) == 0 &&
- "# initial buckets must be a power of two!");
- Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT)*InitBuckets));
- // Initialize all the keys to EmptyKey.
- const KeyT EmptyKey = getEmptyKey();
- for (unsigned i = 0; i != InitBuckets; ++i)
- new (&Buckets[i].first) KeyT(EmptyKey);
+ if (allocateBuckets(InitBuckets)) {
+ this->BaseT::initEmpty();
+ } else {
+ NumEntries = 0;
+ NumTombstones = 0;
+ }
}
void grow(unsigned AtLeast) {
unsigned OldNumBuckets = NumBuckets;
BucketT *OldBuckets = Buckets;
- // Double the number of buckets.
- while (NumBuckets < AtLeast)
- NumBuckets <<= 1;
- NumTombstones = 0;
+ allocateBuckets(std::max<unsigned>(MIN_BUCKETS, NextPowerOf2(AtLeast)));
+ assert(Buckets);
+ if (!OldBuckets) {
+ this->BaseT::initEmpty();
+ return;
+ }
+
+ this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
+
+ // Free the old table.
+ operator delete(OldBuckets);
+ }
+
+ void shrink_and_clear() {
+ unsigned OldNumEntries = NumEntries;
+ this->destroyAll();
+
+ // Reduce the number of buckets.
+ unsigned NewNumBuckets = 0;
+ if (OldNumEntries)
+ NewNumBuckets = std::max(MIN_BUCKETS, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
+ if (NewNumBuckets == NumBuckets) {
+ this->BaseT::initEmpty();
+ return;
+ }
+
+ operator delete(Buckets);
+ init(NewNumBuckets);
+ }
+
+private:
+ unsigned getNumEntries() const {
+ return NumEntries;
+ }
+ void setNumEntries(unsigned Num) {
+ NumEntries = Num;
+ }
+
+ unsigned getNumTombstones() const {
+ return NumTombstones;
+ }
+ void setNumTombstones(unsigned Num) {
+ NumTombstones = Num;
+ }
+
+ BucketT *getBuckets() const {
+ return Buckets;
+ }
+
+ unsigned getNumBuckets() const {
+ return NumBuckets;
+ }
+
+ bool allocateBuckets(unsigned Num) {
+ NumBuckets = Num;
+ if (NumBuckets == 0) {
+ Buckets = 0;
+ return false;
+ }
+
Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT)*NumBuckets));
+ return true;
+ }
+};
- // Initialize all the keys to EmptyKey.
- const KeyT EmptyKey = getEmptyKey();
- for (unsigned i = 0, e = NumBuckets; i != e; ++i)
- new (&Buckets[i].first) KeyT(EmptyKey);
+template<typename KeyT, typename ValueT,
+ unsigned InlineBuckets = 4,
+ bool ZeroValuesArePurgeable = false,
+ typename KeyInfoT = DenseMapInfo<KeyT> >
+class SmallDenseMap
+ : public DenseMapBase<SmallDenseMap<KeyT, ValueT, InlineBuckets, ZeroValuesArePurgeable, KeyInfoT>,
+ KeyT, ValueT, KeyInfoT, ZeroValuesArePurgeable> {
+ // Lift some types from the dependent base class into this class for
+ // simplicity of referring to them.
+ typedef DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, ZeroValuesArePurgeable> BaseT;
+ typedef typename BaseT::BucketT BucketT;
+ friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, ZeroValuesArePurgeable>;
+
+ unsigned Small : 1;
+ unsigned NumEntries : 31;
+ unsigned NumTombstones;
- // Insert all the old elements.
- const KeyT TombstoneKey = getTombstoneKey();
- for (BucketT *B = OldBuckets, *E = OldBuckets+OldNumBuckets; B != E; ++B) {
- if (!KeyInfoT::isEqual(B->first, EmptyKey) &&
- !KeyInfoT::isEqual(B->first, TombstoneKey))
- {
- // Valid key/value, or zero value
- if (!ZeroValuesArePurgeable || B->second != 0) {
- // Insert the key/value into the new table.
- BucketT *DestBucket;
- bool FoundVal = LookupBucketFor(B->first, DestBucket);
- (void)FoundVal; // silence warning.
- assert(!FoundVal && "Key already in new map?");
- DestBucket->first = B->first;
- new (&DestBucket->second) ValueT(B->second);
- } else {
- NumEntries--;
+ struct LargeRep {
+ BucketT *Buckets;
+ unsigned NumBuckets;
+ };
+
+ /// A "union" of an inline bucket array and the struct representing
+ /// a large bucket. This union will be discriminated by the 'Small' bit.
+ AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
+
+public:
+ explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
+ init(NumInitBuckets);
+ }
+
+ SmallDenseMap(const SmallDenseMap &other) {
+ init(0);
+ copyFrom(other);
+ }
+
+#if LLVM_USE_RVALUE_REFERENCES
+ SmallDenseMap(SmallDenseMap &&other) {
+ init(0);
+ swap(other);
+ }
+#endif
+
+ template<typename InputIt>
+ SmallDenseMap(const InputIt &I, const InputIt &E) {
+ init(NextPowerOf2(std::distance(I, E)));
+ this->insert(I, E);
+ }
+
+ ~SmallDenseMap() {
+ this->destroyAll();
+ deallocateBuckets();
+ }
+
+ void swap(SmallDenseMap& RHS) {
+ unsigned TmpNumEntries = RHS.NumEntries;
+ RHS.NumEntries = NumEntries;
+ NumEntries = TmpNumEntries;
+ std::swap(NumTombstones, RHS.NumTombstones);
+
+ const KeyT EmptyKey = this->getEmptyKey();
+ const KeyT TombstoneKey = this->getTombstoneKey();
+ if (Small && RHS.Small) {
+ // If we're swapping inline bucket arrays, we have to cope with some of
+ // the tricky bits of DenseMap's storage system: the buckets are not
+ // fully initialized. Thus we swap every key, but we may have
+ // a one-directional move of the value.
+ for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
+ BucketT *LHSB = &getInlineBuckets()[i],
+ *RHSB = &RHS.getInlineBuckets()[i];
+ bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->first, EmptyKey) &&
+ !KeyInfoT::isEqual(LHSB->first, TombstoneKey));
+ bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->first, EmptyKey) &&
+ !KeyInfoT::isEqual(RHSB->first, TombstoneKey));
+ if (hasLHSValue && hasRHSValue) {
+ // Swap together if we can...
+ std::swap(*LHSB, *RHSB);
+ continue;
+ }
+      // Swap separately and handle any asymmetry.
+ std::swap(LHSB->first, RHSB->first);
+ if (hasLHSValue) {
+ new (&RHSB->second) ValueT(llvm_move(LHSB->second));
+ LHSB->second.~ValueT();
+ } else if (hasRHSValue) {
+ new (&LHSB->second) ValueT(llvm_move(RHSB->second));
+ RHSB->second.~ValueT();
}
+ }
+ return;
+ }
+ if (!Small && !RHS.Small) {
+ std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
+ std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
+ return;
+ }
- // Free the value.
- B->second.~ValueT();
+ SmallDenseMap &SmallSide = Small ? *this : RHS;
+ SmallDenseMap &LargeSide = Small ? RHS : *this;
+
+ // First stash the large side's rep and move the small side across.
+ LargeRep TmpRep = llvm_move(*LargeSide.getLargeRep());
+ LargeSide.getLargeRep()->~LargeRep();
+ LargeSide.Small = true;
+ // This is similar to the standard move-from-old-buckets, but the bucket
+ // count hasn't actually rotated in this case. So we have to carefully
+ // move construct the keys and values into their new locations, but there
+ // is no need to re-hash things.
+ for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
+ BucketT *NewB = &LargeSide.getInlineBuckets()[i],
+ *OldB = &SmallSide.getInlineBuckets()[i];
+ new (&NewB->first) KeyT(llvm_move(OldB->first));
+ OldB->first.~KeyT();
+ if (!KeyInfoT::isEqual(NewB->first, EmptyKey) &&
+ !KeyInfoT::isEqual(NewB->first, TombstoneKey)) {
+ new (&NewB->second) ValueT(llvm_move(OldB->second));
+ OldB->second.~ValueT();
}
- B->first.~KeyT();
}
-#ifndef NDEBUG
- memset(OldBuckets, 0x5a, sizeof(BucketT)*OldNumBuckets);
+ // The hard part of moving the small buckets across is done, just move
+ // the TmpRep into its new home.
+ SmallSide.Small = false;
+ new (SmallSide.getLargeRep()) LargeRep(llvm_move(TmpRep));
+ }
+
+ SmallDenseMap& operator=(const SmallDenseMap& other) {
+ copyFrom(other);
+ return *this;
+ }
+
+#if LLVM_USE_RVALUE_REFERENCES
+ SmallDenseMap& operator=(SmallDenseMap &&other) {
+ this->destroyAll();
+ deallocateBuckets();
+ init(0);
+ swap(other);
+ return *this;
+ }
#endif
+
+ void copyFrom(const SmallDenseMap& other) {
+ this->destroyAll();
+ deallocateBuckets();
+ Small = true;
+ if (other.getNumBuckets() > InlineBuckets) {
+ Small = false;
+ allocateBuckets(other.getNumBuckets());
+ }
+ this->BaseT::copyFrom(other);
+ }
+
+ void init(unsigned InitBuckets) {
+ Small = true;
+ if (InitBuckets > InlineBuckets) {
+ Small = false;
+ new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
+ }
+ this->BaseT::initEmpty();
+ }
+
+ void grow(unsigned AtLeast) {
+ if (AtLeast > InlineBuckets)
+ AtLeast = std::max<unsigned>(MIN_BUCKETS, NextPowerOf2(AtLeast));
+
+ if (Small) {
+ if (AtLeast <= InlineBuckets)
+ return; // Nothing to do.
+
+ // First move the inline buckets into a temporary storage.
+ AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
+ BucketT *TmpBegin = reinterpret_cast<BucketT *>(TmpStorage.buffer);
+ BucketT *TmpEnd = TmpBegin;
+
+ // Loop over the buckets, moving non-empty, non-tombstones into the
+ // temporary storage. Have the loop move the TmpEnd forward as it goes.
+ const KeyT EmptyKey = this->getEmptyKey();
+ const KeyT TombstoneKey = this->getTombstoneKey();
+ for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
+ if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
+ !KeyInfoT::isEqual(P->first, TombstoneKey)) {
+ assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
+ "Too many inline buckets!");
+ new (&TmpEnd->first) KeyT(llvm_move(P->first));
+ new (&TmpEnd->second) ValueT(llvm_move(P->second));
+ ++TmpEnd;
+ P->second.~ValueT();
+ }
+ P->first.~KeyT();
+ }
+
+ // Now make this map use the large rep, and move all the entries back
+ // into it.
+ Small = false;
+ new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
+ this->moveFromOldBuckets(TmpBegin, TmpEnd);
+ return;
+ }
+
+ LargeRep OldRep = llvm_move(*getLargeRep());
+ getLargeRep()->~LargeRep();
+ if (AtLeast <= InlineBuckets) {
+ Small = true;
+ } else {
+ new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
+ }
+
+ this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);
+
// Free the old table.
- operator delete(OldBuckets);
+ operator delete(OldRep.Buckets);
}
void shrink_and_clear() {
- unsigned OldNumBuckets = NumBuckets;
- BucketT *OldBuckets = Buckets;
+ unsigned OldSize = this->size();
+ this->destroyAll();
// Reduce the number of buckets.
- NumBuckets = NumEntries > 32 ? 1 << (Log2_32_Ceil(NumEntries) + 1)
- : 64;
- NumTombstones = 0;
- Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT)*NumBuckets));
+ unsigned NewNumBuckets = 0;
+ if (OldSize) {
+ NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
+ if (NewNumBuckets > InlineBuckets && NewNumBuckets < MIN_BUCKETS)
+ NewNumBuckets = MIN_BUCKETS;
+ }
+ if ((Small && NewNumBuckets <= InlineBuckets) ||
+ (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
+ this->BaseT::initEmpty();
+ return;
+ }
- // Initialize all the keys to EmptyKey.
- const KeyT EmptyKey = getEmptyKey();
- for (unsigned i = 0, e = NumBuckets; i != e; ++i)
- new (&Buckets[i].first) KeyT(EmptyKey);
+ deallocateBuckets();
+ init(NewNumBuckets);
+ }
- // Free the old buckets.
- const KeyT TombstoneKey = getTombstoneKey();
- for (BucketT *B = OldBuckets, *E = OldBuckets+OldNumBuckets; B != E; ++B) {
- if (!KeyInfoT::isEqual(B->first, EmptyKey) &&
- !KeyInfoT::isEqual(B->first, TombstoneKey)) {
- // Free the value.
- B->second.~ValueT();
- }
- B->first.~KeyT();
- }
+private:
+ unsigned getNumEntries() const {
+ return NumEntries;
+ }
+ void setNumEntries(unsigned Num) {
+ assert(Num < INT_MAX && "Cannot support more than INT_MAX entries");
+ NumEntries = Num;
+ }
-#ifndef NDEBUG
- memset(OldBuckets, 0x5a, sizeof(BucketT)*OldNumBuckets);
-#endif
- // Free the old table.
- operator delete(OldBuckets);
+ unsigned getNumTombstones() const {
+ return NumTombstones;
+ }
+ void setNumTombstones(unsigned Num) {
+ NumTombstones = Num;
+ }
+
+ const BucketT *getInlineBuckets() const {
+ assert(Small);
+ // Note that this cast does not violate aliasing rules as we assert that
+ // the memory's dynamic type is the small, inline bucket buffer, and the
+ // 'storage.buffer' static type is 'char *'.
+ return reinterpret_cast<const BucketT *>(storage.buffer);
+ }
+ BucketT *getInlineBuckets() {
+ return const_cast<BucketT *>(
+ const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
+ }
+ const LargeRep *getLargeRep() const {
+ assert(!Small);
+ // Note, same rule about aliasing as with getInlineBuckets.
+ return reinterpret_cast<const LargeRep *>(storage.buffer);
+ }
+ LargeRep *getLargeRep() {
+ return const_cast<LargeRep *>(
+ const_cast<const SmallDenseMap *>(this)->getLargeRep());
+ }
- NumEntries = 0;
+ const BucketT *getBuckets() const {
+ return Small ? getInlineBuckets() : getLargeRep()->Buckets;
+ }
+ BucketT *getBuckets() {
+ return const_cast<BucketT *>(
+ const_cast<const SmallDenseMap *>(this)->getBuckets());
+ }
+ unsigned getNumBuckets() const {
+ return Small ? InlineBuckets : getLargeRep()->NumBuckets;
+ }
+
+ void deallocateBuckets() {
+ if (Small)
+ return;
+
+ operator delete(getLargeRep()->Buckets);
+ getLargeRep()->~LargeRep();
+ }
+
+ LargeRep allocateBuckets(unsigned Num) {
+ assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
+ LargeRep Rep = {
+ static_cast<BucketT*>(operator new(sizeof(BucketT) * Num)), Num
+    };
+ return Rep;
}
};
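// Illustrative usage sketch, not part of the patch. It assumes these maps
// live in namespace objc alongside the DenseMapInfo specializations defined
// in the new header below (pointer and C-string keys are covered there).
static void exampleDenseMapUsage() {
  // Heap-allocated map keyed by C strings, hashed and compared via
  // DenseMapInfo<const char*>.
  objc::DenseMap<const char *, unsigned> counts;
  counts["alloc"] = 1;             // insert through operator[]
  counts["alloc"] += 1;            // update in place, no rehash needed

  // Keeps up to 4 buckets inline before spilling to a heap allocation.
  objc::SmallDenseMap<const void *, unsigned, 4> inlineMap;
  inlineMap[&counts] = 42;

  // Passing true for ZeroValuesArePurgeable lets zero-valued entries be
  // dropped during rehashing (see moveFromOldBuckets above).
  objc::DenseMap<const void *, size_t, true> purgeable;
  purgeable[&counts] = 0;
  purgeable.erase(&counts);        // a successful erase also calls compact()
}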
template<typename KeyT, typename ValueT,
- typename KeyInfoT, typename ValueInfoT, bool IsConst>
+ typename KeyInfoT, bool IsConst>
class DenseMapIterator {
typedef std::pair<KeyT, ValueT> Bucket;
typedef DenseMapIterator<KeyT, ValueT,
- KeyInfoT, ValueInfoT, true> ConstIterator;
- friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, ValueInfoT, true>;
+ KeyInfoT, true> ConstIterator;
+ friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, true>;
public:
typedef ptrdiff_t difference_type;
typedef typename conditional<IsConst, const Bucket, Bucket>::type value_type;
public:
DenseMapIterator() : Ptr(0), End(0) {}
- DenseMapIterator(pointer Pos, pointer E) : Ptr(Pos), End(E) {
- AdvancePastEmptyBuckets();
+ DenseMapIterator(pointer Pos, pointer E, bool NoAdvance = false)
+ : Ptr(Pos), End(E) {
+ if (!NoAdvance) AdvancePastEmptyBuckets();
}
// If IsConst is true this is a converting constructor from iterator to
// const_iterator and the default copy constructor is used.
// Otherwise this is a copy constructor for iterator.
DenseMapIterator(const DenseMapIterator<KeyT, ValueT,
- KeyInfoT, ValueInfoT, false>& I)
+ KeyInfoT, false>& I)
: Ptr(I.Ptr), End(I.End) {}
reference operator*() const {
--- /dev/null
+//===- llvm/ADT/DenseMapInfo.h - Type traits for DenseMap -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines DenseMapInfo traits for DenseMap.
+//
+//===----------------------------------------------------------------------===//
+
+// Taken from llvmCore-3425.0.31.
+
+#ifndef LLVM_ADT_DENSEMAPINFO_H
+#define LLVM_ADT_DENSEMAPINFO_H
+
+#include "objc-private.h"
+#include "llvm-type_traits.h"
+
+namespace objc {
+
+template<typename T>
+struct DenseMapInfo {
+ //static inline T getEmptyKey();
+ //static inline T getTombstoneKey();
+ //static unsigned getHashValue(const T &Val);
+ //static bool isEqual(const T &LHS, const T &RHS);
+};
+
+// Provide DenseMapInfo for all pointers.
+template<typename T>
+struct DenseMapInfo<T*> {
+ static inline T* getEmptyKey() {
+ uintptr_t Val = static_cast<uintptr_t>(-1);
+ return reinterpret_cast<T*>(Val);
+ }
+ static inline T* getTombstoneKey() {
+ uintptr_t Val = static_cast<uintptr_t>(-2);
+ return reinterpret_cast<T*>(Val);
+ }
+ static unsigned getHashValue(const T *PtrVal) {
+ return (unsigned((uintptr_t)PtrVal) >> 4) ^
+ (unsigned((uintptr_t)PtrVal) >> 9);
+ }
+ static bool isEqual(const T *LHS, const T *RHS) { return LHS == RHS; }
+};
+
+// Provide DenseMapInfo for cstrings.
+template<> struct DenseMapInfo<const char*> {
+ static inline const char* getEmptyKey() {
+ return reinterpret_cast<const char *>((intptr_t)-1);
+ }
+ static inline const char* getTombstoneKey() {
+ return reinterpret_cast<const char *>((intptr_t)-2);
+ }
+ static unsigned getHashValue(const char* const &Val) {
+ return _objc_strhash(Val);
+ }
+ static bool isEqual(const char* const &LHS, const char* const &RHS) {
+ return 0 == strcmp(LHS, RHS);
+ }
+};
+
+// Provide DenseMapInfo for chars.
+template<> struct DenseMapInfo<char> {
+ static inline char getEmptyKey() { return ~0; }
+ static inline char getTombstoneKey() { return ~0 - 1; }
+ static unsigned getHashValue(const char& Val) { return Val * 37U; }
+ static bool isEqual(const char &LHS, const char &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for unsigned ints.
+template<> struct DenseMapInfo<unsigned> {
+ static inline unsigned getEmptyKey() { return ~0U; }
+ static inline unsigned getTombstoneKey() { return ~0U - 1; }
+ static unsigned getHashValue(const unsigned& Val) { return Val * 37U; }
+ static bool isEqual(const unsigned& LHS, const unsigned& RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for unsigned longs.
+template<> struct DenseMapInfo<unsigned long> {
+ static inline unsigned long getEmptyKey() { return ~0UL; }
+ static inline unsigned long getTombstoneKey() { return ~0UL - 1L; }
+ static unsigned getHashValue(const unsigned long& Val) {
+ return (unsigned)(Val * 37UL);
+ }
+ static bool isEqual(const unsigned long& LHS, const unsigned long& RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for unsigned long longs.
+template<> struct DenseMapInfo<unsigned long long> {
+ static inline unsigned long long getEmptyKey() { return ~0ULL; }
+ static inline unsigned long long getTombstoneKey() { return ~0ULL - 1ULL; }
+ static unsigned getHashValue(const unsigned long long& Val) {
+ return (unsigned)(Val * 37ULL);
+ }
+ static bool isEqual(const unsigned long long& LHS,
+ const unsigned long long& RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for ints.
+template<> struct DenseMapInfo<int> {
+ static inline int getEmptyKey() { return 0x7fffffff; }
+ static inline int getTombstoneKey() { return -0x7fffffff - 1; }
+ static unsigned getHashValue(const int& Val) { return (unsigned)(Val * 37U); }
+ static bool isEqual(const int& LHS, const int& RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for longs.
+template<> struct DenseMapInfo<long> {
+ static inline long getEmptyKey() {
+ return (1UL << (sizeof(long) * 8 - 1)) - 1UL;
+ }
+ static inline long getTombstoneKey() { return getEmptyKey() - 1L; }
+ static unsigned getHashValue(const long& Val) {
+ return (unsigned)(Val * 37UL);
+ }
+ static bool isEqual(const long& LHS, const long& RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for long longs.
+template<> struct DenseMapInfo<long long> {
+ static inline long long getEmptyKey() { return 0x7fffffffffffffffLL; }
+ static inline long long getTombstoneKey() { return -0x7fffffffffffffffLL-1; }
+ static unsigned getHashValue(const long long& Val) {
+ return (unsigned)(Val * 37ULL);
+ }
+ static bool isEqual(const long long& LHS,
+ const long long& RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for all pairs whose members have info.
+template<typename T, typename U>
+struct DenseMapInfo<std::pair<T, U> > {
+ typedef std::pair<T, U> Pair;
+ typedef DenseMapInfo<T> FirstInfo;
+ typedef DenseMapInfo<U> SecondInfo;
+
+ static inline Pair getEmptyKey() {
+ return std::make_pair(FirstInfo::getEmptyKey(),
+ SecondInfo::getEmptyKey());
+ }
+ static inline Pair getTombstoneKey() {
+ return std::make_pair(FirstInfo::getTombstoneKey(),
+ SecondInfo::getTombstoneKey());
+ }
+ static unsigned getHashValue(const Pair& PairVal) {
+ uint64_t key = (uint64_t)FirstInfo::getHashValue(PairVal.first) << 32
+ | (uint64_t)SecondInfo::getHashValue(PairVal.second);
+ key += ~(key << 32);
+ key ^= (key >> 22);
+ key += ~(key << 13);
+ key ^= (key >> 8);
+ key += (key << 3);
+ key ^= (key >> 15);
+ key += ~(key << 27);
+ key ^= (key >> 31);
+ return (unsigned)key;
+ }
+ static bool isEqual(const Pair &LHS, const Pair &RHS) {
+ return FirstInfo::isEqual(LHS.first, RHS.first) &&
+ SecondInfo::isEqual(LHS.second, RHS.second);
+ }
+};
+
+} // end namespace objc
+
+#endif
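// Illustrative sketch, not part of the patch: a client-side DenseMapInfo
// specialization for a hypothetical wrapper key type, showing the four
// members every specialization must provide (two reserved keys that can
// never be stored, a hash, and an equality test).
namespace objc {
  struct ExampleKey { const void *ptr; };   // hypothetical key type

  template<> struct DenseMapInfo<ExampleKey> {
    static inline ExampleKey getEmptyKey() {
      ExampleKey K = { reinterpret_cast<const void *>(uintptr_t(-1)) };
      return K;
    }
    static inline ExampleKey getTombstoneKey() {
      ExampleKey K = { reinterpret_cast<const void *>(uintptr_t(-2)) };
      return K;
    }
    static unsigned getHashValue(const ExampleKey &Val) {
      // Same shift-and-xor mix used by the pointer specialization above.
      return (unsigned((uintptr_t)Val.ptr) >> 4) ^
             (unsigned((uintptr_t)Val.ptr) >> 9);
    }
    static bool isEqual(const ExampleKey &LHS, const ExampleKey &RHS) {
      return LHS.ptr == RHS.ptr;
    }
  };
} // end namespace objc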
--- /dev/null
+//===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains some functions that are useful for math stuff.
+//
+//===----------------------------------------------------------------------===//
+
+// Taken from llvmCore-3425.0.31.
+
+#ifndef LLVM_SUPPORT_MATHEXTRAS_H
+#define LLVM_SUPPORT_MATHEXTRAS_H
+
+namespace objc {
+
+// NOTE: The following support functions use the _32/_64 extensions instead of
+// type overloading so that signed and unsigned integers can be used without
+// ambiguity.
+
+/// Hi_32 - This function returns the high 32 bits of a 64 bit value.
+inline uint32_t Hi_32(uint64_t Value) {
+ return static_cast<uint32_t>(Value >> 32);
+}
+
+/// Lo_32 - This function returns the low 32 bits of a 64 bit value.
+inline uint32_t Lo_32(uint64_t Value) {
+ return static_cast<uint32_t>(Value);
+}
+
+/// isInt - Checks if an integer fits into the given bit width.
+template<unsigned N>
+inline bool isInt(int64_t x) {
+ return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
+}
+// Template specializations to get better code for common cases.
+template<>
+inline bool isInt<8>(int64_t x) {
+ return static_cast<int8_t>(x) == x;
+}
+template<>
+inline bool isInt<16>(int64_t x) {
+ return static_cast<int16_t>(x) == x;
+}
+template<>
+inline bool isInt<32>(int64_t x) {
+ return static_cast<int32_t>(x) == x;
+}
+
+/// isShiftedInt<N,S> - Checks if a signed integer is an N bit number shifted
+/// left by S.
+template<unsigned N, unsigned S>
+inline bool isShiftedInt(int64_t x) {
+ return isInt<N+S>(x) && (x % (1<<S) == 0);
+}
+
+/// isUInt - Checks if an unsigned integer fits into the given bit width.
+template<unsigned N>
+inline bool isUInt(uint64_t x) {
+ return N >= 64 || x < (UINT64_C(1)<<N);
+}
+// Template specializations to get better code for common cases.
+template<>
+inline bool isUInt<8>(uint64_t x) {
+ return static_cast<uint8_t>(x) == x;
+}
+template<>
+inline bool isUInt<16>(uint64_t x) {
+ return static_cast<uint16_t>(x) == x;
+}
+template<>
+inline bool isUInt<32>(uint64_t x) {
+ return static_cast<uint32_t>(x) == x;
+}
+
+/// isShiftedUInt<N,S> - Checks if a unsigned integer is an N bit number shifted
+/// left by S.
+template<unsigned N, unsigned S>
+inline bool isShiftedUInt(uint64_t x) {
+ return isUInt<N+S>(x) && (x % (1<<S) == 0);
+}
+
+/// isUIntN - Checks if an unsigned integer fits into the given (dynamic)
+/// bit width.
+inline bool isUIntN(unsigned N, uint64_t x) {
+ return x == (x & (~0ULL >> (64 - N)));
+}
+
+/// isIntN - Checks if an signed integer fits into the given (dynamic)
+/// bit width.
+inline bool isIntN(unsigned N, int64_t x) {
+ return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
+}
+
+/// isMask_32 - This function returns true if the argument is a sequence of ones
+/// starting at the least significant bit with the remainder zero (32 bit
+/// version). Ex. isMask_32(0x0000FFFFU) == true.
+inline bool isMask_32(uint32_t Value) {
+ return Value && ((Value + 1) & Value) == 0;
+}
+
+/// isMask_64 - This function returns true if the argument is a sequence of ones
+/// starting at the least significant bit with the remainder zero (64 bit
+/// version).
+inline bool isMask_64(uint64_t Value) {
+ return Value && ((Value + 1) & Value) == 0;
+}
+
+/// isShiftedMask_32 - This function returns true if the argument contains a
+/// sequence of ones with the remainder zero (32 bit version.)
+/// Ex. isShiftedMask_32(0x0000FF00U) == true.
+inline bool isShiftedMask_32(uint32_t Value) {
+ return isMask_32((Value - 1) | Value);
+}
+
+/// isShiftedMask_64 - This function returns true if the argument contains a
+/// sequence of ones with the remainder zero (64 bit version.)
+inline bool isShiftedMask_64(uint64_t Value) {
+ return isMask_64((Value - 1) | Value);
+}
+
+/// isPowerOf2_32 - This function returns true if the argument is a power of
+/// two > 0. Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
+inline bool isPowerOf2_32(uint32_t Value) {
+ return Value && !(Value & (Value - 1));
+}
+
+/// isPowerOf2_64 - This function returns true if the argument is a power of two
+/// > 0 (64 bit edition.)
+inline bool isPowerOf2_64(uint64_t Value) {
+ return Value && !(Value & (Value - int64_t(1L)));
+}
+
+/// CountLeadingZeros_32 - this function performs the platform optimal form of
+/// counting the number of zeros from the most significant bit to the first one
+/// bit. Ex. CountLeadingZeros_32(0x00F000FF) == 8.
+/// Returns 32 if the word is zero.
+inline unsigned CountLeadingZeros_32(uint32_t Value) {
+ unsigned Count; // result
+#if __GNUC__ >= 4
+ // PowerPC is defined for __builtin_clz(0)
+#if !defined(__ppc__) && !defined(__ppc64__)
+ if (!Value) return 32;
+#endif
+ Count = __builtin_clz(Value);
+#else
+ if (!Value) return 32;
+ Count = 0;
+ // bisection method for count leading zeros
+ for (unsigned Shift = 32 >> 1; Shift; Shift >>= 1) {
+ uint32_t Tmp = Value >> Shift;
+ if (Tmp) {
+ Value = Tmp;
+ } else {
+ Count |= Shift;
+ }
+ }
+#endif
+ return Count;
+}
+
+/// CountLeadingOnes_32 - this function performs the operation of
+/// counting the number of ones from the most significant bit to the first zero
+/// bit. Ex. CountLeadingOnes_32(0xFF0FFF00) == 8.
+/// Returns 32 if the word is all ones.
+inline unsigned CountLeadingOnes_32(uint32_t Value) {
+ return CountLeadingZeros_32(~Value);
+}
+
+/// CountLeadingZeros_64 - This function performs the platform optimal form
+/// of counting the number of zeros from the most significant bit to the first
+/// one bit (64 bit edition.)
+/// Returns 64 if the word is zero.
+inline unsigned CountLeadingZeros_64(uint64_t Value) {
+ unsigned Count; // result
+#if __GNUC__ >= 4
+ // PowerPC is defined for __builtin_clzll(0)
+#if !defined(__ppc__) && !defined(__ppc64__)
+ if (!Value) return 64;
+#endif
+ Count = __builtin_clzll(Value);
+#else
+ if (sizeof(long) == sizeof(int64_t)) {
+ if (!Value) return 64;
+ Count = 0;
+ // bisection method for count leading zeros
+ for (unsigned Shift = 64 >> 1; Shift; Shift >>= 1) {
+ uint64_t Tmp = Value >> Shift;
+ if (Tmp) {
+ Value = Tmp;
+ } else {
+ Count |= Shift;
+ }
+ }
+ } else {
+ // get hi portion
+ uint32_t Hi = Hi_32(Value);
+
+ // if some bits in hi portion
+ if (Hi) {
+ // leading zeros in hi portion plus all bits in lo portion
+ Count = CountLeadingZeros_32(Hi);
+ } else {
+ // get lo portion
+ uint32_t Lo = Lo_32(Value);
+ // same as 32 bit value
+ Count = CountLeadingZeros_32(Lo)+32;
+ }
+ }
+#endif
+ return Count;
+}
+
+/// CountLeadingOnes_64 - This function performs the operation
+/// of counting the number of ones from the most significant bit to the first
+/// zero bit (64 bit edition.)
+/// Returns 64 if the word is all ones.
+inline unsigned CountLeadingOnes_64(uint64_t Value) {
+ return CountLeadingZeros_64(~Value);
+}
+
+/// CountTrailingZeros_32 - this function performs the platform optimal form of
+/// counting the number of zeros from the least significant bit to the first one
+/// bit. Ex. CountTrailingZeros_32(0xFF00FF00) == 8.
+/// Returns 32 if the word is zero.
+inline unsigned CountTrailingZeros_32(uint32_t Value) {
+#if __GNUC__ >= 4
+ return Value ? __builtin_ctz(Value) : 32;
+#else
+ static const unsigned Mod37BitPosition[] = {
+ 32, 0, 1, 26, 2, 23, 27, 0, 3, 16, 24, 30, 28, 11, 0, 13,
+ 4, 7, 17, 0, 25, 22, 31, 15, 29, 10, 12, 6, 0, 21, 14, 9,
+ 5, 20, 8, 19, 18
+ };
+ return Mod37BitPosition[(-Value & Value) % 37];
+#endif
+}
+
+/// CountTrailingOnes_32 - this function performs the operation of
+/// counting the number of ones from the least significant bit to the first zero
+/// bit. Ex. CountTrailingOnes_32(0x00FF00FF) == 8.
+/// Returns 32 if the word is all ones.
+inline unsigned CountTrailingOnes_32(uint32_t Value) {
+ return CountTrailingZeros_32(~Value);
+}
+
+/// CountTrailingZeros_64 - This function performs the platform optimal form
+/// of counting the number of zeros from the least significant bit to the first
+/// one bit (64 bit edition.)
+/// Returns 64 if the word is zero.
+inline unsigned CountTrailingZeros_64(uint64_t Value) {
+#if __GNUC__ >= 4
+ return Value ? __builtin_ctzll(Value) : 64;
+#else
+ static const unsigned Mod67Position[] = {
+ 64, 0, 1, 39, 2, 15, 40, 23, 3, 12, 16, 59, 41, 19, 24, 54,
+ 4, 64, 13, 10, 17, 62, 60, 28, 42, 30, 20, 51, 25, 44, 55,
+ 47, 5, 32, 65, 38, 14, 22, 11, 58, 18, 53, 63, 9, 61, 27,
+ 29, 50, 43, 46, 31, 37, 21, 57, 52, 8, 26, 49, 45, 36, 56,
+ 7, 48, 35, 6, 34, 33, 0
+ };
+ return Mod67Position[(-Value & Value) % 67];
+#endif
+}
+
+/// CountTrailingOnes_64 - This function performs the operation
+/// of counting the number of ones from the least significant bit to the first
+/// zero bit (64 bit edition.)
+/// Returns 64 if the word is all ones.
+inline unsigned CountTrailingOnes_64(uint64_t Value) {
+ return CountTrailingZeros_64(~Value);
+}
+
+/// CountPopulation_32 - this function counts the number of set bits in a value.
+/// Ex. CountPopulation(0xF000F000) = 8
+/// Returns 0 if the word is zero.
+inline unsigned CountPopulation_32(uint32_t Value) {
+#if __GNUC__ >= 4
+ return __builtin_popcount(Value);
+#else
+ uint32_t v = Value - ((Value >> 1) & 0x55555555);
+ v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
+ return ((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24;
+#endif
+}
+
+/// CountPopulation_64 - this function counts the number of set bits in a value,
+/// (64 bit edition.)
+inline unsigned CountPopulation_64(uint64_t Value) {
+#if __GNUC__ >= 4
+ return __builtin_popcountll(Value);
+#else
+ uint64_t v = Value - ((Value >> 1) & 0x5555555555555555ULL);
+ v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
+ v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
+ return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56);
+#endif
+}
+
+/// Log2_32 - This function returns the floor log base 2 of the specified value,
+/// -1 if the value is zero. (32 bit edition.)
+/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
+inline unsigned Log2_32(uint32_t Value) {
+ return 31 - CountLeadingZeros_32(Value);
+}
+
+/// Log2_64 - This function returns the floor log base 2 of the specified value,
+/// -1 if the value is zero. (64 bit edition.)
+inline unsigned Log2_64(uint64_t Value) {
+ return 63 - CountLeadingZeros_64(Value);
+}
+
+/// Log2_32_Ceil - This function returns the ceil log base 2 of the specified
+/// value, 32 if the value is zero. (32 bit edition).
+/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
+inline unsigned Log2_32_Ceil(uint32_t Value) {
+ return 32-CountLeadingZeros_32(Value-1);
+}
+
+/// Log2_64_Ceil - This function returns the ceil log base 2 of the specified
+/// value, 64 if the value is zero. (64 bit edition.)
+inline unsigned Log2_64_Ceil(uint64_t Value) {
+ return 64-CountLeadingZeros_64(Value-1);
+}
+
+/// GreatestCommonDivisor64 - Return the greatest common divisor of the two
+/// values using Euclid's algorithm.
+inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) {
+ while (B) {
+ uint64_t T = B;
+ B = A % B;
+ A = T;
+ }
+ return A;
+}
+
+/// BitsToDouble - This function takes a 64-bit integer and returns the bit
+/// equivalent double.
+inline double BitsToDouble(uint64_t Bits) {
+ union {
+ uint64_t L;
+ double D;
+ } T;
+ T.L = Bits;
+ return T.D;
+}
+
+/// BitsToFloat - This function takes a 32-bit integer and returns the bit
+/// equivalent float.
+inline float BitsToFloat(uint32_t Bits) {
+ union {
+ uint32_t I;
+ float F;
+ } T;
+ T.I = Bits;
+ return T.F;
+}
+
+/// DoubleToBits - This function takes a double and returns the bit
+/// equivalent 64-bit integer. Note that copying doubles around
+/// changes the bits of NaNs on some hosts, notably x86, so this
+/// routine cannot be used if these bits are needed.
+inline uint64_t DoubleToBits(double Double) {
+ union {
+ uint64_t L;
+ double D;
+ } T;
+ T.D = Double;
+ return T.L;
+}
+
+/// FloatToBits - This function takes a float and returns the bit
+/// equivalent 32-bit integer. Note that copying floats around
+/// changes the bits of NaNs on some hosts, notably x86, so this
+/// routine cannot be used if these bits are needed.
+inline uint32_t FloatToBits(float Float) {
+ union {
+ uint32_t I;
+ float F;
+ } T;
+ T.F = Float;
+ return T.I;
+}
+
+/// Platform-independent wrappers for the C99 isnan() function.
+int IsNAN(float f);
+int IsNAN(double d);
+
+/// Platform-independent wrappers for the C99 isinf() function.
+int IsInf(float f);
+int IsInf(double d);
+
+/// MinAlign - A and B are either alignments or offsets. Return the minimum
+/// alignment that may be assumed after adding the two together.
+inline uint64_t MinAlign(uint64_t A, uint64_t B) {
+ // The largest power of 2 that divides both A and B.
+ return (A | B) & -(A | B);
+}
+
+/// NextPowerOf2 - Returns the next power of two (in 64-bits)
+/// that is strictly greater than A. Returns zero on overflow.
+inline uint64_t NextPowerOf2(uint64_t A) {
+ A |= (A >> 1);
+ A |= (A >> 2);
+ A |= (A >> 4);
+ A |= (A >> 8);
+ A |= (A >> 16);
+ A |= (A >> 32);
+ return A + 1;
+}
+
+/// NextPowerOf2 - Returns the next power of two (in 32-bits)
+/// that is strictly greater than A. Returns zero on overflow.
+inline uint32_t NextPowerOf2(uint32_t A) {
+ A |= (A >> 1);
+ A |= (A >> 2);
+ A |= (A >> 4);
+ A |= (A >> 8);
+ A |= (A >> 16);
+ return A + 1;
+}
+
+/// Returns the next integer (mod 2**64) that is greater than or equal to
+/// \p Value and is a multiple of \p Align. \p Align must be non-zero.
+///
+/// Examples:
+/// \code
+/// RoundUpToAlignment(5, 8) = 8
+/// RoundUpToAlignment(17, 8) = 24
+/// RoundUpToAlignment(~0LL, 8) = 0
+/// \endcode
+inline uint64_t RoundUpToAlignment(uint64_t Value, uint64_t Align) {
+ return ((Value + Align - 1) / Align) * Align;
+}
+
+/// Returns the offset to the next integer (mod 2**64) that is greater than
+/// or equal to \p Value and is a multiple of \p Align. \p Align must be
+/// non-zero.
+inline uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) {
+ return RoundUpToAlignment(Value, Align) - Value;
+}
+
+/// abs64 - absolute value of a 64-bit int. Not all environments support
+/// "abs" on whatever their name for the 64-bit int type is. The absolute
+/// value of the largest negative number is undefined, as with "abs".
+inline int64_t abs64(int64_t x) {
+ return (x < 0) ? -x : x;
+}
+
+/// SignExtend32 - Sign extend B-bit number x to 32-bit int.
+/// Usage int32_t r = SignExtend32<5>(x);
+template <unsigned B> inline int32_t SignExtend32(uint32_t x) {
+ return int32_t(x << (32 - B)) >> (32 - B);
+}
+
+/// \brief Sign extend number in the bottom B bits of X to a 32-bit int.
+/// Requires 0 < B <= 32.
+inline int32_t SignExtend32(uint32_t X, unsigned B) {
+ return int32_t(X << (32 - B)) >> (32 - B);
+}
+
+/// SignExtend64 - Sign extend B-bit number x to 64-bit int.
+/// Usage int64_t r = SignExtend64<5>(x);
+template <unsigned B> inline int64_t SignExtend64(uint64_t x) {
+ return int64_t(x << (64 - B)) >> (64 - B);
+}
+
+/// \brief Sign extend number in the bottom B bits of X to a 64-bit int.
+/// Requires 0 < B <= 64.
+inline int64_t SignExtend64(uint64_t X, unsigned B) {
+ return int64_t(X << (64 - B)) >> (64 - B);
+}
+
+} // end namespace objc
+
+#endif
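// Illustrative worked example, not part of the patch: how the helpers above
// feed the bucket-sizing logic in the DenseMap changes earlier in this patch.
// The numbers are sample values only.
static void exampleMathExtras() {
  using namespace objc;
  // grow(AtLeast) rounds the request up to a power of two:
  //   NextPowerOf2(24u) == 32, NextPowerOf2(32u) == 64 (strictly greater).
  unsigned grown = (unsigned)NextPowerOf2((uint32_t)24);        // 32

  // shrink_and_clear() sizes the new table from the old entry count:
  //   Log2_32_Ceil(24) == 5, so it asks for 1 << (5 + 1) == 64 buckets.
  unsigned shrunk = 1u << (Log2_32_Ceil(24) + 1);               // 64
  (void)grown; (void)shrunk;
}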
//
//===----------------------------------------------------------------------===//
+// Taken from llvmCore-3425.0.31.
+
#ifndef LLVM_SUPPORT_TYPE_TRAITS_H
#define LLVM_SUPPORT_TYPE_TRAITS_H
+#include <cstddef>
#include <utility>
+#ifndef __has_feature
+#define LLVM_DEFINED_HAS_FEATURE
+#define __has_feature(x) 0
+#endif
+
// This is actually the conforming implementation which works with abstract
// classes. However, enough compilers have trouble with it that most will use
// the one in boost/type_traits/object_traits.hpp. This implementation actually
// is_class<> metafunction due to Paul Mensonides (leavings@attbi.com). For
// more details:
// http://groups.google.com/groups?hl=en&selm=000001c1cc83%24e154d5e0%247772e50c%40c161550a&rnum=1
- public:
- enum { value = sizeof(char) == sizeof(dont_use::is_class_helper<T>(0)) };
+public:
+ static const bool value =
+ sizeof(char) == sizeof(dont_use::is_class_helper<T>(0));
};
/// type can be copied around with memcpy instead of running ctors etc.
template <typename T>
struct isPodLike {
+#if __has_feature(is_trivially_copyable)
+ // If the compiler supports the is_trivially_copyable trait use it, as it
+ // matches the definition of isPodLike closely.
+ static const bool value = __is_trivially_copyable(T);
+#else
// If we don't know anything else, we can (at least) assume that all non-class
// types are PODs.
static const bool value = !is_class<T>::value;
+#endif
};
// std::pair's are pod-like if their elements are.
template<typename T, typename U>
struct isPodLike<std::pair<T, U> > {
- static const bool value = isPodLike<T>::value & isPodLike<U>::value;
+ static const bool value = isPodLike<T>::value && isPodLike<U>::value;
};
+template <class T, T v>
+struct integral_constant {
+ typedef T value_type;
+ static const value_type value = v;
+ typedef integral_constant<T,v> type;
+ operator value_type() { return value; }
+};
+
+typedef integral_constant<bool, true> true_type;
+typedef integral_constant<bool, false> false_type;
+
/// \brief Metafunction that determines whether the two given types are
/// equivalent.
-template<typename T, typename U>
-struct is_same {
- static const bool value = false;
+template<typename T, typename U> struct is_same : public false_type {};
+template<typename T> struct is_same<T, T> : public true_type {};
+
+/// \brief Metafunction that removes const qualification from a type.
+template <typename T> struct remove_const { typedef T type; };
+template <typename T> struct remove_const<const T> { typedef T type; };
+
+/// \brief Metafunction that removes volatile qualification from a type.
+template <typename T> struct remove_volatile { typedef T type; };
+template <typename T> struct remove_volatile<volatile T> { typedef T type; };
+
+/// \brief Metafunction that removes both const and volatile qualification from
+/// a type.
+template <typename T> struct remove_cv {
+ typedef typename remove_const<typename remove_volatile<T>::type>::type type;
};
-template<typename T>
-struct is_same<T, T> {
- static const bool value = true;
+/// \brief Helper to implement is_integral metafunction.
+template <typename T> struct is_integral_impl : false_type {};
+template <> struct is_integral_impl< bool> : true_type {};
+template <> struct is_integral_impl< char> : true_type {};
+template <> struct is_integral_impl< signed char> : true_type {};
+template <> struct is_integral_impl<unsigned char> : true_type {};
+template <> struct is_integral_impl< wchar_t> : true_type {};
+template <> struct is_integral_impl< short> : true_type {};
+template <> struct is_integral_impl<unsigned short> : true_type {};
+template <> struct is_integral_impl< int> : true_type {};
+template <> struct is_integral_impl<unsigned int> : true_type {};
+template <> struct is_integral_impl< long> : true_type {};
+template <> struct is_integral_impl<unsigned long> : true_type {};
+template <> struct is_integral_impl< long long> : true_type {};
+template <> struct is_integral_impl<unsigned long long> : true_type {};
+
+/// \brief Metafunction that determines whether the given type is an integral
+/// type.
+template <typename T>
+struct is_integral : is_integral_impl<T> {};
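+// Example (illustration only):
+//   is_integral<unsigned long>::value  // true
+//   is_integral<float>::value          // false
+//   is_integral<int *>::value          // false; pointers are not integral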
+
+/// \brief Metafunction to remove reference from a type.
+template <typename T> struct remove_reference { typedef T type; };
+template <typename T> struct remove_reference<T&> { typedef T type; };
+
+/// \brief Metafunction that determines whether the given type is a pointer
+/// type.
+template <typename T> struct is_pointer : false_type {};
+template <typename T> struct is_pointer<T*> : true_type {};
+template <typename T> struct is_pointer<T* const> : true_type {};
+template <typename T> struct is_pointer<T* volatile> : true_type {};
+template <typename T> struct is_pointer<T* const volatile> : true_type {};
+
+/// \brief Metafunction that determines whether the given type is either an
+/// integral type or an enumeration type.
+///
+/// Note that this accepts potentially more integral types than we whitelist
+/// above for is_integral because it is based on merely being convertible
+/// implicitly to an integral type.
+template <typename T> class is_integral_or_enum {
+ // Provide an overload which can be called with anything implicitly
+ // convertible to an unsigned long long. This should catch integer types and
+ // enumeration types at least. We blacklist classes with conversion operators
+ // below.
+ static double check_int_convertible(unsigned long long);
+ static char check_int_convertible(...);
+
+ typedef typename remove_reference<T>::type UnderlyingT;
+ static UnderlyingT &nonce_instance;
+
+public:
+ static const bool
+ value = (!is_class<UnderlyingT>::value && !is_pointer<UnderlyingT>::value &&
+ !is_same<UnderlyingT, float>::value &&
+ !is_same<UnderlyingT, double>::value &&
+ sizeof(char) != sizeof(check_int_convertible(nonce_instance)));
};
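+// How the trait above works: if UnderlyingT converts implicitly to unsigned
+// long long, overload resolution picks the double-returning
+// check_int_convertible, so sizeof(check_int_convertible(nonce_instance))
+// differs from sizeof(char); class, pointer, float and double types are then
+// excluded explicitly. Example (illustration only, with a hypothetical enum):
+//   enum Color { Red };
+//   is_integral_or_enum<Color>::value   // true
+//   is_integral_or_enum<double>::value  // false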
-
+
// enable_if_c - Enable/disable a template based on a metafunction
template<bool Cond, typename T = void>
struct enable_if_c {
}
+#ifdef LLVM_DEFINED_HAS_FEATURE
+#undef __has_feature
+#endif
+
#endif
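// Pointer hash note: malloc'd pointers are at least 8-byte aligned on LP64
// (4-byte on 32-bit), so the low bits carry no information and are shifted
// away before bucketing.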
static unsigned _mapPtrHash(NXMapTable *table, const void *key) {
#ifdef __LP64__
- return ((uintptr_t)key) >> 3;
+ return (unsigned)(((uintptr_t)key) >> 3);
#else
return ((uintptr_t)key) >> 2;
#endif
#ifndef OBJC_SUPER
#define OBJC_SUPER
+
+/// Specifies the superclass of an instance.
struct objc_super {
+ /// Specifies an instance of a class.
__unsafe_unretained id receiver;
+
+ /// Specifies the particular superclass of the instance to message.
#if !defined(__cplusplus) && !__OBJC2__
- __unsafe_unretained Class class; /* For compatibility with old objc-runtime.h header */
+ /* For compatibility with old objc-runtime.h header */
+ __unsafe_unretained Class class;
#else
__unsafe_unretained Class super_class;
#endif
OBJC_EXPORT void objc_msgSendSuper(void /* struct objc_super *super, SEL op, ... */ )
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
#else
+/**
+ * Sends a message with a simple return value to an instance of a class.
+ *
+ * @param self A pointer to the instance of the class that is to receive the message.
+ * @param op The selector of the method that handles the message.
+ * @param ...
+ * A variable argument list containing the arguments to the method.
+ *
+ * @return The return value of the method.
+ *
+ * @note When it encounters a method call, the compiler generates a call to one of the
+ * functions \c objc_msgSend, \c objc_msgSend_stret, \c objc_msgSendSuper, or \c objc_msgSendSuper_stret.
+ * Messages sent to an object’s superclass (using the \c super keyword) are sent using \c objc_msgSendSuper;
+ * other messages are sent using \c objc_msgSend. Methods that have data structures as return values
+ * are sent using \c objc_msgSendSuper_stret and \c objc_msgSend_stret.
+ */
OBJC_EXPORT id objc_msgSend(id self, SEL op, ...)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
+/**
+ * Sends a message with a simple return value to the superclass of an instance of a class.
+ *
+ * @param super A pointer to an \c objc_super data structure. Pass values identifying the
+ * context the message was sent to, including the instance of the class that is to receive the
+ * message and the superclass at which to start searching for the method implementation.
+ * @param op A pointer of type SEL. Pass the selector of the method that will handle the message.
+ * @param ...
+ * A variable argument list containing the arguments to the method.
+ *
+ * @return The return value of the method identified by \e op.
+ *
+ * @see objc_msgSend
+ */
OBJC_EXPORT id objc_msgSendSuper(struct objc_super *super, SEL op, ...)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
#endif
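+/* Example (illustration only): messaging a superclass directly through the
+ * typed prototype above, from an instance method of a hypothetical class
+ * MyClass (__OBJC2__ field naming):
+ *
+ *   struct objc_super s = { self, class_getSuperclass([MyClass class]) };
+ *   id desc = objc_msgSendSuper(&s, @selector(description));
+ */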
OBJC_EXPORT void objc_msgSendSuper_stret(void /* struct objc_super *super, SEL op, ... */ )
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
#else
+/**
+ * Sends a message with a data-structure return value to an instance of a class.
+ *
+ * @see objc_msgSend
+ */
OBJC_EXPORT void objc_msgSend_stret(id self, SEL op, ...)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
+/**
+ * Sends a message with a data-structure return value to the superclass of an instance of a class.
+ *
+ * @see objc_msgSendSuper
+ */
OBJC_EXPORT void objc_msgSendSuper_stret(struct objc_super *super, SEL op, ...)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
#endif
// OBJC_OLD_DISPATCH_PROTOTYPES
# if defined(__i386__)
+/**
+ * Sends a message with a floating-point return value to an instance of a class.
+ *
+ * @see objc_msgSend
+ * @note On the i386 platform, the ABI for functions returning a floating-point value is
+ * incompatible with that for functions returning an integral type. On the i386 platform, therefore,
+ * you must use \c objc_msgSend_fpret for functions returning non-integral type. For \c float or
+ * \c long \c double return types, cast the function to an appropriate function pointer type first.
+ */
OBJC_EXPORT double objc_msgSend_fpret(id self, SEL op, ...)
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0);
/* See also objc_msgSendv_fpret() below. */
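+/* Example (illustration only): casting the dispatcher to the matching
+ * function pointer type for a float-returning method (assumes an object that
+ * responds to a hypothetical -floatValue selector):
+ *
+ *   float f = ((float (*)(id, SEL))objc_msgSend_fpret)(obj, @selector(floatValue));
+ */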
# elif defined(__x86_64__)
-
+/**
+ * Sends a message with a floating-point return value to an instance of a class.
+ *
+ * @see objc_msgSend
+ */
OBJC_EXPORT long double objc_msgSend_fpret(id self, SEL op, ...)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
#if __OBJC2__ && defined(__x86_64__)
// objc_msgSend_fixup() is used for vtable-dispatchable call sites.
-OBJC_EXPORT id objc_msgSend_fixup(id self, SEL op, ...)
- __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_NA);
-OBJC_EXPORT void objc_msgSend_stret_fixup(id self, SEL op, ...)
- __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_NA);
-OBJC_EXPORT id objc_msgSendSuper2_fixup(struct objc_super *super, SEL op, ...)
- __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_NA);
-OBJC_EXPORT void objc_msgSendSuper2_stret_fixup(struct objc_super *super, SEL op,...)
- __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_NA);
-OBJC_EXPORT long double objc_msgSend_fpret_fixup(id self, SEL op, ...)
- __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_NA);
-# if __STDC_VERSION__ >= 199901L
-OBJC_EXPORT _Complex long double objc_msgSend_fp2ret_fixup(id self, SEL op, ...)
- __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_NA);
-# else
-OBJC_EXPORT void objc_msgSend_fp2ret_fixup(id self, SEL op, ...)
- __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_NA);
-# endif
+OBJC_EXPORT void objc_msgSend_fixup(void)
+ __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5, __MAC_10_8, __IPHONE_NA, __IPHONE_NA);
+OBJC_EXPORT void objc_msgSend_stret_fixup(void)
+ __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5, __MAC_10_8, __IPHONE_NA, __IPHONE_NA);
+OBJC_EXPORT void objc_msgSendSuper2_fixup(void)
+ __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5, __MAC_10_8, __IPHONE_NA, __IPHONE_NA);
+OBJC_EXPORT void objc_msgSendSuper2_stret_fixup(void)
+ __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5, __MAC_10_8, __IPHONE_NA, __IPHONE_NA);
+OBJC_EXPORT void objc_msgSend_fpret_fixup(void)
+ __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5, __MAC_10_8, __IPHONE_NA, __IPHONE_NA);
+OBJC_EXPORT void objc_msgSend_fp2ret_fixup(void)
+ __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5, __MAC_10_8, __IPHONE_NA, __IPHONE_NA);
#endif
/* C++-compatible exception handling. */
# define __has_extension __has_feature
#endif
+#ifndef __has_attribute
+# define __has_attribute(x) 0
+#endif
+
/*
* OBJC_API_VERSION 0 or undef: Tiger and earlier API only
#endif
+/* OBJC_ISA_AVAILABILITY: `isa` will be deprecated or unavailable
+ * in the future */
+#if !defined(OBJC_ISA_AVAILABILITY)
+# define OBJC_ISA_AVAILABILITY /* still available */
+#endif
+
+
/* OBJC2_UNAVAILABLE: unavailable in objc 2.0, deprecated in Leopard */
#if !defined(OBJC2_UNAVAILABLE)
# if __OBJC2__
# define OBJC2_UNAVAILABLE UNAVAILABLE_ATTRIBUTE
# else
-# define OBJC2_UNAVAILABLE DEPRECATED_IN_MAC_OS_X_VERSION_10_5_AND_LATER
+ /* plain C code also falls here, but this is close enough */
+# define OBJC2_UNAVAILABLE __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5,__MAC_10_5, __IPHONE_2_0,__IPHONE_2_0)
# endif
#endif
/* OBJC_ARC_UNAVAILABLE: unavailable with -fobjc-arc */
#if !defined(OBJC_ARC_UNAVAILABLE)
-# if __has_feature(objc_arr)
+# if __has_feature(objc_arc)
# if __has_extension(attribute_unavailable_with_message)
# define OBJC_ARC_UNAVAILABLE __attribute__((unavailable("not available in automatic reference counting mode")))
# else
# define OBJC_IMPORT extern
#endif
+#if !defined(OBJC_ROOT_CLASS)
+# if __has_attribute(objc_root_class)
+# define OBJC_ROOT_CLASS __attribute__((objc_root_class))
+# else
+# define OBJC_ROOT_CLASS
+# endif
+#endif
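+/* Example (illustration only): marking a hypothetical root class so the
+ * compiler does not warn about the missing superclass:
+ *
+ *   OBJC_ROOT_CLASS
+ *   @interface MyRootClass { Class isa; }
+ *   @end
+ */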
+
#ifndef __DARWIN_NULL
#define __DARWIN_NULL NULL
#endif
+++ /dev/null
-/*
- * Copyright (c) 2008 Apple Inc. All Rights Reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-
-#include "objc-auto.h"
-
-#ifndef OBJC_NO_GC
-
-#include <auto_zone.h>
-#include <objc/objc.h>
-#include <objc/runtime.h>
-#include "objc-auto-dump.h"
-#include "objc-private.h"
-#include <strings.h>
-
-/*
- * Utilities
- */
-
-static char myType() {
- char type = 0;
- if (sizeof(void *) == 8) type |= SixtyFour;
-#if __LITTLE_ENDIAN__
- type |= Little;
-#endif
- return type;
-}
-
-/*
- * Sigh, a mutable set.
- */
-
-typedef struct {
- long *items;
- long count;
- long capacity;
-} pointer_set_t;
-
-static pointer_set_t *new_pointer_set() {
- pointer_set_t *result = malloc_zone_malloc(_objc_internal_zone(), sizeof(pointer_set_t));
- result->items = calloc(64, sizeof(long));
- result->count = 0;
- result->capacity = 63; // last valid ptr, also mask
- return result;
-}
-
-static void pointer_set_grow(pointer_set_t *set);
-
-static void pointer_set_add(pointer_set_t *set, long ptr) {
- long hash = ptr & set->capacity;
- while (1) {
- if (!set->items[hash]) {
- set->items[hash] = ptr;
- ++set->count;
- if (set->count*3 > set->capacity*2)
- pointer_set_grow(set);
- return;
- }
- if (set->items[hash] == ptr) return;
- hash = (hash + 1) & set->capacity;
- }
-}
-
-static void pointer_set_grow(pointer_set_t *set) {
- long oldCapacity = set->capacity;
- long *oldItems = set->items;
- long i;
- set->count = 0;
- set->capacity = 2*(oldCapacity+1)-1;
- set->items = malloc_zone_calloc(_objc_internal_zone(), 2*(oldCapacity+1), sizeof(long));
- for (i = 0; i < oldCapacity; ++i)
- if (oldItems[i]) pointer_set_add(set, oldItems[i]);
- free(oldItems);
-}
-
-static void pointer_set_iterate(pointer_set_t *set, void (^block)(long item)) {
- long i;
- for (i = 0; i < set->capacity; ++i)
- if (set->items[i]) block(set->items[i]);
-}
-
-static void pointer_set_dispose(pointer_set_t *set) {
- free(set->items);
- free(set);
-}
-
-/*
- Quickly dump heap to a named file in a pretty raw format.
- */
-BOOL _objc_dumpHeap(auto_zone_t *zone, const char *filename) {
- // just write interesting info to disk
- int fd = secure_open(filename, O_WRONLY|O_CREAT, geteuid());
- if (fd < 0) return NO;
- FILE *fp = fdopen(fd, "w");
- if (fp == NULL) {
- return NO;
- }
-
- fwrite(HEADER, strlen(HEADER), 1, fp);
- char type2 = myType();
- fwrite(&type2, 1, 1, fp);
-
- // for each thread...
-
- // do registers first
- auto_zone_register_dump dump_registers = ^(const void *base, unsigned long byte_size) {
- char type = REGISTER;
- fwrite(&type, 1, 1, fp);
- //fwrite(REGISTER, strlen(REGISTER), 1, fp);
- fwrite(&byte_size, sizeof(byte_size), 1, fp);
- fwrite(base, byte_size, 1, fp);
- };
-
- // then stacks
- auto_zone_stack_dump dump_stack = ^(const void *base, unsigned long byte_size) {
- char type = THREAD;
- fwrite(&type, 1, 1, fp);
- //fwrite(THREAD, strlen(THREAD), 1, fp);
- fwrite(&byte_size, sizeof(byte_size), 1, fp);
- fwrite(base, byte_size, 1, fp);
- };
-
- // then locals
- void (^dump_local)(const void *, unsigned long, unsigned int, unsigned long) =
- ^(const void *address, unsigned long size, unsigned int layout, unsigned long refcount) {
- // just write the value - rely on it showing up again as a node later
- char type = LOCAL;
- fwrite(&type, 1, 1, fp);
- fwrite(&address, sizeof(address), 1, fp);
- };
-
-
-
- // roots
- auto_zone_root_dump dump_root = ^(const void **address) {
- char type = ROOT;
- fwrite(&type, 1, 1, fp);
- // write the address so that we can catch misregistered globals
- fwrite(&address, sizeof(address), 1, fp);
- // write content, even (?) if zero
- fwrite(address, sizeof(*address), 1, fp);
- };
-
- // the nodes
- pointer_set_t *classes = new_pointer_set();
- auto_zone_node_dump dump_node = ^(const void *address, unsigned long size, unsigned int layout, unsigned long refcount) {
- char type = NODE;
- fwrite(&type, 1, 1, fp);
- fwrite(&address, sizeof(address), 1, fp);
- fwrite(&size, sizeof(size), 1, fp);
- fwrite(&layout, sizeof(layout), 1, fp);
- fwrite(&refcount, sizeof(refcount), 1, fp);
- if ((layout & AUTO_UNSCANNED) != AUTO_UNSCANNED) {
- // now the nodes unfiltered content
- fwrite(address, size, 1, fp);
- }
- if ((layout & AUTO_OBJECT) == AUTO_OBJECT) {
- long theClass = *(long *)address;
- if (theClass) pointer_set_add(classes, theClass);
- }
- };
-
- // weak
- auto_zone_weak_dump dump_weak = ^(const void **address, const void *item) {
- char type = WEAK;
- fwrite(&type, 1, 1, fp);
- fwrite(&address, sizeof(address), 1, fp);
- fwrite(&item, sizeof(item), 1, fp);
- };
-
- auto_zone_dump(zone, dump_stack, dump_registers, dump_local, dump_root, dump_node, dump_weak);
-
- pointer_set_iterate(classes, ^(long class) {
- char type = CLASS;
- fwrite(&type, 1, 1, fp);
- fwrite(&class, sizeof(class), 1, fp); // write address so that we can map it from node isa's
- // classname (for grins)
- const char *className = class_getName((Class)class);
- unsigned int length = (int)strlen(className);
- fwrite(&length, sizeof(length), 1, fp); // n
- fwrite(className, length, 1, fp); // n bytes
- // strong layout
- const uint8_t *layout = class_getIvarLayout((Class)class);
- length = layout ? (int)strlen((char *)layout)+1 : 0; // format is <skipnibble><count nibble> ending with <0><0>
- fwrite(&length, sizeof(length), 1, fp); // n
- fwrite(layout, length, 1, fp); // n bytes
- // weak layout
- layout = class_getWeakIvarLayout((Class)class);
- length = layout ? (int)strlen((char *)layout)+1 : 0; // format is <skipnibble><count nibble> ending with <0><0>
- fwrite(&length, sizeof(length), 1, fp); // n
- fwrite(layout, length, 1, fp); // n bytes
- });
-
- {
- // end
- char type = END;
- fwrite(&type, 1, 1, fp);
- fclose(fp);
- pointer_set_dispose(classes);
- }
- return YES;
-}
-
-#endif
--- /dev/null
+/*
+ * Copyright (c) 2008 Apple Inc. All Rights Reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include "objc-config.h"
+
+#if SUPPORT_GC
+
+#include "objc-private.h"
+#include "objc-auto-dump.h"
+
+#include <auto_zone.h>
+#include <objc/objc.h>
+#include <objc/runtime.h>
+#include <strings.h>
+
+/*
+ * Utilities
+ */
+
+static char myType() {
+ char type = 0;
+ if (sizeof(void *) == 8) type |= SixtyFour;
+#if __LITTLE_ENDIAN__
+ type |= Little;
+#endif
+ return type;
+}
+
+/*
+ * Sigh, a mutable set.
+ */
+
+typedef struct {
+ long *items;
+ long count;
+ long capacity;
+} pointer_set_t;
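+// Open-addressed hash set of pointer-sized integers: `capacity` is a power of
+// two minus one and doubles as the probe mask, collisions use linear probing,
+// and the table grows once it is roughly two-thirds full.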
+
+static pointer_set_t *new_pointer_set() {
+ pointer_set_t *result = (pointer_set_t *)malloc_zone_malloc(_objc_internal_zone(), sizeof(pointer_set_t));
+ result->items = (long *)calloc(64, sizeof(long));
+ result->count = 0;
+ result->capacity = 63; // last valid ptr, also mask
+ return result;
+}
+
+static void pointer_set_grow(pointer_set_t *set);
+
+static void pointer_set_add(pointer_set_t *set, long ptr) {
+ long hash = ptr & set->capacity;
+ while (1) {
+ if (!set->items[hash]) {
+ set->items[hash] = ptr;
+ ++set->count;
+ if (set->count*3 > set->capacity*2)
+ pointer_set_grow(set);
+ return;
+ }
+ if (set->items[hash] == ptr) return;
+ hash = (hash + 1) & set->capacity;
+ }
+}
+
+static void pointer_set_grow(pointer_set_t *set) {
+ long oldCapacity = set->capacity;
+ long *oldItems = set->items;
+ long i;
+ set->count = 0;
+ set->capacity = 2*(oldCapacity+1)-1;
+ set->items = (long *)malloc_zone_calloc(_objc_internal_zone(), 2*(oldCapacity+1), sizeof(long));
+ for (i = 0; i < oldCapacity; ++i)
+ if (oldItems[i]) pointer_set_add(set, oldItems[i]);
+ free(oldItems);
+}
+
+static void pointer_set_iterate(pointer_set_t *set, void (^block)(long item)) {
+ long i;
+ for (i = 0; i < set->capacity; ++i)
+ if (set->items[i]) block(set->items[i]);
+}
+
+static void pointer_set_dispose(pointer_set_t *set) {
+ free(set->items);
+ free(set);
+}
+
+/*
+ Quickly dump heap to a named file in a pretty raw format.
+ */
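+/*
+ File layout written below: the HEADER magic, one byte recording word size and
+ endianness (the SixtyFour and Little flags), then a stream of records each
+ introduced by a one-byte tag (REGISTER, THREAD, LOCAL, ROOT, NODE, WEAK,
+ CLASS), terminated by a single END tag.
+ */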
+BOOL _objc_dumpHeap(auto_zone_t *zone, const char *filename) {
+ // just write interesting info to disk
+ int fd = secure_open(filename, O_WRONLY|O_CREAT, geteuid());
+ if (fd < 0) return NO;
+ FILE *fp = fdopen(fd, "w");
+ if (fp == NULL) {
+ return NO;
+ }
+
+ fwrite(HEADER, strlen(HEADER), 1, fp);
+ char type2 = myType();
+ fwrite(&type2, 1, 1, fp);
+
+ // for each thread...
+
+ // do registers first
+ auto_zone_register_dump dump_registers = ^(const void *base, unsigned long byte_size) {
+ char type = REGISTER;
+ fwrite(&type, 1, 1, fp);
+ //fwrite(REGISTER, strlen(REGISTER), 1, fp);
+ fwrite(&byte_size, sizeof(byte_size), 1, fp);
+ fwrite(base, byte_size, 1, fp);
+ };
+
+ // then stacks
+ auto_zone_stack_dump dump_stack = ^(const void *base, unsigned long byte_size) {
+ char type = THREAD;
+ fwrite(&type, 1, 1, fp);
+ //fwrite(THREAD, strlen(THREAD), 1, fp);
+ fwrite(&byte_size, sizeof(byte_size), 1, fp);
+ fwrite(base, byte_size, 1, fp);
+ };
+
+ // then locals
+ void (^dump_local)(const void *, unsigned long, unsigned int, unsigned long) =
+ ^(const void *address, unsigned long size, unsigned int layout, unsigned long refcount) {
+ // just write the value - rely on it showing up again as a node later
+ char type = LOCAL;
+ fwrite(&type, 1, 1, fp);
+ fwrite(&address, sizeof(address), 1, fp);
+ };
+
+
+
+ // roots
+ auto_zone_root_dump dump_root = ^(const void **address) {
+ char type = ROOT;
+ fwrite(&type, 1, 1, fp);
+ // write the address so that we can catch misregistered globals
+ fwrite(&address, sizeof(address), 1, fp);
+ // write content, even (?) if zero
+ fwrite(address, sizeof(*address), 1, fp);
+ };
+
+ // the nodes
+ pointer_set_t *classes = new_pointer_set();
+ auto_zone_node_dump dump_node = ^(const void *address, unsigned long size, unsigned int layout, unsigned long refcount) {
+ char type = NODE;
+ fwrite(&type, 1, 1, fp);
+ fwrite(&address, sizeof(address), 1, fp);
+ fwrite(&size, sizeof(size), 1, fp);
+ fwrite(&layout, sizeof(layout), 1, fp);
+ fwrite(&refcount, sizeof(refcount), 1, fp);
+ if ((layout & AUTO_UNSCANNED) != AUTO_UNSCANNED) {
+ // now the node's unfiltered content
+ fwrite(address, size, 1, fp);
+ }
+ if ((layout & AUTO_OBJECT) == AUTO_OBJECT) {
+ long theClass = *(long *)address;
+ if (theClass) pointer_set_add(classes, theClass);
+ }
+ };
+
+ // weak
+ auto_zone_weak_dump dump_weak = ^(const void **address, const void *item) {
+ char type = WEAK;
+ fwrite(&type, 1, 1, fp);
+ fwrite(&address, sizeof(address), 1, fp);
+ fwrite(&item, sizeof(item), 1, fp);
+ };
+
+ auto_zone_dump(zone, dump_stack, dump_registers, dump_local, dump_root, dump_node, dump_weak);
+
+ pointer_set_iterate(classes, ^(long cls) {
+ char type = CLASS;
+ fwrite(&type, 1, 1, fp);
+ fwrite(&cls, sizeof(cls), 1, fp); // write address so that we can map it from node isa's
+ // classname (for grins)
+ const char *className = class_getName((Class)cls);
+ unsigned int length = (int)strlen(className);
+ fwrite(&length, sizeof(length), 1, fp); // n
+ fwrite(className, length, 1, fp); // n bytes
+ // strong layout
+ const uint8_t *layout = class_getIvarLayout((Class)cls);
+ length = layout ? (int)strlen((char *)layout)+1 : 0; // format is <skipnibble><count nibble> ending with <0><0>
+ fwrite(&length, sizeof(length), 1, fp); // n
+ fwrite(layout, length, 1, fp); // n bytes
+ // weak layout
+ layout = class_getWeakIvarLayout((Class)cls);
+ length = layout ? (int)strlen((char *)layout)+1 : 0; // format is <skipnibble><count nibble> ending with <0><0>
+ fwrite(&length, sizeof(length), 1, fp); // n
+ fwrite(layout, length, 1, fp); // n bytes
+ });
+
+ {
+ // end
+ char type = END;
+ fwrite(&type, 1, 1, fp);
+ fclose(fp);
+ pointer_set_dispose(classes);
+ }
+ return YES;
+}
+
+#endif
#include <stddef.h>
#include <string.h>
#include <Availability.h>
-#include <AvailabilityMacros.h>
#include <TargetConditionals.h>
#if !TARGET_OS_WIN32
// Deprecated. Tells runtime to issue finalize calls on the main thread only.
OBJC_EXPORT void objc_finalizeOnMainThread(Class cls)
- AVAILABLE_MAC_OS_X_VERSION_10_5_AND_LATER_BUT_DEPRECATED;
+ __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5,__MAC_10_5, __IPHONE_NA,__IPHONE_NA);
//
/* Deprecated. Use class_createInstance() instead. */
OBJC_EXPORT id objc_allocate_object(Class cls, int extra)
- AVAILABLE_MAC_OS_X_VERSION_10_4_AND_LATER_BUT_DEPRECATED;
+__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_4,__MAC_10_4, __IPHONE_NA,__IPHONE_NA);
/* !defined(OBJC_NO_GC) */
static OBJC_INLINE void objc_setCollectionRatio(size_t ratio __unused) { }
static OBJC_INLINE void objc_startCollectorThread(void) { }
-#if __has_feature(objc_arr)
+#if __has_feature(objc_arc)
/* Covers for GC memory operations are unavailable in ARC */
+++ /dev/null
-/*
- * Copyright (c) 2004-2007 Apple Inc. All rights reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-
-#include "objc-config.h"
-#include "objc-auto.h"
-#include "objc-accessors.h"
-
-#ifndef OBJC_NO_GC
-
-#include <stdint.h>
-#include <stdbool.h>
-#include <fcntl.h>
-#include <dlfcn.h>
-#include <mach/mach.h>
-#include <mach-o/dyld.h>
-#include <mach-o/nlist.h>
-#include <sys/types.h>
-#include <sys/mman.h>
-#include <libkern/OSAtomic.h>
-#include <auto_zone.h>
-
-#include <Block_private.h>
-#include <dispatch/private.h>
-
-#include "objc-private.h"
-#include "objc-references.h"
-#include "maptable.h"
-#include "message.h"
-#include "objc-gdb.h"
-
-#if !defined(NDEBUG) && !__OBJC2__
-#include "objc-exception.h"
-#endif
-
-
-static auto_zone_t *gc_zone_init(BOOL wantsCompaction);
-static void gc_block_init(void);
-static void registeredClassTableInit(void);
-static BOOL objc_isRegisteredClass(Class candidate);
-
-BOOL UseGC = NO;
-BOOL UseCompaction = NO;
-static BOOL WantsMainThreadFinalization = NO;
-
-auto_zone_t *gc_zone = NULL;
-
-// Pointer magic to make dyld happy. See notes in objc-private.h
-id (*objc_assign_ivar_internal)(id, id, ptrdiff_t) = objc_assign_ivar;
-
-
-/* Method prototypes */
-@interface DoesNotExist
-- (const char *)UTF8String;
-- (id)description;
-@end
-
-
-/***********************************************************************
-* Break-on-error functions
-**********************************************************************/
-
-BREAKPOINT_FUNCTION(
- void objc_assign_ivar_error(id base, ptrdiff_t offset)
-);
-
-BREAKPOINT_FUNCTION(
- void objc_assign_global_error(id value, id *slot)
-);
-
-BREAKPOINT_FUNCTION(
- void objc_exception_during_finalize_error(void)
-);
-
-/***********************************************************************
-* Utility exports
-* Called by various libraries.
-**********************************************************************/
-
-OBJC_EXPORT void objc_set_collection_threshold(size_t threshold) { // Old naming
- if (UseGC) {
- auto_collection_parameters(gc_zone)->collection_threshold = threshold;
- }
-}
-
-OBJC_EXPORT void objc_setCollectionThreshold(size_t threshold) {
- if (UseGC) {
- auto_collection_parameters(gc_zone)->collection_threshold = threshold;
- }
-}
-
-void objc_setCollectionRatio(size_t ratio) {
- if (UseGC) {
- auto_collection_parameters(gc_zone)->full_vs_gen_frequency = ratio;
- }
-}
-
-void objc_set_collection_ratio(size_t ratio) { // old naming
- if (UseGC) {
- auto_collection_parameters(gc_zone)->full_vs_gen_frequency = ratio;
- }
-}
-
-void objc_finalizeOnMainThread(Class cls) {
- if (UseGC) {
- WantsMainThreadFinalization = YES;
- _class_setFinalizeOnMainThread(cls);
- }
-}
-
-// stack based data structure queued if/when there is main-thread-only finalization work TBD
-typedef struct BatchFinalizeBlock {
- auto_zone_foreach_object_t foreach;
- auto_zone_cursor_t cursor;
- size_t cursor_size;
- volatile BOOL finished;
- volatile BOOL started;
- struct BatchFinalizeBlock *next;
-} BatchFinalizeBlock_t;
-
-// The Main Thread Finalization Work Queue Head
-static struct {
- pthread_mutex_t mutex;
- pthread_cond_t condition;
- BatchFinalizeBlock_t *head;
- BatchFinalizeBlock_t *tail;
-} MainThreadWorkQ;
-
-
-void objc_startCollectorThread(void) {
-}
-
-void objc_start_collector_thread(void) {
-}
-
-static void batchFinalizeOnMainThread(void);
-
-void objc_collect(unsigned long options) {
- if (!UseGC) return;
- BOOL onMainThread = pthread_main_np() ? YES : NO;
-
- // while we're here, sneak off and do some finalization work (if any)
- if (onMainThread) batchFinalizeOnMainThread();
- // now on with our normally scheduled programming
- auto_zone_options_t amode = AUTO_ZONE_COLLECT_NO_OPTIONS;
- if (!(options & OBJC_COLLECT_IF_NEEDED)) {
- switch (options & 0x3) {
- case OBJC_RATIO_COLLECTION: amode = AUTO_ZONE_COLLECT_RATIO_COLLECTION; break;
- case OBJC_GENERATIONAL_COLLECTION: amode = AUTO_ZONE_COLLECT_GENERATIONAL_COLLECTION; break;
- case OBJC_FULL_COLLECTION: amode = AUTO_ZONE_COLLECT_FULL_COLLECTION; break;
- case OBJC_EXHAUSTIVE_COLLECTION: amode = AUTO_ZONE_COLLECT_EXHAUSTIVE_COLLECTION; break;
- }
- amode |= AUTO_ZONE_COLLECT_COALESCE;
- amode |= AUTO_ZONE_COLLECT_LOCAL_COLLECTION;
- }
- if (options & OBJC_WAIT_UNTIL_DONE) {
- __block BOOL done = NO;
- // If executing on the main thread, use the main thread work queue condition to block,
- // so main thread finalization can complete. Otherwise, use a thread-local condition.
- pthread_mutex_t localMutex = PTHREAD_MUTEX_INITIALIZER, *mutex = &localMutex;
- pthread_cond_t localCondition = PTHREAD_COND_INITIALIZER, *condition = &localCondition;
- if (onMainThread) {
- mutex = &MainThreadWorkQ.mutex;
- condition = &MainThreadWorkQ.condition;
- }
- pthread_mutex_lock(mutex);
- auto_zone_collect_and_notify(gc_zone, amode, dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
- pthread_mutex_lock(mutex);
- done = YES;
- pthread_cond_signal(condition);
- pthread_mutex_unlock(mutex);
- });
- while (!done) {
- pthread_cond_wait(condition, mutex);
- if (onMainThread && MainThreadWorkQ.head) {
- pthread_mutex_unlock(mutex);
- batchFinalizeOnMainThread();
- pthread_mutex_lock(mutex);
- }
- }
- pthread_mutex_unlock(mutex);
- } else {
- auto_zone_collect(gc_zone, amode);
- }
-}
-
-
-// USED BY CF & ONE OTHER
-BOOL objc_isAuto(id object)
-{
- return UseGC && auto_zone_is_valid_pointer(gc_zone, object) != 0;
-}
-
-
-BOOL objc_collectingEnabled(void)
-{
- return UseGC;
-}
-
-BOOL objc_collecting_enabled(void) // Old naming
-{
- return UseGC;
-}
-
-malloc_zone_t *objc_collectableZone(void) {
- return gc_zone;
-}
-
-BOOL objc_dumpHeap(char *filenamebuffer, unsigned long length) {
- static int counter = 0;
- ++counter;
- char buffer[1024];
- sprintf(buffer, OBJC_HEAP_DUMP_FILENAME_FORMAT, getpid(), counter);
- if (!_objc_dumpHeap(gc_zone, buffer)) return NO;
- if (filenamebuffer) {
- unsigned long blen = strlen(buffer);
- if (blen < length)
- strncpy(filenamebuffer, buffer, blen+1);
- else if (length > 0)
- filenamebuffer[0] = 0; // give some answer
- }
- return YES;
-}
-
-
-/***********************************************************************
-* Memory management.
-* Called by CF and Foundation.
-**********************************************************************/
-
-// Allocate an object in the GC zone, with the given number of extra bytes.
-id objc_allocate_object(Class cls, int extra)
-{
- return class_createInstance(cls, extra);
-}
-
-
-/***********************************************************************
-* Write barrier implementations, optimized for when GC is known to be on
-* Called by the write barrier exports only.
-* These implementations assume GC is on. The exported function must
-* either perform the check itself or be conditionally stomped at
-* startup time.
-**********************************************************************/
-
-id objc_assign_strongCast_gc(id value, id *slot) {
- if (!auto_zone_set_write_barrier(gc_zone, (void*)slot, value)) { // stores & returns true if slot points into GC allocated memory
- auto_zone_root_write_barrier(gc_zone, slot, value); // always stores
- }
- return value;
-}
-
-id objc_assign_global_gc(id value, id *slot) {
- // use explicit root registration.
- if (value && auto_zone_is_valid_pointer(gc_zone, value)) {
- if (auto_zone_is_finalized(gc_zone, value)) {
- _objc_inform("GC: storing an already collected object %p into global memory at %p, break on objc_assign_global_error to debug\n", value, slot);
- objc_assign_global_error(value, slot);
- }
- auto_zone_add_root(gc_zone, slot, value);
- }
- else
- *slot = value;
-
- return value;
-}
-
-id objc_assign_threadlocal_gc(id value, id *slot)
-{
- if (value && auto_zone_is_valid_pointer(gc_zone, value)) {
- auto_zone_add_root(gc_zone, slot, value);
- }
- else {
- *slot = value;
- }
-
- return value;
-}
-
-id objc_assign_ivar_gc(id value, id base, ptrdiff_t offset)
-{
- id *slot = (id*) ((char *)base + offset);
-
- if (value) {
- if (!auto_zone_set_write_barrier(gc_zone, (char *)base + offset, value)) {
- _objc_inform("GC: %p + %tu isn't in the auto_zone, break on objc_assign_ivar_error to debug.\n", base, offset);
- objc_assign_ivar_error(base, offset);
- }
- }
- else
- *slot = value;
-
- return value;
-}
-
-id objc_assign_strongCast_non_gc(id value, id *slot) {
- return (*slot = value);
-}
-
-id objc_assign_global_non_gc(id value, id *slot) {
- return (*slot = value);
-}
-
-id objc_assign_threadlocal_non_gc(id value, id *slot) {
- return (*slot = value);
-}
-
-id objc_assign_ivar_non_gc(id value, id base, ptrdiff_t offset) {
- id *slot = (id*) ((char *)base + offset);
- return (*slot = value);
-}
-
-/***********************************************************************
-* Write barrier exports
-* Called by pretty much all GC-supporting code.
-**********************************************************************/
-
-id objc_assign_strongCast(id value, id *dest)
-{
- if (UseGC) {
- return objc_assign_strongCast_gc(value, dest);
- } else {
- return (*dest = value);
- }
-}
-
-id objc_assign_global(id value, id *dest)
-{
- if (UseGC) {
- return objc_assign_global_gc(value, dest);
- } else {
- return (*dest = value);
- }
-}
-
-id objc_assign_threadlocal(id value, id *dest)
-{
- if (UseGC) {
- return objc_assign_threadlocal_gc(value, dest);
- } else {
- return (*dest = value);
- }
-}
-
-id objc_assign_ivar(id value, id dest, ptrdiff_t offset)
-{
- if (UseGC) {
- return objc_assign_ivar_gc(value, dest, offset);
- } else {
- id *slot = (id*) ((char *)dest + offset);
- return (*slot = value);
- }
-}
-
-#if __LP64__
- #define LC_SEGMENT_COMMAND LC_SEGMENT_64
- #define LC_ROUTINES_COMMAND LC_ROUTINES_64
- typedef struct mach_header_64 macho_header;
- typedef struct section_64 macho_section;
- typedef struct nlist_64 macho_nlist;
- typedef struct segment_command_64 macho_segment_command;
-#else
- #define LC_SEGMENT_COMMAND LC_SEGMENT
- #define LC_ROUTINES_COMMAND LC_ROUTINES
- typedef struct mach_header macho_header;
- typedef struct section macho_section;
- typedef struct nlist macho_nlist;
- typedef struct segment_command macho_segment_command;
-#endif
-
-void _objc_update_stubs_in_mach_header(const struct mach_header* mh, uint32_t symbol_count, const char *symbols[], void *functions[]) {
- uint32_t cmd_index, cmd_count = mh->ncmds;
- intptr_t slide = 0;
- const struct load_command* const cmds = (struct load_command*)((char*)mh + sizeof(macho_header));
- const struct load_command* cmd;
- const uint8_t *linkEditBase = NULL;
- const macho_nlist *symbolTable = NULL;
- uint32_t symbolTableCount = 0;
- const char *stringTable = NULL;
- uint32_t stringTableSize = 0;
- const uint32_t *indirectSymbolTable = NULL;
- uint32_t indirectSymbolTableCount = 0;
-
- // first pass at load commands gets linkEditBase
- for (cmd = cmds, cmd_index = 0; cmd_index < cmd_count; ++cmd_index) {
- if ( cmd->cmd == LC_SEGMENT_COMMAND ) {
- const macho_segment_command* seg = (macho_segment_command*)cmd;
- if ( strcmp(seg->segname,"__TEXT") == 0 )
- slide = (uintptr_t)mh - seg->vmaddr;
- else if ( strcmp(seg->segname,"__LINKEDIT") == 0 )
- linkEditBase = (uint8_t*)(seg->vmaddr + slide - seg->fileoff);
- }
- cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
- }
-
- for (cmd = cmds, cmd_index = 0; cmd_index < cmd_count; ++cmd_index) {
- switch ( cmd->cmd ) {
- case LC_SYMTAB:
- {
- const struct symtab_command* symtab = (struct symtab_command*)cmd;
- symbolTableCount = symtab->nsyms;
- symbolTable = (macho_nlist*)(&linkEditBase[symtab->symoff]);
- stringTableSize = symtab->strsize;
- stringTable = (const char*)&linkEditBase[symtab->stroff];
- }
- break;
- case LC_DYSYMTAB:
- {
- const struct dysymtab_command* dsymtab = (struct dysymtab_command*)cmd;
- indirectSymbolTableCount = dsymtab->nindirectsyms;
- indirectSymbolTable = (uint32_t*)(&linkEditBase[dsymtab->indirectsymoff]);
- }
- break;
- }
- cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
- }
-
- // walk sections to find one with this lazy pointer
- for (cmd = cmds, cmd_index = 0; cmd_index < cmd_count; ++cmd_index) {
- if (cmd->cmd == LC_SEGMENT_COMMAND) {
- const macho_segment_command* seg = (macho_segment_command*)cmd;
- const macho_section* const sectionsStart = (macho_section*)((char*)seg + sizeof(macho_segment_command));
- const macho_section* const sectionsEnd = &sectionsStart[seg->nsects];
- const macho_section* sect;
- for (sect = sectionsStart; sect < sectionsEnd; ++sect) {
- const uint8_t type = sect->flags & SECTION_TYPE;
- if (type == S_LAZY_DYLIB_SYMBOL_POINTERS || type == S_LAZY_SYMBOL_POINTERS) { // S_LAZY_DYLIB_SYMBOL_POINTERS
- uint32_t pointer_index, pointer_count = (uint32_t)(sect->size / sizeof(uintptr_t));
- uintptr_t* const symbolPointers = (uintptr_t*)(sect->addr + slide);
- for (pointer_index = 0; pointer_index < pointer_count; ++pointer_index) {
- const uint32_t indirectTableOffset = sect->reserved1;
- if ((indirectTableOffset + pointer_index) < indirectSymbolTableCount) {
- uint32_t symbolIndex = indirectSymbolTable[indirectTableOffset + pointer_index];
- // if symbolIndex is INDIRECT_SYMBOL_LOCAL or INDIRECT_SYMBOL_LOCAL|INDIRECT_SYMBOL_ABS, then it will
- // by definition be >= symbolTableCount.
- if (symbolIndex < symbolTableCount) {
- // found symbol for this lazy pointer, now lookup address
- uint32_t stringTableOffset = symbolTable[symbolIndex].n_un.n_strx;
- if (stringTableOffset < stringTableSize) {
- const char* symbolName = &stringTable[stringTableOffset];
- uint32_t i;
- for (i = 0; i < symbol_count; ++i) {
- if (strcmp(symbols[i], symbolName) == 0) {
- symbolPointers[pointer_index] = (uintptr_t)functions[i];
- break;
- }
- }
- }
- }
- }
- }
- }
- }
- }
- cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
- }
-}
-
-void *objc_memmove_collectable(void *dst, const void *src, size_t size)
-{
- if (UseGC) {
- return auto_zone_write_barrier_memmove(gc_zone, dst, src, size);
- } else {
- return memmove(dst, src, size);
- }
-}
-
-BOOL objc_atomicCompareAndSwapPtr(id predicate, id replacement, volatile id *objectLocation) {
- const BOOL issueMemoryBarrier = NO;
- if (UseGC)
- return auto_zone_atomicCompareAndSwapPtr(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, issueMemoryBarrier);
- else
- return OSAtomicCompareAndSwapPtr((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
-}
-
-BOOL objc_atomicCompareAndSwapPtrBarrier(id predicate, id replacement, volatile id *objectLocation) {
- const BOOL issueMemoryBarrier = YES;
- if (UseGC)
- return auto_zone_atomicCompareAndSwapPtr(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, issueMemoryBarrier);
- else
- return OSAtomicCompareAndSwapPtrBarrier((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
-}
-
-BOOL objc_atomicCompareAndSwapGlobal(id predicate, id replacement, volatile id *objectLocation) {
- const BOOL isGlobal = YES;
- const BOOL issueMemoryBarrier = NO;
- if (UseGC)
- return auto_zone_atomicCompareAndSwap(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, isGlobal, issueMemoryBarrier);
- else
- return OSAtomicCompareAndSwapPtr((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
-}
-
-BOOL objc_atomicCompareAndSwapGlobalBarrier(id predicate, id replacement, volatile id *objectLocation) {
- const BOOL isGlobal = YES;
- const BOOL issueMemoryBarrier = YES;
- if (UseGC)
- return auto_zone_atomicCompareAndSwap(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, isGlobal, issueMemoryBarrier);
- else
- return OSAtomicCompareAndSwapPtrBarrier((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
-}
-
-BOOL objc_atomicCompareAndSwapInstanceVariable(id predicate, id replacement, volatile id *objectLocation) {
- const BOOL isGlobal = NO;
- const BOOL issueMemoryBarrier = NO;
- if (UseGC)
- return auto_zone_atomicCompareAndSwap(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, isGlobal, issueMemoryBarrier);
- else
- return OSAtomicCompareAndSwapPtr((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
-}
-
-BOOL objc_atomicCompareAndSwapInstanceVariableBarrier(id predicate, id replacement, volatile id *objectLocation) {
- const BOOL isGlobal = NO;
- const BOOL issueMemoryBarrier = YES;
- if (UseGC)
- return auto_zone_atomicCompareAndSwap(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, isGlobal, issueMemoryBarrier);
- else
- return OSAtomicCompareAndSwapPtrBarrier((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
-}
-
-
-/***********************************************************************
-* Weak ivar support
-**********************************************************************/
-
-id objc_read_weak_gc(id *location) {
- id result = *location;
- if (result) {
- result = auto_read_weak_reference(gc_zone, (void **)location);
- }
- return result;
-}
-
-id objc_read_weak_non_gc(id *location) {
- return *location;
-}
-
-id objc_read_weak(id *location) {
- id result = *location;
- if (UseGC && result) {
- result = auto_read_weak_reference(gc_zone, (void **)location);
- }
- return result;
-}
-
-id objc_assign_weak_gc(id value, id *location) {
- auto_assign_weak_reference(gc_zone, value, (const void **)location, NULL);
- return value;
-}
-
-id objc_assign_weak_non_gc(id value, id *location) {
- return (*location = value);
-}
-
-id objc_assign_weak(id value, id *location) {
- if (UseGC) {
- auto_assign_weak_reference(gc_zone, value, (const void **)location, NULL);
- }
- else {
- *location = value;
- }
- return value;
-}
-
-void gc_fixup_weakreferences(id newObject, id oldObject) {
- // fix up weak references if any.
- const unsigned char *weakLayout = (const unsigned char *)class_getWeakIvarLayout(_object_getClass(newObject));
- if (weakLayout) {
- void **newPtr = (void **)newObject, **oldPtr = (void **)oldObject;
- unsigned char byte;
- while ((byte = *weakLayout++)) {
- unsigned skips = (byte >> 4);
- unsigned weaks = (byte & 0x0F);
- newPtr += skips, oldPtr += skips;
- while (weaks--) {
- *newPtr = NULL;
- auto_assign_weak_reference(gc_zone, auto_read_weak_reference(gc_zone, oldPtr), (const void **)newPtr, NULL);
- ++newPtr, ++oldPtr;
- }
- }
- }
-}
-
-/***********************************************************************
-* Testing tools
-* Used to isolate resurrection of garbage objects during finalization.
-**********************************************************************/
-BOOL objc_is_finalized(void *ptr) {
- if (ptr != NULL && UseGC) {
- return auto_zone_is_finalized(gc_zone, ptr);
- }
- return NO;
-}
-
-
-/***********************************************************************
-* Stack clearing.
-* Used by top-level thread loops to reduce false pointers from the stack.
-**********************************************************************/
-void objc_clear_stack(unsigned long options) {
- if (!UseGC) return;
- auto_zone_clear_stack(gc_zone, 0);
-}
-
-
-/***********************************************************************
-* Finalization support
-**********************************************************************/
-
-// Finalizer crash debugging
-static void *finalizing_object;
-
-// finalize a single object without fuss
-// When there are no main-thread-only classes this is used directly
-// Otherwise, it is used indirectly by smarter code that knows main-thread-affinity requirements
-static void finalizeOneObject(void *obj, void *ignored) {
- id object = (id)obj;
- finalizing_object = obj;
-
- Class cls = object_getClass(obj);
- CRSetCrashLogMessage2(class_getName(cls));
-
- /// call -finalize method.
- ((void(*)(id, SEL))objc_msgSend)(object, @selector(finalize));
-
- // Call C++ destructors.
- // This would be objc_destructInstance() but for performance.
- if (_class_hasCxxStructors(cls)) {
- object_cxxDestruct(object);
- }
-
- finalizing_object = NULL;
- CRSetCrashLogMessage2(NULL);
-}
-
-// finalize object only if it is a main-thread-only object.
-// Called only from the main thread.
-static void finalizeOneMainThreadOnlyObject(void *obj, void *arg) {
- id object = (id)obj;
- Class cls = _object_getClass(object);
- if (cls == NULL) {
- _objc_fatal("object with NULL ISA passed to finalizeOneMainThreadOnlyObject: %p\n", obj);
- }
- if (_class_shouldFinalizeOnMainThread(cls)) {
- finalizeOneObject(obj, NULL);
- }
-}
-
-// finalize one object only if it is not a main-thread-only object
-// called from any other thread than the main thread
-// Important: if a main-thread-only object is passed, return that fact in the needsMain argument
-static void finalizeOneAnywhereObject(void *obj, void *needsMain) {
- id object = (id)obj;
- Class cls = _object_getClass(object);
- bool *needsMainThreadWork = needsMain;
- if (cls == NULL) {
- _objc_fatal("object with NULL ISA passed to finalizeOneAnywhereObject: %p\n", obj);
- }
- if (!_class_shouldFinalizeOnMainThread(cls)) {
- finalizeOneObject(obj, NULL);
- }
- else {
- *needsMainThreadWork = true;
- }
-}
-
-
-// Utility workhorse.
-// Set up the expensive @try block and ask the collector to hand the next object to
-// our finalizeAnObject function.
-// Track and return a boolean that records whether or not any main thread work is necessary.
-// (When we know that there are no main thread only objects then the boolean isn't even computed)
-static bool batchFinalize(auto_zone_t *zone,
- auto_zone_foreach_object_t foreach,
- auto_zone_cursor_t cursor,
- size_t cursor_size,
- void (*finalizeAnObject)(void *, void*))
-{
-#if !defined(NDEBUG) && !__OBJC2__
- // debug: don't call try/catch before exception handlers are installed
- objc_exception_functions_t table = {};
- objc_exception_get_functions(&table);
- assert(table.throw_exc);
-#endif
-
- bool needsMainThreadWork = false;
- for (;;) {
- @try {
- foreach(cursor, finalizeAnObject, &needsMainThreadWork);
- // non-exceptional return means finalization is complete.
- break;
- }
- @catch (id exception) {
- // whoops, note exception, then restart at cursor's position
- _objc_inform("GC: -finalize resulted in an exception (%p) being thrown, break on objc_exception_during_finalize_error to debug\n\t%s", exception, (const char*)[[exception description] UTF8String]);
- objc_exception_during_finalize_error();
- }
- @catch (...) {
- // whoops, note exception, then restart at cursor's position
- _objc_inform("GC: -finalize resulted in an exception being thrown, break on objc_exception_during_finalize_error to debug");
- objc_exception_during_finalize_error();
- }
- }
- return needsMainThreadWork;
-}
-
-// Called on main thread-only.
-// Pick up work from global queue.
-// called parasitically by anyone requesting a collection
-// called explicitly when there is known to be main thread only finalization work
-// In both cases we are on the main thread
-// Guard against recursion by something called from a finalizer
-static void batchFinalizeOnMainThread() {
- pthread_mutex_lock(&MainThreadWorkQ.mutex);
- if (!MainThreadWorkQ.head || MainThreadWorkQ.head->started) {
- // No work or we're already here
- pthread_mutex_unlock(&MainThreadWorkQ.mutex);
- return;
- }
- while (MainThreadWorkQ.head) {
- BatchFinalizeBlock_t *bfb = MainThreadWorkQ.head;
- bfb->started = YES;
- pthread_mutex_unlock(&MainThreadWorkQ.mutex);
-
- batchFinalize(gc_zone, bfb->foreach, bfb->cursor, bfb->cursor_size, finalizeOneMainThreadOnlyObject);
- // signal the collector thread(s) that finalization has finished.
- pthread_mutex_lock(&MainThreadWorkQ.mutex);
- bfb->finished = YES;
- pthread_cond_broadcast(&MainThreadWorkQ.condition);
- MainThreadWorkQ.head = bfb->next;
- }
- MainThreadWorkQ.tail = NULL;
- pthread_mutex_unlock(&MainThreadWorkQ.mutex);
-}
-
-
-// Knowing that we possibly have main thread only work to do, first process everything
-// that is not main-thread-only. If we discover main thread only work, queue a work block
-// to the main thread that will do just the main thread only work. Wait for it.
-// Called from a non main thread.
-static void batchFinalizeOnTwoThreads(auto_zone_t *zone,
- auto_zone_foreach_object_t foreach,
- auto_zone_cursor_t cursor,
- size_t cursor_size)
-{
- // First, lets get rid of everything we can on this thread, then ask main thread to help if needed
- char cursor_copy[cursor_size];
- memcpy(cursor_copy, cursor, cursor_size);
- bool needsMainThreadFinalization = batchFinalize(zone, foreach, (auto_zone_cursor_t)cursor_copy, cursor_size, finalizeOneAnywhereObject);
-
- if (! needsMainThreadFinalization)
- return; // no help needed
-
- // set up the control block. Either our ping of main thread with _callOnMainThread will get to it, or
- // an objc_collect(if_needed) will get to it. Either way, this block will be processed on the main thread.
- BatchFinalizeBlock_t bfb;
- bfb.foreach = foreach;
- bfb.cursor = cursor;
- bfb.cursor_size = cursor_size;
- bfb.started = NO;
- bfb.finished = NO;
- bfb.next = NULL;
- pthread_mutex_lock(&MainThreadWorkQ.mutex);
- if (MainThreadWorkQ.tail) {
-
- // link to end so that ordering of finalization is preserved.
- MainThreadWorkQ.tail->next = &bfb;
- MainThreadWorkQ.tail = &bfb;
- }
- else {
- MainThreadWorkQ.head = &bfb;
- MainThreadWorkQ.tail = &bfb;
- }
- pthread_mutex_unlock(&MainThreadWorkQ.mutex);
-
- //printf("----->asking main thread to finalize\n");
- dispatch_async(dispatch_get_main_queue(), ^{ batchFinalizeOnMainThread(); });
-
- // wait for the main thread to finish finalizing instances of classes marked CLS_FINALIZE_ON_MAIN_THREAD.
- pthread_mutex_lock(&MainThreadWorkQ.mutex);
- while (!bfb.finished) {
- // the main thread might be blocked waiting for a synchronous collection to complete, so wake it here
- pthread_cond_signal(&MainThreadWorkQ.condition);
- pthread_cond_wait(&MainThreadWorkQ.condition, &MainThreadWorkQ.mutex);
- }
- pthread_mutex_unlock(&MainThreadWorkQ.mutex);
- //printf("<------ main thread finalize done\n");
-
-}
-
-
-
-// collector calls this with garbage ready
-// thread collectors, too, so this needs to be thread-safe
-static void BatchInvalidate(auto_zone_t *zone,
- auto_zone_foreach_object_t foreach,
- auto_zone_cursor_t cursor,
- size_t cursor_size)
-{
- if (pthread_main_np() || !WantsMainThreadFinalization) {
- // Collect all objects. We're either pre-multithreaded on main thread or we're on the collector thread
- // but no main-thread-only objects have been allocated.
- batchFinalize(zone, foreach, cursor, cursor_size, finalizeOneObject);
- }
- else {
- // We're on the dedicated thread. Collect some on main thread, the rest here.
- batchFinalizeOnTwoThreads(zone, foreach, cursor, cursor_size);
- }
-
-}
-
-
-/*
- * Zombie support
- * Collector calls into this system when it finds resurrected objects.
- * This keeps them pitifully alive and leaked, even if they reference garbage.
- */
-
-// idea: keep a side table mapping resurrected object pointers to their original Class, so we don't
-// need to smash anything. alternatively, could use associative references to track against a secondary
-// object with information about the resurrection, such as a stack crawl, etc.
-
-static Class _NSResurrectedObjectClass;
-static NXMapTable *_NSResurrectedObjectMap = NULL;
-static pthread_mutex_t _NSResurrectedObjectLock = PTHREAD_MUTEX_INITIALIZER;
-
-static Class resurrectedObjectOriginalClass(id object) {
- Class originalClass;
- pthread_mutex_lock(&_NSResurrectedObjectLock);
- originalClass = (Class) NXMapGet(_NSResurrectedObjectMap, object);
- pthread_mutex_unlock(&_NSResurrectedObjectLock);
- return originalClass;
-}
-
-static id _NSResurrectedObject_classMethod(id self, SEL selector) { return self; }
-
-static id _NSResurrectedObject_instanceMethod(id self, SEL name) {
- _objc_inform("**resurrected** object %p of class %s being sent message '%s'\n", self, class_getName(resurrectedObjectOriginalClass(self)), sel_getName(name));
- return self;
-}
-
-static void _NSResurrectedObject_finalize(id self, SEL _cmd) {
- Class originalClass;
- pthread_mutex_lock(&_NSResurrectedObjectLock);
- originalClass = (Class) NXMapRemove(_NSResurrectedObjectMap, self);
- pthread_mutex_unlock(&_NSResurrectedObjectLock);
- if (originalClass) _objc_inform("**resurrected** object %p of class %s being finalized\n", self, class_getName(originalClass));
- _objc_rootFinalize(self);
-}
-
-static BOOL _NSResurrectedObject_resolveInstanceMethod(id self, SEL _cmd, SEL name) {
- class_addMethod((Class)self, name, (IMP)_NSResurrectedObject_instanceMethod, "@@:");
- return YES;
-}
-
-static BOOL _NSResurrectedObject_resolveClassMethod(id self, SEL _cmd, SEL name) {
- class_addMethod(_object_getClass(self), name, (IMP)_NSResurrectedObject_classMethod, "@@:");
- return YES;
-}
-
-static void _NSResurrectedObject_initialize() {
- _NSResurrectedObjectMap = NXCreateMapTable(NXPtrValueMapPrototype, 128);
- _NSResurrectedObjectClass = objc_allocateClassPair(objc_getClass("NSObject"), "_NSResurrectedObject", 0);
- class_addMethod(_NSResurrectedObjectClass, @selector(finalize), (IMP)_NSResurrectedObject_finalize, "v@:");
- Class metaClass = _object_getClass(_NSResurrectedObjectClass);
- class_addMethod(metaClass, @selector(resolveInstanceMethod:), (IMP)_NSResurrectedObject_resolveInstanceMethod, "c@::");
- class_addMethod(metaClass, @selector(resolveClassMethod:), (IMP)_NSResurrectedObject_resolveClassMethod, "c@::");
- objc_registerClassPair(_NSResurrectedObjectClass);
-}
-
-static void resurrectZombie(auto_zone_t *zone, void *ptr) {
- id object = (id) ptr;
- Class cls = _object_getClass(object);
- if (cls != _NSResurrectedObjectClass) {
- // remember the original class for this instance.
- pthread_mutex_lock(&_NSResurrectedObjectLock);
- NXMapInsert(_NSResurrectedObjectMap, ptr, cls);
- pthread_mutex_unlock(&_NSResurrectedObjectLock);
- object_setClass(object, _NSResurrectedObjectClass);
- }
-}
-
-/***********************************************************************
-* Pretty printing support
-* For development purposes.
-**********************************************************************/
-
-
-static char *name_for_address(auto_zone_t *zone, vm_address_t base, vm_address_t offset, int withRetainCount);
-
-static char* objc_name_for_address(auto_zone_t *zone, vm_address_t base, vm_address_t offset)
-{
- return name_for_address(zone, base, offset, false);
-}
-
-static const char* objc_name_for_object(auto_zone_t *zone, void *object) {
- Class cls = *(Class *)object;
- if (!objc_isRegisteredClass(cls)) return "";
- return class_getName(cls);
-}
-
-/* Compaction support */
-
-void objc_disableCompaction() {
- if (UseCompaction) {
- UseCompaction = NO;
- auto_zone_disable_compaction(gc_zone);
- }
-}
-
-/***********************************************************************
-* Collection support
-**********************************************************************/
-
-static BOOL objc_isRegisteredClass(Class candidate);
-
-static const unsigned char *objc_layout_for_address(auto_zone_t *zone, void *address) {
- id object = (id)address;
- Class cls = (volatile Class)_object_getClass(object);
- return objc_isRegisteredClass(cls) ? _object_getIvarLayout(cls, object) : NULL;
-}
-
-static const unsigned char *objc_weak_layout_for_address(auto_zone_t *zone, void *address) {
- id object = (id)address;
- Class cls = (volatile Class)_object_getClass(object);
- return objc_isRegisteredClass(cls) ? class_getWeakIvarLayout(cls) : NULL;
-}
-
-void gc_register_datasegment(uintptr_t base, size_t size) {
- auto_zone_register_datasegment(gc_zone, (void*)base, size);
-}
-
-void gc_unregister_datasegment(uintptr_t base, size_t size) {
- auto_zone_unregister_datasegment(gc_zone, (void*)base, size);
-}
-
-#define countof(array) (sizeof(array) / sizeof(array[0]))
-
-// defined in objc-externalref.m.
-extern objc_xref_t _object_addExternalReference_gc(id obj, objc_xref_t type);
-extern objc_xref_t _object_addExternalReference_rr(id obj, objc_xref_t type);
-extern id _object_readExternalReference_gc(objc_xref_t ref);
-extern id _object_readExternalReference_rr(objc_xref_t ref);
-extern void _object_removeExternalReference_gc(objc_xref_t ref);
-extern void _object_removeExternalReference_rr(objc_xref_t ref);
-
-void gc_fixup_barrier_stubs(const struct dyld_image_info *info) {
- static const char *symbols[] = {
- "_objc_assign_strongCast", "_objc_assign_ivar",
- "_objc_assign_global", "_objc_assign_threadlocal",
- "_objc_read_weak", "_objc_assign_weak",
- "_objc_getProperty", "_objc_setProperty",
- "_objc_getAssociatedObject", "_objc_setAssociatedObject",
- "__object_addExternalReference", "__object_readExternalReference", "__object_removeExternalReference"
- };
- if (UseGC) {
- // resolve barrier symbols using GC functions.
- static void *gc_functions[] = {
- &objc_assign_strongCast_gc, &objc_assign_ivar_gc,
- &objc_assign_global_gc, &objc_assign_threadlocal_gc,
- &objc_read_weak_gc, &objc_assign_weak_gc,
- &objc_getProperty_gc, &objc_setProperty_gc,
- &objc_getAssociatedObject_gc, &objc_setAssociatedObject_gc,
- &_object_addExternalReference_gc, &_object_readExternalReference_gc, &_object_removeExternalReference_gc
- };
- assert(countof(symbols) == countof(gc_functions));
- _objc_update_stubs_in_mach_header(info->imageLoadAddress, countof(symbols), symbols, gc_functions);
- } else {
- // resolve barrier symbols using non-GC functions.
- static void *nongc_functions[] = {
- &objc_assign_strongCast_non_gc, &objc_assign_ivar_non_gc,
- &objc_assign_global_non_gc, &objc_assign_threadlocal_non_gc,
- &objc_read_weak_non_gc, &objc_assign_weak_non_gc,
- &objc_getProperty_non_gc, &objc_setProperty_non_gc,
- &objc_getAssociatedObject_non_gc, &objc_setAssociatedObject_non_gc,
- &_object_addExternalReference_rr, &_object_readExternalReference_rr, &_object_removeExternalReference_rr
- };
- assert(countof(symbols) == countof(nongc_functions));
- _objc_update_stubs_in_mach_header(info->imageLoadAddress, countof(symbols), symbols, nongc_functions);
- }
-}
-
-/***********************************************************************
-* Initialization
-**********************************************************************/
-
-static void objc_will_grow(auto_zone_t *zone, auto_heap_growth_info_t info) {
- if (auto_zone_is_collecting(gc_zone)) {
- ;
- }
- else {
- auto_zone_collect(gc_zone, AUTO_ZONE_COLLECT_COALESCE|AUTO_ZONE_COLLECT_RATIO_COLLECTION);
- }
-}
-
-
-static auto_zone_t *gc_zone_init(BOOL wantsCompaction)
-{
- auto_zone_t *result;
- static int didOnce = 0;
- if (!didOnce) {
- didOnce = 1;
-
- // initialize the batch finalization queue
- MainThreadWorkQ.head = NULL;
- MainThreadWorkQ.tail = NULL;
- pthread_mutex_init(&MainThreadWorkQ.mutex, NULL);
- pthread_cond_init(&MainThreadWorkQ.condition, NULL);
- }
-
- result = auto_zone_create("auto_zone");
-
- if (!wantsCompaction) auto_zone_disable_compaction(result);
-
- auto_collection_control_t *control = auto_collection_parameters(result);
-
- // set up the magic control parameters
- control->batch_invalidate = BatchInvalidate;
- control->will_grow = objc_will_grow;
- control->resurrect = resurrectZombie;
- control->layout_for_address = objc_layout_for_address;
- control->weak_layout_for_address = objc_weak_layout_for_address;
- control->name_for_address = objc_name_for_address;
-
- if (control->version >= sizeof(auto_collection_control_t)) {
- control->name_for_object = objc_name_for_object;
- }
-
- return result;
-}
-
-
-/* should be defined in /usr/local/include/libdispatch_private.h. */
-extern void (*dispatch_begin_thread_4GC)(void);
-extern void (*dispatch_end_thread_4GC)(void);
-
-static void objc_reapThreadLocalBlocks()
-{
- if (UseGC) auto_zone_reap_all_local_blocks(gc_zone);
-}
-
-void objc_registerThreadWithCollector()
-{
- if (UseGC) auto_zone_register_thread(gc_zone);
-}
-
-void objc_unregisterThreadWithCollector()
-{
- if (UseGC) auto_zone_unregister_thread(gc_zone);
-}
-
-void objc_assertRegisteredThreadWithCollector()
-{
- if (UseGC) auto_zone_assert_thread_registered(gc_zone);
-}
-
-// Always called by _objcInit, even if GC is off.
-void gc_init(BOOL wantsGC, BOOL wantsCompaction)
-{
- UseGC = wantsGC;
- UseCompaction = wantsCompaction;
-
- if (PrintGC) {
- _objc_inform("GC: is %s", wantsGC ? "ON" : "OFF");
- _objc_inform("Compaction: is %s", wantsCompaction ? "ON" : "OFF");
- }
-
- if (UseGC) {
- // Set up the GC zone
- gc_zone = gc_zone_init(wantsCompaction);
-
- // tell libdispatch to register its threads with the GC.
- dispatch_begin_thread_4GC = objc_registerThreadWithCollector;
- dispatch_end_thread_4GC = objc_reapThreadLocalBlocks;
-
- // set up the registered classes list
- registeredClassTableInit();
-
- // tell Blocks to use collectable memory. CF will cook up the classes separately.
- gc_block_init();
-
- // Add GC state to crash log reports
- _objc_inform_on_crash("garbage collection is ON");
- }
-}
-
-
-// Called by NSObject +load to perform late GC setup
-// This work must wait until after all of libSystem initializes.
-void gc_init2(void)
-{
- assert(UseGC);
-
- // create the _NSResurrectedObject class used to track resurrections.
- _NSResurrectedObject_initialize();
-
- // tell libauto to set up its dispatch queues
- auto_collect_multithreaded(gc_zone);
-}
-
-// Called by Foundation.
-// This function used to initialize NSObject stuff, but now does nothing.
-malloc_zone_t *objc_collect_init(int (*callback)(void) __unused)
-{
- return (malloc_zone_t *)gc_zone;
-}
-
-/*
- * Support routines for the Block implementation
- */
-
-
-// The Block runtime now needs to sometimes allocate a Block that is an Object - namely
-// when it needs to have a finalizer which, for now, is only if there are C++ destructors
-// in the helper function. Hence the isObject parameter.
-// Under GC a -copy message should allocate a refcount 0 block, ergo the isOne parameter.
-static void *block_gc_alloc5(const unsigned long size, const bool isOne, const bool isObject) {
- auto_memory_type_t type = isObject ? (AUTO_OBJECT|AUTO_MEMORY_SCANNED) : AUTO_MEMORY_SCANNED;
- return auto_zone_allocate_object(gc_zone, size, type, isOne, false);
-}
-
-// The Blocks runtime keeps track of everything above 1 and so it only calls
-// up to the collector to tell it about the 0->1 transition and then the 1->0 transition
-static void block_gc_setHasRefcount(const void *block, const bool hasRefcount) {
- if (hasRefcount)
- auto_zone_retain(gc_zone, (void *)block);
- else
- auto_zone_release(gc_zone, (void *)block);
-}
-
-static void block_gc_memmove(void *dst, void *src, unsigned long size) {
- auto_zone_write_barrier_memmove(gc_zone, dst, src, (size_t)size);
-}
-
-static void gc_block_init(void) {
- _Block_use_GC(
- block_gc_alloc5,
- block_gc_setHasRefcount,
- (void (*)(void *, void **))objc_assign_strongCast_gc,
- (void (*)(const void *, void *))objc_assign_weak,
- block_gc_memmove
- );
-}
-
-
-/***********************************************************************
-* Track classes.
-* In addition to the global class hashtable (set) indexed by name, we
-* also keep one based purely by pointer when running under Garbage Collection.
-* This allows the background collector to race against objects recycled from TLC.
-* Specifically, the background collector can read the admin byte and see that
-* a thread local object is an object, get scheduled out, and the TLC recovers it,
-* linking it into the cache, then the background collector reads the isa field and
-* finds linkage info. By qualifying all isa fields read we avoid this.
-**********************************************************************/
-
-// This is a self-contained hash table of all classes. The first two elements contain the (size-1) and count.
-static volatile Class *AllClasses = nil;
-
-#define SHIFT 3
-#define INITIALSIZE 512
-#define REMOVED ~0ul
-
-// Allocate the side table.
-static void registeredClassTableInit() {
- assert(UseGC);
- // allocate a collectable (refcount 0) zeroed hunk of unscanned memory
- uintptr_t *table = (uintptr_t *)auto_zone_allocate_object(gc_zone, INITIALSIZE*sizeof(void *), AUTO_MEMORY_UNSCANNED, true, true);
- // set initial capacity (as mask)
- table[0] = INITIALSIZE - 1;
- // set initial count
- table[1] = 0;
- // Compaction: we allocate it refcount 1 and then decr when done.
- AllClasses = (Class *)table;
-}
-
-// Verify that a particular pointer is to a class.
-// Safe from any thread anytime
-static BOOL objc_isRegisteredClass(Class candidate) {
- assert(UseGC);
- // nil is never a valid ISA.
- if (candidate == nil) return NO;
- // We don't care about a race with another thread adding a class to which we randomly might have a pointer
- // Get local copy of classes so that we're immune from updates.
- // We keep the size of the list as the first element so there is no race as the list & size get updated.
- uintptr_t *allClasses = (uintptr_t *)AllClasses;
- // Slot 0 is always the size of the list in log 2 masked terms (e.g. size - 1) where size is always power of 2
- // Slot 1 is count
- uintptr_t slot = (((uintptr_t)candidate) >> SHIFT) & allClasses[0];
- // avoid slot 0 and 1
- if (slot < 2) slot = 2;
- for(;;) {
- long int slotValue = allClasses[slot];
- if (slotValue == (long int)candidate) {
- return YES;
- }
- if (slotValue == 0) {
- return NO;
- }
- ++slot;
- if (slot > allClasses[0])
- slot = 2; // skip size, count
- }
-}
-
-// Utility used when growing
-// Assumes lock held
-static void addClassHelper(uintptr_t *table, uintptr_t candidate) {
- uintptr_t slot = (((long int)candidate) >> SHIFT) & table[0];
- if (slot < 2) slot = 2;
- for(;;) {
- uintptr_t slotValue = table[slot];
- if (slotValue == 0) {
- table[slot] = candidate;
- ++table[1];
- return;
- }
- ++slot;
- if (slot > table[0])
- slot = 2; // skip size, count
- }
-}
-
-// lock held by callers
-void objc_addRegisteredClass(Class candidate) {
- if (!UseGC) return;
- uintptr_t *table = (uintptr_t *)AllClasses;
- // Slot 0 is always the size of the list in log 2 masked terms (e.g. size - 1) where size is always power of 2
- // Slot 1 is count - always non-zero
- uintptr_t slot = (((long int)candidate) >> SHIFT) & table[0];
- if (slot < 2) slot = 2;
- for(;;) {
- uintptr_t slotValue = table[slot];
- assert(slotValue != (uintptr_t)candidate);
- if (slotValue == REMOVED) {
- table[slot] = (long)candidate;
- return;
- }
- else if (slotValue == 0) {
- table[slot] = (long)candidate;
- if (2*++table[1] > table[0]) { // add to count; check if we cross 50% utilization
- // grow
- uintptr_t oldSize = table[0]+1;
- uintptr_t *newTable = (uintptr_t *)auto_zone_allocate_object(gc_zone, oldSize*2*sizeof(void *), AUTO_MEMORY_UNSCANNED, true, true);
- uintptr_t i;
- newTable[0] = 2*oldSize - 1;
- newTable[1] = 0;
- for (i = 2; i < oldSize; ++i) {
- if (table[i] && table[i] != REMOVED)
- addClassHelper(newTable, table[i]);
- }
- AllClasses = (Class *)newTable;
- // let the old table be collected when other threads are no longer reading it.
- auto_zone_release(gc_zone, (void *)table);
- }
- return;
- }
- ++slot;
- if (slot > table[0])
- slot = 2; // skip size, count
- }
-}
-
-// lock held by callers
-void objc_removeRegisteredClass(Class candidate) {
- if (!UseGC) return;
- uintptr_t *table = (uintptr_t *)AllClasses;
- // Slot 0 is always the size of the list in log 2 masked terms (e.g. size - 1) where size is always power of 2
- // Slot 1 is count - always non-zero
- uintptr_t slot = (((uintptr_t)candidate) >> SHIFT) & table[0];
- if (slot < 2) slot = 2;
- for(;;) {
- uintptr_t slotValue = table[slot];
- if (slotValue == (uintptr_t)candidate) {
- table[slot] = REMOVED; // if next slot == 0 we could set to 0 here and decr count
- return;
- }
- assert(slotValue != 0);
- ++slot;
- if (slot > table[0])
- slot = 2; // skip size, count
- }
-}
-
-
-/***********************************************************************
-* Debugging - support for smart printouts when errors occur
-**********************************************************************/
-
-
-static malloc_zone_t *objc_debug_zone(void)
-{
- static malloc_zone_t *z = NULL;
- if (!z) {
- z = malloc_create_zone(4096, 0);
- malloc_set_zone_name(z, "objc-auto debug");
- }
- return z;
-}
-
-static char *_malloc_append_unsigned(uintptr_t value, unsigned base, char *head) {
- if (!value) {
- head[0] = '0';
- } else {
- if (value >= base) head = _malloc_append_unsigned(value / base, base, head);
- value = value % base;
- head[0] = (value < 10) ? '0' + value : 'a' + value - 10;
- }
- return head+1;
-}
-
-static void strlcati(char *str, uintptr_t value, size_t bufSize)
-{
- if ( (bufSize - strlen(str)) < 30)
- return;
- str = _malloc_append_unsigned(value, 10, str + strlen(str));
- str[0] = '\0';
-}
-
-
-static Ivar ivar_for_offset(Class cls, vm_address_t offset)
-{
- unsigned i;
- vm_address_t ivar_offset;
- Ivar super_ivar, result;
- Ivar *ivars;
- unsigned int ivar_count;
-
- if (!cls) return NULL;
-
- // scan base classes FIRST
- super_ivar = ivar_for_offset(class_getSuperclass(cls), offset);
- // result is best-effort; our ivars may be closer
-
- ivars = class_copyIvarList(cls, &ivar_count);
- if (ivars && ivar_count) {
- // Try our first ivar. If it's too big, use super's best ivar.
- // (lose 64-bit precision)
- ivar_offset = ivar_getOffset(ivars[0]);
- if (ivar_offset > offset) result = super_ivar;
- else if (ivar_offset == offset) result = ivars[0];
- else result = NULL;
-
- // Try our other ivars. If any is too big, use the previous.
- for (i = 1; result == NULL && i < ivar_count; i++) {
- ivar_offset = ivar_getOffset(ivars[i]);
- if (ivar_offset == offset) {
- result = ivars[i];
- } else if (ivar_offset > offset) {
- result = ivars[i - 1];
- }
- }
-
- // Found nothing. Return our last ivar.
- if (result == NULL)
- result = ivars[ivar_count - 1];
-
- free(ivars);
- } else {
- result = super_ivar;
- }
-
- return result;
-}
-
-static void append_ivar_at_offset(char *buf, Class cls, vm_address_t offset, size_t bufSize)
-{
- Ivar ivar = NULL;
-
- if (offset == 0) return; // don't bother with isa
- if (offset >= class_getInstanceSize(cls)) {
- strlcat(buf, ".<extra>+", bufSize);
- strlcati(buf, offset, bufSize);
- return;
- }
-
- ivar = ivar_for_offset(cls, offset);
- if (!ivar) {
- strlcat(buf, ".<?>", bufSize);
- return;
- }
-
- // fixme doesn't handle structs etc.
-
- strlcat(buf, ".", bufSize);
- const char *ivar_name = ivar_getName(ivar);
- if (ivar_name) strlcat(buf, ivar_name, bufSize);
- else strlcat(buf, "<anonymous ivar>", bufSize);
-
- offset -= ivar_getOffset(ivar);
- if (offset > 0) {
- strlcat(buf, "+", bufSize);
- strlcati(buf, offset, bufSize);
- }
-}
-
-
-static const char *cf_class_for_object(void *cfobj)
-{
- // ick - we don't link against CF anymore
-
- const char *result;
- void *dlh;
- size_t (*CFGetTypeID)(void *);
- void * (*_CFRuntimeGetClassWithTypeID)(size_t);
-
- result = "anonymous_NSCFType";
-
- dlh = dlopen("/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation", RTLD_LAZY | RTLD_NOLOAD | RTLD_FIRST);
- if (!dlh) return result;
-
- CFGetTypeID = (size_t(*)(void*)) dlsym(dlh, "CFGetTypeID");
- _CFRuntimeGetClassWithTypeID = (void*(*)(size_t)) dlsym(dlh, "_CFRuntimeGetClassWithTypeID");
-
- if (CFGetTypeID && _CFRuntimeGetClassWithTypeID) {
- struct {
- size_t version;
- const char *className;
- // don't care about the rest
- } *cfcls;
- size_t cfid;
- cfid = (*CFGetTypeID)(cfobj);
- cfcls = (*_CFRuntimeGetClassWithTypeID)(cfid);
- result = cfcls->className;
- }
-
- dlclose(dlh);
- return result;
-}
-
-
-static char *name_for_address(auto_zone_t *zone, vm_address_t base, vm_address_t offset, int withRetainCount)
-{
-#define APPEND_SIZE(s) \
- strlcat(buf, "[", sizeof(buf)); \
- strlcati(buf, s, sizeof(buf)); \
- strlcat(buf, "]", sizeof(buf));
-
- char buf[1500];
- char *result;
-
- buf[0] = '\0';
-
- size_t size =
- auto_zone_size(zone, (void *)base);
- auto_memory_type_t type = size ?
- auto_zone_get_layout_type(zone, (void *)base) : AUTO_TYPE_UNKNOWN;
- unsigned int refcount = size ?
- auto_zone_retain_count(zone, (void *)base) : 0;
-
- switch (type) {
- case AUTO_OBJECT_SCANNED:
- case AUTO_OBJECT_UNSCANNED:
- case AUTO_OBJECT_ALL_POINTERS: {
- const char *class_name = object_getClassName((id)base);
- if ((0 == strcmp(class_name, "__NSCFType")) || (0 == strcmp(class_name, "NSCFType"))) {
- strlcat(buf, cf_class_for_object((void *)base), sizeof(buf));
- } else {
- strlcat(buf, class_name, sizeof(buf));
- }
- if (offset) {
- append_ivar_at_offset(buf, _object_getClass((id)base), offset, sizeof(buf));
- }
- APPEND_SIZE(size);
- break;
- }
- case AUTO_MEMORY_SCANNED:
- strlcat(buf, "{conservative-block}", sizeof(buf));
- APPEND_SIZE(size);
- break;
- case AUTO_MEMORY_UNSCANNED:
- strlcat(buf, "{no-pointers-block}", sizeof(buf));
- APPEND_SIZE(size);
- break;
- case AUTO_MEMORY_ALL_POINTERS:
- strlcat(buf, "{all-pointers-block}", sizeof(buf));
- APPEND_SIZE(size);
- break;
- case AUTO_MEMORY_ALL_WEAK_POINTERS:
- strlcat(buf, "{all-weak-pointers-block}", sizeof(buf));
- APPEND_SIZE(size);
- break;
- case AUTO_TYPE_UNKNOWN:
- strlcat(buf, "{uncollectable-memory}", sizeof(buf));
- break;
- default:
- strlcat(buf, "{unknown-memory-type}", sizeof(buf));
- }
-
- if (withRetainCount && refcount > 0) {
- strlcat(buf, " [[refcount=", sizeof(buf));
- strlcati(buf, refcount, sizeof(buf));
- strlcat(buf, "]]", sizeof(buf));
- }
-
- size_t len = 1 + strlen(buf);
- result = malloc_zone_malloc(objc_debug_zone(), len);
- memcpy(result, buf, len);
- return result;
-
-#undef APPEND_SIZE
-}
-
-
-
-
-
-#endif
--- /dev/null
+/*
+ * Copyright (c) 2004-2007 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include "objc-private.h"
+
+#include "objc-config.h"
+#include "objc-auto.h"
+#include "objc-accessors.h"
+
+#ifndef OBJC_NO_GC
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <fcntl.h>
+#include <dlfcn.h>
+#include <mach/mach.h>
+#include <mach-o/dyld.h>
+#include <mach-o/nlist.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <libkern/OSAtomic.h>
+#include <auto_zone.h>
+
+#include <Block_private.h>
+#include <dispatch/private.h>
+
+#include "objc-private.h"
+#include "objc-references.h"
+#include "maptable.h"
+#include "message.h"
+#include "objc-gdb.h"
+
+#if !defined(NDEBUG) && !__OBJC2__
+#include "objc-exception.h"
+#endif
+
+
+static auto_zone_t *gc_zone_init(void);
+static void gc_block_init(void);
+static void registeredClassTableInit(void);
+static BOOL objc_isRegisteredClass(Class candidate);
+
+int8_t UseGC = -1;
+static BOOL WantsMainThreadFinalization = NO;
+
+auto_zone_t *gc_zone = nil;
+
+
+/* Method prototypes */
+@interface DoesNotExist
+- (const char *)UTF8String;
+- (id)description;
+@end
+
+
+/***********************************************************************
+* Break-on-error functions
+**********************************************************************/
+
+BREAKPOINT_FUNCTION(
+ void objc_assign_ivar_error(id base, ptrdiff_t offset)
+);
+
+BREAKPOINT_FUNCTION(
+ void objc_assign_global_error(id value, id *slot)
+);
+
+BREAKPOINT_FUNCTION(
+ void objc_exception_during_finalize_error(void)
+);
+
+/***********************************************************************
+* Utility exports
+* Called by various libraries.
+**********************************************************************/
+
+OBJC_EXPORT void objc_set_collection_threshold(size_t threshold) { // Old naming
+ if (UseGC) {
+ auto_collection_parameters(gc_zone)->collection_threshold = threshold;
+ }
+}
+
+OBJC_EXPORT void objc_setCollectionThreshold(size_t threshold) {
+ if (UseGC) {
+ auto_collection_parameters(gc_zone)->collection_threshold = threshold;
+ }
+}
+
+void objc_setCollectionRatio(size_t ratio) {
+ if (UseGC) {
+ auto_collection_parameters(gc_zone)->full_vs_gen_frequency = ratio;
+ }
+}
+
+void objc_set_collection_ratio(size_t ratio) { // old naming
+ if (UseGC) {
+ auto_collection_parameters(gc_zone)->full_vs_gen_frequency = ratio;
+ }
+}
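+
+// Illustrative usage (added comment, not part of the original source): a host
+// framework that wants a larger allocation budget between collections and fewer
+// full collections might, after checking objc_collectingEnabled(), call:
+//
+//     objc_setCollectionThreshold(8 * 1024 * 1024);  // placeholder value
+//     objc_setCollectionRatio(100);                  // placeholder value
+//
+// The units are whatever libauto assigns to collection_threshold and
+// full_vs_gen_frequency; the numbers above are illustrations, not tuning advice.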
+
+void objc_finalizeOnMainThread(Class cls) {
+ if (UseGC) {
+ WantsMainThreadFinalization = YES;
+ cls->setShouldFinalizeOnMainThread();
+ }
+}
+
+// stack-based data structure, queued if/when there is main-thread-only finalization work to be done
+typedef struct BatchFinalizeBlock {
+ auto_zone_foreach_object_t foreach;
+ auto_zone_cursor_t cursor;
+ size_t cursor_size;
+ volatile BOOL finished;
+ volatile BOOL started;
+ struct BatchFinalizeBlock *next;
+} BatchFinalizeBlock_t;
+
+// The Main Thread Finalization Work Queue Head
+static struct {
+ pthread_mutex_t mutex;
+ pthread_cond_t condition;
+ BatchFinalizeBlock_t *head;
+ BatchFinalizeBlock_t *tail;
+} MainThreadWorkQ;
+
+
+void objc_startCollectorThread(void) {
+}
+
+void objc_start_collector_thread(void) {
+}
+
+static void batchFinalizeOnMainThread(void);
+
+void objc_collect(unsigned long options) {
+ if (!UseGC) return;
+ BOOL onMainThread = pthread_main_np() ? YES : NO;
+
+ // while we're here, sneak off and do some finalization work (if any)
+ if (onMainThread) batchFinalizeOnMainThread();
+ // now on with our normally scheduled programming
+ auto_zone_options_t amode = AUTO_ZONE_COLLECT_NO_OPTIONS;
+ if (!(options & OBJC_COLLECT_IF_NEEDED)) {
+ switch (options & 0x3) {
+ case OBJC_RATIO_COLLECTION: amode = AUTO_ZONE_COLLECT_RATIO_COLLECTION; break;
+ case OBJC_GENERATIONAL_COLLECTION: amode = AUTO_ZONE_COLLECT_GENERATIONAL_COLLECTION; break;
+ case OBJC_FULL_COLLECTION: amode = AUTO_ZONE_COLLECT_FULL_COLLECTION; break;
+ case OBJC_EXHAUSTIVE_COLLECTION: amode = AUTO_ZONE_COLLECT_EXHAUSTIVE_COLLECTION; break;
+ }
+ amode |= AUTO_ZONE_COLLECT_COALESCE;
+ amode |= AUTO_ZONE_COLLECT_LOCAL_COLLECTION;
+ }
+ if (options & OBJC_WAIT_UNTIL_DONE) {
+ __block BOOL done = NO;
+ // If executing on the main thread, use the main thread work queue condition to block,
+ // so main thread finalization can complete. Otherwise, use a thread-local condition.
+ pthread_mutex_t localMutex = PTHREAD_MUTEX_INITIALIZER, *mutex = &localMutex;
+ pthread_cond_t localCondition = PTHREAD_COND_INITIALIZER, *condition = &localCondition;
+ if (onMainThread) {
+ mutex = &MainThreadWorkQ.mutex;
+ condition = &MainThreadWorkQ.condition;
+ }
+ pthread_mutex_lock(mutex);
+ auto_zone_collect_and_notify(gc_zone, amode, dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
+ pthread_mutex_lock(mutex);
+ done = YES;
+ pthread_cond_signal(condition);
+ pthread_mutex_unlock(mutex);
+ });
+ while (!done) {
+ pthread_cond_wait(condition, mutex);
+ if (onMainThread && MainThreadWorkQ.head) {
+ pthread_mutex_unlock(mutex);
+ batchFinalizeOnMainThread();
+ pthread_mutex_lock(mutex);
+ }
+ }
+ pthread_mutex_unlock(mutex);
+ } else {
+ auto_zone_collect(gc_zone, amode);
+ }
+}
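+
+// Illustrative usage (added comment, not part of the original source): given the
+// option handling above, a test harness that wants a complete, synchronous
+// collection could call
+//
+//     objc_collect(OBJC_EXHAUSTIVE_COLLECTION | OBJC_WAIT_UNTIL_DONE);
+//
+// while a cheap request from an idle handler would typically be
+//
+//     objc_collect(OBJC_COLLECT_IF_NEEDED);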
+
+
+// USED BY CF & ONE OTHER
+BOOL objc_isAuto(id object)
+{
+ return UseGC && auto_zone_is_valid_pointer(gc_zone, object) != 0;
+}
+
+
+BOOL objc_collectingEnabled(void)
+{
+ return UseGC;
+}
+
+BOOL objc_collecting_enabled(void) // Old naming
+{
+ return UseGC;
+}
+
+malloc_zone_t *objc_collectableZone(void) {
+ return gc_zone;
+}
+
+BOOL objc_dumpHeap(char *filenamebuffer, unsigned long length) {
+ static int counter = 0;
+ ++counter;
+ char buffer[1024];
+ sprintf(buffer, OBJC_HEAP_DUMP_FILENAME_FORMAT, getpid(), counter);
+ if (!_objc_dumpHeap(gc_zone, buffer)) return NO;
+ if (filenamebuffer) {
+ unsigned long blen = strlen(buffer);
+ if (blen < length)
+ strncpy(filenamebuffer, buffer, blen+1);
+ else if (length > 0)
+ filenamebuffer[0] = 0; // give some answer
+ }
+ return YES;
+}
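+
+// Illustrative usage (added comment, not part of the original source): callers
+// pass a buffer to learn where the dump landed; the path itself follows
+// OBJC_HEAP_DUMP_FILENAME_FORMAT:
+//
+//     char path[1024];
+//     if (objc_dumpHeap(path, sizeof(path))) printf("heap dumped to %s\n", path);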
+
+
+/***********************************************************************
+* Memory management.
+* Called by CF and Foundation.
+**********************************************************************/
+
+// Allocate an object in the GC zone, with the given number of extra bytes.
+id objc_allocate_object(Class cls, int extra)
+{
+ return class_createInstance(cls, extra);
+}
+
+
+/***********************************************************************
+* Write barrier implementations, optimized for when GC is known to be on
+* Called by the write barrier exports only.
+* These implementations assume GC is on. The exported function must
+* either perform the check itself or be conditionally stomped at
+* startup time.
+**********************************************************************/
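+
+// For orientation (added comment, not part of the original source): when code is
+// compiled with -fobjc-gc, the compiler replaces qualifying stores with calls to
+// these barrier entry points, roughly:
+//
+//     someGlobal = obj;           // becomes objc_assign_global(obj, &someGlobal)
+//     self->ivar = obj;           // becomes objc_assign_ivar(obj, self, ivar offset)
+//     *(id *)heapSlot = obj;      // via a cast: objc_assign_strongCast(obj, (id *)heapSlot)
+//
+// The un-suffixed exported symbols bind to the _gc or _non_gc variants below
+// through the dyld symbol resolvers defined later in this file.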
+
+id objc_assign_strongCast_gc(id value, id *slot) {
+ if (!auto_zone_set_write_barrier(gc_zone, (void*)slot, value)) { // stores & returns true if slot points into GC allocated memory
+ auto_zone_root_write_barrier(gc_zone, slot, value); // always stores
+ }
+ return value;
+}
+
+id objc_assign_global_gc(id value, id *slot) {
+ // use explicit root registration.
+ if (value && auto_zone_is_valid_pointer(gc_zone, value)) {
+ if (auto_zone_is_finalized(gc_zone, value)) {
+ _objc_inform("GC: storing an already collected object %p into global memory at %p, break on objc_assign_global_error to debug\n", (void*)value, slot);
+ objc_assign_global_error(value, slot);
+ }
+ auto_zone_add_root(gc_zone, slot, value);
+ }
+ else
+ *slot = value;
+
+ return value;
+}
+
+id objc_assign_threadlocal_gc(id value, id *slot)
+{
+ if (value && auto_zone_is_valid_pointer(gc_zone, value)) {
+ auto_zone_add_root(gc_zone, slot, value);
+ }
+ else {
+ *slot = value;
+ }
+
+ return value;
+}
+
+id objc_assign_ivar_gc(id value, id base, ptrdiff_t offset)
+{
+ id *slot = (id*) ((char *)base + offset);
+
+ if (value) {
+ if (!auto_zone_set_write_barrier(gc_zone, (char *)base + offset, value)) {
+ _objc_inform("GC: %p + %tu isn't in the auto_zone, break on objc_assign_ivar_error to debug.\n", (void*)base, offset);
+ objc_assign_ivar_error(base, offset);
+ }
+ }
+ else
+ *slot = value;
+
+ return value;
+}
+
+id objc_assign_strongCast_non_gc(id value, id *slot) {
+ return (*slot = value);
+}
+
+id objc_assign_global_non_gc(id value, id *slot) {
+ return (*slot = value);
+}
+
+id objc_assign_threadlocal_non_gc(id value, id *slot) {
+ return (*slot = value);
+}
+
+id objc_assign_ivar_non_gc(id value, id base, ptrdiff_t offset) {
+ id *slot = (id*) ((char *)base + offset);
+ return (*slot = value);
+}
+
+
+/***********************************************************************
+* Non-trivial write barriers
+**********************************************************************/
+
+void *objc_memmove_collectable(void *dst, const void *src, size_t size)
+{
+ if (UseGC) {
+ return auto_zone_write_barrier_memmove(gc_zone, dst, src, size);
+ } else {
+ return memmove(dst, src, size);
+ }
+}
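+
+// Illustrative usage (added comment, not part of the original source): collection
+// classes that shift ranges of object pointers inside a GC-allocated buffer use
+// this instead of plain memmove so the collector sees the stores, e.g.
+//
+//     // close a gap by moving 'count' ids down one slot within 'buffer'
+//     objc_memmove_collectable(buffer, buffer + 1, count * sizeof(id));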
+
+BOOL objc_atomicCompareAndSwapPtr(id predicate, id replacement, volatile id *objectLocation) {
+ const BOOL issueMemoryBarrier = NO;
+ if (UseGC)
+ return auto_zone_atomicCompareAndSwapPtr(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, issueMemoryBarrier);
+ else
+ return OSAtomicCompareAndSwapPtr((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
+}
+
+BOOL objc_atomicCompareAndSwapPtrBarrier(id predicate, id replacement, volatile id *objectLocation) {
+ const BOOL issueMemoryBarrier = YES;
+ if (UseGC)
+ return auto_zone_atomicCompareAndSwapPtr(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, issueMemoryBarrier);
+ else
+ return OSAtomicCompareAndSwapPtrBarrier((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
+}
+
+BOOL objc_atomicCompareAndSwapGlobal(id predicate, id replacement, volatile id *objectLocation) {
+ const BOOL isGlobal = YES;
+ const BOOL issueMemoryBarrier = NO;
+ if (UseGC)
+ return auto_zone_atomicCompareAndSwap(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, isGlobal, issueMemoryBarrier);
+ else
+ return OSAtomicCompareAndSwapPtr((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
+}
+
+BOOL objc_atomicCompareAndSwapGlobalBarrier(id predicate, id replacement, volatile id *objectLocation) {
+ const BOOL isGlobal = YES;
+ const BOOL issueMemoryBarrier = YES;
+ if (UseGC)
+ return auto_zone_atomicCompareAndSwap(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, isGlobal, issueMemoryBarrier);
+ else
+ return OSAtomicCompareAndSwapPtrBarrier((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
+}
+
+BOOL objc_atomicCompareAndSwapInstanceVariable(id predicate, id replacement, volatile id *objectLocation) {
+ const BOOL isGlobal = NO;
+ const BOOL issueMemoryBarrier = NO;
+ if (UseGC)
+ return auto_zone_atomicCompareAndSwap(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, isGlobal, issueMemoryBarrier);
+ else
+ return OSAtomicCompareAndSwapPtr((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
+}
+
+BOOL objc_atomicCompareAndSwapInstanceVariableBarrier(id predicate, id replacement, volatile id *objectLocation) {
+ const BOOL isGlobal = NO;
+ const BOOL issueMemoryBarrier = YES;
+ if (UseGC)
+ return auto_zone_atomicCompareAndSwap(gc_zone, (void *)predicate, (void *)replacement, (void * volatile *)objectLocation, isGlobal, issueMemoryBarrier);
+ else
+ return OSAtomicCompareAndSwapPtrBarrier((void *)predicate, (void *)replacement, (void * volatile *)objectLocation);
+}
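+
+// Illustrative usage (added comment, not part of the original source): lock-free,
+// one-time publication of a shared object, using the global/barrier variant so
+// libauto registers the root when GC is on:
+//
+//     static id sharedThing;   // hypothetical global
+//     if (!objc_atomicCompareAndSwapGlobalBarrier(nil, candidate, &sharedThing)) {
+//         // another thread won the race; use sharedThing instead of candidate
+//     }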
+
+
+/***********************************************************************
+* Weak ivar support
+**********************************************************************/
+
+id objc_read_weak_gc(id *location) {
+ id result = *location;
+ if (result) {
+ result = (id)auto_read_weak_reference(gc_zone, (void **)location);
+ }
+ return result;
+}
+
+id objc_read_weak_non_gc(id *location) {
+ return *location;
+}
+
+id objc_assign_weak_gc(id value, id *location) {
+ auto_assign_weak_reference(gc_zone, value, (const void **)location, nil);
+ return value;
+}
+
+id objc_assign_weak_non_gc(id value, id *location) {
+ return (*location = value);
+}
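+
+// Illustrative mapping (added comment, not part of the original source): under
+// -fobjc-gc the compiler routes __weak accesses through these entry points, roughly:
+//
+//     __weak id cache = obj;   // becomes objc_assign_weak(obj, &cache)
+//     id strong = cache;       // becomes objc_read_weak(&cache); nil once collected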
+
+
+void gc_fixup_weakreferences(id newObject, id oldObject) {
+ // fix up weak references if any.
+ const unsigned char *weakLayout = (const unsigned char *)class_getWeakIvarLayout(newObject->ISA());
+ if (weakLayout) {
+ void **newPtr = (void **)newObject, **oldPtr = (void **)oldObject;
+ unsigned char byte;
+ while ((byte = *weakLayout++)) {
+ unsigned skips = (byte >> 4);
+ unsigned weaks = (byte & 0x0F);
+ newPtr += skips, oldPtr += skips;
+ while (weaks--) {
+ *newPtr = nil;
+ auto_assign_weak_reference(gc_zone, auto_read_weak_reference(gc_zone, oldPtr), (const void **)newPtr, nil);
+ ++newPtr, ++oldPtr;
+ }
+ }
+ }
+}
+
+
+/***********************************************************************
+* dyld resolver functions for basic GC write barriers
+* dyld calls the resolver function to bind the symbol.
+* We return the GC or non-GC variant as appropriate.
+**********************************************************************/
+
+#define GC_RESOLVER(name) \
+ OBJC_EXPORT void *name##_resolver(void) __asm__("_" #name); \
+ void *name##_resolver(void) \
+ { \
+ __asm__(".symbol_resolver _" #name); \
+ if (UseGC) return (void*)name##_gc; \
+ else return (void*)name##_non_gc; \
+ }
+
+GC_RESOLVER(objc_assign_ivar)
+GC_RESOLVER(objc_assign_strongCast)
+GC_RESOLVER(objc_assign_global)
+GC_RESOLVER(objc_assign_threadlocal)
+GC_RESOLVER(objc_read_weak)
+GC_RESOLVER(objc_assign_weak)
+GC_RESOLVER(objc_getProperty)
+GC_RESOLVER(objc_setProperty)
+GC_RESOLVER(objc_getAssociatedObject)
+GC_RESOLVER(objc_setAssociatedObject)
+GC_RESOLVER(_object_addExternalReference)
+GC_RESOLVER(_object_readExternalReference)
+GC_RESOLVER(_object_removeExternalReference)
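+
+// For orientation (added comment, not part of the original source):
+// GC_RESOLVER(objc_assign_ivar), for example, expands to roughly
+//
+//     void *objc_assign_ivar_resolver(void) __asm__("_objc_assign_ivar");
+//     void *objc_assign_ivar_resolver(void) {
+//         __asm__(".symbol_resolver _objc_assign_ivar");
+//         if (UseGC) return (void *)objc_assign_ivar_gc;
+//         else return (void *)objc_assign_ivar_non_gc;
+//     }
+//
+// so dyld binds callers of objc_assign_ivar to the right variant once at bind time
+// instead of branching on UseGC at every store.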
+
+
+/***********************************************************************
+* Testing tools
+* Used to isolate resurrection of garbage objects during finalization.
+**********************************************************************/
+BOOL objc_is_finalized(void *ptr) {
+ if (ptr != nil && UseGC) {
+ return auto_zone_is_finalized(gc_zone, ptr);
+ }
+ return NO;
+}
+
+
+/***********************************************************************
+* Stack clearing.
+* Used by top-level thread loops to reduce false pointers from the stack.
+**********************************************************************/
+void objc_clear_stack(unsigned long options) {
+ if (!UseGC) return;
+ auto_zone_clear_stack(gc_zone, 0);
+}
+
+
+/***********************************************************************
+* Finalization support
+**********************************************************************/
+
+// Finalizer crash debugging
+static void *finalizing_object;
+
+// finalize a single object without fuss
+// When there are no main-thread-only classes this is used directly
+// Otherwise, it is used indirectly by smarter code that knows main-thread-affinity requirements
+static void finalizeOneObject(void *obj, void *ignored) {
+ id object = (id)obj;
+ finalizing_object = obj;
+
+ Class cls = object->ISA();
+ CRSetCrashLogMessage2(class_getName(cls));
+
+ // Call the -finalize method.
+ ((void(*)(id, SEL))objc_msgSend)(object, @selector(finalize));
+
+ // Call C++ destructors.
+ // This would be objc_destructInstance() but for performance.
+ if (cls->hasCxxDtor()) {
+ object_cxxDestruct(object);
+ }
+
+ finalizing_object = nil;
+ CRSetCrashLogMessage2(nil);
+}
+
+// finalize object only if it is a main-thread-only object.
+// Called only from the main thread.
+static void finalizeOneMainThreadOnlyObject(void *obj, void *arg) {
+ id object = (id)obj;
+ Class cls = object->ISA();
+ if (cls == nil) {
+ _objc_fatal("object with nil ISA passed to finalizeOneMainThreadOnlyObject: %p\n", obj);
+ }
+ if (cls->shouldFinalizeOnMainThread()) {
+ finalizeOneObject(obj, nil);
+ }
+}
+
+// finalize one object only if it is not a main-thread-only object
+// called from any other thread than the main thread
+// Important: if a main-thread-only object is passed, return that fact in the needsMain argument
+static void finalizeOneAnywhereObject(void *obj, void *needsMain) {
+ id object = (id)obj;
+ Class cls = object->ISA();
+ bool *needsMainThreadWork = (bool *)needsMain;
+ if (cls == nil) {
+ _objc_fatal("object with nil ISA passed to finalizeOneAnywhereObject: %p\n", obj);
+ }
+ if (!cls->shouldFinalizeOnMainThread()) {
+ finalizeOneObject(obj, nil);
+ }
+ else {
+ *needsMainThreadWork = true;
+ }
+}
+
+
+// Utility workhorse.
+// Set up the expensive @try block and ask the collector to hand the next object to
+// our finalizeAnObject function.
+// Track and return a boolean that records whether or not any main thread work is necessary.
+// (When we know that there are no main thread only objects then the boolean isn't even computed)
+static bool batchFinalize(auto_zone_t *zone,
+ auto_zone_foreach_object_t foreach,
+ auto_zone_cursor_t cursor,
+ size_t cursor_size,
+ void (*finalizeAnObject)(void *, void*))
+{
+#if !defined(NDEBUG) && !__OBJC2__
+ // debug: don't call try/catch before exception handlers are installed
+ objc_exception_functions_t table = {};
+ objc_exception_get_functions(&table);
+ assert(table.throw_exc);
+#endif
+
+ bool needsMainThreadWork = false;
+ for (;;) {
+ @try {
+ foreach(cursor, finalizeAnObject, &needsMainThreadWork);
+ // non-exceptional return means finalization is complete.
+ break;
+ }
+ @catch (id exception) {
+ // whoops, note exception, then restart at cursor's position
+ _objc_inform("GC: -finalize resulted in an exception (%p) being thrown, break on objc_exception_during_finalize_error to debug\n\t%s", exception, (const char*)[[exception description] UTF8String]);
+ objc_exception_during_finalize_error();
+ }
+ @catch (...) {
+ // whoops, note exception, then restart at cursor's position
+ _objc_inform("GC: -finalize resulted in an exception being thrown, break on objc_exception_during_finalize_error to debug");
+ objc_exception_during_finalize_error();
+ }
+ }
+ return needsMainThreadWork;
+}
+
+// Called on the main thread only.
+// Pick up work from the global queue.
+// Called parasitically by anyone requesting a collection, and called explicitly
+// when there is known to be main-thread-only finalization work.
+// In both cases we are on the main thread.
+// Guards against recursion by something called from a finalizer.
+static void batchFinalizeOnMainThread() {
+ pthread_mutex_lock(&MainThreadWorkQ.mutex);
+ if (!MainThreadWorkQ.head || MainThreadWorkQ.head->started) {
+ // No work or we're already here
+ pthread_mutex_unlock(&MainThreadWorkQ.mutex);
+ return;
+ }
+ while (MainThreadWorkQ.head) {
+ BatchFinalizeBlock_t *bfb = MainThreadWorkQ.head;
+ bfb->started = YES;
+ pthread_mutex_unlock(&MainThreadWorkQ.mutex);
+
+ batchFinalize(gc_zone, bfb->foreach, bfb->cursor, bfb->cursor_size, finalizeOneMainThreadOnlyObject);
+ // signal the collector thread(s) that finalization has finished.
+ pthread_mutex_lock(&MainThreadWorkQ.mutex);
+ bfb->finished = YES;
+ pthread_cond_broadcast(&MainThreadWorkQ.condition);
+ MainThreadWorkQ.head = bfb->next;
+ }
+ MainThreadWorkQ.tail = nil;
+ pthread_mutex_unlock(&MainThreadWorkQ.mutex);
+}
+
+
+// Knowing that we possibly have main thread only work to do, first process everything
+// that is not main-thread-only. If we discover main thread only work, queue a work block
+// to the main thread that will do just the main thread only work. Wait for it.
+// Called from a non main thread.
+static void batchFinalizeOnTwoThreads(auto_zone_t *zone,
+ auto_zone_foreach_object_t foreach,
+ auto_zone_cursor_t cursor,
+ size_t cursor_size)
+{
+ // First, let's get rid of everything we can on this thread, then ask the main thread to help if needed
+ char cursor_copy[cursor_size];
+ memcpy(cursor_copy, cursor, cursor_size);
+ bool needsMainThreadFinalization = batchFinalize(zone, foreach, (auto_zone_cursor_t)cursor_copy, cursor_size, finalizeOneAnywhereObject);
+
+ if (! needsMainThreadFinalization)
+ return; // no help needed
+
+ // Set up the control block. Either our dispatch_async ping of the main queue will get to it, or
+ // an objc_collect(if_needed) on the main thread will get to it. Either way, this block will be processed on the main thread.
+ BatchFinalizeBlock_t bfb;
+ bfb.foreach = foreach;
+ bfb.cursor = cursor;
+ bfb.cursor_size = cursor_size;
+ bfb.started = NO;
+ bfb.finished = NO;
+ bfb.next = nil;
+ pthread_mutex_lock(&MainThreadWorkQ.mutex);
+ if (MainThreadWorkQ.tail) {
+
+ // link to end so that ordering of finalization is preserved.
+ MainThreadWorkQ.tail->next = &bfb;
+ MainThreadWorkQ.tail = &bfb;
+ }
+ else {
+ MainThreadWorkQ.head = &bfb;
+ MainThreadWorkQ.tail = &bfb;
+ }
+ pthread_mutex_unlock(&MainThreadWorkQ.mutex);
+
+ //printf("----->asking main thread to finalize\n");
+ dispatch_async(dispatch_get_main_queue(), ^{ batchFinalizeOnMainThread(); });
+
+ // wait for the main thread to finish finalizing instances of classes marked CLS_FINALIZE_ON_MAIN_THREAD.
+ pthread_mutex_lock(&MainThreadWorkQ.mutex);
+ while (!bfb.finished) {
+ // the main thread might be blocked waiting for a synchronous collection to complete, so wake it here
+ pthread_cond_signal(&MainThreadWorkQ.condition);
+ pthread_cond_wait(&MainThreadWorkQ.condition, &MainThreadWorkQ.mutex);
+ }
+ pthread_mutex_unlock(&MainThreadWorkQ.mutex);
+ //printf("<------ main thread finalize done\n");
+
+}
+
+
+
+// The collector calls this with garbage ready.
+// Thread-local collectors do, too, so this needs to be thread-safe.
+static void BatchInvalidate(auto_zone_t *zone,
+ auto_zone_foreach_object_t foreach,
+ auto_zone_cursor_t cursor,
+ size_t cursor_size)
+{
+ if (pthread_main_np() || !WantsMainThreadFinalization) {
+ // Collect all objects. We're either pre-multithreaded on main thread or we're on the collector thread
+ // but no main-thread-only objects have been allocated.
+ batchFinalize(zone, foreach, cursor, cursor_size, finalizeOneObject);
+ }
+ else {
+ // We're on the dedicated thread. Collect some on main thread, the rest here.
+ batchFinalizeOnTwoThreads(zone, foreach, cursor, cursor_size);
+ }
+
+}
+
+
+/*
+ * Zombie support
+ * Collector calls into this system when it finds resurrected objects.
+ * This keeps them pitifully alive and leaked, even if they reference garbage.
+ */
+
+// Idea: keep a side table mapping resurrected object pointers to their original Class, so we don't
+// need to smash anything. Alternatively, we could use associative references to track against a secondary
+// object with information about the resurrection, such as a stack crawl, etc.
+
+static Class _NSResurrectedObjectClass;
+static NXMapTable *_NSResurrectedObjectMap = nil;
+static pthread_mutex_t _NSResurrectedObjectLock = PTHREAD_MUTEX_INITIALIZER;
+
+static Class resurrectedObjectOriginalClass(id object) {
+ Class originalClass;
+ pthread_mutex_lock(&_NSResurrectedObjectLock);
+ originalClass = (Class) NXMapGet(_NSResurrectedObjectMap, object);
+ pthread_mutex_unlock(&_NSResurrectedObjectLock);
+ return originalClass;
+}
+
+static id _NSResurrectedObject_classMethod(id self, SEL selector) { return self; }
+
+static id _NSResurrectedObject_instanceMethod(id self, SEL name) {
+ _objc_inform("**resurrected** object %p of class %s being sent message '%s'\n", (void*)self, class_getName(resurrectedObjectOriginalClass(self)), sel_getName(name));
+ return self;
+}
+
+static void _NSResurrectedObject_finalize(id self, SEL _cmd) {
+ Class originalClass;
+ pthread_mutex_lock(&_NSResurrectedObjectLock);
+ originalClass = (Class) NXMapRemove(_NSResurrectedObjectMap, self);
+ pthread_mutex_unlock(&_NSResurrectedObjectLock);
+ if (originalClass) _objc_inform("**resurrected** object %p of class %s being finalized\n", (void*)self, class_getName(originalClass));
+ _objc_rootFinalize(self);
+}
+
+static BOOL _NSResurrectedObject_resolveInstanceMethod(id self, SEL _cmd, SEL name) {
+ class_addMethod((Class)self, name, (IMP)_NSResurrectedObject_instanceMethod, "@@:");
+ return YES;
+}
+
+static BOOL _NSResurrectedObject_resolveClassMethod(id self, SEL _cmd, SEL name) {
+ class_addMethod(self->ISA(), name, (IMP)_NSResurrectedObject_classMethod, "@@:");
+ return YES;
+}
+
+static void _NSResurrectedObject_initialize() {
+ _NSResurrectedObjectMap = NXCreateMapTable(NXPtrValueMapPrototype, 128);
+ _NSResurrectedObjectClass = objc_allocateClassPair(objc_getClass("NSObject"), "_NSResurrectedObject", 0);
+ class_addMethod(_NSResurrectedObjectClass, @selector(finalize), (IMP)_NSResurrectedObject_finalize, "v@:");
+ Class metaClass = _NSResurrectedObjectClass->ISA();
+ class_addMethod(metaClass, @selector(resolveInstanceMethod:), (IMP)_NSResurrectedObject_resolveInstanceMethod, "c@::");
+ class_addMethod(metaClass, @selector(resolveClassMethod:), (IMP)_NSResurrectedObject_resolveClassMethod, "c@::");
+ objc_registerClassPair(_NSResurrectedObjectClass);
+}
+
+static void resurrectZombie(auto_zone_t *zone, void *ptr) {
+ id object = (id) ptr;
+ Class cls = object->ISA();
+ if (cls != _NSResurrectedObjectClass) {
+ // remember the original class for this instance.
+ pthread_mutex_lock(&_NSResurrectedObjectLock);
+ NXMapInsert(_NSResurrectedObjectMap, ptr, cls);
+ pthread_mutex_unlock(&_NSResurrectedObjectLock);
+ object_setClass(object, _NSResurrectedObjectClass);
+ }
+}
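+
+// Note (added comment, not part of the original source): the flow above works as
+// follows. When the collector detects a resurrected object it calls resurrectZombie(),
+// which records the original class in _NSResurrectedObjectMap and swaps the object's
+// isa to _NSResurrectedObject. Later messages hit the resolve methods, which install
+// logging stubs, so the zombie complains rather than crashes; finalization logs once
+// more and drops the map entry.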
+
+/***********************************************************************
+* Pretty printing support
+* For development purposes.
+**********************************************************************/
+
+
+static char *name_for_address(auto_zone_t *zone, vm_address_t base, vm_address_t offset, int withRetainCount);
+
+static char* objc_name_for_address(auto_zone_t *zone, vm_address_t base, vm_address_t offset)
+{
+ return name_for_address(zone, base, offset, false);
+}
+
+static const char* objc_name_for_object(auto_zone_t *zone, void *object) {
+ Class cls = *(Class *)object;
+ if (!objc_isRegisteredClass(cls)) return "";
+ return class_getName(cls);
+}
+
+/***********************************************************************
+* Collection support
+**********************************************************************/
+
+static BOOL objc_isRegisteredClass(Class candidate);
+
+static const unsigned char *objc_layout_for_address(auto_zone_t *zone, void *address) {
+ id object = (id)address;
+ volatile void *clsptr = (void*)object->ISA();
+ Class cls = (Class)clsptr;
+ return objc_isRegisteredClass(cls) ? _object_getIvarLayout(cls, object) : nil;
+}
+
+static const unsigned char *objc_weak_layout_for_address(auto_zone_t *zone, void *address) {
+ id object = (id)address;
+ volatile void *clsptr = (void*)object->ISA();
+ Class cls = (Class)clsptr;
+ return objc_isRegisteredClass(cls) ? class_getWeakIvarLayout(cls) : nil;
+}
+
+void gc_register_datasegment(uintptr_t base, size_t size) {
+ auto_zone_register_datasegment(gc_zone, (void*)base, size);
+}
+
+void gc_unregister_datasegment(uintptr_t base, size_t size) {
+ auto_zone_unregister_datasegment(gc_zone, (void*)base, size);
+}
+
+#define countof(array) (sizeof(array) / sizeof(array[0]))
+
+
+/***********************************************************************
+* Initialization
+**********************************************************************/
+
+static void objc_will_grow(auto_zone_t *zone, auto_heap_growth_info_t info) {
+ if (auto_zone_is_collecting(gc_zone)) {
+ ;
+ }
+ else {
+ auto_zone_collect(gc_zone, AUTO_ZONE_COLLECT_COALESCE|AUTO_ZONE_COLLECT_RATIO_COLLECTION);
+ }
+}
+
+
+static auto_zone_t *gc_zone_init(void)
+{
+ auto_zone_t *result;
+ static int didOnce = 0;
+ if (!didOnce) {
+ didOnce = 1;
+
+ // initialize the batch finalization queue
+ MainThreadWorkQ.head = nil;
+ MainThreadWorkQ.tail = nil;
+ pthread_mutex_init(&MainThreadWorkQ.mutex, nil);
+ pthread_cond_init(&MainThreadWorkQ.condition, nil);
+ }
+
+ result = auto_zone_create("auto_zone");
+
+ auto_zone_disable_compaction(result);
+
+ auto_collection_control_t *control = auto_collection_parameters(result);
+
+ // set up the magic control parameters
+ control->batch_invalidate = BatchInvalidate;
+ control->will_grow = objc_will_grow;
+ control->resurrect = resurrectZombie;
+ control->layout_for_address = objc_layout_for_address;
+ control->weak_layout_for_address = objc_weak_layout_for_address;
+ control->name_for_address = objc_name_for_address;
+
+ if (control->version >= sizeof(auto_collection_control_t)) {
+ control->name_for_object = objc_name_for_object;
+ }
+
+ return result;
+}
+
+
+/* should be defined in /usr/local/include/libdispatch_private.h. */
+extern void (*dispatch_begin_thread_4GC)(void);
+extern void (*dispatch_end_thread_4GC)(void);
+
+static void objc_reapThreadLocalBlocks()
+{
+ if (UseGC) auto_zone_reap_all_local_blocks(gc_zone);
+}
+
+void objc_registerThreadWithCollector()
+{
+ if (UseGC) auto_zone_register_thread(gc_zone);
+}
+
+void objc_unregisterThreadWithCollector()
+{
+ if (UseGC) auto_zone_unregister_thread(gc_zone);
+}
+
+void objc_assertRegisteredThreadWithCollector()
+{
+ if (UseGC) auto_zone_assert_thread_registered(gc_zone);
+}
+
+// Always called by _objcInit, even if GC is off.
+void gc_init(BOOL wantsGC)
+{
+ assert(UseGC == -1);
+ UseGC = wantsGC;
+
+ if (PrintGC) {
+ _objc_inform("GC: is %s", wantsGC ? "ON" : "OFF");
+ }
+
+ if (UseGC) {
+ // Set up the GC zone
+ gc_zone = gc_zone_init();
+
+ // tell libdispatch to register its threads with the GC.
+ dispatch_begin_thread_4GC = objc_registerThreadWithCollector;
+ dispatch_end_thread_4GC = objc_reapThreadLocalBlocks;
+
+ // set up the registered classes list
+ registeredClassTableInit();
+
+ // tell Blocks to use collectable memory. CF will cook up the classes separately.
+ gc_block_init();
+
+ // Add GC state to crash log reports
+ _objc_inform_on_crash("garbage collection is ON");
+ }
+}
+
+
+// Called by NSObject +load to perform late GC setup
+// This work must wait until after all of libSystem initializes.
+void gc_init2(void)
+{
+ assert(UseGC);
+
+ // create the _NSResurrectedObject class used to track resurrections.
+ _NSResurrectedObject_initialize();
+
+ // tell libauto to set up its dispatch queues
+ auto_collect_multithreaded(gc_zone);
+}
+
+// Called by Foundation.
+// This function used to initialize NSObject stuff, but now does nothing.
+malloc_zone_t *objc_collect_init(int (*callback)(void) __unused)
+{
+ return (malloc_zone_t *)gc_zone;
+}
+
+/*
+ * Support routines for the Block implementation
+ */
+
+
+// The Block runtime now sometimes needs to allocate a Block that is an Object - namely
+// when it needs to have a finalizer which, for now, is only if there are C++ destructors
+// in the helper function. Hence the isObject parameter.
+// Under GC a -copy message should allocate a refcount 0 block, ergo the isOne parameter.
+static void *block_gc_alloc5(const unsigned long size, const bool isOne, const bool isObject) {
+ auto_memory_type_t type = isObject ? (AUTO_OBJECT|AUTO_MEMORY_SCANNED) : AUTO_MEMORY_SCANNED;
+ return auto_zone_allocate_object(gc_zone, size, type, isOne, false);
+}
+
+// The Blocks runtime keeps track of everything above 1 and so it only calls
+// up to the collector to tell it about the 0->1 transition and then the 1->0 transition
+static void block_gc_setHasRefcount(const void *block, const bool hasRefcount) {
+ if (hasRefcount)
+ auto_zone_retain(gc_zone, (void *)block);
+ else
+ auto_zone_release(gc_zone, (void *)block);
+}
+
+static void block_gc_memmove(void *dst, void *src, unsigned long size) {
+ auto_zone_write_barrier_memmove(gc_zone, dst, src, (size_t)size);
+}
+
+static void gc_block_init(void) {
+ _Block_use_GC(
+ block_gc_alloc5,
+ block_gc_setHasRefcount,
+ (void (*)(void *, void **))objc_assign_strongCast_gc,
+ (void (*)(const void *, void *))objc_assign_weak,
+ block_gc_memmove
+ );
+}
+
+
+/***********************************************************************
+* Track classes.
+* In addition to the global class hashtable (set) indexed by name, we
+* also keep one based purely by pointer when running under Garbage Collection.
+* This allows the background collector to safely race against objects recycled from TLC.
+* Specifically, the background collector can read the admin byte and see that
+* a thread-local object is an object, get scheduled out while the TLC recovers the
+* object and links it into its cache, and then read the isa field only to find
+* linkage info instead of a Class. By validating every isa field we read against
+* this table, we avoid acting on such stale pointers.
+**********************************************************************/
+
+// This is a self-contained hash table of all classes. The first two elements contain the (size-1) and count.
+static volatile Class *AllClasses = nil;
+
+#define SHIFT 3
+#define INITIALSIZE 512
+#define REMOVED ~0ul
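+
+// Note (added comment, not part of the original source): SHIFT is 3 because class
+// structures are at least 8-byte aligned, so the low three bits of a Class pointer
+// carry no information and are shifted out before masking. REMOVED (~0ul) is the
+// tombstone written by objc_removeRegisteredClass so that linear-probe chains stay
+// intact.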
+
+// Allocate the side table.
+static void registeredClassTableInit() {
+ assert(UseGC);
+ // allocate a collectable (refcount 0) zeroed hunk of unscanned memory
+ uintptr_t *table = (uintptr_t *)auto_zone_allocate_object(gc_zone, INITIALSIZE*sizeof(void *), AUTO_MEMORY_UNSCANNED, true, true);
+ // set initial capacity (as mask)
+ table[0] = INITIALSIZE - 1;
+ // set initial count
+ table[1] = 0;
+ AllClasses = (Class *)table;
+}
+
+// Verify that a particular pointer is to a class.
+// Safe from any thread anytime
+static BOOL objc_isRegisteredClass(Class candidate) {
+ assert(UseGC);
+ // nil is never a valid ISA.
+ if (candidate == nil) return NO;
+ // We don't care about a race with another thread adding a class to which we randomly might have a pointer
+ // Get local copy of classes so that we're immune from updates.
+ // We keep the size of the list as the first element so there is no race as the list & size get updated.
+ uintptr_t *allClasses = (uintptr_t *)AllClasses;
+ // Slot 0 always holds the table capacity as a mask (i.e. size - 1), where size is always a power of 2
+ // Slot 1 is count
+ uintptr_t slot = (((uintptr_t)candidate) >> SHIFT) & allClasses[0];
+ // avoid slot 0 and 1
+ if (slot < 2) slot = 2;
+ for(;;) {
+ long int slotValue = allClasses[slot];
+ if (slotValue == (long int)candidate) {
+ return YES;
+ }
+ if (slotValue == 0) {
+ return NO;
+ }
+ ++slot;
+ if (slot > allClasses[0])
+ slot = 2; // skip size, count
+ }
+}
+
+// Utility used when growing
+// Assumes lock held
+static void addClassHelper(uintptr_t *table, uintptr_t candidate) {
+ uintptr_t slot = (((long int)candidate) >> SHIFT) & table[0];
+ if (slot < 2) slot = 2;
+ for(;;) {
+ uintptr_t slotValue = table[slot];
+ if (slotValue == 0) {
+ table[slot] = candidate;
+ ++table[1];
+ return;
+ }
+ ++slot;
+ if (slot > table[0])
+ slot = 2; // skip size, count
+ }
+}
+
+// lock held by callers
+void objc_addRegisteredClass(Class candidate) {
+ if (!UseGC) return;
+ uintptr_t *table = (uintptr_t *)AllClasses;
+ // Slot 0 always holds the table capacity as a mask (i.e. size - 1), where size is always a power of 2
+ // Slot 1 is count - always non-zero
+ uintptr_t slot = (((long int)candidate) >> SHIFT) & table[0];
+ if (slot < 2) slot = 2;
+ for(;;) {
+ uintptr_t slotValue = table[slot];
+ assert(slotValue != (uintptr_t)candidate);
+ if (slotValue == REMOVED) {
+ table[slot] = (long)candidate;
+ return;
+ }
+ else if (slotValue == 0) {
+ table[slot] = (long)candidate;
+ if (2*++table[1] > table[0]) { // add to count; check if we cross 50% utilization
+ // grow
+ uintptr_t oldSize = table[0]+1;
+ uintptr_t *newTable = (uintptr_t *)auto_zone_allocate_object(gc_zone, oldSize*2*sizeof(void *), AUTO_MEMORY_UNSCANNED, true, true);
+ uintptr_t i;
+ newTable[0] = 2*oldSize - 1;
+ newTable[1] = 0;
+ for (i = 2; i < oldSize; ++i) {
+ if (table[i] && table[i] != REMOVED)
+ addClassHelper(newTable, table[i]);
+ }
+ AllClasses = (Class *)newTable;
+ // let the old table be collected when other threads are no longer reading it.
+ auto_zone_release(gc_zone, (void *)table);
+ }
+ return;
+ }
+ ++slot;
+ if (slot > table[0])
+ slot = 2; // skip size, count
+ }
+}
+
+// lock held by callers
+void objc_removeRegisteredClass(Class candidate) {
+ if (!UseGC) return;
+ uintptr_t *table = (uintptr_t *)AllClasses;
+ // Slot 0 always holds the table capacity as a mask (i.e. size - 1), where size is always a power of 2
+ // Slot 1 is count - always non-zero
+ uintptr_t slot = (((uintptr_t)candidate) >> SHIFT) & table[0];
+ if (slot < 2) slot = 2;
+ for(;;) {
+ uintptr_t slotValue = table[slot];
+ if (slotValue == (uintptr_t)candidate) {
+ table[slot] = REMOVED; // if next slot == 0 we could set to 0 here and decr count
+ return;
+ }
+ assert(slotValue != 0);
+ ++slot;
+ if (slot > table[0])
+ slot = 2; // skip size, count
+ }
+}
+
+
+/***********************************************************************
+* Debugging - support for smart printouts when errors occur
+**********************************************************************/
+
+
+static malloc_zone_t *objc_debug_zone(void)
+{
+ static malloc_zone_t *z = nil;
+ if (!z) {
+ z = malloc_create_zone(PAGE_SIZE, 0);
+ malloc_set_zone_name(z, "objc-auto debug");
+ }
+ return z;
+}
+
+static char *_malloc_append_unsigned(uintptr_t value, unsigned base, char *head) {
+ if (!value) {
+ head[0] = '0';
+ } else {
+ if (value >= base) head = _malloc_append_unsigned(value / base, base, head);
+ value = value % base;
+ head[0] = (value < 10) ? '0' + value : 'a' + value - 10;
+ }
+ return head+1;
+}
+
+static void strlcati(char *str, uintptr_t value, size_t bufSize)
+{
+ if ( (bufSize - strlen(str)) < 30)
+ return;
+ str = _malloc_append_unsigned(value, 10, str + strlen(str));
+ str[0] = '\0';
+}
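+
+/* Example of how the two helpers above compose (hypothetical values):
+ *
+ *   char buf[64] = "refcount=";
+ *   strlcati(buf, 12345, sizeof(buf));   // buf is now "refcount=12345"
+ *
+ * strlcati silently gives up when fewer than 30 bytes remain, so callers may
+ * append unconditionally without overflowing the buffer.
+ */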
+
+
+static Ivar ivar_for_offset(Class cls, vm_address_t offset)
+{
+ unsigned i;
+ vm_address_t ivar_offset;
+ Ivar super_ivar, result;
+ Ivar *ivars;
+ unsigned int ivar_count;
+
+ if (!cls) return nil;
+
+ // scan base classes FIRST
+ super_ivar = ivar_for_offset(cls->superclass, offset);
+ // result is best-effort; our ivars may be closer
+
+ ivars = class_copyIvarList(cls, &ivar_count);
+ if (ivars && ivar_count) {
+ // Try our first ivar. If it's too big, use super's best ivar.
+ // (lose 64-bit precision)
+ ivar_offset = ivar_getOffset(ivars[0]);
+ if (ivar_offset > offset) result = super_ivar;
+ else if (ivar_offset == offset) result = ivars[0];
+ else result = nil;
+
+ // Try our other ivars. If any is too big, use the previous.
+ for (i = 1; result == nil && i < ivar_count; i++) {
+ ivar_offset = ivar_getOffset(ivars[i]);
+ if (ivar_offset == offset) {
+ result = ivars[i];
+ } else if (ivar_offset > offset) {
+ result = ivars[i - 1];
+ }
+ }
+
+ // Found nothing. Return our last ivar.
+ if (result == nil)
+ result = ivars[ivar_count - 1];
+
+ free(ivars);
+ } else {
+ result = super_ivar;
+ }
+
+ return result;
+}
+
+static void append_ivar_at_offset(char *buf, Class cls, vm_address_t offset, size_t bufSize)
+{
+ Ivar ivar = nil;
+
+ if (offset == 0) return; // don't bother with isa
+ if (offset >= class_getInstanceSize(cls)) {
+ strlcat(buf, ".<extra>+", bufSize);
+ strlcati(buf, offset, bufSize);
+ return;
+ }
+
+ ivar = ivar_for_offset(cls, offset);
+ if (!ivar) {
+ strlcat(buf, ".<?>", bufSize);
+ return;
+ }
+
+ // fixme doesn't handle structs etc.
+
+ strlcat(buf, ".", bufSize);
+ const char *ivar_name = ivar_getName(ivar);
+ if (ivar_name) strlcat(buf, ivar_name, bufSize);
+ else strlcat(buf, "<anonymous ivar>", bufSize);
+
+ offset -= ivar_getOffset(ivar);
+ if (offset > 0) {
+ strlcat(buf, "+", bufSize);
+ strlcati(buf, offset, bufSize);
+ }
+}
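+
+/* For example (hypothetical ivar): an offset landing 8 bytes past an ivar
+ * named "_data" appends "._data+8" to buf; an offset beyond the instance size
+ * appends ".<extra>+<offset>" instead.
+ */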
+
+
+static const char *cf_class_for_object(void *cfobj)
+{
+ // ick - we don't link against CF anymore
+
+ struct fake_cfclass {
+ size_t version;
+ const char *className;
+ // don't care about the rest
+ };
+
+ const char *result;
+ void *dlh;
+ size_t (*CFGetTypeID)(void *);
+ fake_cfclass * (*_CFRuntimeGetClassWithTypeID)(size_t);
+
+ result = "anonymous_NSCFType";
+
+ dlh = dlopen("/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation", RTLD_LAZY | RTLD_NOLOAD | RTLD_FIRST);
+ if (!dlh) return result;
+
+ CFGetTypeID = (size_t(*)(void*)) dlsym(dlh, "CFGetTypeID");
+ _CFRuntimeGetClassWithTypeID = (fake_cfclass*(*)(size_t)) dlsym(dlh, "_CFRuntimeGetClassWithTypeID");
+
+ if (CFGetTypeID && _CFRuntimeGetClassWithTypeID) {
+ size_t cfid = (*CFGetTypeID)(cfobj);
+ result = (*_CFRuntimeGetClassWithTypeID)(cfid)->className;
+ }
+
+ dlclose(dlh);
+ return result;
+}
+
+
+static char *name_for_address(auto_zone_t *zone, vm_address_t base, vm_address_t offset, int withRetainCount)
+{
+#define APPEND_SIZE(s) \
+ strlcat(buf, "[", sizeof(buf)); \
+ strlcati(buf, s, sizeof(buf)); \
+ strlcat(buf, "]", sizeof(buf));
+
+ char buf[1500];
+ char *result;
+
+ buf[0] = '\0';
+
+ size_t size =
+ auto_zone_size(zone, (void *)base);
+ auto_memory_type_t type = size ?
+ auto_zone_get_layout_type(zone, (void *)base) : AUTO_TYPE_UNKNOWN;
+ unsigned int refcount = size ?
+ auto_zone_retain_count(zone, (void *)base) : 0;
+
+ switch (type) {
+ case AUTO_OBJECT_SCANNED:
+ case AUTO_OBJECT_UNSCANNED:
+ case AUTO_OBJECT_ALL_POINTERS: {
+ const char *class_name = object_getClassName((id)base);
+ if ((0 == strcmp(class_name, "__NSCFType")) || (0 == strcmp(class_name, "NSCFType"))) {
+ strlcat(buf, cf_class_for_object((void *)base), sizeof(buf));
+ } else {
+ strlcat(buf, class_name, sizeof(buf));
+ }
+ if (offset) {
+ append_ivar_at_offset(buf, ((id)base)->ISA(), offset, sizeof(buf));
+ }
+ APPEND_SIZE(size);
+ break;
+ }
+ case AUTO_MEMORY_SCANNED:
+ strlcat(buf, "{conservative-block}", sizeof(buf));
+ APPEND_SIZE(size);
+ break;
+ case AUTO_MEMORY_UNSCANNED:
+ strlcat(buf, "{no-pointers-block}", sizeof(buf));
+ APPEND_SIZE(size);
+ break;
+ case AUTO_MEMORY_ALL_POINTERS:
+ strlcat(buf, "{all-pointers-block}", sizeof(buf));
+ APPEND_SIZE(size);
+ break;
+ case AUTO_MEMORY_ALL_WEAK_POINTERS:
+ strlcat(buf, "{all-weak-pointers-block}", sizeof(buf));
+ APPEND_SIZE(size);
+ break;
+ case AUTO_TYPE_UNKNOWN:
+ strlcat(buf, "{uncollectable-memory}", sizeof(buf));
+ break;
+ default:
+ strlcat(buf, "{unknown-memory-type}", sizeof(buf));
+ }
+
+ if (withRetainCount && refcount > 0) {
+ strlcat(buf, " [[refcount=", sizeof(buf));
+ strlcati(buf, refcount, sizeof(buf));
+ strlcat(buf, "]]", sizeof(buf));
+ }
+
+ size_t len = 1 + strlen(buf);
+ result = (char *)malloc_zone_malloc(objc_debug_zone(), len);
+ memcpy(result, buf, len);
+ return result;
+
+#undef APPEND_SIZE
+}
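+
+/* Putting the pieces above together, the returned string looks like
+ * (hypothetical values):
+ *
+ *   "MyDocument._window+8[96] [[refcount=1]]"
+ *
+ * i.e. class name, best-matching ivar for the offset, block size in brackets,
+ * and the retain count when requested and non-zero.
+ */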
+
+
+
+
+
+#endif
vm_address_t dataAddress;
// make sure certain assumptions are met
- assert(PAGE_SIZE == 4096);
assert(sizeof(TrampolineBlockPagePair) == 2*PAGE_SIZE);
assert(_slotSize() == 8);
assert(_headerSize() >= TRAMPOLINE_PAGE_PAIR_HEADER_SIZE);
TrampolineBlockPagePair *headPagePair = headPagePairs[aMode];
if (headPagePair) {
- assert(headPagePair->nextAvailablePage == NULL);
+ assert(headPagePair->nextAvailablePage == nil);
}
int i;
kern_return_t result = KERN_FAILURE;
for(i = 0; i < 5; i++) {
- result = vm_allocate(mach_task_self(), &dataAddress, PAGE_SIZE * 2, TRUE);
+ result = vm_allocate(mach_task_self(), &dataAddress, PAGE_SIZE * 2,
+ TRUE | VM_MAKE_TAG(VM_MEMORY_FOUNDATION));
if (result != KERN_SUCCESS) {
mach_error("vm_allocate failed", result);
- return NULL;
+ return nil;
}
vm_address_t codeAddress = dataAddress + PAGE_SIZE;
result = vm_deallocate(mach_task_self(), codeAddress, PAGE_SIZE);
if (result != KERN_SUCCESS) {
mach_error("vm_deallocate failed", result);
- return NULL;
+ return nil;
}
uintptr_t codePage;
result = vm_deallocate(mach_task_self(), dataAddress, PAGE_SIZE);
if (result != KERN_SUCCESS) {
mach_error("vm_deallocate for retry failed.", result);
- return NULL;
+ return nil;
}
} else
break;
}
if (result != KERN_SUCCESS)
- return NULL;
+ return nil;
TrampolineBlockPagePair *pagePair = (TrampolineBlockPagePair *) dataAddress;
pagePair->nextAvailable = _paddingSlotCount();
- pagePair->nextPagePair = NULL;
- pagePair->nextAvailablePage = NULL;
+ pagePair->nextPagePair = nil;
+ pagePair->nextAvailablePage = nil;
id *lastPageBlockPtr = _payloadAddressAtIndex(pagePair, _slotsPerPagePair() - 1);
*lastPageBlockPtr = (id)(uintptr_t) LAST_SLOT_MARKER;
}
}
- return NULL;
+ return nil;
}
// `block` must already have been copied
uint32_t index = pagePair->nextAvailable;
id *payloadAddress = _payloadAddressAtIndex(pagePair, index);
- assert((index < 1024) || (index == LAST_SLOT_MARKER));
+ assert((index < _slotsPerPagePair()) || (index == LAST_SLOT_MARKER));
uint32_t nextAvailableIndex = (uint32_t) *((uintptr_t *) payloadAddress);
if (nextAvailableIndex == 0)
iteratorPair = iteratorPair->nextAvailablePage;
if (iteratorPair) {
iteratorPair->nextAvailablePage = pagePair->nextAvailablePage;
- pagePair->nextAvailablePage = NULL;
+ pagePair->nextAvailablePage = nil;
}
} else {
// empty slot at index contains pointer to next available index
uint32_t index;
TrampolineBlockPagePair *pagePair;
- if (!anImp) return NULL;
+ if (!anImp) return nil;
_lock();
- pagePair = _pagePairAndIndexContainingIMP(anImp, &index, NULL);
+ pagePair = _pagePairAndIndexContainingIMP(anImp, &index, nil);
if (!pagePair) {
_unlock();
- return NULL;
+ return nil;
}
id potentialBlock = *_payloadAddressAtIndex(pagePair, index);
if ((uintptr_t) potentialBlock == (uintptr_t) LAST_SLOT_MARKER) {
_unlock();
- return NULL;
+ return nil;
}
if ((uintptr_t) potentialBlock < (uintptr_t) _slotsPerPagePair()) {
_unlock();
- return NULL;
+ return nil;
}
_unlock();
while(pagePairIterator->nextAvailablePage && (pagePairIterator->nextAvailablePage != pagePair))
pagePairIterator = pagePairIterator->nextAvailablePage;
- if (! pagePairIterator->nextAvailablePage) { // if iteration stopped because nextAvail was NULL
+ if (! pagePairIterator->nextAvailablePage) { // if iteration stopped because nextAvail was nil
// add to end of list.
pagePairIterator->nextAvailablePage = pagePair;
- pagePair->nextAvailablePage = NULL;
+ pagePair->nextAvailablePage = nil;
}
_unlock();
--- /dev/null
+/*
+ * Copyright (c) 2012 Apple Inc. All Rights Reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#ifndef _OBJC_CACHE_OLD_H
+#define _OBJC_CACHE_OLD_H
+
+#include "objc-private.h"
+
+__BEGIN_DECLS
+
+extern IMP _cache_getImp(Class cls, SEL sel);
+extern Method _cache_getMethod(Class cls, SEL sel, IMP objc_msgForward_internal_imp);
+
+extern void flush_cache(Class cls);
+extern BOOL _cache_fill(Class cls, Method meth, SEL sel);
+extern void _cache_addForwardEntry(Class cls, SEL sel);
+extern IMP _cache_addIgnoredEntry(Class cls, SEL sel);
+extern void _cache_free(Cache cache);
+extern void _cache_collect(bool collectALot);
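+
+/* Typical fill-on-miss flow (a sketch; in the runtime the callers are
+ * _class_lookupMethodAndLoadCache and class_respondsToMethod):
+ *
+ *   IMP imp = _cache_getImp(cls, sel);
+ *   if (!imp) {
+ *       Method meth = ... slow method-list search ...;
+ *       if (meth) _cache_fill(cls, meth, sel);
+ *       else _cache_addForwardEntry(cls, sel);
+ *   }
+ */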
+
+__END_DECLS
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+/***********************************************************************
+* objc-cache-old.mm
+* Method cache management
+* Cache flushing
+* Cache garbage collection
+* Cache instrumentation
+* Dedicated allocator for large caches
+**********************************************************************/
+
+
+/***********************************************************************
+ * Method cache locking (GrP 2001-1-14)
+ *
+ * For speed, objc_msgSend does not acquire any locks when it reads
+ * method caches. Instead, all cache changes are performed so that any
+ * objc_msgSend running concurrently with the cache mutator will not
+ * crash or hang or get an incorrect result from the cache.
+ *
+ * When cache memory becomes unused (e.g. the old cache after cache
+ * expansion), it is not immediately freed, because a concurrent
+ * objc_msgSend could still be using it. Instead, the memory is
+ * disconnected from the data structures and placed on a garbage list.
+ * The memory is now only accessible to instances of objc_msgSend that
+ * were running when the memory was disconnected; any further calls to
+ * objc_msgSend will not see the garbage memory because the other data
+ * structures don't point to it anymore. The collecting_in_critical
+ * function checks the PC of all threads and returns FALSE when all threads
+ * are found to be outside objc_msgSend. This means any call to objc_msgSend
+ * that could have had access to the garbage has finished or moved past the
+ * cache lookup stage, so it is safe to free the memory.
+ *
+ * All functions that modify cache data or structures must acquire the
+ * cacheUpdateLock to prevent interference from concurrent modifications.
+ * The function that frees cache garbage must acquire the cacheUpdateLock
+ * and use collecting_in_critical() to flush out cache readers.
+ * The cacheUpdateLock is also used to protect the custom allocator used
+ * for large method cache blocks.
+ *
+ * Cache readers (PC-checked by collecting_in_critical())
+ * objc_msgSend*
+ * _cache_getImp
+ * _cache_getMethod
+ *
+ * Cache writers (hold cacheUpdateLock while reading or writing; not PC-checked)
+ * _cache_fill (acquires lock)
+ * _cache_expand (only called from cache_fill)
+ * _cache_create (only called from cache_expand)
+ * bcopy (only called from instrumented cache_expand)
+ * flush_caches (acquires lock)
+ * _cache_flush (only called from cache_fill and flush_caches)
+ * _cache_collect_free (only called from cache_expand and cache_flush)
+ *
+ * UNPROTECTED cache readers (NOT thread-safe; used for debug info only)
+ * _cache_print
+ * _class_printMethodCaches
+ * _class_printDuplicateCacheEntries
+ * _class_printMethodCacheStatistics
+ *
+ * _class_lookupMethodAndLoadCache is a special case. It may read a
+ * method triplet out of one cache and store it in another cache. This
+ * is unsafe if the method triplet is a forward:: entry, because the
+ * triplet itself could be freed unless _class_lookupMethodAndLoadCache
+ * were PC-checked or used a lock. Additionally, storing the method
+ * triplet in both caches would result in double-freeing if both caches
+ * were flushed or expanded. The solution is for _cache_getMethod to
+ * ignore all entries whose implementation is _objc_msgForward_impcache,
+ * so _class_lookupMethodAndLoadCache cannot look at a forward:: entry
+ * unsafely or place it in multiple caches.
+ ***********************************************************************/
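+
+/* Example timeline (an illustrative sketch of the scheme described above):
+ *   1. Thread A enters objc_msgSend and begins probing cls->cache.
+ *   2. Thread B, holding cacheUpdateLock, expands the cache; the old block is
+ *      disconnected from the class and placed on the garbage list.
+ *   3. Thread B calls _cache_collect(); _collecting_in_critical() finds A's PC
+ *      inside an objc_entryPoints/objc_exitPoints range, so nothing is freed.
+ *   4. Thread A finishes its lookup and leaves objc_msgSend.
+ *   5. A later _cache_collect() finds no thread PCs in those ranges and frees
+ *      everything on the garbage list.
+ */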
+
+#if !__OBJC2__
+
+#include "objc-private.h"
+#include "objc-cache-old.h"
+#include "hashtable2.h"
+
+typedef struct {
+ SEL name; // same layout as struct old_method
+ void *unused;
+ IMP imp; // same layout as struct old_method
+} cache_entry;
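+
+/* cache_entry deliberately mirrors the layout of struct old_method (selector,
+ * type string, IMP), so a Method can sit in a bucket and be read back without
+ * copying; _cache_fill below simply casts: entry = (cache_entry *)smt.
+ * Only forward:: and ignored-selector entries are allocated as genuine
+ * cache_entry structs.
+ */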
+
+
+/* When _class_slow_grow is non-zero, any given cache is actually grown
+ * only on the odd-numbered times it becomes full; on the even-numbered
+ * times, it is simply emptied and re-used. When this flag is zero,
+ * caches are grown every time. */
+static const int _class_slow_grow = 1;
+
+/* For min cache size: clear_cache=1, slow_grow=1
+ For max cache size: clear_cache=0, slow_grow=0 */
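+
+/* Worked example with _class_slow_grow set and a cache that keeps filling up:
+ * the cache is created with 4 slots, is emptied in place the first time it
+ * fills, doubles to 8 slots the second time, is emptied in place the third
+ * time, doubles to 16 the fourth time, and so on. With the flag clear it
+ * would simply go 4 -> 8 -> 16 -> 32. */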
+
+/* Initial cache bucket count. INIT_CACHE_SIZE must be a power of two. */
+enum {
+ INIT_CACHE_SIZE_LOG2 = 2,
+ INIT_CACHE_SIZE = (1 << INIT_CACHE_SIZE_LOG2)
+};
+
+
+/* Amount of space required for `count` hash table buckets, knowing that
+ * one entry is embedded in the cache structure itself. */
+#define TABLE_SIZE(count) ((count - 1) * sizeof(cache_entry *))
+
+
+#if !TARGET_OS_WIN32
+# define CACHE_ALLOCATOR
+#endif
+
+/* Custom cache allocator parameters.
+ * CACHE_REGION_SIZE must be a multiple of CACHE_QUANTUM. */
+#define CACHE_ALLOCATOR_MIN 512
+#define CACHE_QUANTUM (CACHE_ALLOCATOR_MIN+sizeof(struct objc_cache)-sizeof(cache_entry*))
+#define CACHE_REGION_SIZE ((128*1024 / CACHE_QUANTUM) * CACHE_QUANTUM)
+// #define CACHE_REGION_SIZE ((256*1024 / CACHE_QUANTUM) * CACHE_QUANTUM)
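+
+/* For reference: with the usual struct objc_cache layout (two unsigned ints
+ * followed by one embedded bucket pointer), CACHE_QUANTUM works out to
+ * 512 + 16 - 8 = 520 bytes on LP64 (and 512 + 12 - 4 = 520 on ILP32), the
+ * "quantum of 520" mentioned with the allocator below, and CACHE_REGION_SIZE
+ * is (131072 / 520) * 520 = 131040 bytes. */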
+
+static uintptr_t cache_allocator_mask_for_size(size_t size)
+{
+ return (size - sizeof(struct objc_cache)) / sizeof(cache_entry *);
+}
+
+static size_t cache_allocator_size_for_mask(uintptr_t mask)
+{
+ size_t requested = sizeof(struct objc_cache) + TABLE_SIZE(mask+1);
+ size_t actual = CACHE_QUANTUM;
+ while (actual < requested) actual += CACHE_QUANTUM;
+ return actual;
+}
+
+
+/* Cache instrumentation data. Immediately follows the cache block itself. */
+#ifdef OBJC_INSTRUMENTED
+typedef struct
+{
+ unsigned int hitCount; // cache lookup success tally
+ unsigned int hitProbes; // sum entries checked to hit
+ unsigned int maxHitProbes; // max entries checked to hit
+ unsigned int missCount; // cache lookup no-find tally
+ unsigned int missProbes; // sum entries checked to miss
+ unsigned int maxMissProbes; // max entries checked to miss
+ unsigned int flushCount; // cache flush tally
+ unsigned int flushedEntries; // sum cache entries flushed
+ unsigned int maxFlushedEntries; // max cache entries flushed
+} CacheInstrumentation;
+
+#define CACHE_INSTRUMENTATION(cache) (CacheInstrumentation *) &cache->buckets[cache->mask + 1];
+#endif
+
+/* Cache filling and flushing instrumentation */
+
+static int totalCacheFills = 0;
+
+#ifdef OBJC_INSTRUMENTED
+unsigned int LinearFlushCachesCount = 0;
+unsigned int LinearFlushCachesVisitedCount = 0;
+unsigned int MaxLinearFlushCachesVisitedCount = 0;
+unsigned int NonlinearFlushCachesCount = 0;
+unsigned int NonlinearFlushCachesClassCount = 0;
+unsigned int NonlinearFlushCachesVisitedCount = 0;
+unsigned int MaxNonlinearFlushCachesVisitedCount = 0;
+unsigned int IdealFlushCachesCount = 0;
+unsigned int MaxIdealFlushCachesCount = 0;
+#endif
+
+
+/***********************************************************************
+* A static empty cache. All classes initially point at this cache.
+* When the first message is sent it misses in the cache, and when
+* the cache is grown it checks for this case and uses malloc rather
+* than realloc. This avoids the need to check for NULL caches in the
+* messenger.
+***********************************************************************/
+
+struct objc_cache _objc_empty_cache =
+{
+ 0, // mask
+ 0, // occupied
+ { NULL } // buckets
+};
+#ifdef OBJC_INSTRUMENTED
+CacheInstrumentation emptyCacheInstrumentation = {0};
+#endif
+
+
+/* Local prototypes */
+
+static BOOL _cache_isEmpty(Cache cache);
+static Cache _cache_malloc(uintptr_t slotCount);
+static Cache _cache_create(Class cls);
+static Cache _cache_expand(Class cls);
+
+static int _collecting_in_critical(void);
+static void _garbage_make_room(void);
+static void _cache_collect_free(void *data, size_t size);
+
+#if defined(CACHE_ALLOCATOR)
+static BOOL cache_allocator_is_block(void *block);
+static Cache cache_allocator_calloc(size_t size);
+static void cache_allocator_free(void *block);
+#endif
+
+/***********************************************************************
+* Cache statistics for OBJC_PRINT_CACHE_SETUP
+**********************************************************************/
+static unsigned int cache_counts[16];
+static size_t cache_allocations;
+static size_t cache_collections;
+static size_t cache_allocator_regions;
+
+static size_t log2u(size_t x)
+{
+ unsigned int log;
+
+ log = 0;
+ while (x >>= 1)
+ log += 1;
+
+ return log;
+}
+
+
+/***********************************************************************
+* _cache_isEmpty.
+* Returns YES if the given cache is some empty cache.
+* Empty caches should never be allocated on the heap.
+**********************************************************************/
+static BOOL _cache_isEmpty(Cache cache)
+{
+ return (cache == NULL || cache == (Cache)&_objc_empty_cache || cache->mask == 0);
+}
+
+
+/***********************************************************************
+* _cache_malloc.
+*
+* Called from _cache_create() and cache_expand()
+* Cache locks: cacheUpdateLock must be held by the caller.
+**********************************************************************/
+static Cache _cache_malloc(uintptr_t slotCount)
+{
+ Cache new_cache;
+ size_t size;
+
+ mutex_assert_locked(&cacheUpdateLock);
+
+ // Allocate table (why not check for failure?)
+ size = sizeof(struct objc_cache) + TABLE_SIZE(slotCount);
+#if defined(OBJC_INSTRUMENTED)
+ // Custom cache allocator can't handle instrumentation.
+ size += sizeof(CacheInstrumentation);
+ new_cache = _calloc_internal(size, 1);
+ new_cache->mask = slotCount - 1;
+#elif !defined(CACHE_ALLOCATOR)
+ // fixme cache allocator implementation isn't 64-bit clean
+ new_cache = _calloc_internal(size, 1);
+ new_cache->mask = (unsigned int)(slotCount - 1);
+#else
+ if (size < CACHE_ALLOCATOR_MIN || UseInternalZone) {
+ new_cache = (Cache)_calloc_internal(size, 1);
+ new_cache->mask = slotCount - 1;
+ // occupied and buckets and instrumentation are all zero
+ } else {
+ new_cache = cache_allocator_calloc(size);
+ // mask is already set
+ // occupied and buckets and instrumentation are all zero
+ }
+#endif
+
+ if (PrintCaches) {
+ size_t bucket = log2u(slotCount);
+ if (bucket < sizeof(cache_counts) / sizeof(cache_counts[0])) {
+ cache_counts[bucket]++;
+ }
+ cache_allocations++;
+ }
+
+ return new_cache;
+}
+
+/***********************************************************************
+* _cache_free_block.
+*
+* Called from _cache_free() and _cache_collect_free().
+* block may be a cache or a forward:: entry.
+* If block is a cache, forward:: entries it points to will NOT be freed.
+* Cache locks: cacheUpdateLock must be held by the caller.
+**********************************************************************/
+static inline int isPowerOf2(unsigned long l) { return 1 == __builtin_popcountl(l); }
+static void _cache_free_block(void *block)
+{
+ mutex_assert_locked(&cacheUpdateLock);
+
+#if !TARGET_OS_WIN32
+ if (PrintCaches) {
+ Cache cache = (Cache)block;
+ size_t slotCount = cache->mask + 1;
+ if (isPowerOf2(slotCount)) {
+ size_t bucket = log2u(slotCount);
+ if (bucket < sizeof(cache_counts) / sizeof(cache_counts[0])) {
+ cache_counts[bucket]--;
+ }
+ }
+ }
+#endif
+
+#if defined(CACHE_ALLOCATOR)
+ if (cache_allocator_is_block(block)) {
+ cache_allocator_free(block);
+ } else
+#endif
+ {
+ free(block);
+ }
+}
+
+
+/***********************************************************************
+* _cache_free.
+*
+* Called from _objc_remove_classes_in_image().
+* forward:: entries in the cache ARE freed.
+* Cache locks: cacheUpdateLock must NOT be held by the caller.
+**********************************************************************/
+void _cache_free(Cache cache)
+{
+ unsigned int i;
+
+ mutex_lock(&cacheUpdateLock);
+
+ for (i = 0; i < cache->mask + 1; i++) {
+ cache_entry *entry = (cache_entry *)cache->buckets[i];
+ if (entry && entry->imp == _objc_msgForward_impcache) {
+ _cache_free_block(entry);
+ }
+ }
+
+ _cache_free_block(cache);
+
+ mutex_unlock(&cacheUpdateLock);
+}
+
+
+/***********************************************************************
+* _cache_create.
+*
+* Called from _cache_expand().
+* Cache locks: cacheUpdateLock must be held by the caller.
+**********************************************************************/
+static Cache _cache_create(Class cls)
+{
+ Cache new_cache;
+
+ mutex_assert_locked(&cacheUpdateLock);
+
+ // Allocate new cache block
+ new_cache = _cache_malloc(INIT_CACHE_SIZE);
+
+ // Install the cache
+ cls->cache = new_cache;
+
+ // Clear the grow flag so that we will re-use the current storage,
+ // rather than actually grow the cache, when expanding the cache
+ // for the first time
+ if (_class_slow_grow) {
+ cls->setShouldGrowCache(false);
+ }
+
+ // Return our creation
+ return new_cache;
+}
+
+
+/***********************************************************************
+* _cache_expand.
+*
+* Called from _cache_fill ()
+* Cache locks: cacheUpdateLock must be held by the caller.
+**********************************************************************/
+static Cache _cache_expand(Class cls)
+{
+ Cache old_cache;
+ Cache new_cache;
+ uintptr_t slotCount;
+ uintptr_t index;
+
+ mutex_assert_locked(&cacheUpdateLock);
+
+ // First growth goes from empty cache to a real one
+ old_cache = cls->cache;
+ if (_cache_isEmpty(old_cache))
+ return _cache_create (cls);
+
+ if (_class_slow_grow) {
+ // Cache grows every other time only.
+ if (cls->shouldGrowCache()) {
+ // Grow the cache this time. Don't grow next time.
+ cls->setShouldGrowCache(false);
+ }
+ else {
+ // Reuse the current cache storage this time. Do grow next time.
+ cls->setShouldGrowCache(true);
+
+ // Clear the valid-entry counter
+ old_cache->occupied = 0;
+
+ // Invalidate all the cache entries
+ for (index = 0; index < old_cache->mask + 1; index += 1)
+ {
+ // Remember what this entry was, so we can possibly
+ // deallocate it after the bucket has been invalidated
+ cache_entry *oldEntry = (cache_entry *)old_cache->buckets[index];
+
+ // Skip invalid entry
+ if (!oldEntry)
+ continue;
+
+ // Invalidate this entry
+ old_cache->buckets[index] = NULL;
+
+ // Deallocate "forward::" entry
+ if (oldEntry->imp == _objc_msgForward_impcache) {
+ _cache_collect_free (oldEntry, sizeof(cache_entry));
+ }
+ }
+
+ // Return the same old cache, freshly emptied
+ return old_cache;
+ }
+ }
+
+ // Double the cache size
+ slotCount = (old_cache->mask + 1) << 1;
+
+ new_cache = _cache_malloc(slotCount);
+
+#ifdef OBJC_INSTRUMENTED
+ // Propagate the instrumentation data
+ {
+ CacheInstrumentation *oldCacheData;
+ CacheInstrumentation *newCacheData;
+
+ oldCacheData = CACHE_INSTRUMENTATION(old_cache);
+ newCacheData = CACHE_INSTRUMENTATION(new_cache);
+ bcopy ((const char *)oldCacheData, (char *)newCacheData, sizeof(CacheInstrumentation));
+ }
+#endif
+
+ // Deallocate "forward::" entries from the old cache
+ for (index = 0; index < old_cache->mask + 1; index++) {
+ cache_entry *entry = (cache_entry *)old_cache->buckets[index];
+ if (entry && entry->imp == _objc_msgForward_impcache) {
+ _cache_collect_free (entry, sizeof(cache_entry));
+ }
+ }
+
+ // Install new cache
+ cls->cache = new_cache;
+
+ // Deallocate old cache, try freeing all the garbage
+ _cache_collect_free (old_cache, old_cache->mask * sizeof(cache_entry *));
+ _cache_collect(false);
+
+ return new_cache;
+}
+
+
+/***********************************************************************
+* _cache_fill. Add the specified method to the specified class' cache.
+* Returns NO if the cache entry wasn't added: cache was busy,
+* class is still being initialized, new entry is a duplicate.
+*
+* Called only from _class_lookupMethodAndLoadCache and
+* class_respondsToMethod and _cache_addForwardEntry.
+*
+* Cache locks: cacheUpdateLock must not be held.
+**********************************************************************/
+BOOL _cache_fill(Class cls, Method smt, SEL sel)
+{
+ uintptr_t newOccupied;
+ uintptr_t index;
+ cache_entry **buckets;
+ cache_entry *entry;
+ Cache cache;
+
+ mutex_assert_unlocked(&cacheUpdateLock);
+
+ // Never cache before +initialize is done
+ if (!cls->isInitialized()) {
+ return NO;
+ }
+
+ // Keep tally of cache additions
+ totalCacheFills += 1;
+
+ mutex_lock(&cacheUpdateLock);
+
+ entry = (cache_entry *)smt;
+
+ cache = cls->cache;
+
+ // Make sure the entry wasn't added to the cache by some other thread
+ // before we grabbed the cacheUpdateLock.
+ // Don't use _cache_getMethod() because _cache_getMethod() doesn't
+ // return forward:: entries.
+ if (_cache_getImp(cls, sel)) {
+ mutex_unlock(&cacheUpdateLock);
+ return NO; // entry is already cached, didn't add new one
+ }
+
+ // Use the cache as-is if it is less than 3/4 full
+ newOccupied = cache->occupied + 1;
+ if ((newOccupied * 4) <= (cache->mask + 1) * 3) {
+ // Cache is less than 3/4 full.
+ cache->occupied = (unsigned int)newOccupied;
+ } else {
+ // Cache is too full. Expand it.
+ cache = _cache_expand (cls);
+
+ // Account for the addition
+ cache->occupied += 1;
+ }
+
+ // Scan for the first unused slot and insert there.
+ // There is guaranteed to be an empty slot because the
+ // minimum size is 4 and we resized at 3/4 full.
+ buckets = (cache_entry **)cache->buckets;
+ for (index = CACHE_HASH(sel, cache->mask);
+ buckets[index] != NULL;
+ index = (index+1) & cache->mask)
+ {
+ // empty
+ }
+ buckets[index] = entry;
+
+ mutex_unlock(&cacheUpdateLock);
+
+ return YES; // successfully added new cache entry
+}
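+
+/* Worked example of the 3/4 threshold above: a 4-slot cache (mask == 3)
+ * accepts up to 3 entries, since 3*4 <= 4*3 but 4*4 > 4*3; the fourth fill
+ * calls _cache_expand first (which, under _class_slow_grow, may simply empty
+ * and reuse the same block rather than double it). */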
+
+
+/***********************************************************************
+* _cache_addForwardEntry
+* Add a forward:: entry for the given selector to cls's method cache.
+* Does nothing if the cache addition fails for any reason.
+* Called from class_respondsToMethod and _class_lookupMethodAndLoadCache.
+* Cache locks: cacheUpdateLock must not be held.
+**********************************************************************/
+void _cache_addForwardEntry(Class cls, SEL sel)
+{
+ cache_entry *smt;
+
+ smt = (cache_entry *)_malloc_internal(sizeof(cache_entry));
+ smt->name = sel;
+ smt->imp = _objc_msgForward_impcache;
+ if (! _cache_fill(cls, (Method)smt, sel)) { // fixme hack
+ // Entry not added to cache. Don't leak the method struct.
+ _free_internal(smt);
+ }
+}
+
+
+/***********************************************************************
+* _cache_addIgnoredEntry
+* Add an entry for the ignored selector to cls's method cache.
+* Does nothing if the cache addition fails for any reason.
+* Returns the ignored IMP.
+* Cache locks: cacheUpdateLock must not be held.
+**********************************************************************/
+#if SUPPORT_GC && !SUPPORT_IGNORED_SELECTOR_CONSTANT
+static cache_entry *alloc_ignored_entries(void)
+{
+ cache_entry *e = (cache_entry *)_malloc_internal(5 * sizeof(cache_entry));
+ e[0] = (cache_entry){ @selector(retain), 0,(IMP)&_objc_ignored_method};
+ e[1] = (cache_entry){ @selector(release), 0,(IMP)&_objc_ignored_method};
+ e[2] = (cache_entry){ @selector(autorelease),0,(IMP)&_objc_ignored_method};
+ e[3] = (cache_entry){ @selector(retainCount),0,(IMP)&_objc_ignored_method};
+ e[4] = (cache_entry){ @selector(dealloc), 0,(IMP)&_objc_ignored_method};
+ return e;
+}
+#endif
+
+IMP _cache_addIgnoredEntry(Class cls, SEL sel)
+{
+ cache_entry *entryp = NULL;
+
+#if !SUPPORT_GC
+ _objc_fatal("selector ignored with GC off");
+#elif SUPPORT_IGNORED_SELECTOR_CONSTANT
+ static cache_entry entry = { (SEL)kIgnore, 0, (IMP)&_objc_ignored_method };
+ entryp = &entry;
+ assert(sel == (SEL)kIgnore);
+#else
+ // hack
+ int i;
+ static cache_entry *entries;
+ INIT_ONCE_PTR(entries, alloc_ignored_entries(), free(v));
+
+ assert(ignoreSelector(sel));
+ for (i = 0; i < 5; i++) {
+ if (sel == entries[i].name) {
+ entryp = &entries[i];
+ break;
+ }
+ }
+ if (!entryp) _objc_fatal("selector %s (%p) is not ignored",
+ sel_getName(sel), sel);
+#endif
+
+ _cache_fill(cls, (Method)entryp, sel);
+ return entryp->imp;
+}
+
+
+/***********************************************************************
+* _cache_flush. Invalidate all valid entries in the given class' cache.
+*
+* Called from flush_caches() and _cache_fill()
+* Cache locks: cacheUpdateLock must be held by the caller.
+**********************************************************************/
+void _cache_flush(Class cls)
+{
+ Cache cache;
+ unsigned int index;
+
+ mutex_assert_locked(&cacheUpdateLock);
+
+ // Locate cache. Ignore unused cache.
+ cache = cls->cache;
+ if (_cache_isEmpty(cache)) return;
+
+#ifdef OBJC_INSTRUMENTED
+ {
+ CacheInstrumentation *cacheData;
+
+ // Tally this flush
+ cacheData = CACHE_INSTRUMENTATION(cache);
+ cacheData->flushCount += 1;
+ cacheData->flushedEntries += cache->occupied;
+ if (cache->occupied > cacheData->maxFlushedEntries)
+ cacheData->maxFlushedEntries = cache->occupied;
+ }
+#endif
+
+ // Traverse the cache
+ for (index = 0; index <= cache->mask; index += 1)
+ {
+ // Remember what this entry was, so we can possibly
+ // deallocate it after the bucket has been invalidated
+ cache_entry *oldEntry = (cache_entry *)cache->buckets[index];
+
+ // Invalidate this entry
+ cache->buckets[index] = NULL;
+
+ // Deallocate "forward::" entry
+ if (oldEntry && oldEntry->imp == _objc_msgForward_impcache)
+ _cache_collect_free (oldEntry, sizeof(cache_entry));
+ }
+
+ // Clear the valid-entry counter
+ cache->occupied = 0;
+}
+
+
+/***********************************************************************
+* flush_cache. Flushes the instance method cache for class cls only.
+* Use flush_caches() if cls might have in-use subclasses.
+**********************************************************************/
+void flush_cache(Class cls)
+{
+ if (cls) {
+ mutex_lock(&cacheUpdateLock);
+ _cache_flush(cls);
+ mutex_unlock(&cacheUpdateLock);
+ }
+}
+
+
+/***********************************************************************
+* cache collection.
+**********************************************************************/
+
+#if !TARGET_OS_WIN32
+
+// A sentinel (magic value) to report bad thread_get_state status.
+// Must not be a valid PC.
+// Must not be zero - thread_get_state() on a new thread returns PC == 0.
+#define PC_SENTINEL 1
+
+// UNIX03 compliance hack (4508809)
+#if !__DARWIN_UNIX03
+#define __srr0 srr0
+#define __eip eip
+#endif
+
+static uintptr_t _get_pc_for_thread(thread_t thread)
+#if defined(__i386__)
+{
+ i386_thread_state_t state;
+ unsigned int count = i386_THREAD_STATE_COUNT;
+ kern_return_t okay = thread_get_state (thread, i386_THREAD_STATE, (thread_state_t)&state, &count);
+ return (okay == KERN_SUCCESS) ? state.__eip : PC_SENTINEL;
+}
+#elif defined(__x86_64__)
+{
+ x86_thread_state64_t state;
+ unsigned int count = x86_THREAD_STATE64_COUNT;
+ kern_return_t okay = thread_get_state (thread, x86_THREAD_STATE64, (thread_state_t)&state, &count);
+ return (okay == KERN_SUCCESS) ? state.__rip : PC_SENTINEL;
+}
+#elif defined(__arm__)
+{
+ arm_thread_state_t state;
+ unsigned int count = ARM_THREAD_STATE_COUNT;
+ kern_return_t okay = thread_get_state (thread, ARM_THREAD_STATE, (thread_state_t)&state, &count);
+ return (okay == KERN_SUCCESS) ? state.__pc : PC_SENTINEL;
+}
+#else
+{
+#error _get_pc_for_thread () not implemented for this architecture
+}
+#endif
+
+#endif
+
+/***********************************************************************
+* _collecting_in_critical.
+* Returns TRUE if some thread is currently executing a cache-reading
+* function. Collection of cache garbage is not allowed when a cache-
+* reading function is in progress because it might still be using
+* the garbage memory.
+**********************************************************************/
+OBJC_EXPORT uintptr_t objc_entryPoints[];
+OBJC_EXPORT uintptr_t objc_exitPoints[];
+
+static int _collecting_in_critical(void)
+{
+#if TARGET_OS_WIN32
+ return TRUE;
+#else
+ thread_act_port_array_t threads;
+ unsigned number;
+ unsigned count;
+ kern_return_t ret;
+ int result;
+
+ mach_port_t mythread = pthread_mach_thread_np(pthread_self());
+
+ // Get a list of all the threads in the current task
+ ret = task_threads (mach_task_self (), &threads, &number);
+ if (ret != KERN_SUCCESS)
+ {
+ _objc_fatal("task_thread failed (result %d)\n", ret);
+ }
+
+ // Check whether any thread is in the cache lookup code
+ result = FALSE;
+ for (count = 0; count < number; count++)
+ {
+ int region;
+ uintptr_t pc;
+
+ // Don't bother checking ourselves
+ if (threads[count] == mythread)
+ continue;
+
+ // Find out where thread is executing
+ pc = _get_pc_for_thread (threads[count]);
+
+ // Check for bad status, and if so, assume the worst (can't collect)
+ if (pc == PC_SENTINEL)
+ {
+ result = TRUE;
+ goto done;
+ }
+
+ // Check whether it is in the cache lookup code
+ for (region = 0; objc_entryPoints[region] != 0; region++)
+ {
+ if ((pc >= objc_entryPoints[region]) &&
+ (pc <= objc_exitPoints[region]))
+ {
+ result = TRUE;
+ goto done;
+ }
+ }
+ }
+
+ done:
+ // Deallocate the port rights for the threads
+ for (count = 0; count < number; count++) {
+ mach_port_deallocate(mach_task_self (), threads[count]);
+ }
+
+ // Deallocate the thread list
+ vm_deallocate (mach_task_self (), (vm_address_t) threads, sizeof(threads[0]) * number);
+
+ // Return our finding
+ return result;
+#endif
+}
+
+
+/***********************************************************************
+* _garbage_make_room. Ensure that there is enough room for at least
+* one more ref in the garbage.
+**********************************************************************/
+
+// amount of memory represented by all refs in the garbage
+static size_t garbage_byte_size = 0;
+
+// do not empty the garbage until garbage_byte_size gets at least this big
+static size_t garbage_threshold = 1024;
+
+// table of refs to free
+static void **garbage_refs = 0;
+
+// current number of refs in garbage_refs
+static size_t garbage_count = 0;
+
+// capacity of current garbage_refs
+static size_t garbage_max = 0;
+
+// capacity of initial garbage_refs
+enum {
+ INIT_GARBAGE_COUNT = 128
+};
+
+static void _garbage_make_room(void)
+{
+ static int first = 1;
+
+ // Create the collection table the first time it is needed
+ if (first)
+ {
+ first = 0;
+ garbage_refs = (void**)
+ _malloc_internal(INIT_GARBAGE_COUNT * sizeof(void *));
+ garbage_max = INIT_GARBAGE_COUNT;
+ }
+
+ // Double the table if it is full
+ else if (garbage_count == garbage_max)
+ {
+ garbage_refs = (void**)
+ _realloc_internal(garbage_refs, garbage_max * 2 * sizeof(void *));
+ garbage_max *= 2;
+ }
+}
+
+
+/***********************************************************************
+* _cache_collect_free. Add the specified malloc'd memory to the list
+* of them to free at some later point.
+* size is used for the collection threshold. It does not have to be
+* precisely the block's size.
+* Cache locks: cacheUpdateLock must be held by the caller.
+**********************************************************************/
+static void _cache_collect_free(void *data, size_t size)
+{
+ mutex_assert_locked(&cacheUpdateLock);
+
+ _garbage_make_room ();
+ garbage_byte_size += size;
+ garbage_refs[garbage_count++] = data;
+}
+
+
+/***********************************************************************
+* _cache_collect. Try to free accumulated dead caches.
+* collectALot tries harder to free memory.
+* Cache locks: cacheUpdateLock must be held by the caller.
+**********************************************************************/
+void _cache_collect(bool collectALot)
+{
+ mutex_assert_locked(&cacheUpdateLock);
+
+ // Done if the garbage is not full
+ if (garbage_byte_size < garbage_threshold && !collectALot) {
+ return;
+ }
+
+ // Synchronize collection with objc_msgSend and other cache readers
+ if (!collectALot) {
+ if (_collecting_in_critical ()) {
+ // objc_msgSend (or other cache reader) is currently looking in
+ // the cache and might still be using some garbage.
+ if (PrintCaches) {
+ _objc_inform ("CACHES: not collecting; "
+ "objc_msgSend in progress");
+ }
+ return;
+ }
+ }
+ else {
+ // No excuses.
+ while (_collecting_in_critical())
+ ;
+ }
+
+ // No cache readers in progress - garbage is now deletable
+
+ // Log our progress
+ if (PrintCaches) {
+ cache_collections++;
+ _objc_inform ("CACHES: COLLECTING %zu bytes (%zu regions, %zu allocations, %zu collections)", garbage_byte_size, cache_allocator_regions, cache_allocations, cache_collections);
+ }
+
+ // Dispose all refs now in the garbage
+ while (garbage_count--) {
+ _cache_free_block(garbage_refs[garbage_count]);
+ }
+
+ // Clear the garbage count and total size indicator
+ garbage_count = 0;
+ garbage_byte_size = 0;
+
+ if (PrintCaches) {
+ size_t i;
+ size_t total = 0;
+ size_t ideal_total = 0;
+ size_t malloc_total = 0;
+ size_t local_total = 0;
+
+ for (i = 0; i < sizeof(cache_counts) / sizeof(cache_counts[0]); i++) {
+ int count = cache_counts[i];
+ int slots = 1 << i;
+ size_t size = sizeof(struct objc_cache) + TABLE_SIZE(slots);
+ size_t ideal = size;
+#if TARGET_OS_WIN32
+ size_t malloc = size;
+#else
+ size_t malloc = malloc_good_size(size);
+#endif
+ size_t local = size < CACHE_ALLOCATOR_MIN ? malloc : cache_allocator_size_for_mask(cache_allocator_mask_for_size(size));
+
+ if (!count) continue;
+
+ _objc_inform("CACHES: %4d slots: %4d caches, %6zu / %6zu / %6zu bytes ideal/malloc/local, %6zu / %6zu bytes wasted malloc/local", slots, count, ideal*count, malloc*count, local*count, malloc*count-ideal*count, local*count-ideal*count);
+
+ total += count;
+ ideal_total += ideal*count;
+ malloc_total += malloc*count;
+ local_total += local*count;
+ }
+
+ _objc_inform("CACHES: total: %4zu caches, %6zu / %6zu / %6zu bytes ideal/malloc/local, %6zu / %6zu bytes wasted malloc/local", total, ideal_total, malloc_total, local_total, malloc_total-ideal_total, local_total-ideal_total);
+ }
+}
+
+
+
+
+
+#if defined(CACHE_ALLOCATOR)
+
+/***********************************************************************
+* Custom method cache allocator.
+* Method cache block sizes are 2^slots+2 words, which is a pessimal
+* case for the system allocator. It wastes 504 bytes per cache block
+* with 128 or more slots, which adds up to tens of KB for an AppKit process.
+* To save memory, the custom cache allocator below is used.
+*
+* The cache allocator uses 128 KB allocation regions. Few processes will
+* require a second region. Within a region, allocation is address-ordered
+* first fit.
+*
+* The cache allocator uses a quantum of 520.
+* Cache block ideal sizes: 520, 1032, 2056, 4104
+* Cache allocator sizes: 520, 1040, 2080, 4160
+*
+* Because all blocks are known to be genuine method caches, the ordinary
+* cache->mask and cache->occupied fields are used as block headers.
+* No out-of-band headers are maintained. The number of blocks will
+* almost always be fewer than 200, so for simplicity there is no free
+* list or other optimization.
+*
+* Block in use: mask != 0, occupied != -1 (mask indicates block size)
+* Block free: mask != 0, occupied == -1 (mask is precisely block size)
+*
+* No cache allocator functions take any locks. Instead, the caller
+* must hold the cacheUpdateLock.
+*
+* fixme with 128 KB regions and 520 B min block size, an allocation
+* bitmap would be only 32 bytes - better than free list?
+**********************************************************************/
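+
+/* Where the sizes quoted above come from, assuming the LP64 layout (16-byte
+ * struct objc_cache header, 8-byte bucket pointers, one bucket embedded):
+ *   64-slot cache: 16 + 63*8 = 520 ideal -> 1 quantum = 520
+ *   128-slot cache: 16 + 127*8 = 1032 ideal -> 2 quanta = 1040
+ *   256-slot cache: 16 + 255*8 = 2056 ideal -> 4 quanta = 2080
+ *   512-slot cache: 16 + 511*8 = 4104 ideal -> 8 quanta = 4160 */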
+
+typedef struct cache_allocator_block {
+ uintptr_t size;
+ uintptr_t state;
+ struct cache_allocator_block *nextFree;
+} cache_allocator_block;
+
+typedef struct cache_allocator_region {
+ cache_allocator_block *start;
+ cache_allocator_block *end; // first non-block address
+ cache_allocator_block *freeList;
+ struct cache_allocator_region *next;
+} cache_allocator_region;
+
+static cache_allocator_region *cacheRegion = NULL;
+
+
+/***********************************************************************
+* cache_allocator_add_region
+* Allocates and returns a new region that can hold at least size
+* bytes of large method caches.
+* The actual size will be rounded up to a CACHE_QUANTUM boundary,
+* with a minimum of CACHE_REGION_SIZE.
+* The new region is lowest-priority for new allocations. Callers that
+* know the other regions are already full should allocate directly
+* into the returned region.
+**********************************************************************/
+static cache_allocator_region *cache_allocator_add_region(size_t size)
+{
+ vm_address_t addr;
+ cache_allocator_block *b;
+ cache_allocator_region **rgnP;
+ cache_allocator_region *newRegion = (cache_allocator_region *)
+ _calloc_internal(1, sizeof(cache_allocator_region));
+
+ // Round size up to quantum boundary, and apply the minimum size.
+ size += CACHE_QUANTUM - (size % CACHE_QUANTUM);
+ if (size < CACHE_REGION_SIZE) size = CACHE_REGION_SIZE;
+
+ // Allocate the region
+ addr = (vm_address_t)calloc(size, 1);
+ newRegion->start = (cache_allocator_block *)addr;
+ newRegion->end = (cache_allocator_block *)(addr + size);
+
+ // Mark the first block: free and covers the entire region
+ b = newRegion->start;
+ b->size = size;
+ b->state = (uintptr_t)-1;
+ b->nextFree = NULL;
+ newRegion->freeList = b;
+
+ // Add to end of the linked list of regions.
+ // Other regions should be re-used before this one is touched.
+ newRegion->next = NULL;
+ rgnP = &cacheRegion;
+ while (*rgnP) {
+ rgnP = &(**rgnP).next;
+ }
+ *rgnP = newRegion;
+
+ cache_allocator_regions++;
+
+ return newRegion;
+}
+
+
+/***********************************************************************
+* cache_allocator_coalesce
+* Attempts to coalesce a free block with the single free block following
+* it in the free list, if any.
+**********************************************************************/
+static void cache_allocator_coalesce(cache_allocator_block *block)
+{
+ if (block->size + (uintptr_t)block == (uintptr_t)block->nextFree) {
+ block->size += block->nextFree->size;
+ block->nextFree = block->nextFree->nextFree;
+ }
+}
+
+
+/***********************************************************************
+* cache_region_calloc
+* Attempt to allocate a size-byte block in the given region.
+* Allocation is first-fit. The free list is already fully coalesced.
+* Returns NULL if there is not enough room in the region for the block.
+**********************************************************************/
+static void *cache_region_calloc(cache_allocator_region *rgn, size_t size)
+{
+ cache_allocator_block **blockP;
+ uintptr_t mask;
+
+ // Save mask for allocated block, then round size
+ // up to CACHE_QUANTUM boundary
+ mask = cache_allocator_mask_for_size(size);
+ size = cache_allocator_size_for_mask(mask);
+
+ // Search the free list for a sufficiently large free block.
+
+ for (blockP = &rgn->freeList;
+ *blockP != NULL;
+ blockP = &(**blockP).nextFree)
+ {
+ cache_allocator_block *block = *blockP;
+ if (block->size < size) continue; // not big enough
+
+ // block is now big enough. Allocate from it.
+
+ // Slice off unneeded fragment of block, if any,
+ // and reconnect the free list around block.
+ if (block->size - size >= CACHE_QUANTUM) {
+ cache_allocator_block *leftover =
+ (cache_allocator_block *)(size + (uintptr_t)block);
+ leftover->size = block->size - size;
+ leftover->state = (uintptr_t)-1;
+ leftover->nextFree = block->nextFree;
+ *blockP = leftover;
+ } else {
+ *blockP = block->nextFree;
+ }
+
+ // block is now exactly the right size.
+
+ bzero(block, size);
+ block->size = mask; // Cache->mask
+ block->state = 0; // Cache->occupied
+
+ return block;
+ }
+
+ // No room in this region.
+ return NULL;
+}
+
+
+/***********************************************************************
+* cache_allocator_calloc
+* Custom allocator for large method caches (128+ slots)
+* The returned cache block already has cache->mask set.
+* cache->occupied and the cache contents are zero.
+* Cache locks: cacheUpdateLock must be held by the caller
+**********************************************************************/
+static Cache cache_allocator_calloc(size_t size)
+{
+ cache_allocator_region *rgn;
+
+ mutex_assert_locked(&cacheUpdateLock);
+
+ for (rgn = cacheRegion; rgn != NULL; rgn = rgn->next) {
+ void *p = cache_region_calloc(rgn, size);
+ if (p) {
+ return (Cache)p;
+ }
+ }
+
+ // No regions or all regions full - make a region and try one more time
+ // In the unlikely case of a cache bigger than CACHE_REGION_SIZE, it will get its own region.
+ return (Cache)cache_region_calloc(cache_allocator_add_region(size), size);
+}
+
+
+/***********************************************************************
+* cache_allocator_region_for_block
+* Returns the cache allocator region that ptr points into, or NULL.
+**********************************************************************/
+static cache_allocator_region *cache_allocator_region_for_block(cache_allocator_block *block)
+{
+ cache_allocator_region *rgn;
+ for (rgn = cacheRegion; rgn != NULL; rgn = rgn->next) {
+ if (block >= rgn->start && block < rgn->end) return rgn;
+ }
+ return NULL;
+}
+
+
+/***********************************************************************
+* cache_allocator_is_block
+* If ptr is a live block from the cache allocator, return YES
+* If ptr is a block from some other allocator, return NO.
+* If ptr is a dead block from the cache allocator, result is undefined.
+* Cache locks: cacheUpdateLock must be held by the caller
+**********************************************************************/
+static BOOL cache_allocator_is_block(void *ptr)
+{
+ mutex_assert_locked(&cacheUpdateLock);
+ return (cache_allocator_region_for_block((cache_allocator_block *)ptr) != NULL);
+}
+
+/***********************************************************************
+* cache_allocator_free
+* Frees a block allocated by the cache allocator.
+* Cache locks: cacheUpdateLock must be held by the caller.
+**********************************************************************/
+static void cache_allocator_free(void *ptr)
+{
+ cache_allocator_block *dead = (cache_allocator_block *)ptr;
+ cache_allocator_block *cur;
+ cache_allocator_region *rgn;
+
+ mutex_assert_locked(&cacheUpdateLock);
+
+ if (! (rgn = cache_allocator_region_for_block(dead))) {
+ // free of non-pointer
+ _objc_inform("cache_allocator_free of non-pointer %p", dead);
+ return;
+ }
+
+ dead->size = cache_allocator_size_for_mask(dead->size);
+ dead->state = (uintptr_t)-1;
+
+ if (!rgn->freeList || rgn->freeList > dead) {
+ // dead block belongs at front of free list
+ dead->nextFree = rgn->freeList;
+ rgn->freeList = dead;
+ cache_allocator_coalesce(dead);
+ return;
+ }
+
+ // dead block belongs in the middle or end of free list
+ for (cur = rgn->freeList; cur != NULL; cur = cur->nextFree) {
+ cache_allocator_block *ahead = cur->nextFree;
+
+ if (!ahead || ahead > dead) {
+ // cur and ahead straddle dead, OR dead belongs at end of free list
+ cur->nextFree = dead;
+ dead->nextFree = ahead;
+
+ // coalesce into dead first in case both succeed
+ cache_allocator_coalesce(dead);
+ cache_allocator_coalesce(cur);
+ return;
+ }
+ }
+
+ // uh-oh
+ _objc_inform("cache_allocator_free of non-pointer %p", ptr);
+}
+
+// defined(CACHE_ALLOCATOR)
+#endif
+
+/***********************************************************************
+* Cache instrumentation and debugging
+**********************************************************************/
+
+#ifdef OBJC_INSTRUMENTED
+enum {
+ CACHE_HISTOGRAM_SIZE = 512
+};
+
+unsigned int CacheHitHistogram [CACHE_HISTOGRAM_SIZE];
+unsigned int CacheMissHistogram [CACHE_HISTOGRAM_SIZE];
+#endif
+
+
+/***********************************************************************
+* _cache_print.
+**********************************************************************/
+static void _cache_print(Cache cache)
+{
+ uintptr_t index;
+ uintptr_t count;
+
+ count = cache->mask + 1;
+ for (index = 0; index < count; index += 1) {
+ cache_entry *entry = (cache_entry *)cache->buckets[index];
+ if (entry) {
+ if (entry->imp == _objc_msgForward_impcache)
+ printf ("does not recognize: \n");
+ printf ("%s\n", sel_getName(entry->name));
+ }
+ }
+}
+
+
+/***********************************************************************
+* _class_printMethodCaches.
+**********************************************************************/
+void _class_printMethodCaches(Class cls)
+{
+ if (_cache_isEmpty(cls->cache)) {
+ printf("no instance-method cache for class %s\n", cls->getName());
+ } else {
+ printf("instance-method cache for class %s:\n", cls->getName());
+ _cache_print(cls->cache);
+ }
+
+ if (_cache_isEmpty(cls->ISA()->cache)) {
+ printf("no class-method cache for class %s\n", cls->getName());
+ } else {
+ printf ("class-method cache for class %s:\n", cls->getName());
+ _cache_print(cls->ISA()->cache);
+ }
+}
+
+
+#if 0
+#warning fixme
+
+
+/***********************************************************************
+* _class_printDuplicateCacheEntries.
+**********************************************************************/
+void _class_printDuplicateCacheEntries(BOOL detail)
+{
+ NXHashState state;
+ Class cls;
+ unsigned int duplicates;
+ unsigned int index1;
+ unsigned int index2;
+ unsigned int mask;
+ unsigned int count;
+ unsigned int isMeta;
+ Cache cache;
+
+
+ printf ("Checking for duplicate cache entries \n");
+
+ // Outermost loop - iterate over all classes
+ state = NXInitHashState (class_hash);
+ duplicates = 0;
+ while (NXNextHashState (class_hash, &state, (void **) &cls))
+ {
+ // Control loop - do given class' cache, then its isa's cache
+ for (isMeta = 0; isMeta <= 1; isMeta += 1)
+ {
+ // Select cache of interest and make sure it exists
+ cache = (isMeta ? cls->ISA() : cls)->cache;
+ if (_cache_isEmpty(cache))
+ continue;
+
+ // Middle loop - check each entry in the given cache
+ mask = cache->mask;
+ count = mask + 1;
+ for (index1 = 0; index1 < count; index1 += 1)
+ {
+ // Skip invalid entry
+ if (!cache->buckets[index1])
+ continue;
+
+ // Inner loop - check that given entry matches no later entry
+ for (index2 = index1 + 1; index2 < count; index2 += 1)
+ {
+ // Skip invalid entry
+ if (!cache->buckets[index2])
+ continue;
+
+ // Check for duplication by method name comparison
+ if (strcmp ((char *) cache->buckets[index1]->name,
+ (char *) cache->buckets[index2]->name) == 0)
+ {
+ if (detail)
+ printf ("%s %s\n", cls->getName(), sel_getName(cache->buckets[index1]->name));
+ duplicates += 1;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ // Log the findings
+ printf ("duplicates = %d\n", duplicates);
+ printf ("total cache fills = %d\n", totalCacheFills);
+}
+
+
+/***********************************************************************
+* PrintCacheHeader.
+**********************************************************************/
+static void PrintCacheHeader(void)
+{
+#ifdef OBJC_INSTRUMENTED
+ printf ("Cache Cache Slots Avg Max AvgS MaxS AvgS MaxS TotalD AvgD MaxD TotalD AvgD MaxD TotD AvgD MaxD\n");
+ printf ("Size Count Used Used Used Hit Hit Miss Miss Hits Prbs Prbs Misses Prbs Prbs Flsh Flsh Flsh\n");
+ printf ("----- ----- ----- ----- ---- ---- ---- ---- ---- ------- ---- ---- ------- ---- ---- ---- ---- ----\n");
+#else
+ printf ("Cache Cache Slots Avg Max AvgS MaxS AvgS MaxS\n");
+ printf ("Size Count Used Used Used Hit Hit Miss Miss\n");
+ printf ("----- ----- ----- ----- ---- ---- ---- ---- ----\n");
+#endif
+}
+
+
+/***********************************************************************
+* PrintCacheInfo.
+**********************************************************************/
+static void PrintCacheInfo(unsigned int cacheSize,
+ unsigned int cacheCount,
+ unsigned int slotsUsed,
+ float avgUsed, unsigned int maxUsed,
+ float avgSHit, unsigned int maxSHit,
+ float avgSMiss, unsigned int maxSMiss
+#ifdef OBJC_INSTRUMENTED
+ , unsigned int totDHits,
+ float avgDHit,
+ unsigned int maxDHit,
+ unsigned int totDMisses,
+ float avgDMiss,
+ unsigned int maxDMiss,
+ unsigned int totDFlsh,
+ float avgDFlsh,
+ unsigned int maxDFlsh
+#endif
+ )
+{
+#ifdef OBJC_INSTRUMENTED
+ printf ("%5u %5u %5u %5.1f %4u %4.1f %4u %4.1f %4u %7u %4.1f %4u %7u %4.1f %4u %4u %4.1f %4u\n",
+#else
+ printf ("%5u %5u %5u %5.1f %4u %4.1f %4u %4.1f %4u\n",
+#endif
+ cacheSize, cacheCount, slotsUsed, avgUsed, maxUsed, avgSHit, maxSHit, avgSMiss, maxSMiss
+#ifdef OBJC_INSTRUMENTED
+ , totDHits, avgDHit, maxDHit, totDMisses, avgDMiss, maxDMiss, totDFlsh, avgDFlsh, maxDFlsh
+#endif
+ );
+
+}
+
+
+#ifdef OBJC_INSTRUMENTED
+/***********************************************************************
+* PrintCacheHistogram. Show the non-zero entries from the specified
+* cache histogram.
+**********************************************************************/
+static void PrintCacheHistogram(char *title,
+ unsigned int *firstEntry,
+ unsigned int entryCount)
+{
+ unsigned int index;
+ unsigned int *thisEntry;
+
+ printf ("%s\n", title);
+ printf (" Probes Tally\n");
+ printf (" ------ -----\n");
+ for (index = 0, thisEntry = firstEntry;
+ index < entryCount;
+ index += 1, thisEntry += 1)
+ {
+ if (*thisEntry == 0)
+ continue;
+
+ printf (" %6d %5d\n", index, *thisEntry);
+ }
+}
+#endif
+
+
+/***********************************************************************
+* _class_printMethodCacheStatistics.
+**********************************************************************/
+
+#define MAX_LOG2_SIZE 32
+#define MAX_CHAIN_SIZE 100
+
+void _class_printMethodCacheStatistics(void)
+{
+ unsigned int isMeta;
+ unsigned int index;
+ NXHashState state;
+ Class cls;
+ unsigned int totalChain;
+ unsigned int totalMissChain;
+ unsigned int maxChain;
+ unsigned int maxMissChain;
+ unsigned int classCount;
+ unsigned int negativeEntryCount;
+ unsigned int cacheExpandCount;
+ unsigned int cacheCountBySize[2][MAX_LOG2_SIZE] = {{0}};
+ unsigned int totalEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
+ unsigned int maxEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
+ unsigned int totalChainBySize[2][MAX_LOG2_SIZE] = {{0}};
+ unsigned int totalMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
+ unsigned int totalMaxChainBySize[2][MAX_LOG2_SIZE] = {{0}};
+ unsigned int totalMaxMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
+ unsigned int maxChainBySize[2][MAX_LOG2_SIZE] = {{0}};
+ unsigned int maxMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
+ unsigned int chainCount[MAX_CHAIN_SIZE] = {0};
+ unsigned int missChainCount[MAX_CHAIN_SIZE] = {0};
+#ifdef OBJC_INSTRUMENTED
+ unsigned int hitCountBySize[2][MAX_LOG2_SIZE] = {{0}};
+ unsigned int hitProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
+ unsigned int maxHitProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
+ unsigned int missCountBySize[2][MAX_LOG2_SIZE] = {{0}};
+ unsigned int missProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
+ unsigned int maxMissProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
+ unsigned int flushCountBySize[2][MAX_LOG2_SIZE] = {{0}};
+ unsigned int flushedEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
+ unsigned int maxFlushedEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
+#endif
+
+ printf ("Printing cache statistics\n");
+
+ // Outermost loop - iterate over all classes
+ state = NXInitHashState (class_hash);
+ classCount = 0;
+ negativeEntryCount = 0;
+ cacheExpandCount = 0;
+ while (NXNextHashState (class_hash, &state, (void **) &cls))
+ {
+ // Tally classes
+ classCount += 1;
+
+ // Control loop - do given class' cache, then its isa's cache
+ for (isMeta = 0; isMeta <= 1; isMeta += 1)
+ {
+ Cache cache;
+ unsigned int mask;
+ unsigned int log2Size;
+ unsigned int entryCount;
+
+ // Select cache of interest
+ cache = (isMeta ? cls->ISA : cls)->cache;
+
+ // Ignore empty cache... should we?
+ if (_cache_isEmpty(cache))
+ continue;
+
+ // Middle loop - do each entry in the given cache
+ mask = cache->mask;
+ entryCount = 0;
+ totalChain = 0;
+ totalMissChain = 0;
+ maxChain = 0;
+ maxMissChain = 0;
+ for (index = 0; index < mask + 1; index += 1)
+ {
+ cache_entry **buckets;
+ cache_entry *entry;
+ unsigned int hash;
+ unsigned int methodChain;
+ unsigned int methodMissChain;
+ unsigned int index2;
+
+                // If the entry is invalid, the only item of
+                // interest is that a future insert hashing
+                // to this slot can use it directly.
+ buckets = (cache_entry **)cache->buckets;
+ if (!buckets[index])
+ {
+ missChainCount[0] += 1;
+ continue;
+ }
+
+ entry = buckets[index];
+
+ // Tally valid entries
+ entryCount += 1;
+
+ // Tally "forward::" entries
+ if (entry->imp == _objc_msgForward_impcache)
+ negativeEntryCount += 1;
+
+ // Calculate search distance (chain length) for this method
+ // The chain may wrap around to the beginning of the table.
+ hash = CACHE_HASH(entry->name, mask);
+ if (index >= hash) methodChain = index - hash;
+ else methodChain = (mask+1) + index - hash;
+
+ // Tally chains of this length
+ if (methodChain < MAX_CHAIN_SIZE)
+ chainCount[methodChain] += 1;
+
+ // Keep sum of all chain lengths
+ totalChain += methodChain;
+
+ // Record greatest chain length
+ if (methodChain > maxChain)
+ maxChain = methodChain;
+
+ // Calculate search distance for miss that hashes here
+ index2 = index;
+ while (buckets[index2])
+ {
+ index2 += 1;
+ index2 &= mask;
+ }
+ methodMissChain = ((index2 - index) & mask);
+
+ // Tally miss chains of this length
+ if (methodMissChain < MAX_CHAIN_SIZE)
+ missChainCount[methodMissChain] += 1;
+
+ // Keep sum of all miss chain lengths in this class
+ totalMissChain += methodMissChain;
+
+ // Record greatest miss chain length
+ if (methodMissChain > maxMissChain)
+ maxMissChain = methodMissChain;
+ }
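            // Editor's note (illustrative, not part of this patch): a worked
            // example of the wraparound chain-length math above. With
            // mask + 1 == 8, an entry whose selector hashes to slot 6 but
            // which resides at index 1 has methodChain == (8 + 1) - 6 == 3,
            // i.e. it sits three probes past its home slot (6 -> 7 -> 0 -> 1).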
+
+ // Factor this cache into statistics about caches of the same
+ // type and size (all caches are a power of two in size)
+ log2Size = log2u (mask + 1);
+ cacheCountBySize[isMeta][log2Size] += 1;
+ totalEntriesBySize[isMeta][log2Size] += entryCount;
+ if (entryCount > maxEntriesBySize[isMeta][log2Size])
+ maxEntriesBySize[isMeta][log2Size] = entryCount;
+ totalChainBySize[isMeta][log2Size] += totalChain;
+ totalMissChainBySize[isMeta][log2Size] += totalMissChain;
+ totalMaxChainBySize[isMeta][log2Size] += maxChain;
+ totalMaxMissChainBySize[isMeta][log2Size] += maxMissChain;
+ if (maxChain > maxChainBySize[isMeta][log2Size])
+ maxChainBySize[isMeta][log2Size] = maxChain;
+ if (maxMissChain > maxMissChainBySize[isMeta][log2Size])
+ maxMissChainBySize[isMeta][log2Size] = maxMissChain;
+#ifdef OBJC_INSTRUMENTED
+ {
+ CacheInstrumentation *cacheData;
+
+ cacheData = CACHE_INSTRUMENTATION(cache);
+ hitCountBySize[isMeta][log2Size] += cacheData->hitCount;
+ hitProbesBySize[isMeta][log2Size] += cacheData->hitProbes;
+ if (cacheData->maxHitProbes > maxHitProbesBySize[isMeta][log2Size])
+ maxHitProbesBySize[isMeta][log2Size] = cacheData->maxHitProbes;
+ missCountBySize[isMeta][log2Size] += cacheData->missCount;
+ missProbesBySize[isMeta][log2Size] += cacheData->missProbes;
+ if (cacheData->maxMissProbes > maxMissProbesBySize[isMeta][log2Size])
+ maxMissProbesBySize[isMeta][log2Size] = cacheData->maxMissProbes;
+ flushCountBySize[isMeta][log2Size] += cacheData->flushCount;
+ flushedEntriesBySize[isMeta][log2Size] += cacheData->flushedEntries;
+ if (cacheData->maxFlushedEntries > maxFlushedEntriesBySize[isMeta][log2Size])
+ maxFlushedEntriesBySize[isMeta][log2Size] = cacheData->maxFlushedEntries;
+ }
+#endif
+ // Caches start with a power of two number of entries, and grow by doubling, so
+ // we can calculate the number of times this cache has expanded
+ cacheExpandCount += log2Size - INIT_CACHE_SIZE_LOG2;
+ }
+ }
+
+ {
+ unsigned int cacheCountByType[2] = {0};
+ unsigned int totalCacheCount = 0;
+ unsigned int totalEntries = 0;
+ unsigned int maxEntries = 0;
+ unsigned int totalSlots = 0;
+#ifdef OBJC_INSTRUMENTED
+ unsigned int totalHitCount = 0;
+ unsigned int totalHitProbes = 0;
+ unsigned int maxHitProbes = 0;
+ unsigned int totalMissCount = 0;
+ unsigned int totalMissProbes = 0;
+ unsigned int maxMissProbes = 0;
+ unsigned int totalFlushCount = 0;
+ unsigned int totalFlushedEntries = 0;
+ unsigned int maxFlushedEntries = 0;
+#endif
+
+ totalChain = 0;
+ maxChain = 0;
+ totalMissChain = 0;
+ maxMissChain = 0;
+
+ // Sum information over all caches
+ for (isMeta = 0; isMeta <= 1; isMeta += 1)
+ {
+ for (index = 0; index < MAX_LOG2_SIZE; index += 1)
+ {
+ cacheCountByType[isMeta] += cacheCountBySize[isMeta][index];
+ totalEntries += totalEntriesBySize[isMeta][index];
+ totalSlots += cacheCountBySize[isMeta][index] * (1 << index);
+ totalChain += totalChainBySize[isMeta][index];
+ if (maxEntriesBySize[isMeta][index] > maxEntries)
+ maxEntries = maxEntriesBySize[isMeta][index];
+ if (maxChainBySize[isMeta][index] > maxChain)
+ maxChain = maxChainBySize[isMeta][index];
+ totalMissChain += totalMissChainBySize[isMeta][index];
+ if (maxMissChainBySize[isMeta][index] > maxMissChain)
+ maxMissChain = maxMissChainBySize[isMeta][index];
+#ifdef OBJC_INSTRUMENTED
+ totalHitCount += hitCountBySize[isMeta][index];
+ totalHitProbes += hitProbesBySize[isMeta][index];
+ if (maxHitProbesBySize[isMeta][index] > maxHitProbes)
+ maxHitProbes = maxHitProbesBySize[isMeta][index];
+ totalMissCount += missCountBySize[isMeta][index];
+ totalMissProbes += missProbesBySize[isMeta][index];
+ if (maxMissProbesBySize[isMeta][index] > maxMissProbes)
+ maxMissProbes = maxMissProbesBySize[isMeta][index];
+ totalFlushCount += flushCountBySize[isMeta][index];
+ totalFlushedEntries += flushedEntriesBySize[isMeta][index];
+ if (maxFlushedEntriesBySize[isMeta][index] > maxFlushedEntries)
+ maxFlushedEntries = maxFlushedEntriesBySize[isMeta][index];
+#endif
+ }
+
+ totalCacheCount += cacheCountByType[isMeta];
+ }
+
+ // Log our findings
+ printf ("There are %u classes\n", classCount);
+
+ for (isMeta = 0; isMeta <= 1; isMeta += 1)
+ {
+ // Number of this type of class
+ printf ("\nThere are %u %s-method caches, broken down by size (slot count):\n",
+ cacheCountByType[isMeta],
+ isMeta ? "class" : "instance");
+
+ // Print header
+ PrintCacheHeader ();
+
+            // Keep format consistent even if there are no caches of this kind
+ if (cacheCountByType[isMeta] == 0)
+ {
+ printf ("(none)\n");
+ continue;
+ }
+
+ // Usage information by cache size
+ for (index = 0; index < MAX_LOG2_SIZE; index += 1)
+ {
+ unsigned int cacheCount;
+ unsigned int cacheSlotCount;
+ unsigned int cacheEntryCount;
+
+ // Get number of caches of this type and size
+ cacheCount = cacheCountBySize[isMeta][index];
+ if (cacheCount == 0)
+ continue;
+
+ // Get the cache slot count and the total number of valid entries
+ cacheSlotCount = (1 << index);
+ cacheEntryCount = totalEntriesBySize[isMeta][index];
+
+ // Give the analysis
+ PrintCacheInfo (cacheSlotCount,
+ cacheCount,
+ cacheEntryCount,
+ (float) cacheEntryCount / (float) cacheCount,
+ maxEntriesBySize[isMeta][index],
+ (float) totalChainBySize[isMeta][index] / (float) cacheEntryCount,
+ maxChainBySize[isMeta][index],
+ (float) totalMissChainBySize[isMeta][index] / (float) (cacheCount * cacheSlotCount),
+ maxMissChainBySize[isMeta][index]
+#ifdef OBJC_INSTRUMENTED
+ , hitCountBySize[isMeta][index],
+ hitCountBySize[isMeta][index] ?
+ (float) hitProbesBySize[isMeta][index] / (float) hitCountBySize[isMeta][index] : 0.0,
+ maxHitProbesBySize[isMeta][index],
+ missCountBySize[isMeta][index],
+ missCountBySize[isMeta][index] ?
+ (float) missProbesBySize[isMeta][index] / (float) missCountBySize[isMeta][index] : 0.0,
+ maxMissProbesBySize[isMeta][index],
+ flushCountBySize[isMeta][index],
+ flushCountBySize[isMeta][index] ?
+ (float) flushedEntriesBySize[isMeta][index] / (float) flushCountBySize[isMeta][index] : 0.0,
+ maxFlushedEntriesBySize[isMeta][index]
+#endif
+ );
+ }
+ }
+
+ // Give overall numbers
+ printf ("\nCumulative:\n");
+ PrintCacheHeader ();
+ PrintCacheInfo (totalSlots,
+ totalCacheCount,
+ totalEntries,
+ (float) totalEntries / (float) totalCacheCount,
+ maxEntries,
+ (float) totalChain / (float) totalEntries,
+ maxChain,
+ (float) totalMissChain / (float) totalSlots,
+ maxMissChain
+#ifdef OBJC_INSTRUMENTED
+ , totalHitCount,
+ totalHitCount ?
+ (float) totalHitProbes / (float) totalHitCount : 0.0,
+ maxHitProbes,
+ totalMissCount,
+ totalMissCount ?
+ (float) totalMissProbes / (float) totalMissCount : 0.0,
+ maxMissProbes,
+ totalFlushCount,
+ totalFlushCount ?
+ (float) totalFlushedEntries / (float) totalFlushCount : 0.0,
+ maxFlushedEntries
+#endif
+ );
+
+ printf ("\nNumber of \"forward::\" entries: %d\n", negativeEntryCount);
+ printf ("Number of cache expansions: %d\n", cacheExpandCount);
+#ifdef OBJC_INSTRUMENTED
+ printf ("flush_caches: total calls total visits average visits max visits total classes visits/class\n");
+ printf (" ----------- ------------ -------------- ---------- ------------- -------------\n");
+ printf (" linear %11u %12u %14.1f %10u %13u %12.2f\n",
+ LinearFlushCachesCount,
+ LinearFlushCachesVisitedCount,
+ LinearFlushCachesCount ?
+ (float) LinearFlushCachesVisitedCount / (float) LinearFlushCachesCount : 0.0,
+ MaxLinearFlushCachesVisitedCount,
+ LinearFlushCachesVisitedCount,
+ 1.0);
+ printf (" nonlinear %11u %12u %14.1f %10u %13u %12.2f\n",
+ NonlinearFlushCachesCount,
+ NonlinearFlushCachesVisitedCount,
+ NonlinearFlushCachesCount ?
+ (float) NonlinearFlushCachesVisitedCount / (float) NonlinearFlushCachesCount : 0.0,
+ MaxNonlinearFlushCachesVisitedCount,
+ NonlinearFlushCachesClassCount,
+ NonlinearFlushCachesClassCount ?
+ (float) NonlinearFlushCachesVisitedCount / (float) NonlinearFlushCachesClassCount : 0.0);
+ printf (" ideal %11u %12u %14.1f %10u %13u %12.2f\n",
+ LinearFlushCachesCount + NonlinearFlushCachesCount,
+ IdealFlushCachesCount,
+ LinearFlushCachesCount + NonlinearFlushCachesCount ?
+ (float) IdealFlushCachesCount / (float) (LinearFlushCachesCount + NonlinearFlushCachesCount) : 0.0,
+ MaxIdealFlushCachesCount,
+ LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount,
+ LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount ?
+ (float) IdealFlushCachesCount / (float) (LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount) : 0.0);
+
+ PrintCacheHistogram ("\nCache hit histogram:", &CacheHitHistogram[0], CACHE_HISTOGRAM_SIZE);
+ PrintCacheHistogram ("\nCache miss histogram:", &CacheMissHistogram[0], CACHE_HISTOGRAM_SIZE);
+#endif
+
+#if 0
+ printf ("\nLookup chains:");
+ for (index = 0; index < MAX_CHAIN_SIZE; index += 1)
+ {
+ if (chainCount[index] != 0)
+ printf (" %u:%u", index, chainCount[index]);
+ }
+
+ printf ("\nMiss chains:");
+ for (index = 0; index < MAX_CHAIN_SIZE; index += 1)
+ {
+ if (missChainCount[index] != 0)
+ printf (" %u:%u", index, missChainCount[index]);
+ }
+
+ printf ("\nTotal memory usage for cache data structures: %lu bytes\n",
+ totalCacheCount * (sizeof(struct objc_cache) - sizeof(cache_entry *)) +
+ totalSlots * sizeof(cache_entry *) +
+ negativeEntryCount * sizeof(cache_entry));
+#endif
+ }
+}
+
+#endif
+
+
+// !__OBJC2__
+#endif
--- /dev/null
+
+#ifndef _OBJC_CACHE_H
+#define _OBJC_CACHE_H
+
+#include "objc-private.h"
+
+__BEGIN_DECLS
+
+extern IMP cache_getImp(Class cls, SEL sel);
+
+extern void cache_fill(Class cls, SEL sel, IMP imp);
+
+extern void cache_eraseMethods(Class cls, method_list_t *mlist);
+
+extern void cache_eraseImp(Class cls, SEL sel, IMP imp);
+
+extern void cache_eraseImp_nolock(Class cls, SEL sel, IMP imp);
+
+extern void cache_erase_nolock(cache_t *cache);
+
+extern void cache_collect(bool collectALot);
+
+__END_DECLS
+
+#endif
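Editor's note: the following usage sketch is illustrative only and not part of
this patch. It shows how a hypothetical caller inside the runtime might drive
the flat C API declared above; `cls`, `sel`, and `imp` are assumed to come from
the normal slow-path method lookup, and both calls take cacheUpdateLock
internally in the implementation below.

    // Hypothetical in-runtime caller (sketch only).
    static void example_cache_usage(Class cls, SEL sel, IMP imp)
    {
        // Publish a successful lookup so the next send for this selector
        // hits the cache instead of the slow path.
        cache_fill(cls, sel, imp);

        // If the implementation is later replaced, evict stale copies so
        // message sends fall back to the uncached lookup.
        cache_eraseImp(cls, sel, imp);
    }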
*
* Cache readers (PC-checked by collecting_in_critical())
* objc_msgSend*
- * _cache_getImp
- * _cache_getMethod
+ * cache_getImp
+ * cache_getMethod
*
* Cache writers (hold cacheUpdateLock while reading or writing; not PC-checked)
- * _cache_fill (acquires lock)
- * _cache_expand (only called from cache_fill)
- * _cache_create (only called from cache_expand)
+ * cache_fill (acquires lock)
+ * cache_expand (only called from cache_fill)
+ * cache_create (only called from cache_expand)
* bcopy (only called from instrumented cache_expand)
* flush_caches (acquires lock)
- * _cache_flush (only called from cache_fill and flush_caches)
- * _cache_collect_free (only called from cache_expand and cache_flush)
+ * cache_flush (only called from cache_fill and flush_caches)
+ * cache_collect_free (only called from cache_expand and cache_flush)
*
* UNPROTECTED cache readers (NOT thread-safe; used for debug info only)
- * _cache_print
+ * cache_print
* _class_printMethodCaches
* _class_printDuplicateCacheEntries
* _class_printMethodCacheStatistics
* triplet itself could be freed unless _class_lookupMethodAndLoadCache
* were PC-checked or used a lock. Additionally, storing the method
* triplet in both caches would result in double-freeing if both caches
- * were flushed or expanded. The solution is for _cache_getMethod to
- * ignore all entries whose implementation is _objc_msgForward_internal,
+ * were flushed or expanded. The solution is for cache_getMethod to
+ * ignore all entries whose implementation is _objc_msgForward_impcache,
* so _class_lookupMethodAndLoadCache cannot look at a forward:: entry
* unsafely or place it in multiple caches.
***********************************************************************/
-#include "objc-private.h"
-#include "hashtable2.h"
-
-typedef struct {
- SEL name; // same layout as struct old_method
- void *unused;
- IMP imp; // same layout as struct old_method
-} cache_entry;
#if __OBJC2__
-#ifndef __LP64__
-# define CACHE_HASH(sel, mask) (((uintptr_t)(sel)>>2) & (mask))
-#else
-# define CACHE_HASH(sel, mask) (((unsigned int)((uintptr_t)(sel)>>0)) & (mask))
-#endif
-
-struct objc_cache {
- uintptr_t mask; /* total = mask + 1 */
- uintptr_t occupied;
- cache_entry *buckets[1];
-};
-
-#define CACHE_BUCKET(e) ((cache_entry *)e)
-
-#else
-
-/* Most definitions are in runtime.h */
-#define CACHE_BUCKET(e) ((Method)e)
-
-#endif
-
-
-/* When _class_slow_grow is non-zero, any given cache is actually grown
- * only on the odd-numbered times it becomes full; on the even-numbered
- * times, it is simply emptied and re-used. When this flag is zero,
- * caches are grown every time. */
-static const int _class_slow_grow = 1;
+#include "objc-private.h"
+#include "objc-cache.h"
-/* For min cache size: clear_cache=1, slow_grow=1
- For max cache size: clear_cache=0, slow_grow=0 */
/* Initial cache bucket count. INIT_CACHE_SIZE must be a power of two. */
enum {
INIT_CACHE_SIZE = (1 << INIT_CACHE_SIZE_LOG2)
};
+static size_t log2u(size_t x)
+{
+ unsigned int log;
-/* Amount of space required for `count` hash table buckets, knowing that
- * one entry is embedded in the cache structure itself. */
-#define TABLE_SIZE(count) ((count - 1) * sizeof(cache_entry *))
-
-
-#if !TARGET_OS_WIN32
-# define CACHE_ALLOCATOR
-#endif
-
-/* Custom cache allocator parameters.
- * CACHE_REGION_SIZE must be a multiple of CACHE_QUANTUM. */
-#define CACHE_ALLOCATOR_MIN 512
-#define CACHE_QUANTUM (CACHE_ALLOCATOR_MIN+sizeof(struct objc_cache)-sizeof(cache_entry*))
-#define CACHE_REGION_SIZE ((128*1024 / CACHE_QUANTUM) * CACHE_QUANTUM)
-// #define CACHE_REGION_SIZE ((256*1024 / CACHE_QUANTUM) * CACHE_QUANTUM)
+ log = 0;
+ while (x >>= 1)
+ log += 1;
-static uintptr_t cache_allocator_mask_for_size(size_t size)
-{
- return (size - sizeof(struct objc_cache)) / sizeof(cache_entry *);
+ return log;
}
-static size_t cache_allocator_size_for_mask(uintptr_t mask)
-{
- size_t requested = sizeof(struct objc_cache) + TABLE_SIZE(mask+1);
- size_t actual = CACHE_QUANTUM;
- while (actual < requested) actual += CACHE_QUANTUM;
- return actual;
-}
+static void cache_collect_free(struct bucket_t *data, size_t size);
+static int _collecting_in_critical(void);
+static void _garbage_make_room(void);
-/* Cache instrumentation data. Immediately follows the cache block itself. */
-#ifdef OBJC_INSTRUMENTED
-typedef struct
-{
- unsigned int hitCount; // cache lookup success tally
- unsigned int hitProbes; // sum entries checked to hit
- unsigned int maxHitProbes; // max entries checked to hit
- unsigned int missCount; // cache lookup no-find tally
- unsigned int missProbes; // sum entries checked to miss
- unsigned int maxMissProbes; // max entries checked to miss
- unsigned int flushCount; // cache flush tally
- unsigned int flushedEntries; // sum cache entries flushed
- unsigned int maxFlushedEntries; // max cache entries flushed
-} CacheInstrumentation;
-
-#define CACHE_INSTRUMENTATION(cache) (CacheInstrumentation *) &cache->buckets[cache->mask + 1];
-#endif
+/***********************************************************************
+* Cache statistics for OBJC_PRINT_CACHE_SETUP
+**********************************************************************/
+static unsigned int cache_counts[16];
+static size_t cache_allocations;
+static size_t cache_collections;
-/* Cache filling and flushing instrumentation */
-static int totalCacheFills = 0;
+/***********************************************************************
+* Pointers used by compiled class objects
+* These use asm to avoid conflicts with the compiler's internal declarations
+**********************************************************************/
-#ifdef OBJC_INSTRUMENTED
-unsigned int LinearFlushCachesCount = 0;
-unsigned int LinearFlushCachesVisitedCount = 0;
-unsigned int MaxLinearFlushCachesVisitedCount = 0;
-unsigned int NonlinearFlushCachesCount = 0;
-unsigned int NonlinearFlushCachesClassCount = 0;
-unsigned int NonlinearFlushCachesVisitedCount = 0;
-unsigned int MaxNonlinearFlushCachesVisitedCount = 0;
-unsigned int IdealFlushCachesCount = 0;
-unsigned int MaxIdealFlushCachesCount = 0;
+// "cache" is cache->buckets; "vtable" is cache->mask/occupied
+// hack to avoid conflicts with compiler's internal declaration
+asm("\n .section __TEXT,__const"
+ "\n .globl __objc_empty_cache"
+#if __LP64__
+ "\n .align 3"
+ "\n __objc_empty_cache: .quad 0"
+#else
+ "\n .align 2"
+ "\n __objc_empty_cache: .long 0"
#endif
+ "\n .globl __objc_empty_vtable"
+ "\n .set __objc_empty_vtable, 0"
+ );
-/***********************************************************************
-* A static empty cache. All classes initially point at this cache.
-* When the first message is sent it misses in the cache, and when
-* the cache is grown it checks for this case and uses malloc rather
-* than realloc. This avoids the need to check for NULL caches in the
-* messenger.
-***********************************************************************/
-
-struct objc_cache _objc_empty_cache =
-{
- 0, // mask
- 0, // occupied
- { NULL } // buckets
-};
-#ifdef OBJC_INSTRUMENTED
-CacheInstrumentation emptyCacheInstrumentation = {0};
-#endif
-
+#if __i386__ || __arm__
+// objc_msgSend has few registers available.
+// Cache scan increments and wraps at special end-marking bucket.
+#define CACHE_END_MARKER 1
+static inline mask_t cache_next(mask_t i, mask_t mask) {
+ return (i+1) & mask;
+}
-/* Local prototypes */
+#elif __x86_64__
+// objc_msgSend has lots of registers and/or memory operands available.
+// Cache scan decrements. No end marker needed.
+#define CACHE_END_MARKER 0
+static inline mask_t cache_next(mask_t i, mask_t mask) {
+ return i ? i-1 : mask;
+}
-static BOOL _cache_isEmpty(Cache cache);
-static Cache _cache_malloc(uintptr_t slotCount);
-static Cache _cache_create(Class cls);
-static Cache _cache_expand(Class cls);
-#if __OBJC2__
-static void _cache_flush(Class cls);
#else
-extern "C" void _cache_flush(Class cls);
-#endif
-
-static int _collecting_in_critical(void);
-static void _garbage_make_room(void);
-static void _cache_collect_free(void *data, size_t size);
-
-#if defined(CACHE_ALLOCATOR)
-static BOOL cache_allocator_is_block(void *block);
-static Cache cache_allocator_calloc(size_t size);
-static void cache_allocator_free(void *block);
+#error unknown architecture
+#endif
+
+
+// cannot mix sel-side caches with ignored selector constant
+// ignored selector constant also not implemented for class-side caches here
+#if SUPPORT_IGNORED_SELECTOR_CONSTANT
+#error sorry
+#endif
+
+
+// copied from dispatch_atomic_maximally_synchronizing_barrier
+// fixme verify that this barrier hack does in fact work here
+#if __x86_64__
+#define mega_barrier() \
+ do { unsigned long _clbr; __asm__ __volatile__( \
+ "cpuid" \
+ : "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory" \
+ ); } while(0)
+#elif __i386__
+#define mega_barrier() \
+ do { unsigned long _clbr; __asm__ __volatile__( \
+ "cpuid" \
+ : "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory" \
+ ); } while(0)
+#elif __arm__
+#define mega_barrier() \
+ __asm__ __volatile__( \
+ "dsb ish" \
+ : : : "memory")
+#else
+#error unknown architecture
#endif
-/***********************************************************************
-* Cache statistics for OBJC_PRINT_CACHE_SETUP
-**********************************************************************/
-static unsigned int cache_counts[16];
-static size_t cache_allocations;
-static size_t cache_collections;
-static size_t cache_allocator_regions;
-static size_t log2u(size_t x)
+static inline mask_t cache_hash(cache_key_t key, mask_t mask)
{
- unsigned int log;
-
- log = 0;
- while (x >>= 1)
- log += 1;
-
- return log;
+ return (mask_t)((key >> MASK_SHIFT) & mask);
}
-/***********************************************************************
-* _cache_isEmpty.
-* Returns YES if the given cache is some empty cache.
-* Empty caches should never be allocated on the heap.
-**********************************************************************/
-static BOOL _cache_isEmpty(Cache cache)
+// Class points to cache. Cache buckets store SEL+IMP.
+cache_t *getCache(Class cls, SEL sel __unused)
{
- return (cache == NULL || cache == (Cache)&_objc_empty_cache || cache->mask == 0);
+ assert(cls);
+ return &cls->cache;
+}
+cache_key_t getKey(Class cls __unused, SEL sel)
+{
+ assert(sel);
+ return (cache_key_t)sel;
}
-/***********************************************************************
-* _cache_malloc.
-*
-* Called from _cache_create() and cache_expand()
-* Cache locks: cacheUpdateLock must be held by the caller.
-**********************************************************************/
-static Cache _cache_malloc(uintptr_t slotCount)
-{
- Cache new_cache;
- size_t size;
+struct bucket_t {
+ cache_key_t key;
+ IMP imp;
- mutex_assert_locked(&cacheUpdateLock);
+ void set(cache_key_t newKey, IMP newImp)
+ {
+ // objc_msgSend uses key and imp with no locks.
+ // It is safe for objc_msgSend to see new imp but NULL key
+ // (It will get a cache miss but not dispatch to the wrong place.)
+ // It is unsafe for objc_msgSend to see old imp and new key.
+ // Therefore we write new imp, wait a lot, then write new key.
- // Allocate table (why not check for failure?)
- size = sizeof(struct objc_cache) + TABLE_SIZE(slotCount);
-#if defined(OBJC_INSTRUMENTED)
- // Custom cache allocator can't handle instrumentation.
- size += sizeof(CacheInstrumentation);
- new_cache = _calloc_internal(size, 1);
- new_cache->mask = slotCount - 1;
-#elif !defined(CACHE_ALLOCATOR)
- // fixme cache allocator implementation isn't 64-bit clean
- new_cache = _calloc_internal(size, 1);
- new_cache->mask = (unsigned int)(slotCount - 1);
-#else
- if (size < CACHE_ALLOCATOR_MIN || UseInternalZone) {
- new_cache = (Cache)_calloc_internal(size, 1);
- new_cache->mask = slotCount - 1;
- // occupied and buckets and instrumentation are all zero
- } else {
- new_cache = cache_allocator_calloc(size);
- // mask is already set
- // occupied and buckets and instrumentation are all zero
- }
-#endif
+ assert(key == 0 || key == newKey);
+
+ imp = newImp;
- if (PrintCaches) {
- size_t bucket = log2u(slotCount);
- if (bucket < sizeof(cache_counts) / sizeof(cache_counts[0])) {
- cache_counts[bucket]++;
+ if (key != newKey) {
+ mega_barrier();
+ key = newKey;
}
- cache_allocations++;
}
+};
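Editor's note: an illustrative reader-side counterpart to the ordering rule in
set() above, not part of this patch. The real reader is objc_msgSend in
assembly; this C sketch only shows the invariant the writer establishes by
storing imp before key (read-side ordering in the real code comes from the
messenger's own load sequence).

    // Lock-free probe of one bucket (sketch only).
    static IMP bucket_probe(bucket_t *b, cache_key_t k)
    {
        if (b->key == k) return b->imp; // matching key => imp already published
        return NULL;                    // empty or other key: keep probing / slow path
    }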
- return new_cache;
-}
-/***********************************************************************
-* _cache_free_block.
-*
-* Called from _cache_free() and _cache_collect_free().
-* block may be a cache or a forward:: entry.
-* If block is a cache, forward:: entries it points to will NOT be freed.
-* Cache locks: cacheUpdateLock must be held by the caller.
-**********************************************************************/
-static void _cache_free_block(void *block)
+void cache_t::reallocate(mask_t oldCapacity, mask_t newCapacity)
{
- mutex_assert_locked(&cacheUpdateLock);
-
-#if !TARGET_OS_WIN32
if (PrintCaches) {
- Cache cache = (Cache)block;
- size_t slotCount = cache->mask + 1;
- if (isPowerOf2(slotCount)) {
- size_t bucket = log2u(slotCount);
+ size_t bucket = log2u(newCapacity);
+ if (bucket < sizeof(cache_counts) / sizeof(cache_counts[0])) {
+ cache_counts[bucket]++;
+ }
+ cache_allocations++;
+
+ if (oldCapacity) {
+ bucket = log2u(oldCapacity);
if (bucket < sizeof(cache_counts) / sizeof(cache_counts[0])) {
cache_counts[bucket]--;
}
}
}
+
+ // objc_msgSend uses shiftmask and buckets with no locks.
+ // It is safe for objc_msgSend to see new buckets but old shiftmask.
+ // (It will get a cache miss but not overrun the buckets' bounds).
+ // It is unsafe for objc_msgSend to see old buckets and new shiftmask.
+ // Therefore we write new buckets, wait a lot, then write new shiftmask.
+ // objc_msgSend reads shiftmask first, then buckets.
+
+ bucket_t *oldBuckets = buckets;
+
+#if CACHE_END_MARKER
+ // Allocate one extra bucket to mark the end of the list.
+ // fixme instead put the end mark inline when +1 is malloc-inefficient
+ bucket_t *newBuckets =
+ (bucket_t *)_calloc_internal(newCapacity + 1, sizeof(bucket_t));
+
+ // End marker's key is 1 and imp points to the first bucket.
+ newBuckets[newCapacity].key = (cache_key_t)(uintptr_t)1;
+# if __arm__
+ // Point before the first bucket instead to save an instruction in msgSend
+ newBuckets[newCapacity].imp = (IMP)(newBuckets - 1);
+# else
+ newBuckets[newCapacity].imp = (IMP)newBuckets;
+# endif
+#else
+ bucket_t *newBuckets =
+ (bucket_t *)_calloc_internal(newCapacity, sizeof(bucket_t));
#endif
-
-#if defined(CACHE_ALLOCATOR)
- if (cache_allocator_is_block(block)) {
- cache_allocator_free(block);
- } else
-#endif
- {
- free(block);
+
+ // Cache's old contents are not propagated.
+ // This is thought to save cache memory at the cost of extra cache fills.
+ // fixme re-measure this
+
+ // ensure other threads see buckets contents before buckets pointer
+ mega_barrier();
+
+ buckets = newBuckets;
+
+ // ensure other threads see new buckets before new shiftmask
+ mega_barrier();
+
+ setCapacity(newCapacity);
+ occupied = 0;
+
+ if (oldCapacity > 0) {
+ cache_collect_free(oldBuckets, oldCapacity * sizeof(bucket_t));
+ cache_collect(false);
}
}
-/***********************************************************************
-* _cache_free.
-*
-* Called from _objc_remove_classes_in_image().
-* forward:: entries in the cache ARE freed.
-* Cache locks: cacheUpdateLock must NOT be held by the caller.
-**********************************************************************/
-void _cache_free(Cache cache)
+// called by objc_msgSend
+extern "C"
+void objc_msgSend_corrupt_cache_error(id receiver, SEL sel, Class isa,
+ bucket_t *bucket)
{
- unsigned int i;
-
- mutex_lock(&cacheUpdateLock);
-
- for (i = 0; i < cache->mask + 1; i++) {
- cache_entry *entry = (cache_entry *)cache->buckets[i];
- if (entry && entry->imp == _objc_msgForward_internal) {
- _cache_free_block(entry);
- }
- }
-
- _cache_free_block(cache);
-
- mutex_unlock(&cacheUpdateLock);
+ cache_t::bad_cache(receiver, sel, isa, bucket);
}
-
-/***********************************************************************
-* _cache_create.
-*
-* Called from _cache_expand().
-* Cache locks: cacheUpdateLock must be held by the caller.
-**********************************************************************/
-static Cache _cache_create(Class cls)
+extern "C"
+void cache_getImp_corrupt_cache_error(id receiver, SEL sel, Class isa,
+ bucket_t *bucket)
{
- Cache new_cache;
-
- mutex_assert_locked(&cacheUpdateLock);
-
- // Allocate new cache block
- new_cache = _cache_malloc(INIT_CACHE_SIZE);
-
- // Install the cache
- _class_setCache(cls, new_cache);
-
- // Clear the grow flag so that we will re-use the current storage,
- // rather than actually grow the cache, when expanding the cache
- // for the first time
- if (_class_slow_grow) {
- _class_setGrowCache(cls, NO);
- }
-
- // Return our creation
- return new_cache;
+ cache_t::bad_cache(receiver, sel, isa, bucket);
}
-
-/***********************************************************************
-* _cache_expand.
-*
-* Called from _cache_fill ()
-* Cache locks: cacheUpdateLock must be held by the caller.
-**********************************************************************/
-static Cache _cache_expand(Class cls)
+void cache_t::bad_cache(id receiver, SEL sel, Class isa, bucket_t *bucket)
{
- Cache old_cache;
- Cache new_cache;
- uintptr_t slotCount;
- uintptr_t index;
+ // Log in separate steps in case the logging itself causes a crash.
+ _objc_inform_now_and_on_crash
+ ("Method cache corrupted. This may be a message to an "
+ "invalid object, or a memory error somewhere else.");
+ cache_t *cache = &isa->cache;
+ _objc_inform_now_and_on_crash
+ ("%s %p, SEL %p, isa %p, cache %p, buckets %p, "
+ "mask 0x%x, occupied 0x%x, wrap bucket %p",
+ receiver ? "receiver" : "unused", receiver,
+ sel, isa, cache, cache->buckets,
+ cache->shiftmask >> MASK_SHIFT, cache->occupied, bucket);
+ _objc_inform_now_and_on_crash
+ ("%s %zu bytes, buckets %zu bytes",
+ receiver ? "receiver" : "unused", malloc_size(receiver),
+ malloc_size(cache->buckets));
+ _objc_inform_now_and_on_crash
+ ("selector '%s'", sel_getName(sel));
+ _objc_inform_now_and_on_crash
+ ("isa '%s'", isa->getName());
+ _objc_fatal
+ ("Method cache corrupted.");
+}
- mutex_assert_locked(&cacheUpdateLock);
- // First growth goes from empty cache to a real one
- old_cache = _class_getCache(cls);
- if (_cache_isEmpty(old_cache))
- return _cache_create (cls);
-
- if (_class_slow_grow) {
- // Cache grows every other time only.
- if (_class_shouldGrowCache(cls)) {
- // Grow the cache this time. Don't grow next time.
- _class_setGrowCache(cls, NO);
- }
- else {
- // Reuse the current cache storage this time. Do grow next time.
- _class_setGrowCache(cls, YES);
-
- // Clear the valid-entry counter
- old_cache->occupied = 0;
-
- // Invalidate all the cache entries
- for (index = 0; index < old_cache->mask + 1; index += 1)
- {
- // Remember what this entry was, so we can possibly
- // deallocate it after the bucket has been invalidated
- cache_entry *oldEntry = (cache_entry *)old_cache->buckets[index];
-
- // Skip invalid entry
- if (!oldEntry)
- continue;
-
- // Invalidate this entry
- old_cache->buckets[index] = NULL;
-
- // Deallocate "forward::" entry
- if (oldEntry->imp == _objc_msgForward_internal) {
- _cache_collect_free (oldEntry, sizeof(cache_entry));
- }
- }
-
- // Return the same old cache, freshly emptied
- return old_cache;
+bucket_t * cache_t::find(cache_key_t k)
+{
+ mask_t m = mask();
+ mask_t begin = cache_hash(k, m);
+ mask_t i = begin;
+ do {
+ if (buckets[i].key == 0 || buckets[i].key == k) {
+ return &buckets[i];
}
- }
+ } while ((i = cache_next(i, m)) != begin);
- // Double the cache size
- slotCount = (old_cache->mask + 1) << 1;
-
- new_cache = _cache_malloc(slotCount);
+ // hack
+ Class cls = (Class)((uintptr_t)this - offsetof(objc_class, cache));
+ cache_t::bad_cache(nil, (SEL)k, cls, nil);
+}
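// Editor's note (not part of this patch): find() returns either the bucket
// already holding k or the first empty bucket in probe order. Because fills
// expand the cache at 3/4 occupancy, the probe loop should never wrap all the
// way around; if it does, the cache is treated as corrupt via bad_cache().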
-#ifdef OBJC_INSTRUMENTED
- // Propagate the instrumentation data
- {
- CacheInstrumentation *oldCacheData;
- CacheInstrumentation *newCacheData;
- oldCacheData = CACHE_INSTRUMENTATION(old_cache);
- newCacheData = CACHE_INSTRUMENTATION(new_cache);
- bcopy ((const char *)oldCacheData, (char *)newCacheData, sizeof(CacheInstrumentation));
- }
-#endif
+void cache_t::expand()
+{
+ mutex_assert_locked(&cacheUpdateLock);
+
+ mask_t oldCapacity = capacity();
+ mask_t newCapacity = oldCapacity ? oldCapacity*2 : INIT_CACHE_SIZE;
- // Deallocate "forward::" entries from the old cache
- for (index = 0; index < old_cache->mask + 1; index++) {
- cache_entry *entry = (cache_entry *)old_cache->buckets[index];
- if (entry && entry->imp == _objc_msgForward_internal) {
- _cache_collect_free (entry, sizeof(cache_entry));
- }
+ if ((((newCapacity-1) << MASK_SHIFT) >> MASK_SHIFT) != newCapacity-1) {
+ // shiftmask overflow - can't grow further
+ newCapacity = oldCapacity;
}
- // Install new cache
- _class_setCache(cls, new_cache);
-
- // Deallocate old cache, try freeing all the garbage
- _cache_collect_free (old_cache, old_cache->mask * sizeof(cache_entry *));
- _cache_collect(false);
-
- return new_cache;
+ reallocate(oldCapacity, newCapacity);
}
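// Editor's note (not part of this patch): the shift-and-compare guard in
// expand() above is an overflow check. (newCapacity-1) is the prospective
// mask; if packing it with MASK_SHIFT and unpacking it does not round-trip,
// the mask no longer fits in the shiftmask field, so the cache is reallocated
// at its current capacity (emptying it) rather than grown further.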
-/***********************************************************************
-* _cache_fill. Add the specified method to the specified class' cache.
-* Returns NO if the cache entry wasn't added: cache was busy,
-* class is still being initialized, new entry is a duplicate.
-*
-* Called only from _class_lookupMethodAndLoadCache and
-* class_respondsToMethod and _cache_addForwardEntry.
-*
-* Cache locks: cacheUpdateLock must not be held.
-**********************************************************************/
-BOOL _cache_fill(Class cls, Method smt, SEL sel)
+static void cache_fill_nolock(Class cls, SEL sel, IMP imp)
{
- uintptr_t newOccupied;
- uintptr_t index;
- cache_entry **buckets;
- cache_entry *entry;
- Cache cache;
-
- mutex_assert_unlocked(&cacheUpdateLock);
+ mutex_assert_locked(&cacheUpdateLock);
// Never cache before +initialize is done
- if (!_class_isInitialized(cls)) {
- return NO;
- }
-
- // Keep tally of cache additions
- totalCacheFills += 1;
-
- mutex_lock(&cacheUpdateLock);
-
- entry = (cache_entry *)smt;
-
- cache = _class_getCache(cls);
+ if (!cls->isInitialized()) return;
// Make sure the entry wasn't added to the cache by some other thread
// before we grabbed the cacheUpdateLock.
- // Don't use _cache_getMethod() because _cache_getMethod() doesn't
- // return forward:: entries.
- if (_cache_getImp(cls, sel)) {
- mutex_unlock(&cacheUpdateLock);
- return NO; // entry is already cached, didn't add new one
- }
+ if (cache_getImp(cls, sel)) return;
+
+ cache_t *cache = getCache(cls, sel);
+ cache_key_t key = getKey(cls, sel);
// Use the cache as-is if it is less than 3/4 full
- newOccupied = cache->occupied + 1;
- if ((newOccupied * 4) <= (cache->mask + 1) * 3) {
+ mask_t newOccupied = cache->occupied + 1;
+ if ((newOccupied * 4) <= (cache->mask() + 1) * 3) {
// Cache is less than 3/4 full.
- cache->occupied = (unsigned int)newOccupied;
} else {
// Cache is too full. Expand it.
- cache = _cache_expand (cls);
-
- // Account for the addition
- cache->occupied += 1;
+ cache->expand();
}
- // Scan for the first unused slot and insert there.
+    // Scan for the first unused slot (or the slot already used by this key) and insert there.
// There is guaranteed to be an empty slot because the
// minimum size is 4 and we resized at 3/4 full.
- buckets = (cache_entry **)cache->buckets;
- for (index = CACHE_HASH(sel, cache->mask);
- buckets[index] != NULL;
- index = (index+1) & cache->mask)
- {
- // empty
- }
- buckets[index] = entry;
+ bucket_t *bucket = cache->find(key);
+ if (bucket->key == 0) cache->occupied++;
+ bucket->set(key, imp);
+}
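Editor's note: a worked example of the 3/4-occupancy rule in
cache_fill_nolock() above; illustrative only, not part of this patch. For a
capacity-8 cache, (newOccupied * 4) <= (8 * 3) holds while newOccupied <= 6,
so the cache is reused up to 6 entries and the 7th insertion triggers
expand(), doubling it to 16 slots.

    // Mirrors the occupancy check above (sketch only).
    static bool cache_would_expand(mask_t occupied, mask_t capacity)
    {
        mask_t newOccupied = occupied + 1;
        return (newOccupied * 4) > (capacity * 3);
    }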
+void cache_fill(Class cls, SEL sel, IMP imp)
+{
+#if !DEBUG_TASK_THREADS
+ mutex_lock(&cacheUpdateLock);
+ cache_fill_nolock(cls, sel, imp);
mutex_unlock(&cacheUpdateLock);
-
- return YES; // successfully added new cache entry
+#else
+ _collecting_in_critical();
+ return;
+#endif
}
-/***********************************************************************
-* _cache_addForwardEntry
-* Add a forward:: entry for the given selector to cls's method cache.
-* Does nothing if the cache addition fails for any reason.
-* Called from class_respondsToMethod and _class_lookupMethodAndLoadCache.
-* Cache locks: cacheUpdateLock must not be held.
-**********************************************************************/
-void _cache_addForwardEntry(Class cls, SEL sel)
+// Reset any entry for cls/sel to the uncached lookup
+static void cache_eraseMethod_nolock(Class cls, SEL sel)
{
- cache_entry *smt;
-
- smt = (cache_entry *)_malloc_internal(sizeof(cache_entry));
- smt->name = sel;
- smt->imp = _objc_msgForward_internal;
- if (! _cache_fill(cls, (Method)smt, sel)) { // fixme hack
- // Entry not added to cache. Don't leak the method struct.
- _free_internal(smt);
+ mutex_assert_locked(&cacheUpdateLock);
+
+ cache_t *cache = getCache(cls, sel);
+ cache_key_t key = getKey(cls, sel);
+
+ bucket_t *bucket = cache->find(key);
+ if (bucket->key == key) {
+ bucket->imp = _objc_msgSend_uncached_impcache;
}
}
-/***********************************************************************
-* _cache_addIgnoredEntry
-* Add an entry for the ignored selector to cls's method cache.
-* Does nothing if the cache addition fails for any reason.
-* Returns the ignored IMP.
-* Cache locks: cacheUpdateLock must not be held.
-**********************************************************************/
-#if SUPPORT_GC && !SUPPORT_IGNORED_SELECTOR_CONSTANT
-static cache_entry *alloc_ignored_entries(void)
+// Resets cache entries for all methods in mlist for cls and its subclasses.
+void cache_eraseMethods(Class cls, method_list_t *mlist)
{
- cache_entry *e = (cache_entry *)_malloc_internal(5 * sizeof(cache_entry));
- e[0] = (cache_entry){ @selector(retain), 0,(IMP)&_objc_ignored_method};
- e[1] = (cache_entry){ @selector(release), 0,(IMP)&_objc_ignored_method};
- e[2] = (cache_entry){ @selector(autorelease),0,(IMP)&_objc_ignored_method};
- e[3] = (cache_entry){ @selector(retainCount),0,(IMP)&_objc_ignored_method};
- e[4] = (cache_entry){ @selector(dealloc), 0,(IMP)&_objc_ignored_method};
- return e;
-}
-#endif
+ rwlock_assert_writing(&runtimeLock);
+ mutex_lock(&cacheUpdateLock);
-IMP _cache_addIgnoredEntry(Class cls, SEL sel)
-{
- cache_entry *entryp = NULL;
-
-#if !SUPPORT_GC
- _objc_fatal("selector ignored with GC off");
-#elif SUPPORT_IGNORED_SELECTOR_CONSTANT
- static cache_entry entry = { (SEL)kIgnore, 0, (IMP)&_objc_ignored_method };
- entryp = &entry;
- assert(sel == (SEL)kIgnore);
-#else
- // hack
- int i;
- static cache_entry *entries;
- INIT_ONCE_PTR(entries, alloc_ignored_entries(), free(v));
-
- assert(ignoreSelector(sel));
- for (i = 0; i < 5; i++) {
- if (sel == entries[i].name) {
- entryp = &entries[i];
- break;
+ FOREACH_REALIZED_CLASS_AND_SUBCLASS(c, cls, {
+ for (uint32_t m = 0; m < mlist->count; m++) {
+ SEL sel = mlist->get(m).name;
+ cache_eraseMethod_nolock(c, sel);
}
- }
- if (!entryp) _objc_fatal("selector %s (%p) is not ignored",
- sel_getName(sel), sel);
-#endif
+ });
- _cache_fill(cls, (Method)entryp, sel);
- return entryp->imp;
+ mutex_unlock(&cacheUpdateLock);
}
-/***********************************************************************
-* _cache_flush. Invalidate all valid entries in the given class' cache.
-*
-* Called from flush_caches() and _cache_fill()
-* Cache locks: cacheUpdateLock must be held by the caller.
-**********************************************************************/
-#if __OBJC2__
-static
-#endif
-void _cache_flush(Class cls)
+// Reset any copies of imp in this cache to the uncached lookup
+void cache_eraseImp_nolock(Class cls, SEL sel, IMP imp)
{
- Cache cache;
- unsigned int index;
-
mutex_assert_locked(&cacheUpdateLock);
- // Locate cache. Ignore unused cache.
- cache = _class_getCache(cls);
- if (_cache_isEmpty(cache)) return;
+ cache_t *cache = getCache(cls, sel);
-#ifdef OBJC_INSTRUMENTED
- {
- CacheInstrumentation *cacheData;
-
- // Tally this flush
- cacheData = CACHE_INSTRUMENTATION(cache);
- cacheData->flushCount += 1;
- cacheData->flushedEntries += cache->occupied;
- if (cache->occupied > cacheData->maxFlushedEntries)
- cacheData->maxFlushedEntries = cache->occupied;
+ bucket_t *buckets = cache->buckets;
+ mask_t count = cache->capacity();
+ for (mask_t i = 0; i < count; i++) {
+ if (buckets[i].imp == imp) {
+ buckets[i].imp = _objc_msgSend_uncached_impcache;
+ }
}
-#endif
-
- // Traverse the cache
- for (index = 0; index <= cache->mask; index += 1)
- {
- // Remember what this entry was, so we can possibly
- // deallocate it after the bucket has been invalidated
- cache_entry *oldEntry = (cache_entry *)cache->buckets[index];
-
- // Invalidate this entry
- cache->buckets[index] = NULL;
+}
- // Deallocate "forward::" entry
- if (oldEntry && oldEntry->imp == _objc_msgForward_internal)
- _cache_collect_free (oldEntry, sizeof(cache_entry));
- }
- // Clear the valid-entry counter
- cache->occupied = 0;
+void cache_eraseImp(Class cls, SEL sel, IMP imp)
+{
+ mutex_lock(&cacheUpdateLock);
+ cache_eraseImp_nolock(cls, sel, imp);
+ mutex_unlock(&cacheUpdateLock);
}
-/***********************************************************************
-* flush_cache. Flushes the instance method cache for class cls only.
-* Use flush_caches() if cls might have in-use subclasses.
-**********************************************************************/
-void flush_cache(Class cls)
+// Reset this entire cache to the uncached lookup by reallocating it.
+// This must not shrink the cache - that breaks the lock-free scheme.
+void cache_erase_nolock(cache_t *cache)
{
- if (cls) {
- mutex_lock(&cacheUpdateLock);
- _cache_flush(cls);
- mutex_unlock(&cacheUpdateLock);
+ mutex_assert_locked(&cacheUpdateLock);
+
+ mask_t capacity = cache->capacity();
+ if (capacity > 0 && cache->occupied > 0) {
+ cache->reallocate(capacity, capacity);
}
}
// Must not be zero - thread_get_state() on a new thread returns PC == 0.
#define PC_SENTINEL 1
-// UNIX03 compliance hack (4508809)
-#if !__DARWIN_UNIX03
-#define __srr0 srr0
-#define __eip eip
-#endif
-
static uintptr_t _get_pc_for_thread(thread_t thread)
#if defined(__i386__)
{
mach_port_t mythread = pthread_mach_thread_np(pthread_self());
// Get a list of all the threads in the current task
- ret = task_threads (mach_task_self (), &threads, &number);
- if (ret != KERN_SUCCESS)
- {
- _objc_fatal("task_thread failed (result %d)\n", ret);
+#if !DEBUG_TASK_THREADS
+ ret = task_threads(mach_task_self(), &threads, &number);
+#else
+ ret = objc_task_threads(mach_task_self(), &threads, &number);
+#endif
+
+ if (ret != KERN_SUCCESS) {
+ // See DEBUG_TASK_THREADS below to help debug this.
+ _objc_fatal("task_threads failed (result 0x%x)\n", ret);
}
// Check whether any thread is in the cache lookup code
static size_t garbage_threshold = 32*1024;
// table of refs to free
-static void **garbage_refs = 0;
+static bucket_t **garbage_refs = 0;
// current number of refs in garbage_refs
static size_t garbage_count = 0;
if (first)
{
first = 0;
- garbage_refs = (void**)
+ garbage_refs = (bucket_t**)
_malloc_internal(INIT_GARBAGE_COUNT * sizeof(void *));
garbage_max = INIT_GARBAGE_COUNT;
}
// Double the table if it is full
else if (garbage_count == garbage_max)
{
- garbage_refs = (void**)
+ garbage_refs = (bucket_t**)
_realloc_internal(garbage_refs, garbage_max * 2 * sizeof(void *));
garbage_max *= 2;
}
/***********************************************************************
-* _cache_collect_free. Add the specified malloc'd memory to the list
+* cache_collect_free. Add the specified malloc'd memory to the list
* of them to free at some later point.
* size is used for the collection threshold. It does not have to be
* precisely the block's size.
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
-static void _cache_collect_free(void *data, size_t size)
+static void cache_collect_free(bucket_t *data, size_t size)
{
mutex_assert_locked(&cacheUpdateLock);
/***********************************************************************
-* _cache_collect. Try to free accumulated dead caches.
+* cache_collect. Try to free accumulated dead caches.
* collectALot tries harder to free memory.
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
-void _cache_collect(bool collectALot)
+void cache_collect(bool collectALot)
{
mutex_assert_locked(&cacheUpdateLock);
// Log our progress
if (PrintCaches) {
cache_collections++;
- _objc_inform ("CACHES: COLLECTING %zu bytes (%zu regions, %zu allocations, %zu collections)", garbage_byte_size, cache_allocator_regions, cache_allocations, cache_collections);
+ _objc_inform ("CACHES: COLLECTING %zu bytes (%zu allocations, %zu collections)", garbage_byte_size, cache_allocations, cache_collections);
}
// Dispose all refs now in the garbage
while (garbage_count--) {
- _cache_free_block(garbage_refs[garbage_count]);
+ free(garbage_refs[garbage_count]);
}
// Clear the garbage count and total size indicator
if (PrintCaches) {
size_t i;
- size_t total = 0;
- size_t ideal_total = 0;
- size_t malloc_total = 0;
- size_t local_total = 0;
+ size_t total_count = 0;
+ size_t total_size = 0;
for (i = 0; i < sizeof(cache_counts) / sizeof(cache_counts[0]); i++) {
int count = cache_counts[i];
int slots = 1 << i;
- size_t size = sizeof(struct objc_cache) + TABLE_SIZE(slots);
- size_t ideal = size;
-#if TARGET_OS_WIN32
- size_t malloc = size;
-#else
- size_t malloc = malloc_good_size(size);
-#endif
- size_t local = size < CACHE_ALLOCATOR_MIN ? malloc : cache_allocator_size_for_mask(cache_allocator_mask_for_size(size));
+ size_t size = count * slots * sizeof(bucket_t);
if (!count) continue;
- _objc_inform("CACHES: %4d slots: %4d caches, %6zu / %6zu / %6zu bytes ideal/malloc/local, %6zu / %6zu bytes wasted malloc/local", slots, count, ideal*count, malloc*count, local*count, malloc*count-ideal*count, local*count-ideal*count);
+ _objc_inform("CACHES: %4d slots: %4d caches, %6zu bytes",
+ slots, count, size);
- total += count;
- ideal_total += ideal*count;
- malloc_total += malloc*count;
- local_total += local*count;
+ total_count += count;
+ total_size += size;
}
- _objc_inform("CACHES: total: %4zu caches, %6zu / %6zu / %6zu bytes ideal/malloc/local, %6zu / %6zu bytes wasted malloc/local", total, ideal_total, malloc_total, local_total, malloc_total-ideal_total, local_total-ideal_total);
+ _objc_inform("CACHES: total: %4zu caches, %6zu bytes",
+ total_count, total_size);
}
}
-
-
-
-#if defined(CACHE_ALLOCATOR)
-
/***********************************************************************
-* Custom method cache allocator.
-* Method cache block sizes are 2^slots+2 words, which is a pessimal
-* case for the system allocator. It wastes 504 bytes per cache block
-* with 128 or more slots, which adds up to tens of KB for an AppKit process.
-* To save memory, the custom cache allocator below is used.
-*
-* The cache allocator uses 128 KB allocation regions. Few processes will
-* require a second region. Within a region, allocation is address-ordered
-* first fit.
-*
-* The cache allocator uses a quantum of 520.
-* Cache block ideal sizes: 520, 1032, 2056, 4104
-* Cache allocator sizes: 520, 1040, 2080, 4160
+* objc_task_threads
+* Replacement for task_threads(). Define DEBUG_TASK_THREADS to debug
+* crashes when task_threads() is failing.
*
-* Because all blocks are known to be genuine method caches, the ordinary
-* cache->mask and cache->occupied fields are used as block headers.
-* No out-of-band headers are maintained. The number of blocks will
-* almost always be fewer than 200, so for simplicity there is no free
-* list or other optimization.
-*
-* Block in use: mask != 0, occupied != -1 (mask indicates block size)
-* Block free: mask != 0, occupied == -1 (mask is precisely block size)
-*
-* No cache allocator functions take any locks. Instead, the caller
-* must hold the cacheUpdateLock.
-*
-* fixme with 128 KB regions and 520 B min block size, an allocation
-* bitmap would be only 32 bytes - better than free list?
-**********************************************************************/
-
-typedef struct cache_allocator_block {
- uintptr_t size;
- uintptr_t state;
- struct cache_allocator_block *nextFree;
-} cache_allocator_block;
-
-typedef struct cache_allocator_region {
- cache_allocator_block *start;
- cache_allocator_block *end; // first non-block address
- cache_allocator_block *freeList;
- struct cache_allocator_region *next;
-} cache_allocator_region;
-
-static cache_allocator_region *cacheRegion = NULL;
-
-
-/***********************************************************************
-* cache_allocator_add_region
-* Allocates and returns a new region that can hold at least size
-* bytes of large method caches.
-* The actual size will be rounded up to a CACHE_QUANTUM boundary,
-* with a minimum of CACHE_REGION_SIZE.
-* The new region is lowest-priority for new allocations. Callers that
-* know the other regions are already full should allocate directly
-* into the returned region.
-**********************************************************************/
-static cache_allocator_region *cache_allocator_add_region(size_t size)
-{
- vm_address_t addr;
- cache_allocator_block *b;
- cache_allocator_region **rgnP;
- cache_allocator_region *newRegion = (cache_allocator_region *)
- _calloc_internal(1, sizeof(cache_allocator_region));
-
- // Round size up to quantum boundary, and apply the minimum size.
- size += CACHE_QUANTUM - (size % CACHE_QUANTUM);
- if (size < CACHE_REGION_SIZE) size = CACHE_REGION_SIZE;
-
- // Allocate the region
- addr = (vm_address_t)calloc(size, 1);
- newRegion->start = (cache_allocator_block *)addr;
- newRegion->end = (cache_allocator_block *)(addr + size);
-
- // Mark the first block: free and covers the entire region
- b = newRegion->start;
- b->size = size;
- b->state = (uintptr_t)-1;
- b->nextFree = NULL;
- newRegion->freeList = b;
-
- // Add to end of the linked list of regions.
- // Other regions should be re-used before this one is touched.
- newRegion->next = NULL;
- rgnP = &cacheRegion;
- while (*rgnP) {
- rgnP = &(**rgnP).next;
- }
- *rgnP = newRegion;
-
- cache_allocator_regions++;
-
- return newRegion;
-}
-
-
-/***********************************************************************
-* cache_allocator_coalesce
-* Attempts to coalesce a free block with the single free block following
-* it in the free list, if any.
-**********************************************************************/
-static void cache_allocator_coalesce(cache_allocator_block *block)
-{
- if (block->size + (uintptr_t)block == (uintptr_t)block->nextFree) {
- block->size += block->nextFree->size;
- block->nextFree = block->nextFree->nextFree;
- }
-}
-
-
-/***********************************************************************
-* cache_region_calloc
-* Attempt to allocate a size-byte block in the given region.
-* Allocation is first-fit. The free list is already fully coalesced.
-* Returns NULL if there is not enough room in the region for the block.
-**********************************************************************/
-static void *cache_region_calloc(cache_allocator_region *rgn, size_t size)
-{
- cache_allocator_block **blockP;
- uintptr_t mask;
-
- // Save mask for allocated block, then round size
- // up to CACHE_QUANTUM boundary
- mask = cache_allocator_mask_for_size(size);
- size = cache_allocator_size_for_mask(mask);
-
- // Search the free list for a sufficiently large free block.
-
- for (blockP = &rgn->freeList;
- *blockP != NULL;
- blockP = &(**blockP).nextFree)
- {
- cache_allocator_block *block = *blockP;
- if (block->size < size) continue; // not big enough
-
- // block is now big enough. Allocate from it.
-
- // Slice off unneeded fragment of block, if any,
- // and reconnect the free list around block.
- if (block->size - size >= CACHE_QUANTUM) {
- cache_allocator_block *leftover =
- (cache_allocator_block *)(size + (uintptr_t)block);
- leftover->size = block->size - size;
- leftover->state = (uintptr_t)-1;
- leftover->nextFree = block->nextFree;
- *blockP = leftover;
- } else {
- *blockP = block->nextFree;
- }
-
- // block is now exactly the right size.
-
- bzero(block, size);
- block->size = mask; // Cache->mask
- block->state = 0; // Cache->occupied
-
- return block;
- }
-
- // No room in this region.
- return NULL;
-}
-
-
-/***********************************************************************
-* cache_allocator_calloc
-* Custom allocator for large method caches (128+ slots)
-* The returned cache block already has cache->mask set.
-* cache->occupied and the cache contents are zero.
-* Cache locks: cacheUpdateLock must be held by the caller
-**********************************************************************/
-static Cache cache_allocator_calloc(size_t size)
-{
- cache_allocator_region *rgn;
-
- mutex_assert_locked(&cacheUpdateLock);
-
- for (rgn = cacheRegion; rgn != NULL; rgn = rgn->next) {
- void *p = cache_region_calloc(rgn, size);
- if (p) {
- return (Cache)p;
- }
- }
-
- // No regions or all regions full - make a region and try one more time
- // In the unlikely case of a cache over 256KB, it will get its own region.
- return (Cache)cache_region_calloc(cache_allocator_add_region(size), size);
-}
-
-
-/***********************************************************************
-* cache_allocator_region_for_block
-* Returns the cache allocator region that ptr points into, or NULL.
-**********************************************************************/
-static cache_allocator_region *cache_allocator_region_for_block(cache_allocator_block *block)
-{
- cache_allocator_region *rgn;
- for (rgn = cacheRegion; rgn != NULL; rgn = rgn->next) {
- if (block >= rgn->start && block < rgn->end) return rgn;
- }
- return NULL;
-}
-
-
-/***********************************************************************
-* cache_allocator_is_block
-* If ptr is a live block from the cache allocator, return YES
-* If ptr is a block from some other allocator, return NO.
-* If ptr is a dead block from the cache allocator, result is undefined.
-* Cache locks: cacheUpdateLock must be held by the caller
-**********************************************************************/
-static BOOL cache_allocator_is_block(void *ptr)
-{
- mutex_assert_locked(&cacheUpdateLock);
- return (cache_allocator_region_for_block((cache_allocator_block *)ptr) != NULL);
-}
-
-/***********************************************************************
-* cache_allocator_free
-* Frees a block allocated by the cache allocator.
-* Cache locks: cacheUpdateLock must be held by the caller.
-**********************************************************************/
-static void cache_allocator_free(void *ptr)
-{
- cache_allocator_block *dead = (cache_allocator_block *)ptr;
- cache_allocator_block *cur;
- cache_allocator_region *rgn;
-
- mutex_assert_locked(&cacheUpdateLock);
-
- if (! (rgn = cache_allocator_region_for_block(dead))) {
- // free of non-pointer
- _objc_inform("cache_allocator_free of non-pointer %p", dead);
- return;
- }
-
- dead->size = cache_allocator_size_for_mask(dead->size);
- dead->state = (uintptr_t)-1;
-
- if (!rgn->freeList || rgn->freeList > dead) {
- // dead block belongs at front of free list
- dead->nextFree = rgn->freeList;
- rgn->freeList = dead;
- cache_allocator_coalesce(dead);
- return;
- }
-
- // dead block belongs in the middle or end of free list
- for (cur = rgn->freeList; cur != NULL; cur = cur->nextFree) {
- cache_allocator_block *ahead = cur->nextFree;
-
- if (!ahead || ahead > dead) {
- // cur and ahead straddle dead, OR dead belongs at end of free list
- cur->nextFree = dead;
- dead->nextFree = ahead;
-
- // coalesce into dead first in case both succeed
- cache_allocator_coalesce(dead);
- cache_allocator_coalesce(cur);
- return;
- }
- }
-
- // uh-oh
- _objc_inform("cache_allocator_free of non-pointer %p", ptr);
-}
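The free path above relies on the free list staying sorted by block address so that cache_allocator_coalesce() can merge a block with the neighbor that follows it. A self-contained restatement of that insertion, with the runtime-specific types and the coalescing calls stripped out:

    #include <stddef.h>

    typedef struct free_block {
        size_t size;
        struct free_block *nextFree;
    } free_block;

    // Insert 'dead' into an address-ordered singly linked free list.
    static void freelist_insert(free_block **listp, free_block *dead)
    {
        if (!*listp || *listp > dead) {
            dead->nextFree = *listp;      // belongs at the front of the list
            *listp = dead;
            return;
        }
        for (free_block *cur = *listp; cur != NULL; cur = cur->nextFree) {
            free_block *ahead = cur->nextFree;
            if (!ahead || ahead > dead) { // cur and ahead straddle dead, or end of list
                cur->nextFree = dead;
                dead->nextFree = ahead;
                return;
            }
        }
    }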
-
-// defined(CACHE_ALLOCATOR)
-#endif
-
-/***********************************************************************
-* Cache instrumentation and debugging
-**********************************************************************/
-
-#ifdef OBJC_INSTRUMENTED
-enum {
- CACHE_HISTOGRAM_SIZE = 512
-};
-
-unsigned int CacheHitHistogram [CACHE_HISTOGRAM_SIZE];
-unsigned int CacheMissHistogram [CACHE_HISTOGRAM_SIZE];
-#endif
-
-
-/***********************************************************************
-* _cache_print.
-**********************************************************************/
-static void _cache_print(Cache cache)
-{
- uintptr_t index;
- uintptr_t count;
-
- count = cache->mask + 1;
- for (index = 0; index < count; index += 1) {
- cache_entry *entry = (cache_entry *)cache->buckets[index];
- if (entry) {
- if (entry->imp == _objc_msgForward_internal)
- printf ("does not recognize: \n");
- printf ("%s\n", sel_getName(entry->name));
- }
- }
-}
-
-
-/***********************************************************************
-* _class_printMethodCaches.
-**********************************************************************/
-void _class_printMethodCaches(Class cls)
-{
- if (_cache_isEmpty(_class_getCache(cls))) {
- printf("no instance-method cache for class %s\n", _class_getName(cls));
- } else {
- printf("instance-method cache for class %s:\n", _class_getName(cls));
- _cache_print(_class_getCache(cls));
- }
-
- if (_cache_isEmpty(_class_getCache(((id)cls)->isa))) {
- printf("no class-method cache for class %s\n", _class_getName(cls));
- } else {
- printf ("class-method cache for class %s:\n", _class_getName(cls));
- _cache_print(_class_getCache(((id)cls)->isa));
- }
-}
-
-
-#if 0
-#warning fixme
-
-
-/***********************************************************************
-* _class_printDuplicateCacheEntries.
-**********************************************************************/
-void _class_printDuplicateCacheEntries(BOOL detail)
-{
- NXHashState state;
- Class cls;
- unsigned int duplicates;
- unsigned int index1;
- unsigned int index2;
- unsigned int mask;
- unsigned int count;
- unsigned int isMeta;
- Cache cache;
-
-
- printf ("Checking for duplicate cache entries \n");
-
- // Outermost loop - iterate over all classes
- state = NXInitHashState (class_hash);
- duplicates = 0;
- while (NXNextHashState (class_hash, &state, (void **) &cls))
- {
- // Control loop - do given class' cache, then its isa's cache
- for (isMeta = 0; isMeta <= 1; isMeta += 1)
- {
- // Select cache of interest and make sure it exists
- cache = _class_getCache(isMeta ? ((id)cls)->isa : cls);
- if (_cache_isEmpty(cache))
- continue;
-
- // Middle loop - check each entry in the given cache
- mask = cache->mask;
- count = mask + 1;
- for (index1 = 0; index1 < count; index1 += 1)
- {
- // Skip invalid entry
- if (!cache->buckets[index1])
- continue;
-
- // Inner loop - check that given entry matches no later entry
- for (index2 = index1 + 1; index2 < count; index2 += 1)
- {
- // Skip invalid entry
- if (!cache->buckets[index2])
- continue;
-
- // Check for duplication by method name comparison
-                    if (strcmp ((char *) cache->buckets[index1]->name,
-                                (char *) cache->buckets[index2]->name) == 0)
- {
- if (detail)
- printf ("%s %s\n", _class_getName(cls), sel_getName(cache->buckets[index1]->name));
- duplicates += 1;
- break;
- }
- }
- }
- }
- }
-
- // Log the findings
- printf ("duplicates = %d\n", duplicates);
- printf ("total cache fills = %d\n", totalCacheFills);
-}
+* A failure in task_threads() usually means somebody has botched their
+* Mach or MIG traffic. For example, somebody's error handling was wrong
+* and they left a message queued on the MIG reply port for task_threads()
+* to trip over.
+*
+* The code below is a modified version of task_threads(). It logs
+* the msgh_id of the reply message. The msgh_id can identify the sender
+* of the message, which can help pinpoint the faulty code.
+* DEBUG_TASK_THREADS also calls collecting_in_critical() during every
+* message dispatch, which can increase reproducibility of bugs.
+*
+* This code can be regenerated by running
+* `mig /usr/include/mach/task.defs`.
+**********************************************************************/
+#if DEBUG_TASK_THREADS
+
+#include <mach/mach.h>
+#include <mach/message.h>
+#include <mach/mig.h>
+
+#define __MIG_check__Reply__task_subsystem__ 1
+#define mig_internal static inline
+#define __DeclareSendRpc(a, b)
+#define __BeforeSendRpc(a, b)
+#define __AfterSendRpc(a, b)
+#define msgh_request_port msgh_remote_port
+#define msgh_reply_port msgh_local_port
+
+#ifndef __MachMsgErrorWithTimeout
+#define __MachMsgErrorWithTimeout(_R_) { \
+ switch (_R_) { \
+ case MACH_SEND_INVALID_DATA: \
+ case MACH_SEND_INVALID_DEST: \
+ case MACH_SEND_INVALID_HEADER: \
+ mig_put_reply_port(InP->Head.msgh_reply_port); \
+ break; \
+ case MACH_SEND_TIMED_OUT: \
+ case MACH_RCV_TIMED_OUT: \
+ default: \
+ mig_dealloc_reply_port(InP->Head.msgh_reply_port); \
+ } \
+ }
+#endif /* __MachMsgErrorWithTimeout */
+
+#ifndef __MachMsgErrorWithoutTimeout
+#define __MachMsgErrorWithoutTimeout(_R_) { \
+ switch (_R_) { \
+ case MACH_SEND_INVALID_DATA: \
+ case MACH_SEND_INVALID_DEST: \
+ case MACH_SEND_INVALID_HEADER: \
+ mig_put_reply_port(InP->Head.msgh_reply_port); \
+ break; \
+ default: \
+ mig_dealloc_reply_port(InP->Head.msgh_reply_port); \
+ } \
+ }
+#endif /* __MachMsgErrorWithoutTimeout */
+
+
+#if ( __MigTypeCheck )
+#if __MIG_check__Reply__task_subsystem__
+#if !defined(__MIG_check__Reply__task_threads_t__defined)
+#define __MIG_check__Reply__task_threads_t__defined
+
+mig_internal kern_return_t __MIG_check__Reply__task_threads_t(__Reply__task_threads_t *Out0P)
+{
+
+ typedef __Reply__task_threads_t __Reply;
+ boolean_t msgh_simple;
+#if __MigTypeCheck
+ unsigned int msgh_size;
+#endif /* __MigTypeCheck */
+ if (Out0P->Head.msgh_id != 3502) {
+ if (Out0P->Head.msgh_id == MACH_NOTIFY_SEND_ONCE)
+ { return MIG_SERVER_DIED; }
+ else
+ { return MIG_REPLY_MISMATCH; }
+ }
+
+ msgh_simple = !(Out0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX);
+#if __MigTypeCheck
+ msgh_size = Out0P->Head.msgh_size;
+
+ if ((msgh_simple || Out0P->msgh_body.msgh_descriptor_count != 1 ||
+ msgh_size != (mach_msg_size_t)sizeof(__Reply)) &&
+ (!msgh_simple || msgh_size != (mach_msg_size_t)sizeof(mig_reply_error_t) ||
+ ((mig_reply_error_t *)Out0P)->RetCode == KERN_SUCCESS))
+ { return MIG_TYPE_ERROR ; }
+#endif /* __MigTypeCheck */
+
+ if (msgh_simple) {
+ return ((mig_reply_error_t *)Out0P)->RetCode;
+ }
+
+#if __MigTypeCheck
+ if (Out0P->act_list.type != MACH_MSG_OOL_PORTS_DESCRIPTOR ||
+ Out0P->act_list.disposition != 17) {
+ return MIG_TYPE_ERROR;
+ }
+#endif /* __MigTypeCheck */
+
+ return MACH_MSG_SUCCESS;
+}
+#endif /* !defined(__MIG_check__Reply__task_threads_t__defined) */
+#endif /* __MIG_check__Reply__task_subsystem__ */
+#endif /* ( __MigTypeCheck ) */
+
+
+/* Routine task_threads */
+static kern_return_t objc_task_threads
+(
+ task_t target_task,
+ thread_act_array_t *act_list,
+ mach_msg_type_number_t *act_listCnt
+)
+{
+
+#ifdef __MigPackStructs
+#pragma pack(4)
+#endif
+ typedef struct {
+ mach_msg_header_t Head;
+ } Request;
+#ifdef __MigPackStructs
+#pragma pack()
+#endif
+
+#ifdef __MigPackStructs
+#pragma pack(4)
+#endif
+ typedef struct {
+ mach_msg_header_t Head;
+ /* start of the kernel processed data */
+ mach_msg_body_t msgh_body;
+ mach_msg_ool_ports_descriptor_t act_list;
+ /* end of the kernel processed data */
+ NDR_record_t NDR;
+ mach_msg_type_number_t act_listCnt;
+ mach_msg_trailer_t trailer;
+ } Reply;
+#ifdef __MigPackStructs
+#pragma pack()
+#endif
+
+#ifdef __MigPackStructs
+#pragma pack(4)
+#endif
+ typedef struct {
+ mach_msg_header_t Head;
+ /* start of the kernel processed data */
+ mach_msg_body_t msgh_body;
+ mach_msg_ool_ports_descriptor_t act_list;
+ /* end of the kernel processed data */
+ NDR_record_t NDR;
+ mach_msg_type_number_t act_listCnt;
+ } __Reply;
+#ifdef __MigPackStructs
+#pragma pack()
+#endif
+ /*
+ * typedef struct {
+ * mach_msg_header_t Head;
+ * NDR_record_t NDR;
+ * kern_return_t RetCode;
+ * } mig_reply_error_t;
+ */
+
+ union {
+ Request In;
+ Reply Out;
+ } Mess;
+
+ Request *InP = &Mess.In;
+ Reply *Out0P = &Mess.Out;
+
+ mach_msg_return_t msg_result;
+
+#ifdef __MIG_check__Reply__task_threads_t__defined
+ kern_return_t check_result;
+#endif /* __MIG_check__Reply__task_threads_t__defined */
+
+ __DeclareSendRpc(3402, "task_threads")
+
+ InP->Head.msgh_bits =
+ MACH_MSGH_BITS(19, MACH_MSG_TYPE_MAKE_SEND_ONCE);
+ /* msgh_size passed as argument */
+ InP->Head.msgh_request_port = target_task;
+ InP->Head.msgh_reply_port = mig_get_reply_port();
+ InP->Head.msgh_id = 3402;
+ __BeforeSendRpc(3402, "task_threads")
+ msg_result = mach_msg(&InP->Head, MACH_SEND_MSG|MACH_RCV_MSG|MACH_MSG_OPTION_NONE, (mach_msg_size_t)sizeof(Request), (mach_msg_size_t)sizeof(Reply), InP->Head.msgh_reply_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+ __AfterSendRpc(3402, "task_threads")
+ if (msg_result != MACH_MSG_SUCCESS) {
+ _objc_inform("task_threads received unexpected reply msgh_id 0x%zx",
+ (size_t)Out0P->Head.msgh_id);
+ __MachMsgErrorWithoutTimeout(msg_result);
+ { return msg_result; }
+ }
-/***********************************************************************
-* PrintCacheHeader.
-**********************************************************************/
-static void PrintCacheHeader(void)
-{
-#ifdef OBJC_INSTRUMENTED
- printf ("Cache Cache Slots Avg Max AvgS MaxS AvgS MaxS TotalD AvgD MaxD TotalD AvgD MaxD TotD AvgD MaxD\n");
- printf ("Size Count Used Used Used Hit Hit Miss Miss Hits Prbs Prbs Misses Prbs Prbs Flsh Flsh Flsh\n");
- printf ("----- ----- ----- ----- ---- ---- ---- ---- ---- ------- ---- ---- ------- ---- ---- ---- ---- ----\n");
-#else
- printf ("Cache Cache Slots Avg Max AvgS MaxS AvgS MaxS\n");
- printf ("Size Count Used Used Used Hit Hit Miss Miss\n");
- printf ("----- ----- ----- ----- ---- ---- ---- ---- ----\n");
-#endif
-}
+#if defined(__MIG_check__Reply__task_threads_t__defined)
+ check_result = __MIG_check__Reply__task_threads_t((__Reply__task_threads_t *)Out0P);
+ if (check_result != MACH_MSG_SUCCESS)
+ { return check_result; }
+#endif /* defined(__MIG_check__Reply__task_threads_t__defined) */
-/***********************************************************************
-* PrintCacheInfo.
-**********************************************************************/
-static void PrintCacheInfo(unsigned int cacheSize,
- unsigned int cacheCount,
- unsigned int slotsUsed,
- float avgUsed, unsigned int maxUsed,
- float avgSHit, unsigned int maxSHit,
- float avgSMiss, unsigned int maxSMiss
-#ifdef OBJC_INSTRUMENTED
- , unsigned int totDHits,
- float avgDHit,
- unsigned int maxDHit,
- unsigned int totDMisses,
- float avgDMiss,
- unsigned int maxDMiss,
- unsigned int totDFlsh,
- float avgDFlsh,
- unsigned int maxDFlsh
-#endif
- )
-{
-#ifdef OBJC_INSTRUMENTED
- printf ("%5u %5u %5u %5.1f %4u %4.1f %4u %4.1f %4u %7u %4.1f %4u %7u %4.1f %4u %4u %4.1f %4u\n",
-#else
- printf ("%5u %5u %5u %5.1f %4u %4.1f %4u %4.1f %4u\n",
-#endif
- cacheSize, cacheCount, slotsUsed, avgUsed, maxUsed, avgSHit, maxSHit, avgSMiss, maxSMiss
-#ifdef OBJC_INSTRUMENTED
- , totDHits, avgDHit, maxDHit, totDMisses, avgDMiss, maxDMiss, totDFlsh, avgDFlsh, maxDFlsh
-#endif
- );
-
+ *act_list = (thread_act_array_t)(Out0P->act_list.address);
+ *act_listCnt = Out0P->act_listCnt;
+
+ return KERN_SUCCESS;
}
-
-#ifdef OBJC_INSTRUMENTED
-/***********************************************************************
-* PrintCacheHistogram. Show the non-zero entries from the specified
-* cache histogram.
-**********************************************************************/
-static void PrintCacheHistogram(char *title,
- unsigned int *firstEntry,
- unsigned int entryCount)
-{
- unsigned int index;
- unsigned int *thisEntry;
-
- printf ("%s\n", title);
- printf (" Probes Tally\n");
- printf (" ------ -----\n");
- for (index = 0, thisEntry = firstEntry;
- index < entryCount;
- index += 1, thisEntry += 1)
- {
- if (*thisEntry == 0)
- continue;
-
- printf (" %6d %5d\n", index, *thisEntry);
- }
-}
+// DEBUG_TASK_THREADS
#endif
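A hedged usage sketch of the replacement above, consuming it the way a caller such as collecting_in_critical() consumes task_threads(): the reply carries the thread list as out-of-line ports, so the caller typically drops each send right and then deallocates the array. The function name is hypothetical and error handling is minimal.

    static void log_thread_count_example(void)
    {
        thread_act_array_t threads;
        mach_msg_type_number_t count;

        kern_return_t kr = objc_task_threads(mach_task_self(), &threads, &count);
        if (kr != KERN_SUCCESS) return;

        _objc_inform("task has %u threads", count);

        for (mach_msg_type_number_t i = 0; i < count; i++) {
            mach_port_deallocate(mach_task_self(), threads[i]);   // drop each send right
        }
        vm_deallocate(mach_task_self(), (vm_address_t)threads,    // release the OOL array
                      count * sizeof(thread_act_t));
    }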
-/***********************************************************************
-* _class_printMethodCacheStatistics.
-**********************************************************************/
-
-#define MAX_LOG2_SIZE 32
-#define MAX_CHAIN_SIZE 100
-
-void _class_printMethodCacheStatistics(void)
-{
- unsigned int isMeta;
- unsigned int index;
- NXHashState state;
- Class cls;
- unsigned int totalChain;
- unsigned int totalMissChain;
- unsigned int maxChain;
- unsigned int maxMissChain;
- unsigned int classCount;
- unsigned int negativeEntryCount;
- unsigned int cacheExpandCount;
- unsigned int cacheCountBySize[2][MAX_LOG2_SIZE] = {{0}};
- unsigned int totalEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
- unsigned int maxEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
- unsigned int totalChainBySize[2][MAX_LOG2_SIZE] = {{0}};
- unsigned int totalMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
- unsigned int totalMaxChainBySize[2][MAX_LOG2_SIZE] = {{0}};
- unsigned int totalMaxMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
- unsigned int maxChainBySize[2][MAX_LOG2_SIZE] = {{0}};
- unsigned int maxMissChainBySize[2][MAX_LOG2_SIZE] = {{0}};
- unsigned int chainCount[MAX_CHAIN_SIZE] = {0};
- unsigned int missChainCount[MAX_CHAIN_SIZE] = {0};
-#ifdef OBJC_INSTRUMENTED
- unsigned int hitCountBySize[2][MAX_LOG2_SIZE] = {{0}};
- unsigned int hitProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
- unsigned int maxHitProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
- unsigned int missCountBySize[2][MAX_LOG2_SIZE] = {{0}};
- unsigned int missProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
- unsigned int maxMissProbesBySize[2][MAX_LOG2_SIZE] = {{0}};
- unsigned int flushCountBySize[2][MAX_LOG2_SIZE] = {{0}};
- unsigned int flushedEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
- unsigned int maxFlushedEntriesBySize[2][MAX_LOG2_SIZE] = {{0}};
-#endif
-
- printf ("Printing cache statistics\n");
-
- // Outermost loop - iterate over all classes
- state = NXInitHashState (class_hash);
- classCount = 0;
- negativeEntryCount = 0;
- cacheExpandCount = 0;
- while (NXNextHashState (class_hash, &state, (void **) &cls))
- {
- // Tally classes
- classCount += 1;
-
- // Control loop - do given class' cache, then its isa's cache
- for (isMeta = 0; isMeta <= 1; isMeta += 1)
- {
- Cache cache;
- unsigned int mask;
- unsigned int log2Size;
- unsigned int entryCount;
-
- // Select cache of interest
- cache = _class_getCache(isMeta ? ((id)cls)->isa : cls);
-
- // Ignore empty cache... should we?
- if (_cache_isEmpty(cache))
- continue;
-
- // Middle loop - do each entry in the given cache
- mask = cache->mask;
- entryCount = 0;
- totalChain = 0;
- totalMissChain = 0;
- maxChain = 0;
- maxMissChain = 0;
- for (index = 0; index < mask + 1; index += 1)
- {
- cache_entry **buckets;
- cache_entry *entry;
- unsigned int hash;
- unsigned int methodChain;
- unsigned int methodMissChain;
- unsigned int index2;
-
-                // If the entry is invalid, the only point of
-                // interest is that a future insert hashing to
-                // this slot can use it directly.
- buckets = (cache_entry **)cache->buckets;
- if (!buckets[index])
- {
- missChainCount[0] += 1;
- continue;
- }
-
- entry = buckets[index];
-
- // Tally valid entries
- entryCount += 1;
-
- // Tally "forward::" entries
- if (entry->imp == _objc_msgForward_internal)
- negativeEntryCount += 1;
-
- // Calculate search distance (chain length) for this method
- // The chain may wrap around to the beginning of the table.
- hash = CACHE_HASH(entry->name, mask);
- if (index >= hash) methodChain = index - hash;
- else methodChain = (mask+1) + index - hash;
-
- // Tally chains of this length
- if (methodChain < MAX_CHAIN_SIZE)
- chainCount[methodChain] += 1;
-
- // Keep sum of all chain lengths
- totalChain += methodChain;
-
- // Record greatest chain length
- if (methodChain > maxChain)
- maxChain = methodChain;
-
- // Calculate search distance for miss that hashes here
- index2 = index;
- while (buckets[index2])
- {
- index2 += 1;
- index2 &= mask;
- }
- methodMissChain = ((index2 - index) & mask);
-
- // Tally miss chains of this length
- if (methodMissChain < MAX_CHAIN_SIZE)
- missChainCount[methodMissChain] += 1;
-
- // Keep sum of all miss chain lengths in this class
- totalMissChain += methodMissChain;
-
- // Record greatest miss chain length
- if (methodMissChain > maxMissChain)
- maxMissChain = methodMissChain;
- }
-
- // Factor this cache into statistics about caches of the same
- // type and size (all caches are a power of two in size)
- log2Size = log2u (mask + 1);
- cacheCountBySize[isMeta][log2Size] += 1;
- totalEntriesBySize[isMeta][log2Size] += entryCount;
- if (entryCount > maxEntriesBySize[isMeta][log2Size])
- maxEntriesBySize[isMeta][log2Size] = entryCount;
- totalChainBySize[isMeta][log2Size] += totalChain;
- totalMissChainBySize[isMeta][log2Size] += totalMissChain;
- totalMaxChainBySize[isMeta][log2Size] += maxChain;
- totalMaxMissChainBySize[isMeta][log2Size] += maxMissChain;
- if (maxChain > maxChainBySize[isMeta][log2Size])
- maxChainBySize[isMeta][log2Size] = maxChain;
- if (maxMissChain > maxMissChainBySize[isMeta][log2Size])
- maxMissChainBySize[isMeta][log2Size] = maxMissChain;
-#ifdef OBJC_INSTRUMENTED
- {
- CacheInstrumentation *cacheData;
-
- cacheData = CACHE_INSTRUMENTATION(cache);
- hitCountBySize[isMeta][log2Size] += cacheData->hitCount;
- hitProbesBySize[isMeta][log2Size] += cacheData->hitProbes;
- if (cacheData->maxHitProbes > maxHitProbesBySize[isMeta][log2Size])
- maxHitProbesBySize[isMeta][log2Size] = cacheData->maxHitProbes;
- missCountBySize[isMeta][log2Size] += cacheData->missCount;
- missProbesBySize[isMeta][log2Size] += cacheData->missProbes;
- if (cacheData->maxMissProbes > maxMissProbesBySize[isMeta][log2Size])
- maxMissProbesBySize[isMeta][log2Size] = cacheData->maxMissProbes;
- flushCountBySize[isMeta][log2Size] += cacheData->flushCount;
- flushedEntriesBySize[isMeta][log2Size] += cacheData->flushedEntries;
- if (cacheData->maxFlushedEntries > maxFlushedEntriesBySize[isMeta][log2Size])
- maxFlushedEntriesBySize[isMeta][log2Size] = cacheData->maxFlushedEntries;
- }
-#endif
- // Caches start with a power of two number of entries, and grow by doubling, so
- // we can calculate the number of times this cache has expanded
- cacheExpandCount += log2Size - INIT_CACHE_SIZE_LOG2;
- }
- }
-
- {
- unsigned int cacheCountByType[2] = {0};
- unsigned int totalCacheCount = 0;
- unsigned int totalEntries = 0;
- unsigned int maxEntries = 0;
- unsigned int totalSlots = 0;
-#ifdef OBJC_INSTRUMENTED
- unsigned int totalHitCount = 0;
- unsigned int totalHitProbes = 0;
- unsigned int maxHitProbes = 0;
- unsigned int totalMissCount = 0;
- unsigned int totalMissProbes = 0;
- unsigned int maxMissProbes = 0;
- unsigned int totalFlushCount = 0;
- unsigned int totalFlushedEntries = 0;
- unsigned int maxFlushedEntries = 0;
-#endif
-
- totalChain = 0;
- maxChain = 0;
- totalMissChain = 0;
- maxMissChain = 0;
-
- // Sum information over all caches
- for (isMeta = 0; isMeta <= 1; isMeta += 1)
- {
- for (index = 0; index < MAX_LOG2_SIZE; index += 1)
- {
- cacheCountByType[isMeta] += cacheCountBySize[isMeta][index];
- totalEntries += totalEntriesBySize[isMeta][index];
- totalSlots += cacheCountBySize[isMeta][index] * (1 << index);
- totalChain += totalChainBySize[isMeta][index];
- if (maxEntriesBySize[isMeta][index] > maxEntries)
- maxEntries = maxEntriesBySize[isMeta][index];
- if (maxChainBySize[isMeta][index] > maxChain)
- maxChain = maxChainBySize[isMeta][index];
- totalMissChain += totalMissChainBySize[isMeta][index];
- if (maxMissChainBySize[isMeta][index] > maxMissChain)
- maxMissChain = maxMissChainBySize[isMeta][index];
-#ifdef OBJC_INSTRUMENTED
- totalHitCount += hitCountBySize[isMeta][index];
- totalHitProbes += hitProbesBySize[isMeta][index];
- if (maxHitProbesBySize[isMeta][index] > maxHitProbes)
- maxHitProbes = maxHitProbesBySize[isMeta][index];
- totalMissCount += missCountBySize[isMeta][index];
- totalMissProbes += missProbesBySize[isMeta][index];
- if (maxMissProbesBySize[isMeta][index] > maxMissProbes)
- maxMissProbes = maxMissProbesBySize[isMeta][index];
- totalFlushCount += flushCountBySize[isMeta][index];
- totalFlushedEntries += flushedEntriesBySize[isMeta][index];
- if (maxFlushedEntriesBySize[isMeta][index] > maxFlushedEntries)
- maxFlushedEntries = maxFlushedEntriesBySize[isMeta][index];
-#endif
- }
-
- totalCacheCount += cacheCountByType[isMeta];
- }
-
- // Log our findings
- printf ("There are %u classes\n", classCount);
-
- for (isMeta = 0; isMeta <= 1; isMeta += 1)
- {
- // Number of this type of class
- printf ("\nThere are %u %s-method caches, broken down by size (slot count):\n",
- cacheCountByType[isMeta],
- isMeta ? "class" : "instance");
-
- // Print header
- PrintCacheHeader ();
-
-            // Keep format consistent even if there are no caches of this kind
- if (cacheCountByType[isMeta] == 0)
- {
- printf ("(none)\n");
- continue;
- }
-
- // Usage information by cache size
- for (index = 0; index < MAX_LOG2_SIZE; index += 1)
- {
- unsigned int cacheCount;
- unsigned int cacheSlotCount;
- unsigned int cacheEntryCount;
-
- // Get number of caches of this type and size
- cacheCount = cacheCountBySize[isMeta][index];
- if (cacheCount == 0)
- continue;
-
- // Get the cache slot count and the total number of valid entries
- cacheSlotCount = (1 << index);
- cacheEntryCount = totalEntriesBySize[isMeta][index];
-
- // Give the analysis
- PrintCacheInfo (cacheSlotCount,
- cacheCount,
- cacheEntryCount,
- (float) cacheEntryCount / (float) cacheCount,
- maxEntriesBySize[isMeta][index],
- (float) totalChainBySize[isMeta][index] / (float) cacheEntryCount,
- maxChainBySize[isMeta][index],
- (float) totalMissChainBySize[isMeta][index] / (float) (cacheCount * cacheSlotCount),
- maxMissChainBySize[isMeta][index]
-#ifdef OBJC_INSTRUMENTED
- , hitCountBySize[isMeta][index],
- hitCountBySize[isMeta][index] ?
- (float) hitProbesBySize[isMeta][index] / (float) hitCountBySize[isMeta][index] : 0.0,
- maxHitProbesBySize[isMeta][index],
- missCountBySize[isMeta][index],
- missCountBySize[isMeta][index] ?
- (float) missProbesBySize[isMeta][index] / (float) missCountBySize[isMeta][index] : 0.0,
- maxMissProbesBySize[isMeta][index],
- flushCountBySize[isMeta][index],
- flushCountBySize[isMeta][index] ?
- (float) flushedEntriesBySize[isMeta][index] / (float) flushCountBySize[isMeta][index] : 0.0,
- maxFlushedEntriesBySize[isMeta][index]
-#endif
- );
- }
- }
-
- // Give overall numbers
- printf ("\nCumulative:\n");
- PrintCacheHeader ();
- PrintCacheInfo (totalSlots,
- totalCacheCount,
- totalEntries,
- (float) totalEntries / (float) totalCacheCount,
- maxEntries,
- (float) totalChain / (float) totalEntries,
- maxChain,
- (float) totalMissChain / (float) totalSlots,
- maxMissChain
-#ifdef OBJC_INSTRUMENTED
- , totalHitCount,
- totalHitCount ?
- (float) totalHitProbes / (float) totalHitCount : 0.0,
- maxHitProbes,
- totalMissCount,
- totalMissCount ?
- (float) totalMissProbes / (float) totalMissCount : 0.0,
- maxMissProbes,
- totalFlushCount,
- totalFlushCount ?
- (float) totalFlushedEntries / (float) totalFlushCount : 0.0,
- maxFlushedEntries
-#endif
- );
-
- printf ("\nNumber of \"forward::\" entries: %d\n", negativeEntryCount);
- printf ("Number of cache expansions: %d\n", cacheExpandCount);
-#ifdef OBJC_INSTRUMENTED
- printf ("flush_caches: total calls total visits average visits max visits total classes visits/class\n");
- printf (" ----------- ------------ -------------- ---------- ------------- -------------\n");
- printf (" linear %11u %12u %14.1f %10u %13u %12.2f\n",
- LinearFlushCachesCount,
- LinearFlushCachesVisitedCount,
- LinearFlushCachesCount ?
- (float) LinearFlushCachesVisitedCount / (float) LinearFlushCachesCount : 0.0,
- MaxLinearFlushCachesVisitedCount,
- LinearFlushCachesVisitedCount,
- 1.0);
- printf (" nonlinear %11u %12u %14.1f %10u %13u %12.2f\n",
- NonlinearFlushCachesCount,
- NonlinearFlushCachesVisitedCount,
- NonlinearFlushCachesCount ?
- (float) NonlinearFlushCachesVisitedCount / (float) NonlinearFlushCachesCount : 0.0,
- MaxNonlinearFlushCachesVisitedCount,
- NonlinearFlushCachesClassCount,
- NonlinearFlushCachesClassCount ?
- (float) NonlinearFlushCachesVisitedCount / (float) NonlinearFlushCachesClassCount : 0.0);
- printf (" ideal %11u %12u %14.1f %10u %13u %12.2f\n",
- LinearFlushCachesCount + NonlinearFlushCachesCount,
- IdealFlushCachesCount,
- LinearFlushCachesCount + NonlinearFlushCachesCount ?
- (float) IdealFlushCachesCount / (float) (LinearFlushCachesCount + NonlinearFlushCachesCount) : 0.0,
- MaxIdealFlushCachesCount,
- LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount,
- LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount ?
- (float) IdealFlushCachesCount / (float) (LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount) : 0.0);
-
- PrintCacheHistogram ("\nCache hit histogram:", &CacheHitHistogram[0], CACHE_HISTOGRAM_SIZE);
- PrintCacheHistogram ("\nCache miss histogram:", &CacheMissHistogram[0], CACHE_HISTOGRAM_SIZE);
-#endif
-
-#if 0
- printf ("\nLookup chains:");
- for (index = 0; index < MAX_CHAIN_SIZE; index += 1)
- {
- if (chainCount[index] != 0)
- printf (" %u:%u", index, chainCount[index]);
- }
-
- printf ("\nMiss chains:");
- for (index = 0; index < MAX_CHAIN_SIZE; index += 1)
- {
- if (missChainCount[index] != 0)
- printf (" %u:%u", index, missChainCount[index]);
- }
-
- printf ("\nTotal memory usage for cache data structures: %lu bytes\n",
- totalCacheCount * (sizeof(struct objc_cache) - sizeof(cache_entry *)) +
- totalSlots * sizeof(cache_entry *) +
- negativeEntryCount * sizeof(cache_entry));
-#endif
- }
-}
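Since cache sizes are powers of two, the two-branch hit-chain computation in the loop above is equivalent to the masked subtraction already used for the miss chain; a self-contained restatement of that identity:

    // Assumes mask + 1 is a power of two and unsigned arithmetic.
    static unsigned int chain_length(unsigned int index, unsigned int hash,
                                     unsigned int mask)
    {
        // Equals index - hash when index >= hash,
        // and (mask + 1) + index - hash when the chain wraps around.
        return (index - hash) & mask;
    }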
-
+// __OBJC2__
#endif
+++ /dev/null
-/*
- * Copyright (c) 1999-2009 Apple Inc. All Rights Reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-
-/***********************************************************************
-* objc-class-old.m
-* Support for old-ABI classes, methods, and categories.
-**********************************************************************/
-
-#if !__OBJC2__
-
-#include "objc-private.h"
-#include "objc-runtime-old.h"
-
-// Freed objects have their isa set to point to this dummy class.
-// This avoids the need to check for Nil classes in the messenger.
-static const struct old_class freedObjectClass =
-{
- Nil, // isa
- Nil, // super_class
- "FREED(id)", // name
- 0, // version
- 0, // info
- 0, // instance_size
- NULL, // ivars
- NULL, // methodLists
- (Cache) &_objc_empty_cache, // cache
- NULL, // protocols
- NULL, // ivar_layout;
- NULL // ext
-};
-
-
-/***********************************************************************
-* _class_getFreedObjectClass. Return a pointer to the dummy freed
-* object class. Freed objects get their isa pointers replaced with
-* a pointer to the freedObjectClass, so that we can catch usages of
-* the freed object.
-**********************************************************************/
-static Class _class_getFreedObjectClass(void)
-{
- return (Class)&freedObjectClass;
-}
-
-
-/***********************************************************************
-* _objc_getFreedObjectClass. Return a pointer to the dummy freed
-* object class. Freed objects get their isa pointers replaced with
-* a pointer to the freedObjectClass, so that we can catch usages of
-* the freed object.
-**********************************************************************/
-Class _objc_getFreedObjectClass(void)
-{
- return _class_getFreedObjectClass();
-}
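The dummy class only pays off if freed objects are actually retagged with it. A hedged sketch of the idea (the runtime's real free path lives elsewhere): overwrite the dead object's isa so any later message resolves through the freed-object class and reaches _freedHandler(), defined later in this file.

    // Sketch only; not the runtime's actual free path.
    static void mark_object_as_freed_example(id obj)
    {
        obj->isa = _class_getFreedObjectClass();
        // A subsequent objc_msgSend(obj, ...) now reports
        // "message ... sent to freed object" instead of chasing
        // a dangling class pointer.
    }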
-
-
-static void allocateExt(struct old_class *cls)
-{
- if (! (cls->info & CLS_EXT)) {
- _objc_inform("class '%s' needs to be recompiled", cls->name);
- return;
- }
- if (!cls->ext) {
- uint32_t size = (uint32_t)sizeof(struct old_class_ext);
- cls->ext = _calloc_internal(size, 1);
- cls->ext->size = size;
- }
-}
-
-
-static inline struct old_method *_findNamedMethodInList(struct old_method_list * mlist, const char *meth_name) {
- int i;
- if (!mlist) return NULL;
- if (ignoreSelectorNamed(meth_name)) return NULL;
- for (i = 0; i < mlist->method_count; i++) {
- struct old_method *m = &mlist->method_list[i];
- if (0 == strcmp((const char *)(m->method_name), meth_name)) {
- return m;
- }
- }
- return NULL;
-}
-
-
-/***********************************************************************
-* Method list fixup markers.
-* mlist->obsolete == fixed_up_method_list marks method lists with real SELs
-* versus method lists with un-uniqued char*.
-* PREOPTIMIZED VERSION:
-* Fixed-up method lists get mlist->obsolete == OBJC_FIXED_UP
-* dyld shared cache sets this for method lists it preoptimizes.
-* UN-PREOPTIMIZED VERSION
-* Fixed-up method lists get mlist->obsolete == OBJC_FIXED_UP_outside_dyld
-* dyld shared cache uses OBJC_FIXED_UP, but those aren't trusted.
-**********************************************************************/
-#define OBJC_FIXED_UP ((void *)1771)
-#define OBJC_FIXED_UP_outside_dyld ((void *)1773)
-static void *fixed_up_method_list = OBJC_FIXED_UP;
-
-// sel_init() decided that selectors in the dyld shared cache are untrustworthy
-void disableSharedCacheOptimizations(void)
-{
- fixed_up_method_list = OBJC_FIXED_UP_outside_dyld;
-}
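A hedged helper sketch of the marker test from a caller's point of view; the real check is performed inline by fixupSelectorsInMethodList() below.

    static BOOL isMethodListFixedUp(struct old_method_list *mlist)
    {
        // Compares against whichever marker sel_init() left in effect:
        // OBJC_FIXED_UP normally, OBJC_FIXED_UP_outside_dyld when the
        // shared cache's preoptimized selectors are not trusted.
        return mlist->obsolete == fixed_up_method_list;
    }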
-
-/***********************************************************************
-* fixupSelectorsInMethodList
-* Uniques selectors in the given method list.
-* Also replaces imps for GC-ignored selectors
-* The given method list must be non-NULL and not already fixed-up.
-* If the class was loaded from a bundle:
-* fixes up the given list in place with heap-allocated selector strings
-* If the class was not from a bundle:
-* allocates a copy of the method list, fixes up the copy, and returns
-* the copy. The given list is unmodified.
-*
-* If cls is already in use, methodListLock must be held by the caller.
-**********************************************************************/
-static struct old_method_list *fixupSelectorsInMethodList(struct old_class *cls, struct old_method_list *mlist)
-{
- int i;
- size_t size;
- struct old_method *method;
- struct old_method_list *old_mlist;
-
- if ( ! mlist ) return NULL;
- if ( mlist->obsolete == fixed_up_method_list ) {
- // method list OK
- } else {
- BOOL isBundle = (cls->info & CLS_FROM_BUNDLE) ? YES : NO;
- if (!isBundle) {
- old_mlist = mlist;
- size = sizeof(struct old_method_list) - sizeof(struct old_method) + old_mlist->method_count * sizeof(struct old_method);
- mlist = _malloc_internal(size);
- memmove(mlist, old_mlist, size);
- } else {
- // Mach-O bundles are fixed up in place.
- // This prevents leaks when a bundle is unloaded.
- }
- sel_lock();
- for ( i = 0; i < mlist->method_count; i += 1 ) {
- method = &mlist->method_list[i];
- method->method_name =
- sel_registerNameNoLock((const char *)method->method_name, isBundle); // Always copy selector data from bundles.
-
- if (ignoreSelector(method->method_name)) {
- method->method_imp = (IMP)&_objc_ignored_method;
- }
- }
- sel_unlock();
- mlist->obsolete = fixed_up_method_list;
- }
- return mlist;
-}
-
-
-/***********************************************************************
-* nextMethodList
-* Returns successive method lists from the given class.
-* Method lists are returned in method search order (i.e. highest-priority
-* implementations first).
-* All necessary method list fixups are performed, so the
-* returned method list is fully-constructed.
-*
-* If cls is already in use, methodListLock must be held by the caller.
-* For full thread-safety, methodListLock must be continuously held by the
-* caller across all calls to nextMethodList(). If the lock is released,
-* the bad results listed in class_nextMethodList() may occur.
-*
-* void *iterator = NULL;
-* struct old_method_list *mlist;
-* mutex_lock(&methodListLock);
-* while ((mlist = nextMethodList(cls, &iterator))) {
-* // do something with mlist
-* }
-* mutex_unlock(&methodListLock);
-**********************************************************************/
-static struct old_method_list *nextMethodList(struct old_class *cls,
- void **it)
-{
- uintptr_t index = *(uintptr_t *)it;
- struct old_method_list **resultp;
-
- if (index == 0) {
- // First call to nextMethodList.
- if (!cls->methodLists) {
- resultp = NULL;
- } else if (cls->info & CLS_NO_METHOD_ARRAY) {
- resultp = (struct old_method_list **)&cls->methodLists;
- } else {
- resultp = &cls->methodLists[0];
- if (!*resultp || *resultp == END_OF_METHODS_LIST) {
- resultp = NULL;
- }
- }
- } else {
- // Subsequent call to nextMethodList.
- if (!cls->methodLists) {
- resultp = NULL;
- } else if (cls->info & CLS_NO_METHOD_ARRAY) {
- resultp = NULL;
- } else {
- resultp = &cls->methodLists[index];
- if (!*resultp || *resultp == END_OF_METHODS_LIST) {
- resultp = NULL;
- }
- }
- }
-
- // resultp now is NULL, meaning there are no more method lists,
- // OR the address of the method list pointer to fix up and return.
-
- if (resultp) {
- if (*resultp) {
- *resultp = fixupSelectorsInMethodList(cls, *resultp);
- }
- *it = (void *)(index + 1);
- return *resultp;
- } else {
- *it = 0;
- return NULL;
- }
-}
-
-
-/* These next three functions are the heart of ObjC method lookup.
- * If the class is currently in use, methodListLock must be held by the caller.
- */
-static inline struct old_method *_findMethodInList(struct old_method_list * mlist, SEL sel) {
- int i;
- if (!mlist) return NULL;
- for (i = 0; i < mlist->method_count; i++) {
- struct old_method *m = &mlist->method_list[i];
- if (m->method_name == sel) {
- return m;
- }
- }
- return NULL;
-}
-
-static inline struct old_method * _findMethodInClass(struct old_class *cls, SEL sel) __attribute__((always_inline));
-static inline struct old_method * _findMethodInClass(struct old_class *cls, SEL sel) {
- // Flattened version of nextMethodList(). The optimizer doesn't
- // do a good job with hoisting the conditionals out of the loop.
- // Conceptually, this looks like:
- // while ((mlist = nextMethodList(cls, &iterator))) {
- // struct old_method *m = _findMethodInList(mlist, sel);
- // if (m) return m;
- // }
-
- if (!cls->methodLists) {
- // No method lists.
- return NULL;
- }
- else if (cls->info & CLS_NO_METHOD_ARRAY) {
- // One method list.
- struct old_method_list **mlistp;
- mlistp = (struct old_method_list **)&cls->methodLists;
- *mlistp = fixupSelectorsInMethodList(cls, *mlistp);
- return _findMethodInList(*mlistp, sel);
- }
- else {
- // Multiple method lists.
- struct old_method_list **mlistp;
- for (mlistp = cls->methodLists;
- *mlistp != NULL && *mlistp != END_OF_METHODS_LIST;
- mlistp++)
- {
- struct old_method *m;
- *mlistp = fixupSelectorsInMethodList(cls, *mlistp);
- m = _findMethodInList(*mlistp, sel);
- if (m) return m;
- }
- return NULL;
- }
-}
-
-static inline struct old_method * _getMethod(struct old_class *cls, SEL sel) {
- for (; cls; cls = cls->super_class) {
- struct old_method *m;
- m = _findMethodInClass(cls, sel);
- if (m) return m;
- }
- return NULL;
-}
-
-
-// fixme for gc debugging temporary use
-IMP findIMPInClass(struct old_class *cls, SEL sel)
-{
- struct old_method *m = _findMethodInClass(cls, sel);
- if (m) return m->method_imp;
- else return NULL;
-}
-
-
-/***********************************************************************
-* _freedHandler.
-**********************************************************************/
-static void _freedHandler(id obj, SEL sel)
-{
- __objc_error (obj, "message %s sent to freed object=%p",
- sel_getName(sel), obj);
-}
-
-
-/***********************************************************************
-* ABI-specific lookUpMethod helpers.
-**********************************************************************/
-void lockForMethodLookup(void)
-{
- mutex_lock(&methodListLock);
-}
-void unlockForMethodLookup(void)
-{
- mutex_unlock(&methodListLock);
-}
-IMP prepareForMethodLookup(Class cls, SEL sel, BOOL init, id obj)
-{
- mutex_assert_unlocked(&methodListLock);
-
- // Check for freed class
- if (cls == _class_getFreedObjectClass())
- return (IMP) _freedHandler;
-
- if (init && !_class_isInitialized(cls)) {
- _class_initialize (_class_getNonMetaClass(cls, obj));
- // If sel == initialize, _class_initialize will send +initialize and
- // then the messenger will send +initialize again after this
- // procedure finishes. Of course, if this is not being called
- // from the messenger then it won't happen. 2778172
- }
-
- return NULL;
-}
-
-
-/***********************************************************************
-* class_getVariable. Return the named instance variable.
-**********************************************************************/
-
-Ivar _class_getVariable(Class cls_gen, const char *name, Class *memberOf)
-{
- struct old_class *cls = oldcls(cls_gen);
-
- for (; cls != Nil; cls = cls->super_class) {
- int i;
-
- // Skip class having no ivars
- if (!cls->ivars) continue;
-        // Skip classes that have no ivars
- for (i = 0; i < cls->ivars->ivar_count; i++) {
- // Check this ivar's name. Be careful because the
- // compiler generates ivar entries with NULL ivar_name
- // (e.g. for anonymous bit fields).
- struct old_ivar *ivar = &cls->ivars->ivar_list[i];
- if (ivar->ivar_name && 0 == strcmp(name, ivar->ivar_name)) {
- if (memberOf) *memberOf = (Class)cls;
- return (Ivar)ivar;
- }
- }
- }
-
- // Not found
- return NULL;
-}
-
-
-struct old_property *
-property_list_nth(const struct old_property_list *plist, uint32_t i)
-{
- return (struct old_property *)(i*plist->entsize + (char *)&plist->first);
-}
-
-struct old_property **
-copyPropertyList(struct old_property_list *plist, unsigned int *outCount)
-{
- struct old_property **result = NULL;
- unsigned int count = 0;
-
- if (plist) {
- count = plist->count;
- }
-
- if (count > 0) {
- unsigned int i;
- result = malloc((count+1) * sizeof(struct old_property *));
-
- for (i = 0; i < count; i++) {
- result[i] = property_list_nth(plist, i);
- }
- result[i] = NULL;
- }
-
- if (outCount) *outCount = count;
- return result;
-}
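A hedged usage sketch of the ownership contract above: the returned array is malloc'd and NULL-terminated, its entries still point into the original property list, and only the array itself belongs to the caller.

    static unsigned int count_properties_example(struct old_property_list *plist)
    {
        unsigned int count;
        struct old_property **props = copyPropertyList(plist, &count);
        if (!props) return 0;                       // empty or NULL list

        for (unsigned int i = 0; i < count; i++) {
            struct old_property *prop = props[i];   // points into plist; do not free
            (void)prop;                             // inspect prop here
        }

        free(props);                                // free only the array
        return count;
    }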
-
-
-static struct old_property_list *
-nextPropertyList(struct old_class *cls, uintptr_t *indexp)
-{
- struct old_property_list *result = NULL;
-
- mutex_assert_locked(&classLock);
- if (! ((cls->info & CLS_EXT) && cls->ext)) {
- // No class ext
- result = NULL;
- } else if (!cls->ext->propertyLists) {
- // No property lists
- result = NULL;
- } else if (cls->info & CLS_NO_PROPERTY_ARRAY) {
- // Only one property list
- if (*indexp == 0) {
- result = (struct old_property_list *)cls->ext->propertyLists;
- } else {
- result = NULL;
- }
- } else {
- // More than one property list
- result = cls->ext->propertyLists[*indexp];
- }
-
- if (result) {
- ++*indexp;
- return result;
- } else {
- *indexp = 0;
- return NULL;
- }
-}
-
-
-/***********************************************************************
-* class_getIvarLayout
-* NULL means all-scanned. "" means non-scanned.
-**********************************************************************/
-const uint8_t *
-class_getIvarLayout(Class cls_gen)
-{
- struct old_class *cls = oldcls(cls_gen);
- if (cls && (cls->info & CLS_EXT)) {
- return cls->ivar_layout;
- } else {
- return NULL; // conservative scan
- }
-}
-
-
-/***********************************************************************
-* class_getWeakIvarLayout
-* NULL means no weak ivars.
-**********************************************************************/
-const uint8_t *
-class_getWeakIvarLayout(Class cls_gen)
-{
- struct old_class *cls = oldcls(cls_gen);
- if (cls && (cls->info & CLS_EXT) && cls->ext) {
- return cls->ext->weak_ivar_layout;
- } else {
- return NULL; // no weak ivars
- }
-}
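A hedged sketch of how a collector-side consumer might branch on the two documented special cases before parsing an actual layout string:

    static void describe_scanning_example(Class cls)
    {
        const uint8_t *strong = class_getIvarLayout(cls);
        const uint8_t *weak   = class_getWeakIvarLayout(cls);

        if (strong == NULL) {
            // no layout recorded: conservatively scan the whole instance
        } else if (*strong == '\0') {
            // "": nothing in the instance needs scanning
        } else {
            // parse the layout string here
        }

        if (weak == NULL) {
            // the class declares no weak ivars
        }
    }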
-
-
-/***********************************************************************
-* class_setIvarLayout
-* NULL means all-scanned. "" means non-scanned.
-**********************************************************************/
-void class_setIvarLayout(Class cls_gen, const uint8_t *layout)
-{
- struct old_class *cls = oldcls(cls_gen);
- if (!cls) return;
-
- if (! (cls->info & CLS_EXT)) {
- _objc_inform("class '%s' needs to be recompiled", cls->name);
- return;
- }
-
- // fixme leak
- cls->ivar_layout = _ustrdup_internal(layout);
-}
-
-// SPI: Instance-specific object layout.
-
-void _class_setIvarLayoutAccessor(Class cls_gen, const uint8_t* (*accessor) (id object)) {
- struct old_class *cls = oldcls(cls_gen);
- if (!cls) return;
-
- if (! (cls->info & CLS_EXT)) {
- _objc_inform("class '%s' needs to be recompiled", cls->name);
- return;
- }
-
- // fixme leak
- cls->ivar_layout = (const uint8_t *)accessor;
- _class_setInfo(cls_gen, CLS_HAS_INSTANCE_SPECIFIC_LAYOUT);
-}
-
-const uint8_t *_object_getIvarLayout(Class cls_gen, id object) {
- struct old_class *cls = oldcls(cls_gen);
- if (cls && (cls->info & CLS_EXT)) {
- const uint8_t* layout = cls->ivar_layout;
- if (cls->info & CLS_HAS_INSTANCE_SPECIFIC_LAYOUT) {
- const uint8_t* (*accessor) (id object) = (const uint8_t* (*)(id))layout;
- layout = accessor(object);
- }
- return layout;
- } else {
- return NULL;
- }
-}
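A hedged sketch of this SPI with hypothetical names: register a per-object accessor, after which _object_getIvarLayout() routes through it because CLS_HAS_INSTANCE_SPECIFIC_LAYOUT is set on the class.

    static const uint8_t *my_layout_accessor(id object)
    {
        // Return a layout for this particular instance;
        // NULL keeps the conservative all-scanned behavior.
        return NULL;
    }

    static void adopt_instance_layouts_example(Class cls)
    {
        _class_setIvarLayoutAccessor(cls, my_layout_accessor);
        // _object_getIvarLayout(cls, obj) now calls my_layout_accessor(obj).
    }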
-
-/***********************************************************************
-* class_setWeakIvarLayout
-* NULL means no weak ivars.
-**********************************************************************/
-void class_setWeakIvarLayout(Class cls_gen, const uint8_t *layout)
-{
- struct old_class *cls = oldcls(cls_gen);
- if (!cls) return;
-
- mutex_lock(&classLock);
-
- allocateExt(cls);
-
- // fixme leak
- cls->ext->weak_ivar_layout = _ustrdup_internal(layout);
-
- mutex_unlock(&classLock);
-}
-
-
-/***********************************************************************
-* _class_changeInfo
-* Atomically sets and clears some bits in cls's info field.
-* set and clear must not overlap.
-**********************************************************************/
-void _class_changeInfo(Class cls, long set, long clear)
-{
- struct old_class *old = oldcls(cls);
- long newinfo;
- long oldinfo;
- do {
- oldinfo = old->info;
- newinfo = (oldinfo | set) & ~clear;
- } while (! OSAtomicCompareAndSwapLong(oldinfo, newinfo, &old->info));
-}
-
-
-/***********************************************************************
-* _class_getInfo
-* Returns YES iff all set bits in get are also set in cls's info field.
-**********************************************************************/
-BOOL _class_getInfo(Class cls, int get)
-{
- struct old_class *old = oldcls(cls);
- return ((old->info & get) == get) ? YES : NO;
-}
-
-
-/***********************************************************************
-* _class_setInfo
-* Atomically sets some bits in cls's info field.
-**********************************************************************/
-void _class_setInfo(Class cls, long set)
-{
- _class_changeInfo(cls, set, 0);
-}
-
-
-/***********************************************************************
-* _class_clearInfo
-* Atomically clears some bits in cls's info field.
-**********************************************************************/
-void _class_clearInfo(Class cls, long clear)
-{
- _class_changeInfo(cls, 0, clear);
-}
-
-
-/***********************************************************************
-* isInitializing
-* Return YES if cls is currently being initialized.
-* The initializing bit is stored in the metaclass only.
-**********************************************************************/
-BOOL _class_isInitializing(Class cls)
-{
- return _class_getInfo(_class_getMeta(cls), CLS_INITIALIZING);
-}
-
-
-/***********************************************************************
-* isInitialized
-* Return YES if cls is already initialized.
-* The initialized bit is stored in the metaclass only.
-**********************************************************************/
-BOOL _class_isInitialized(Class cls)
-{
- return _class_getInfo(_class_getMeta(cls), CLS_INITIALIZED);
-}
-
-
-/***********************************************************************
-* setInitializing
-* Mark cls as initialization in progress.
-**********************************************************************/
-void _class_setInitializing(Class cls)
-{
- _class_setInfo(_class_getMeta(cls), CLS_INITIALIZING);
-}
-
-
-/***********************************************************************
-* setInitialized
-* Atomically mark cls as initialized and not initializing.
-**********************************************************************/
-void _class_setInitialized(Class cls)
-{
- _class_changeInfo(_class_getMeta(cls), CLS_INITIALIZED, CLS_INITIALIZING);
-}
-
-
-/***********************************************************************
-* class_setVersion. Record the specified version with the class.
-**********************************************************************/
-void class_setVersion(Class cls, int version)
-{
- if (!cls) return;
- cls->version = version;
-}
-
-/***********************************************************************
-* class_getVersion. Return the version recorded with the class.
-**********************************************************************/
-int class_getVersion(Class cls)
-{
- if (!cls) return 0;
- return (int)cls->version;
-}
-
-
-Class _class_getMeta(Class cls)
-{
- if (_class_getInfo(cls, CLS_META)) return cls;
- else return ((id)cls)->isa;
-}
-
-BOOL _class_isMetaClass(Class cls)
-{
- if (!cls) return NO;
- return _class_getInfo(cls, CLS_META);
-}
-
-
-/***********************************************************************
-* _class_getNonMetaClass.
-* Return the ordinary class for this class or metaclass.
-* Used by +initialize.
-**********************************************************************/
-Class _class_getNonMetaClass(Class cls, id obj __unused)
-{
- // fixme ick
- if (_class_isMetaClass(cls)) {
- if (strncmp(_class_getName(cls), "_%", 2) == 0) {
- // Posee's meta's name is smashed and isn't in the class_hash,
- // so objc_getClass doesn't work.
- const char *baseName = strchr(_class_getName(cls), '%'); // get posee's real name
- cls = (Class)objc_getClass(baseName);
- } else {
- cls = (Class)objc_getClass(_class_getName(cls));
- }
- assert(cls);
- }
-
- return cls;
-}
-
-
-Class _class_getSuperclass(Class cls)
-{
- if (!cls) return nil;
- return (Class)cls->super_class;
-}
-
-
-Cache _class_getCache(Class cls)
-{
- return cls->cache;
-}
-
-void _class_setCache(Class cls, Cache cache)
-{
- cls->cache = cache;
-}
-
-size_t _class_getInstanceSize(Class cls)
-{
- if (!cls) return 0;
- return (cls->instance_size + WORD_MASK) & ~WORD_MASK;
-}
-
-const char * _class_getName(Class cls)
-{
- if (!cls) return "nil";
- return cls->name;
-}
-
-
-
-const char *_category_getName(Category cat)
-{
- return oldcategory(cat)->category_name;
-}
-
-const char *_category_getClassName(Category cat)
-{
- return oldcategory(cat)->class_name;
-}
-
-Class _category_getClass(Category cat)
-{
- return (Class)objc_getClass(oldcategory(cat)->class_name);
-}
-
-IMP _category_getLoadMethod(Category cat)
-{
- struct old_method_list *mlist = oldcategory(cat)->class_methods;
- if (mlist) {
- return lookupNamedMethodInMethodList(mlist, "load");
- } else {
- return NULL;
- }
-}
-
-
-
-/***********************************************************************
-* class_nextMethodList.
-* External version of nextMethodList().
-*
-* This function is not fully thread-safe. A series of calls to
-* class_nextMethodList() may fail if methods are added to or removed
-* from the class between calls.
-* If methods are added between calls to class_nextMethodList(), it may
-* return previously-returned method lists again, and may fail to return
-* newly-added lists.
-* If methods are removed between calls to class_nextMethodList(), it may
-* omit surviving method lists or simply crash.
-**********************************************************************/
-OBJC_EXPORT struct objc_method_list *class_nextMethodList(Class cls, void **it)
-{
- struct old_method_list *result;
-
- OBJC_WARN_DEPRECATED;
-
- mutex_lock(&methodListLock);
- result = nextMethodList(oldcls(cls), it);
- mutex_unlock(&methodListLock);
- return (struct objc_method_list *)result;
-}
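A hedged usage sketch of the deprecated iterator, subject to the caveats above: the iterator starts as NULL, and the class must not gain or lose methods while the loop runs.

    static unsigned int count_method_lists_example(Class cls)
    {
        void *iterator = NULL;
        struct objc_method_list *mlist;
        unsigned int lists = 0;

        while ((mlist = class_nextMethodList(cls, &iterator))) {
            lists++;                       // inspect mlist here
        }
        return lists;
    }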
-
-
-/***********************************************************************
-* class_addMethods.
-*
-* Formerly class_addInstanceMethods ()
-**********************************************************************/
-OBJC_EXPORT void class_addMethods(Class cls, struct objc_method_list *meths)
-{
- OBJC_WARN_DEPRECATED;
-
- // Add the methods.
- mutex_lock(&methodListLock);
- _objc_insertMethods(oldcls(cls), (struct old_method_list *)meths, NULL);
- mutex_unlock(&methodListLock);
-
- // Must flush when dynamically adding methods. No need to flush
- // all the class method caches. If cls is a meta class, though,
- // this will still flush it and any of its sub-meta classes.
- flush_caches (cls, NO);
-}
-
-
-/***********************************************************************
-* class_removeMethods.
-**********************************************************************/
-OBJC_EXPORT void class_removeMethods(Class cls, struct objc_method_list *meths)
-{
- OBJC_WARN_DEPRECATED;
-
- // Remove the methods
- mutex_lock(&methodListLock);
- _objc_removeMethods(oldcls(cls), (struct old_method_list *)meths);
- mutex_unlock(&methodListLock);
-
- // Must flush when dynamically removing methods. No need to flush
- // all the class method caches. If cls is a meta class, though,
- // this will still flush it and any of its sub-meta classes.
- flush_caches (cls, NO);
-}
-
-/***********************************************************************
-* lookupNamedMethodInMethodList
-* Only called to find +load/-.cxx_construct/-.cxx_destruct methods,
-* without fixing up the entire method list.
-* The class is not yet in use, so methodListLock is not taken.
-**********************************************************************/
-IMP lookupNamedMethodInMethodList(struct old_method_list *mlist, const char *meth_name)
-{
- struct old_method *m;
- m = meth_name ? _findNamedMethodInList(mlist, meth_name) : NULL;
- return (m ? m->method_imp : NULL);
-}
-
-Method _class_getMethod(Class cls, SEL sel)
-{
- Method result;
-
- mutex_lock(&methodListLock);
- result = (Method)_getMethod(oldcls(cls), sel);
- mutex_unlock(&methodListLock);
-
- return result;
-}
-
-Method _class_getMethodNoSuper(Class cls, SEL sel)
-{
- Method result;
-
- mutex_lock(&methodListLock);
- result = (Method)_findMethodInClass(oldcls(cls), sel);
- mutex_unlock(&methodListLock);
-
- return result;
-}
-
-Method _class_getMethodNoSuper_nolock(Class cls, SEL sel)
-{
- mutex_assert_locked(&methodListLock);
- return (Method)_findMethodInClass(oldcls(cls), sel);
-}
-
-
-BOOL class_conformsToProtocol(Class cls_gen, Protocol *proto_gen)
-{
- struct old_class *cls = oldcls(cls_gen);
- struct old_protocol *proto = oldprotocol(proto_gen);
-
- if (!cls_gen) return NO;
- if (!proto) return NO;
-
- if (cls->isa->version >= 3) {
- struct old_protocol_list *list;
- for (list = cls->protocols; list != NULL; list = list->next) {
- int i;
- for (i = 0; i < list->count; i++) {
- if (list->list[i] == proto) return YES;
- if (protocol_conformsToProtocol((Protocol *)list->list[i], proto_gen)) return YES;
- }
- if (cls->isa->version <= 4) break;
- }
- }
- return NO;
-}
-
-
-static NXMapTable * posed_class_hash = NULL;
-
-/***********************************************************************
-* objc_getOrigClass.
-**********************************************************************/
-Class _objc_getOrigClass(const char *name)
-{
- Class ret;
-
- // Look for class among the posers
- ret = Nil;
- mutex_lock(&classLock);
- if (posed_class_hash)
- ret = (Class) NXMapGet (posed_class_hash, name);
- mutex_unlock(&classLock);
- if (ret)
- return ret;
-
- // Not a poser. Do a normal lookup.
- ret = (Class)objc_getClass (name);
- if (!ret)
- _objc_inform ("class `%s' not linked into application", name);
-
- return ret;
-}
-
-Class objc_getOrigClass(const char *name)
-{
- OBJC_WARN_DEPRECATED;
- return _objc_getOrigClass(name);
-}
-
-/***********************************************************************
-* _objc_addOrigClass. This function is only used from class_poseAs.
-* Registers the original class names, before they get obscured by
-* posing, so that [super ..] will work correctly from categories
-* in posing classes and in categories in classes being posed for.
-**********************************************************************/
-static void _objc_addOrigClass (struct old_class *origClass)
-{
- mutex_lock(&classLock);
-
- // Create the poser's hash table on first use
- if (!posed_class_hash)
- {
- posed_class_hash = NXCreateMapTableFromZone (NXStrValueMapPrototype,
- 8,
- _objc_internal_zone ());
- }
-
- // Add the named class iff it is not already there (or collides?)
- if (NXMapGet (posed_class_hash, origClass->name) == 0)
- NXMapInsert (posed_class_hash, origClass->name, origClass);
-
- mutex_unlock(&classLock);
-}
-
-
-/***********************************************************************
-* change_class_references
-* Change classrefs and superclass pointers from original to imposter
-* But if copy!=nil, don't change copy->super_class.
-* If changeSuperRefs==YES, also change [super message] classrefs.
-* Used by class_poseAs and objc_setFutureClass
-* classLock must be locked.
-**********************************************************************/
-void change_class_references(struct old_class *imposter,
- struct old_class *original,
- struct old_class *copy,
- BOOL changeSuperRefs)
-{
- header_info *hInfo;
- struct old_class *clsObject;
- NXHashState state;
-
- // Change all subclasses of the original to point to the imposter.
- state = NXInitHashState (class_hash);
- while (NXNextHashState (class_hash, &state, (void **) &clsObject))
- {
- while ((clsObject) && (clsObject != imposter) &&
- (clsObject != copy))
- {
- if (clsObject->super_class == original)
- {
- clsObject->super_class = imposter;
- clsObject->isa->super_class = imposter->isa;
- // We must flush caches here!
- break;
- }
-
- clsObject = clsObject->super_class;
- }
- }
-
- // Replace the original with the imposter in all class refs
- // Major loop - process all headers
- for (hInfo = FirstHeader; hInfo != NULL; hInfo = hInfo->next)
- {
- struct old_class **cls_refs;
- size_t refCount;
- unsigned int index;
-
- // Fix class refs associated with this header
- cls_refs = _getObjcClassRefs(hInfo, &refCount);
- if (cls_refs) {
- for (index = 0; index < refCount; index += 1) {
- if (cls_refs[index] == original) {
- cls_refs[index] = imposter;
- }
- }
- }
- }
-}
-
-
-/***********************************************************************
-* class_poseAs.
-*
-* !!! class_poseAs () does not currently flush any caches.
-**********************************************************************/
-Class class_poseAs(Class imposter_gen, Class original_gen)
-{
- struct old_class *imposter = oldcls(imposter_gen);
- struct old_class *original = oldcls(original_gen);
- char * imposterNamePtr;
- struct old_class * copy;
-
- OBJC_WARN_DEPRECATED;
-
- // Trivial case is easy
- if (imposter_gen == original_gen)
- return imposter_gen;
-
- // Imposter must be an immediate subclass of the original
- if (imposter->super_class != original) {
- __objc_error((id)imposter_gen,
- "[%s poseAs:%s]: target not immediate superclass",
- imposter->name, original->name);
- }
-
- // Can't pose when you have instance variables (how could it work?)
- if (imposter->ivars) {
- __objc_error((id)imposter_gen,
- "[%s poseAs:%s]: %s defines new instance variables",
- imposter->name, original->name, imposter->name);
- }
-
- // Build a string to use to replace the name of the original class.
-#if TARGET_OS_WIN32
-# define imposterNamePrefix "_%"
- imposterNamePtr = _malloc_internal(strlen(original->name) + strlen(imposterNamePrefix) + 1);
- strcpy(imposterNamePtr, imposterNamePrefix);
- strcat(imposterNamePtr, original->name);
-# undef imposterNamePrefix
-#else
- asprintf(&imposterNamePtr, "_%%%s", original->name);
-#endif
-
- // We lock the class hashtable, so we are thread safe with respect to
- // calls to objc_getClass (). However, the class names are not
- // changed atomically, nor are all of the subclasses updated
- // atomically. I have ordered the operations so that you will
- // never crash, but you may get inconsistent results....
-
- // Register the original class so that [super ..] knows
- // exactly which classes are the "original" classes.
- _objc_addOrigClass (original);
- _objc_addOrigClass (imposter);
-
- // Copy the imposter, so that the imposter can continue
- // its normal life in addition to changing the behavior of
- // the original. As a hack we don't bother to copy the metaclass.
- // For some reason we modify the original rather than the copy.
- copy = (struct old_class *)_malloc_internal(sizeof(struct old_class));
- memmove(copy, imposter, sizeof(struct old_class));
-
- mutex_lock(&classLock);
-
- // Remove both the imposter and the original class.
- NXHashRemove (class_hash, imposter);
- NXHashRemove (class_hash, original);
-
- NXHashInsert (class_hash, copy);
- objc_addRegisteredClass((Class)copy); // imposter & original will rejoin later, just track the new guy
-
- // Mark the imposter as such
- _class_setInfo((Class)imposter, CLS_POSING);
- _class_setInfo((Class)imposter->isa, CLS_POSING);
-
- // Change the name of the imposter to that of the original class.
- imposter->name = original->name;
- imposter->isa->name = original->isa->name;
-
- // Also copy the version field to avoid archiving problems.
- imposter->version = original->version;
-
- // Change classrefs and superclass pointers
- // Don't change copy->super_class
- // Don't change [super ...] messages
- change_class_references(imposter, original, copy, NO);
-
- // Change the name of the original class.
- original->name = imposterNamePtr + 1;
- original->isa->name = imposterNamePtr;
-
- // Restore the imposter and the original class with their new names.
- NXHashInsert (class_hash, imposter);
- NXHashInsert (class_hash, original);
-
- mutex_unlock(&classLock);
-
- return imposter_gen;
-}
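-
-/* Editor's note: illustrative sketch, not part of the original source.
- * Shows how a client of this long-deprecated API would pose a subclass as
- * its superclass. The class names are hypothetical; per the checks above,
- * the imposter must be an immediate subclass and must add no ivars. */
-#if 0
-static void example_poseAs(void)
-{
-    Class imposter = objc_getClass("LoggingWidget");
-    Class original = objc_getClass("Widget");
-
-    // Afterwards, class references to Widget resolve through LoggingWidget.
-    class_poseAs(imposter, original);
-}
-#endif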
-
-
-/***********************************************************************
-* flush_caches. Flush the instance and optionally class method caches
-* of cls and all its subclasses.
-*
-* Specifying Nil for the class means "all classes."
-**********************************************************************/
-void flush_caches(Class target_gen, BOOL flush_meta)
-{
- NXHashState state;
- struct old_class *target = oldcls(target_gen);
- struct old_class *clsObject;
-#ifdef OBJC_INSTRUMENTED
- unsigned int classesVisited;
- unsigned int subclassCount;
-#endif
-
- mutex_lock(&classLock);
- mutex_lock(&cacheUpdateLock);
-
- // Leaf classes are fastest because there are no subclass caches to flush.
- // fixme instrument
- if (target && (target->info & CLS_LEAF)) {
- _cache_flush ((Class)target);
-
- if (!flush_meta) {
- mutex_unlock(&cacheUpdateLock);
- mutex_unlock(&classLock);
- return; // done
- } else if (target->isa && (target->isa->info & CLS_LEAF)) {
- _cache_flush ((Class)target->isa);
- mutex_unlock(&cacheUpdateLock);
- mutex_unlock(&classLock);
- return; // done
- } else {
- // Reset target and handle it by one of the methods below.
- target = target->isa;
- flush_meta = NO;
- // NOT done
- }
- }
-
- state = NXInitHashState(class_hash);
-
- // Handle nil and root instance class specially: flush all
- // instance and class method caches. Nice that this
- // loop is linear vs the N-squared loop just below.
- if (!target || !target->super_class)
- {
-#ifdef OBJC_INSTRUMENTED
- LinearFlushCachesCount += 1;
- classesVisited = 0;
- subclassCount = 0;
-#endif
- // Traverse all classes in the hash table
- while (NXNextHashState(class_hash, &state, (void**)&clsObject))
- {
- struct old_class *metaClsObject;
-#ifdef OBJC_INSTRUMENTED
- classesVisited += 1;
-#endif
-
- // Skip class that is known not to be a subclass of this root
- // (the isa pointer of any meta class points to the meta class
- // of the root).
- // NOTE: When is an isa pointer of a hash tabled class ever nil?
- metaClsObject = clsObject->isa;
- if (target && metaClsObject && target->isa != metaClsObject->isa) {
- continue;
- }
-
-#ifdef OBJC_INSTRUMENTED
- subclassCount += 1;
-#endif
-
- _cache_flush ((Class)clsObject);
- if (flush_meta && metaClsObject != NULL) {
- _cache_flush ((Class)metaClsObject);
- }
- }
-#ifdef OBJC_INSTRUMENTED
- LinearFlushCachesVisitedCount += classesVisited;
- if (classesVisited > MaxLinearFlushCachesVisitedCount)
- MaxLinearFlushCachesVisitedCount = classesVisited;
- IdealFlushCachesCount += subclassCount;
- if (subclassCount > MaxIdealFlushCachesCount)
- MaxIdealFlushCachesCount = subclassCount;
-#endif
-
- mutex_unlock(&cacheUpdateLock);
- mutex_unlock(&classLock);
- return;
- }
-
- // Outer loop - flush any cache that could now get a method from
- // cls (i.e. the cache associated with cls and any of its subclasses).
-#ifdef OBJC_INSTRUMENTED
- NonlinearFlushCachesCount += 1;
- classesVisited = 0;
- subclassCount = 0;
-#endif
- while (NXNextHashState(class_hash, &state, (void**)&clsObject))
- {
- struct old_class *clsIter;
-
-#ifdef OBJC_INSTRUMENTED
- NonlinearFlushCachesClassCount += 1;
-#endif
-
- // Inner loop - Process a given class
- clsIter = clsObject;
- while (clsIter)
- {
-
-#ifdef OBJC_INSTRUMENTED
- classesVisited += 1;
-#endif
- // Flush clsObject instance method cache if
- // clsObject is a subclass of cls, or is cls itself
- // Flush the class method cache if that was asked for
- if (clsIter == target)
- {
-#ifdef OBJC_INSTRUMENTED
- subclassCount += 1;
-#endif
- _cache_flush ((Class)clsObject);
- if (flush_meta)
- _cache_flush ((Class)clsObject->isa);
-
- break;
-
- }
-
- // Flush clsObject class method cache if cls is
- // the meta class of clsObject or of one
- // of clsObject's superclasses
- else if (clsIter->isa == target)
- {
-#ifdef OBJC_INSTRUMENTED
- subclassCount += 1;
-#endif
- _cache_flush ((Class)clsObject->isa);
- break;
- }
-
- // Move up superclass chain
- // else if (_class_isInitialized(clsIter))
- clsIter = clsIter->super_class;
-
- // clsIter is not initialized, so its cache
- // must be empty. This happens only when
- // clsIter == clsObject, because
- // superclasses are initialized before
- // subclasses, and this loop traverses
- // from sub- to super- classes.
- // else
- // break;
- }
- }
-#ifdef OBJC_INSTRUMENTED
- NonlinearFlushCachesVisitedCount += classesVisited;
- if (classesVisited > MaxNonlinearFlushCachesVisitedCount)
- MaxNonlinearFlushCachesVisitedCount = classesVisited;
- IdealFlushCachesCount += subclassCount;
- if (subclassCount > MaxIdealFlushCachesCount)
- MaxIdealFlushCachesCount = subclassCount;
-#endif
-
- mutex_unlock(&cacheUpdateLock);
- mutex_unlock(&classLock);
-}
-
-
-/***********************************************************************
-* flush_marked_caches. Flush the method cache of any class marked
-* CLS_FLUSH_CACHE (and all subclasses thereof)
-* fixme instrument
-**********************************************************************/
-void flush_marked_caches(void)
-{
- struct old_class *cls;
- struct old_class *supercls;
- NXHashState state;
-
- mutex_lock(&classLock);
- mutex_lock(&cacheUpdateLock);
-
- state = NXInitHashState(class_hash);
- while (NXNextHashState(class_hash, &state, (void**)&cls)) {
- for (supercls = cls; supercls; supercls = supercls->super_class) {
- if (supercls->info & CLS_FLUSH_CACHE) {
- _cache_flush((Class)cls);
- break;
- }
- }
-
- for (supercls = cls->isa; supercls; supercls = supercls->super_class) {
- if (supercls->info & CLS_FLUSH_CACHE) {
- _cache_flush((Class)cls->isa);
- break;
- }
- }
- }
-
- state = NXInitHashState(class_hash);
- while (NXNextHashState(class_hash, &state, (void**)&cls)) {
- if (cls->info & CLS_FLUSH_CACHE) {
- _class_clearInfo((Class)cls, CLS_FLUSH_CACHE);
- }
- if (cls->isa->info & CLS_FLUSH_CACHE) {
- _class_clearInfo((Class)cls->isa, CLS_FLUSH_CACHE);
- }
- }
-
- mutex_unlock(&cacheUpdateLock);
- mutex_unlock(&classLock);
-}
-
-
-/***********************************************************************
-* get_base_method_list
-* Returns the method list containing the class's own methods,
-* ignoring any method lists added by categories or class_addMethods.
-* Called only by add_class_to_loadable_list.
-* Does not hold methodListLock because add_class_to_loadable_list
-* does not manipulate in-use classes.
-**********************************************************************/
-static struct old_method_list *get_base_method_list(struct old_class *cls)
-{
- struct old_method_list **ptr;
-
- if (!cls->methodLists) return NULL;
- if (cls->info & CLS_NO_METHOD_ARRAY) return (struct old_method_list *)cls->methodLists;
- ptr = cls->methodLists;
- if (!*ptr || *ptr == END_OF_METHODS_LIST) return NULL;
- while ( *ptr != 0 && *ptr != END_OF_METHODS_LIST ) { ptr++; }
- --ptr;
- return *ptr;
-}
-
-
-static IMP _class_getLoadMethod_nocheck(struct old_class *cls)
-{
- struct old_method_list *mlist;
- mlist = get_base_method_list(cls->isa);
- if (mlist) {
- return lookupNamedMethodInMethodList (mlist, "load");
- }
- return NULL;
-}
-
-
-BOOL _class_hasLoadMethod(Class cls)
-{
- if (oldcls(cls)->isa->info & CLS_HAS_LOAD_METHOD) return YES;
- return (_class_getLoadMethod_nocheck(oldcls(cls)) ? YES : NO);
-}
-
-
-/***********************************************************************
-* _class_getLoadMethod
-* Returns cls's +load implementation, or NULL if it doesn't have one.
-**********************************************************************/
-IMP _class_getLoadMethod(Class cls_gen)
-{
- struct old_class *cls = oldcls(cls_gen);
- if (cls->isa->info & CLS_HAS_LOAD_METHOD) {
- return _class_getLoadMethod_nocheck(cls);
- }
- return NULL;
-}
-
-
-BOOL _class_shouldGrowCache(Class cls)
-{
- return _class_getInfo(cls, CLS_GROW_CACHE);
-}
-
-void _class_setGrowCache(Class cls, BOOL grow)
-{
- if (grow) _class_setInfo(cls, CLS_GROW_CACHE);
- else _class_clearInfo(cls, CLS_GROW_CACHE);
-}
-
-BOOL _class_hasCxxStructors(Class cls)
-{
- // this DOES check superclasses too, because set_superclass
- // propagates the flag from the superclass.
- return _class_getInfo(cls, CLS_HAS_CXX_STRUCTORS);
-}
-
-BOOL _class_shouldFinalizeOnMainThread(Class cls) {
- return _class_getInfo(cls, CLS_FINALIZE_ON_MAIN_THREAD);
-}
-
-void _class_setFinalizeOnMainThread(Class cls) {
- _class_setInfo(cls, CLS_FINALIZE_ON_MAIN_THREAD);
-}
-
-BOOL _class_instancesHaveAssociatedObjects(Class cls) {
- return _class_getInfo(cls, CLS_INSTANCES_HAVE_ASSOCIATED_OBJECTS);
-}
-
-void _class_setInstancesHaveAssociatedObjects(Class cls) {
- _class_setInfo(cls, CLS_INSTANCES_HAVE_ASSOCIATED_OBJECTS);
-}
-
-BOOL _class_usesAutomaticRetainRelease(Class cls)
-{
- return NO;
-}
-
-uint32_t _class_getInstanceStart(Class cls)
-{
- _objc_fatal("_class_getInstanceStart() unimplemented for fragile instance variables");
- return 0; // PCB: never used, just provided for ARR consistency.
-}
-
-ptrdiff_t ivar_getOffset(Ivar ivar)
-{
- return oldivar(ivar)->ivar_offset;
-}
-
-const char *ivar_getName(Ivar ivar)
-{
- return oldivar(ivar)->ivar_name;
-}
-
-const char *ivar_getTypeEncoding(Ivar ivar)
-{
- return oldivar(ivar)->ivar_type;
-}
-
-
-IMP method_getImplementation(Method m)
-{
- if (!m) return NULL;
- return oldmethod(m)->method_imp;
-}
-
-SEL method_getName(Method m)
-{
- if (!m) return NULL;
- return oldmethod(m)->method_name;
-}
-
-const char *method_getTypeEncoding(Method m)
-{
- if (!m) return NULL;
- return oldmethod(m)->method_types;
-}
-
-unsigned int method_getSizeOfArguments(Method m)
-{
- OBJC_WARN_DEPRECATED;
- if (!m) return 0;
- return encoding_getSizeOfArguments(method_getTypeEncoding(m));
-}
-
-unsigned int method_getArgumentInfo(Method m, int arg,
- const char **type, int *offset)
-{
- OBJC_WARN_DEPRECATED;
- if (!m) return 0;
- return encoding_getArgumentInfo(method_getTypeEncoding(m),
- arg, type, offset);
-}
-
-
-static OSSpinLock impLock = OS_SPINLOCK_INIT;
-
-IMP method_setImplementation(Method m_gen, IMP imp)
-{
- IMP old;
- struct old_method *m = oldmethod(m_gen);
- if (!m) return NULL;
- if (!imp) return NULL;
-
- if (ignoreSelector(m->method_name)) {
- // Ignored methods stay ignored
- return m->method_imp;
- }
-
- OSSpinLockLock(&impLock);
- old = m->method_imp;
- m->method_imp = imp;
- OSSpinLockUnlock(&impLock);
- return old;
-}
-
-
-void method_exchangeImplementations(Method m1_gen, Method m2_gen)
-{
- IMP m1_imp;
- struct old_method *m1 = oldmethod(m1_gen);
- struct old_method *m2 = oldmethod(m2_gen);
- if (!m1 || !m2) return;
-
- if (ignoreSelector(m1->method_name) || ignoreSelector(m2->method_name)) {
- // Ignored methods stay ignored. Now they're both ignored.
- m1->method_imp = (IMP)&_objc_ignored_method;
- m2->method_imp = (IMP)&_objc_ignored_method;
- return;
- }
-
- OSSpinLockLock(&impLock);
- m1_imp = m1->method_imp;
- m1->method_imp = m2->method_imp;
- m2->method_imp = m1_imp;
- OSSpinLockUnlock(&impLock);
-}
-
-
-struct objc_method_description * method_getDescription(Method m)
-{
- if (!m) return NULL;
- return (struct objc_method_description *)oldmethod(m);
-}
-
-
-const char *property_getName(objc_property_t prop)
-{
- return oldproperty(prop)->name;
-}
-
-const char *property_getAttributes(objc_property_t prop)
-{
- return oldproperty(prop)->attributes;
-}
-
-objc_property_attribute_t *property_copyAttributeList(objc_property_t prop,
- unsigned int *outCount)
-{
- if (!prop) {
- if (outCount) *outCount = 0;
- return NULL;
- }
-
- objc_property_attribute_t *result;
- mutex_lock(&classLock);
- result = copyPropertyAttributeList(oldproperty(prop)->attributes,outCount);
- mutex_unlock(&classLock);
- return result;
-}
-
-char * property_copyAttributeValue(objc_property_t prop, const char *name)
-{
- if (!prop || !name || *name == '\0') return NULL;
-
- char *result;
- mutex_lock(&classLock);
- result = copyPropertyAttributeValue(oldproperty(prop)->attributes, name);
- mutex_unlock(&classLock);
- return result;
-}
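-
-/* Editor's note: illustrative sketch, not part of the original source.
- * Both copy functions above return heap-allocated results the caller must
- * free. "T" is the standard type-encoding attribute name. */
-#if 0
-static void example_propertyAttributes(objc_property_t prop)
-{
-    // Full attribute list, parsed into name/value pairs.
-    unsigned int count;
-    objc_property_attribute_t *attrs = property_copyAttributeList(prop, &count);
-    for (unsigned int i = 0; i < count; i++) {
-        printf("%s = %s\n", attrs[i].name, attrs[i].value);
-    }
-    free(attrs);
-
-    // Single attribute value, here the property's type encoding.
-    char *type = property_copyAttributeValue(prop, "T");
-    free(type);
-}
-#endif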
-
-
-/***********************************************************************
-* class_addMethod
-**********************************************************************/
-static IMP _class_addMethod(Class cls_gen, SEL name, IMP imp,
- const char *types, BOOL replace)
-{
- struct old_class *cls = oldcls(cls_gen);
- struct old_method *m;
- IMP result = NULL;
-
- if (!types) types = "";
-
- mutex_lock(&methodListLock);
-
- if ((m = _findMethodInClass(cls, name))) {
- // already exists
- // fixme atomic
- result = method_getImplementation((Method)m);
- if (replace) {
- method_setImplementation((Method)m, imp);
- }
- } else {
- // fixme could be faster
- struct old_method_list *mlist =
- _calloc_internal(sizeof(struct old_method_list), 1);
- mlist->obsolete = fixed_up_method_list;
- mlist->method_count = 1;
- mlist->method_list[0].method_name = name;
- mlist->method_list[0].method_types = _strdup_internal(types);
- if (!ignoreSelector(name)) {
- mlist->method_list[0].method_imp = imp;
- } else {
- mlist->method_list[0].method_imp = (IMP)&_objc_ignored_method;
- }
-
- _objc_insertMethods(cls, mlist, NULL);
- if (!(cls->info & CLS_CONSTRUCTING)) {
- flush_caches((Class)cls, NO);
- } else {
- // in-construction class has no subclasses
- flush_cache((Class)cls);
- }
- result = NULL;
- }
-
- mutex_unlock(&methodListLock);
-
- return result;
-}
-
-
-/***********************************************************************
-* class_addMethod
-**********************************************************************/
-BOOL class_addMethod(Class cls, SEL name, IMP imp, const char *types)
-{
- IMP old;
- if (!cls) return NO;
-
- old = _class_addMethod(cls, name, imp, types, NO);
- return old ? NO : YES;
-}
-
-
-/***********************************************************************
-* class_replaceMethod
-**********************************************************************/
-IMP class_replaceMethod(Class cls, SEL name, IMP imp, const char *types)
-{
- if (!cls) return NULL;
-
- return _class_addMethod(cls, name, imp, types, YES);
-}
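-
-/* Editor's note: illustrative sketch, not part of the original source.
- * Typical use of class_addMethod / class_replaceMethod. The class name,
- * selector, and C function are hypothetical; "@@:" encodes an id-returning
- * method that takes no arguments beyond self and _cmd. */
-#if 0
-static id my_describe(id self, SEL _cmd)
-{
-    return self;  // placeholder implementation
-}
-
-static void example_addMethod(void)
-{
-    Class cls = objc_getClass("MyClass");
-    SEL sel = sel_registerName("describe");
-
-    // class_addMethod returns NO if -describe already exists on MyClass.
-    if (!class_addMethod(cls, sel, (IMP)my_describe, "@@:")) {
-        // Install unconditionally instead; the previous IMP is returned.
-        IMP previous = class_replaceMethod(cls, sel, (IMP)my_describe, "@@:");
-        (void)previous;
-    }
-}
-#endif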
-
-
-/***********************************************************************
-* class_addIvar
-**********************************************************************/
-BOOL class_addIvar(Class cls_gen, const char *name, size_t size,
- uint8_t alignment, const char *type)
-{
- struct old_class *cls = oldcls(cls_gen);
- BOOL result = YES;
-
- if (!cls) return NO;
- if (ISMETA(cls)) return NO;
- if (!(cls->info & CLS_CONSTRUCTING)) return NO;
-
- if (!type) type = "";
- if (name && 0 == strcmp(name, "")) name = NULL;
-
- mutex_lock(&classLock);
-
- // Check for existing ivar with this name
- // fixme check superclasses?
- if (cls->ivars) {
- int i;
- for (i = 0; i < cls->ivars->ivar_count; i++) {
- if (0 == strcmp(cls->ivars->ivar_list[i].ivar_name, name)) {
- result = NO;
- break;
- }
- }
- }
-
- if (result) {
- struct old_ivar_list *old = cls->ivars;
- size_t oldSize;
- int newCount;
- struct old_ivar *ivar;
- size_t alignBytes;
- size_t misalign;
-
- if (old) {
- oldSize = sizeof(struct old_ivar_list) +
- (old->ivar_count - 1) * sizeof(struct old_ivar);
- newCount = 1 + old->ivar_count;
- } else {
- oldSize = sizeof(struct old_ivar_list) - sizeof(struct old_ivar);
- newCount = 1;
- }
-
- // allocate new ivar list
- cls->ivars = _calloc_internal(oldSize + sizeof(struct old_ivar), 1);
- if (old) memcpy(cls->ivars, old, oldSize);
- if (old && malloc_size(old)) free(old);
- cls->ivars->ivar_count = newCount;
- ivar = &cls->ivars->ivar_list[newCount-1];
-
- // set ivar name and type
- ivar->ivar_name = _strdup_internal(name);
- ivar->ivar_type = _strdup_internal(type);
-
- // align if necessary
- alignBytes = 1 << alignment;
- misalign = cls->instance_size % alignBytes;
- if (misalign) cls->instance_size += (long)(alignBytes - misalign);
-
- // set ivar offset and increase instance size
- ivar->ivar_offset = (int)cls->instance_size;
- cls->instance_size += (long)size;
- }
-
- mutex_unlock(&classLock);
-
- return result;
-}
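-
-/* Editor's note: illustrative sketch, not part of the original source.
- * class_addIvar only succeeds while the class is still CLS_CONSTRUCTING,
- * i.e. between objc_allocateClassPair and objc_registerClassPair (both
- * defined later in this file). The class name is hypothetical. */
-#if 0
-static Class example_buildClass(void)
-{
-    Class cls = objc_allocateClassPair(objc_getClass("NSObject"),
-                                       "MyDynamicClass", 0);
-    if (!cls) return Nil;
-
-    // One pointer-sized ivar; alignment is passed as log2 of the byte alignment.
-    class_addIvar(cls, "_delegate", sizeof(id),
-                  (uint8_t)(sizeof(id) == 8 ? 3 : 2), "@");
-
-    objc_registerClassPair(cls);  // clears CLS_CONSTRUCTING
-    return cls;
-}
-#endif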
-
-
-/***********************************************************************
-* class_addProtocol
-**********************************************************************/
-BOOL class_addProtocol(Class cls_gen, Protocol *protocol_gen)
-{
- struct old_class *cls = oldcls(cls_gen);
- struct old_protocol *protocol = oldprotocol(protocol_gen);
- struct old_protocol_list *plist;
-
- if (!cls) return NO;
- if (class_conformsToProtocol(cls_gen, protocol_gen)) return NO;
-
- mutex_lock(&classLock);
-
- // fixme optimize - protocol list doesn't escape?
- plist = _calloc_internal(sizeof(struct old_protocol_list), 1);
- plist->count = 1;
- plist->list[0] = protocol;
- plist->next = cls->protocols;
- cls->protocols = plist;
-
- // fixme metaclass?
-
- mutex_unlock(&classLock);
-
- return YES;
-}
-
-
-/***********************************************************************
-* _class_addProperties
-* Internal helper to add properties to a class.
-* Used by category attachment and class_addProperty()
-* Locking: acquires classLock
-**********************************************************************/
-BOOL
-_class_addProperties(struct old_class *cls,
- struct old_property_list *additions)
-{
- struct old_property_list *newlist;
-
- if (!(cls->info & CLS_EXT)) return NO;
-
- newlist =
- _memdup_internal(additions, sizeof(*newlist) - sizeof(newlist->first)
- + (additions->entsize * additions->count));
-
- mutex_lock(&classLock);
-
- allocateExt(cls);
- if (!cls->ext->propertyLists) {
- // cls has no properties - simply use this list
- cls->ext->propertyLists = (struct old_property_list **)newlist;
- _class_setInfo((Class)cls, CLS_NO_PROPERTY_ARRAY);
- }
- else if (cls->info & CLS_NO_PROPERTY_ARRAY) {
- // cls has one property list - make a new array
- struct old_property_list **newarray =
- _malloc_internal(3 * sizeof(*newarray));
- newarray[0] = newlist;
- newarray[1] = (struct old_property_list *)cls->ext->propertyLists;
- newarray[2] = NULL;
- cls->ext->propertyLists = newarray;
- _class_clearInfo((Class)cls, CLS_NO_PROPERTY_ARRAY);
- }
- else {
- // cls has a property array - make a bigger one
- struct old_property_list **newarray;
- int count = 0;
- while (cls->ext->propertyLists[count]) count++;
- newarray = _malloc_internal((count+2) * sizeof(*newarray));
- newarray[0] = newlist;
- memcpy(&newarray[1], &cls->ext->propertyLists[0],
- count * sizeof(*newarray));
- newarray[count+1] = NULL;
- free(cls->ext->propertyLists);
- cls->ext->propertyLists = newarray;
- }
-
- mutex_unlock(&classLock);
-
- return YES;
-}
-
-
-/***********************************************************************
-* class_addProperty
-* Adds a property to a class. Returns NO if the property already exists.
-* Locking: acquires classLock
-**********************************************************************/
-static BOOL
-_class_addProperty(Class cls_gen, const char *name,
- const objc_property_attribute_t *attrs, unsigned int count,
- BOOL replace)
-{
- struct old_class *cls = oldcls(cls_gen);
-
- if (!cls) return NO;
- if (!name) return NO;
-
- struct old_property *prop = oldproperty(class_getProperty(cls_gen, name));
- if (prop && !replace) {
- // already exists, refuse to replace
- return NO;
- }
- else if (prop) {
- // replace existing
- mutex_lock(&classLock);
- try_free(prop->attributes);
- prop->attributes = copyPropertyAttributeString(attrs, count);
- mutex_unlock(&classLock);
- return YES;
- }
- else {
- // add new
- struct old_property_list proplist;
- proplist.entsize = sizeof(struct old_property);
- proplist.count = 1;
- proplist.first.name = _strdup_internal(name);
- proplist.first.attributes = copyPropertyAttributeString(attrs, count);
-
- return _class_addProperties(cls, &proplist);
- }
-}
-
-BOOL
-class_addProperty(Class cls_gen, const char *name,
- const objc_property_attribute_t *attrs, unsigned int n)
-{
- return _class_addProperty(cls_gen, name, attrs, n, NO);
-}
-
-void
-class_replaceProperty(Class cls_gen, const char *name,
- const objc_property_attribute_t *attrs, unsigned int n)
-{
- _class_addProperty(cls_gen, name, attrs, n, YES);
-}
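-
-/* Editor's note: illustrative sketch, not part of the original source.
- * Shows the attribute array consumed by class_addProperty. The property
- * name and backing ivar are hypothetical; "T" and "V" are the standard
- * type and backing-ivar attribute names. */
-#if 0
-static void example_addProperty(Class cls)
-{
-    objc_property_attribute_t type = { "T", "@\"NSString\"" };  // NSString *
-    objc_property_attribute_t ivar = { "V", "_title" };         // backing ivar
-    objc_property_attribute_t attrs[] = { type, ivar };
-
-    // Returns NO if a property named "title" already exists;
-    // class_replaceProperty would overwrite it instead.
-    class_addProperty(cls, "title", attrs, 2);
-}
-#endif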
-
-
-/***********************************************************************
-* class_copyProtocolList. Returns a heap block containing the
-* protocols implemented by the class, or NULL if the class
-* implements no protocols. Caller must free the block.
-* Does not copy any superclass's protocols.
-**********************************************************************/
-Protocol * __unsafe_unretained *
-class_copyProtocolList(Class cls_gen, unsigned int *outCount)
-{
- struct old_class *cls = oldcls(cls_gen);
- struct old_protocol_list *plist;
- Protocol **result = NULL;
- unsigned int count = 0;
- unsigned int p;
-
- if (!cls) {
- if (outCount) *outCount = 0;
- return NULL;
- }
-
- mutex_lock(&classLock);
-
- for (plist = cls->protocols; plist != NULL; plist = plist->next) {
- count += (int)plist->count;
- }
-
- if (count > 0) {
- result = malloc((count+1) * sizeof(Protocol *));
-
- for (p = 0, plist = cls->protocols;
- plist != NULL;
- plist = plist->next)
- {
- int i;
- for (i = 0; i < plist->count; i++) {
- result[p++] = (Protocol *)plist->list[i];
- }
- }
- result[p] = NULL;
- }
-
- mutex_unlock(&classLock);
-
- if (outCount) *outCount = count;
- return result;
-}
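-
-/* Editor's note: illustrative sketch, not part of the original source.
- * The returned block is NULL-terminated and heap-allocated, so the caller
- * frees it exactly once; free(NULL) is harmless when no protocols exist. */
-#if 0
-static void example_listProtocols(Class cls)
-{
-    unsigned int count;
-    Protocol * __unsafe_unretained *protos = class_copyProtocolList(cls, &count);
-    for (unsigned int i = 0; i < count; i++) {
-        printf("%s conforms to %s\n",
-               class_getName(cls), protocol_getName(protos[i]));
-    }
-    free(protos);
-}
-#endif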
-
-
-/***********************************************************************
-* class_getProperty. Return the named property.
-**********************************************************************/
-objc_property_t class_getProperty(Class cls_gen, const char *name)
-{
- struct old_property *result;
- struct old_class *cls = oldcls(cls_gen);
- if (!cls || !name) return NULL;
-
- mutex_lock(&classLock);
-
- for (result = NULL; cls && !result; cls = cls->super_class) {
- uintptr_t iterator = 0;
- struct old_property_list *plist;
- while ((plist = nextPropertyList(cls, &iterator))) {
- uint32_t i;
- for (i = 0; i < plist->count; i++) {
- struct old_property *p = property_list_nth(plist, i);
- if (0 == strcmp(name, p->name)) {
- result = p;
- goto done;
- }
- }
- }
- }
-
- done:
- mutex_unlock(&classLock);
-
- return (objc_property_t)result;
-}
-
-
-/***********************************************************************
-* class_copyPropertyList. Returns a heap block containing the
-* properties declared in the class, or NULL if the class
-* declares no properties. Caller must free the block.
-* Does not copy any superclass's properties.
-**********************************************************************/
-objc_property_t *class_copyPropertyList(Class cls_gen, unsigned int *outCount)
-{
- struct old_class *cls = oldcls(cls_gen);
- struct old_property_list *plist;
- uintptr_t iterator = 0;
- struct old_property **result = NULL;
- unsigned int count = 0;
- unsigned int p, i;
-
- if (!cls) {
- if (outCount) *outCount = 0;
- return NULL;
- }
-
- mutex_lock(&classLock);
-
- iterator = 0;
- while ((plist = nextPropertyList(cls, &iterator))) {
- count += plist->count;
- }
-
- if (count > 0) {
- result = malloc((count+1) * sizeof(struct old_property *));
-
- p = 0;
- iterator = 0;
- while ((plist = nextPropertyList(cls, &iterator))) {
- for (i = 0; i < plist->count; i++) {
- result[p++] = property_list_nth(plist, i);
- }
- }
- result[p] = NULL;
- }
-
- mutex_unlock(&classLock);
-
- if (outCount) *outCount = count;
- return (objc_property_t *)result;
-}
-
-
-/***********************************************************************
-* class_copyMethodList. Returns a heap block containing the
-* methods implemented by the class, or NULL if the class
-* implements no methods. Caller must free the block.
-* Does not copy any superclass's methods.
-**********************************************************************/
-Method *class_copyMethodList(Class cls_gen, unsigned int *outCount)
-{
- struct old_class *cls = oldcls(cls_gen);
- struct old_method_list *mlist;
- void *iterator = NULL;
- Method *result = NULL;
- unsigned int count = 0;
- unsigned int m;
-
- if (!cls) {
- if (outCount) *outCount = 0;
- return NULL;
- }
-
- mutex_lock(&methodListLock);
-
- iterator = NULL;
- while ((mlist = nextMethodList(cls, &iterator))) {
- count += mlist->method_count;
- }
-
- if (count > 0) {
- result = malloc((count+1) * sizeof(Method));
-
- m = 0;
- iterator = NULL;
- while ((mlist = nextMethodList(cls, &iterator))) {
- int i;
- for (i = 0; i < mlist->method_count; i++) {
- Method aMethod = (Method)&mlist->method_list[i];
- if (ignoreSelector(method_getName(aMethod))) {
- count--;
- continue;
- }
- result[m++] = aMethod;
- }
- }
- result[m] = NULL;
- }
-
- mutex_unlock(&methodListLock);
-
- if (outCount) *outCount = count;
- return result;
-}
-
-
-/***********************************************************************
-* class_copyIvarList. Returns a heap block containing the
-* ivars declared in the class, or NULL if the class
-* declares no ivars. Caller must free the block.
-* Does not copy any superclass's ivars.
-**********************************************************************/
-Ivar *class_copyIvarList(Class cls_gen, unsigned int *outCount)
-{
- struct old_class *cls = oldcls(cls_gen);
- Ivar *result = NULL;
- unsigned int count = 0;
- int i;
-
- if (!cls) {
- if (outCount) *outCount = 0;
- return NULL;
- }
-
- if (cls->ivars) {
- count = cls->ivars->ivar_count;
- }
-
- if (count > 0) {
- result = malloc((count+1) * sizeof(Ivar));
-
- for (i = 0; i < cls->ivars->ivar_count; i++) {
- result[i] = (Ivar)&cls->ivars->ivar_list[i];
- }
- result[i] = NULL;
- }
-
- if (outCount) *outCount = count;
- return result;
-}
-
-
-/***********************************************************************
-* objc_allocateClass.
-**********************************************************************/
-
-void set_superclass(struct old_class *cls, struct old_class *supercls,
- BOOL cls_is_new)
-{
- struct old_class *meta = cls->isa;
-
- if (supercls) {
- cls->super_class = supercls;
- meta->super_class = supercls->isa;
- meta->isa = supercls->isa->isa;
-
- // Propagate C++ cdtors from superclass.
- if (supercls->info & CLS_HAS_CXX_STRUCTORS) {
- if (cls_is_new) cls->info |= CLS_HAS_CXX_STRUCTORS;
- else _class_setInfo((Class)cls, CLS_HAS_CXX_STRUCTORS);
- }
-
- // Superclass is no longer a leaf for cache flushing
- if (supercls->info & CLS_LEAF) {
- _class_clearInfo((Class)supercls, CLS_LEAF);
- _class_clearInfo((Class)supercls->isa, CLS_LEAF);
- }
- } else {
- cls->super_class = Nil; // superclass of root class is nil
- meta->super_class = cls; // superclass of root metaclass is root class
- meta->isa = meta; // metaclass of root metaclass is root metaclass
-
- // Root class is never a leaf for cache flushing, because the
- // root metaclass is a subclass. (This could be optimized, but
- // is too uncommon to bother.)
- _class_clearInfo((Class)cls, CLS_LEAF);
- _class_clearInfo((Class)meta, CLS_LEAF);
- }
-}
-
-// &UnsetLayout is the default ivar layout during class construction
-static const uint8_t UnsetLayout = 0;
-
-Class objc_initializeClassPair(Class superclass_gen, const char *name, Class cls_gen, Class meta_gen)
-{
- struct old_class *supercls = oldcls(superclass_gen);
- struct old_class *cls = oldcls(cls_gen);
- struct old_class *meta = oldcls(meta_gen);
-
- // Connect to superclasses and metaclasses
- cls->isa = meta;
- set_superclass(cls, supercls, YES);
-
- // Set basic info
- cls->name = _strdup_internal(name);
- meta->name = _strdup_internal(name);
- cls->version = 0;
- meta->version = 7;
- cls->info = CLS_CLASS | CLS_CONSTRUCTING | CLS_EXT | CLS_LEAF;
- meta->info = CLS_META | CLS_CONSTRUCTING | CLS_EXT | CLS_LEAF;
-
- // Set instance size based on superclass.
- if (supercls) {
- cls->instance_size = supercls->instance_size;
- meta->instance_size = supercls->isa->instance_size;
- } else {
- cls->instance_size = sizeof(struct old_class *); // just an isa
- meta->instance_size = sizeof(struct old_class);
- }
-
- // No ivars. No methods. Empty cache. No protocols. No layout. Empty ext.
- cls->ivars = NULL;
- cls->methodLists = NULL;
- cls->cache = (Cache)&_objc_empty_cache;
- cls->protocols = NULL;
- cls->ivar_layout = &UnsetLayout;
- cls->ext = NULL;
- allocateExt(cls);
- cls->ext->weak_ivar_layout = &UnsetLayout;
-
- meta->ivars = NULL;
- meta->methodLists = NULL;
- meta->cache = (Cache)&_objc_empty_cache;
- meta->protocols = NULL;
- meta->ext = NULL;
-
- return cls_gen;
-}
-
-Class objc_allocateClassPair(Class superclass_gen, const char *name,
- size_t extraBytes)
-{
- struct old_class *supercls = oldcls(superclass_gen);
- Class cls, meta;
-
- if (objc_getClass(name)) return Nil;
- // fixme reserve class name against simultaneous allocation
-
- if (supercls && (supercls->info & CLS_CONSTRUCTING)) {
- // Can't make subclass of an in-construction class
- return Nil;
- }
-
- // Allocate new classes.
- if (supercls) {
- cls = _calloc_class(_class_getInstanceSize((Class)supercls->isa) + extraBytes);
- meta = _calloc_class(_class_getInstanceSize((Class)supercls->isa->isa) + extraBytes);
- } else {
- cls = _calloc_class(sizeof(struct old_class) + extraBytes);
- meta = _calloc_class(sizeof(struct old_class) + extraBytes);
- }
-
-
- objc_initializeClassPair(superclass_gen, name, cls, meta);
-
- return (Class)cls;
-}
-
-
-void objc_registerClassPair(Class cls_gen)
-{
- struct old_class *cls = oldcls(cls_gen);
-
- if ((cls->info & CLS_CONSTRUCTED) ||
- (cls->isa->info & CLS_CONSTRUCTED))
- {
- _objc_inform("objc_registerClassPair: class '%s' was already "
- "registered!", cls->name);
- return;
- }
-
- if (!(cls->info & CLS_CONSTRUCTING) ||
- !(cls->isa->info & CLS_CONSTRUCTING))
- {
- _objc_inform("objc_registerClassPair: class '%s' was not "
- "allocated with objc_allocateClassPair!", cls->name);
- return;
- }
-
- if (ISMETA(cls)) {
- _objc_inform("objc_registerClassPair: class '%s' is a metaclass, "
- "not a class!", cls->name);
- return;
- }
-
- mutex_lock(&classLock);
-
- // Build ivar layouts
- if (UseGC) {
- if (cls->ivar_layout != &UnsetLayout) {
- // Class builder already called class_setIvarLayout.
- }
- else if (!cls->super_class) {
- // Root class. Scan conservatively (should be isa ivar only).
- cls->ivar_layout = NULL;
- }
- else if (cls->ivars == NULL) {
- // No local ivars. Use superclass's layout.
- cls->ivar_layout =
- _ustrdup_internal(cls->super_class->ivar_layout);
- }
- else {
- // Has local ivars. Build layout based on superclass.
- struct old_class *supercls = cls->super_class;
- const uint8_t *superlayout =
- class_getIvarLayout((Class)supercls);
- layout_bitmap bitmap =
- layout_bitmap_create(superlayout, supercls->instance_size,
- cls->instance_size, NO);
- int i;
- for (i = 0; i < cls->ivars->ivar_count; i++) {
- struct old_ivar *iv = &cls->ivars->ivar_list[i];
- layout_bitmap_set_ivar(bitmap, iv->ivar_type, iv->ivar_offset);
- }
- cls->ivar_layout = layout_string_create(bitmap);
- layout_bitmap_free(bitmap);
- }
-
- if (cls->ext->weak_ivar_layout != &UnsetLayout) {
- // Class builder already called class_setWeakIvarLayout.
- }
- else if (!cls->super_class) {
- // Root class. No weak ivars (should be isa ivar only)
- cls->ext->weak_ivar_layout = NULL;
- }
- else if (cls->ivars == NULL) {
- // No local ivars. Use superclass's layout.
- const uint8_t *weak =
- class_getWeakIvarLayout((Class)cls->super_class);
- if (weak) {
- cls->ext->weak_ivar_layout = _ustrdup_internal(weak);
- } else {
- cls->ext->weak_ivar_layout = NULL;
- }
- }
- else {
- // Has local ivars. Build layout based on superclass.
- // No way to add weak ivars yet.
- const uint8_t *weak =
- class_getWeakIvarLayout((Class)cls->super_class);
- if (weak) {
- cls->ext->weak_ivar_layout = _ustrdup_internal(weak);
- } else {
- cls->ext->weak_ivar_layout = NULL;
- }
- }
- }
-
- // Clear "under construction" bit, set "done constructing" bit
- cls->info &= ~CLS_CONSTRUCTING;
- cls->isa->info &= ~CLS_CONSTRUCTING;
- cls->info |= CLS_CONSTRUCTED;
- cls->isa->info |= CLS_CONSTRUCTED;
-
- NXHashInsertIfAbsent(class_hash, cls);
- objc_addRegisteredClass((Class)cls);
- //objc_addRegisteredClass(cls->isa); if we ever allocate classes from GC
-
- mutex_unlock(&classLock);
-}
-
-
-Class objc_duplicateClass(Class orig_gen, const char *name, size_t extraBytes)
-{
- unsigned int count, i;
- struct old_method **originalMethods;
- struct old_method_list *duplicateMethods;
- struct old_class *original = oldcls(orig_gen);
- // Don't use sizeof(struct objc_class) here because
- // instance_size has historically contained two extra words,
- // and instance_size is what objc_getIndexedIvars() actually uses.
- struct old_class *duplicate = (struct old_class *)
- _calloc_class(_class_getInstanceSize((Class)original->isa) + extraBytes);
-
- duplicate->isa = original->isa;
- duplicate->super_class = original->super_class;
- duplicate->name = strdup(name);
- duplicate->version = original->version;
- duplicate->info = original->info & (CLS_CLASS|CLS_META|CLS_INITIALIZED|CLS_JAVA_HYBRID|CLS_JAVA_CLASS|CLS_HAS_CXX_STRUCTORS|CLS_HAS_LOAD_METHOD);
- duplicate->instance_size = original->instance_size;
- duplicate->ivars = original->ivars;
- // methodLists handled below
- duplicate->cache = (Cache)&_objc_empty_cache;
- duplicate->protocols = original->protocols;
- if (original->info & CLS_EXT) {
- duplicate->info |= original->info & (CLS_EXT|CLS_NO_PROPERTY_ARRAY);
- duplicate->ivar_layout = original->ivar_layout;
- if (original->ext) {
- duplicate->ext = _malloc_internal(original->ext->size);
- memcpy(duplicate->ext, original->ext, original->ext->size);
- } else {
- duplicate->ext = NULL;
- }
- }
-
- // Method lists are deep-copied so they can be stomped.
- originalMethods = (struct old_method **)
- class_copyMethodList(orig_gen, &count);
- if (originalMethods) {
- duplicateMethods = (struct old_method_list *)
- calloc(sizeof(struct old_method_list) +
- (count-1)*sizeof(struct old_method), 1);
- duplicateMethods->obsolete = fixed_up_method_list;
- duplicateMethods->method_count = count;
- for (i = 0; i < count; i++) {
- duplicateMethods->method_list[i] = *(originalMethods[i]);
- }
- duplicate->methodLists = (struct old_method_list **)duplicateMethods;
- duplicate->info |= CLS_NO_METHOD_ARRAY;
- free(originalMethods);
- }
-
- mutex_lock(&classLock);
- NXHashInsert(class_hash, duplicate);
- objc_addRegisteredClass((Class)duplicate);
- mutex_unlock(&classLock);
-
- return (Class)duplicate;
-}
-
-
-void objc_disposeClassPair(Class cls_gen)
-{
- struct old_class *cls = oldcls(cls_gen);
-
- if (!(cls->info & (CLS_CONSTRUCTED|CLS_CONSTRUCTING)) ||
- !(cls->isa->info & (CLS_CONSTRUCTED|CLS_CONSTRUCTING)))
- {
- // class not allocated with objc_allocateClassPair
- // disposing still-unregistered class is OK!
- _objc_inform("objc_disposeClassPair: class '%s' was not "
- "allocated with objc_allocateClassPair!", cls->name);
- return;
- }
-
- if (ISMETA(cls)) {
- _objc_inform("objc_disposeClassPair: class '%s' is a metaclass, "
- "not a class!", cls->name);
- return;
- }
-
- mutex_lock(&classLock);
- NXHashRemove(class_hash, cls);
- objc_removeRegisteredClass((Class)cls);
- unload_class(cls->isa);
- unload_class(cls);
- mutex_unlock(&classLock);
-}
-
-
-
-/***********************************************************************
-* _class_createInstanceFromZone. Allocate an instance of the
-* specified class with the specified number of bytes for indexed
-* variables, in the specified zone. The isa field is set to the
-* class, C++ default constructors are called, and all other fields are zeroed.
-**********************************************************************/
-id
-_class_createInstanceFromZone(Class cls, size_t extraBytes, void *zone)
-{
- id obj;
- size_t size;
-
- // Can't create something for nothing
- if (!cls) return nil;
-
- // Allocate and initialize
- size = _class_getInstanceSize(cls) + extraBytes;
-
- // CF requires all objects be at least 16 bytes.
- if (size < 16) size = 16;
-
-#if SUPPORT_GC
- if (UseGC) {
- obj = (id)auto_zone_allocate_object(gc_zone, size,
- AUTO_OBJECT_SCANNED, 0, 1);
- } else
-#endif
- if (zone) {
- obj = (id)malloc_zone_calloc (zone, 1, size);
- } else {
- obj = (id)calloc(1, size);
- }
- if (!obj) return nil;
-
- obj->isa = cls;
-
- if (_class_hasCxxStructors(cls)) {
- obj = _objc_constructOrFree(cls, obj);
- }
-
- return obj;
-}
-
-
-/***********************************************************************
-* _class_createInstance. Allocate an instance of the specified
-* class with the specified number of bytes for indexed variables, in
-* the default zone, using _class_createInstanceFromZone.
-**********************************************************************/
-static id _class_createInstance(Class cls, size_t extraBytes)
-{
- return _class_createInstanceFromZone (cls, extraBytes, NULL);
-}
-
-
-static id _object_copyFromZone(id oldObj, size_t extraBytes, void *zone)
-{
- id obj;
- size_t size;
-
- if (!oldObj) return nil;
-
- obj = (*_zoneAlloc)(oldObj->isa, extraBytes, zone);
- size = _class_getInstanceSize(oldObj->isa) + extraBytes;
-
- // fixme need C++ copy constructor
- objc_memmove_collectable(obj, oldObj, size);
-
-#if SUPPORT_GC
- if (UseGC) gc_fixup_weakreferences(obj, oldObj);
-#endif
-
- return obj;
-}
-
-
-/***********************************************************************
-* objc_destructInstance
-* Destroys an instance without freeing memory.
-* Calls C++ destructors.
-* Removes associative references.
-* Returns `obj`. Does nothing if `obj` is nil.
-* Be warned that GC DOES NOT CALL THIS. If you edit this, also edit finalize.
-* CoreFoundation and other clients do call this under GC.
-**********************************************************************/
-void *objc_destructInstance(id obj)
-{
- if (obj) {
- Class isa = _object_getClass(obj);
-
- if (_class_hasCxxStructors(isa)) {
- object_cxxDestruct(obj);
- }
-
- if (_class_instancesHaveAssociatedObjects(isa)) {
- _object_remove_assocations(obj);
- }
-
- if (!UseGC) objc_clear_deallocating(obj);
- }
-
- return obj;
-}
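-
-/* Editor's note: illustrative sketch, not part of the original source.
- * objc_destructInstance tears an object down (C++ destructors, associated
- * references) without freeing its memory, so a caller that owns the
- * allocation releases it separately, mirroring _object_dispose below. */
-#if 0
-static void example_destructInPlace(id obj)
-{
-    objc_destructInstance(obj);  // run destructors, drop associations
-    free(obj);                   // caller owns and releases the storage
-}
-#endif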
-
-static id
-_object_dispose(id anObject)
-{
- if (anObject==nil) return nil;
-
- objc_destructInstance(anObject);
-
-#if SUPPORT_GC
- if (UseGC) {
- auto_zone_retain(gc_zone, anObject); // gc free expects rc==1
- } else
-#endif
- {
- // only clobber isa for non-gc
- anObject->isa = _objc_getFreedObjectClass ();
- }
- free(anObject);
- return nil;
-}
-
-static id _object_copy(id oldObj, size_t extraBytes)
-{
- void *z = malloc_zone_from_ptr(oldObj);
- return _object_copyFromZone(oldObj, extraBytes,
- z ? z : malloc_default_zone());
-}
-
-static id _object_reallocFromZone(id anObject, size_t nBytes,
- void *zone)
-{
- id newObject;
- Class tmp;
-
- if (anObject == nil)
- __objc_error(nil, "reallocating nil object");
-
- if (anObject->isa == _objc_getFreedObjectClass ())
- __objc_error(anObject, "reallocating freed object");
-
- if (nBytes < _class_getInstanceSize(anObject->isa))
- __objc_error(anObject, "(%s, %zu) requested size too small",
- object_getClassName(anObject), nBytes);
-
- // fixme need C++ copy constructor
- // fixme GC copy
- // Make sure not to modify space that has been declared free
- tmp = anObject->isa;
- anObject->isa = _objc_getFreedObjectClass ();
- newObject = (id)malloc_zone_realloc(zone, anObject, nBytes);
- if (newObject) {
- newObject->isa = tmp;
- } else {
- // realloc failed, anObject is still alive
- anObject->isa = tmp;
- }
- return newObject;
-}
-
-
-static id _object_realloc(id anObject, size_t nBytes)
-{
- void *z = malloc_zone_from_ptr(anObject);
- return _object_reallocFromZone(anObject,
- nBytes,
- z ? z : malloc_default_zone());
-}
-
-id (*_alloc)(Class, size_t) = _class_createInstance;
-id (*_copy)(id, size_t) = _object_copy;
-id (*_realloc)(id, size_t) = _object_realloc;
-id (*_dealloc)(id) = _object_dispose;
-id (*_zoneAlloc)(Class, size_t, void *) = _class_createInstanceFromZone;
-id (*_zoneCopy)(id, size_t, void *) = _object_copyFromZone;
-id (*_zoneRealloc)(id, size_t, void *) = _object_reallocFromZone;
-void (*_error)(id, const char *, va_list) = _objc_error;
-
-
-id class_createInstance(Class cls, size_t extraBytes)
-{
- if (UseGC) {
- return _class_createInstance(cls, extraBytes);
- } else {
- return (*_alloc)(cls, extraBytes);
- }
-}
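-
-/* Editor's note: illustrative sketch, not part of the original source.
- * extraBytes requests additional indexed-ivar storage past the declared
- * instance size, reachable via object_getIndexedIvars. The class name is
- * hypothetical. */
-#if 0
-static id example_createWithExtraBytes(void)
-{
-    Class cls = objc_getClass("MyBuffer");
-    id obj = class_createInstance(cls, 16);
-
-    char *extra = (char *)object_getIndexedIvars(obj);  // points past the ivars
-    extra[0] = 0;
-    return obj;
-}
-#endif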
-
-id class_createInstanceFromZone(Class cls, size_t extraBytes, void *z)
-{
- OBJC_WARN_DEPRECATED;
- if (UseGC) {
- return _class_createInstanceFromZone(cls, extraBytes, z);
- } else {
- return (*_zoneAlloc)(cls, extraBytes, z);
- }
-}
-
-unsigned class_createInstances(Class cls, size_t extraBytes,
- id *results, unsigned num_requested)
-{
- if (UseGC || _alloc == &_class_createInstance) {
- return _class_createInstancesFromZone(cls, extraBytes, NULL,
- results, num_requested);
- } else {
- // _alloc in use, which isn't understood by the batch allocator
- return 0;
- }
-}
-
-id object_copy(id obj, size_t extraBytes)
-{
- if (UseGC) return _object_copy(obj, extraBytes);
- else return (*_copy)(obj, extraBytes);
-}
-
-id object_copyFromZone(id obj, size_t extraBytes, void *z)
-{
- OBJC_WARN_DEPRECATED;
- if (UseGC) return _object_copyFromZone(obj, extraBytes, z);
- else return (*_zoneCopy)(obj, extraBytes, z);
-}
-
-id object_dispose(id obj)
-{
- if (UseGC) return _object_dispose(obj);
- else return (*_dealloc)(obj);
-}
-
-id object_realloc(id obj, size_t nBytes)
-{
- OBJC_WARN_DEPRECATED;
- if (UseGC) return _object_realloc(obj, nBytes);
- else return (*_realloc)(obj, nBytes);
-}
-
-id object_reallocFromZone(id obj, size_t nBytes, void *z)
-{
- OBJC_WARN_DEPRECATED;
- if (UseGC) return _object_reallocFromZone(obj, nBytes, z);
- else return (*_zoneRealloc)(obj, nBytes, z);
-}
-
-
-// ProKit SPI
-Class class_setSuperclass(Class cls, Class newSuper)
-{
- Class oldSuper = cls->super_class;
- set_superclass(oldcls(cls), oldcls(newSuper), NO);
- flush_caches(cls, YES);
- return oldSuper;
-}
-#endif
--- /dev/null
+/*
+ * Copyright (c) 1999-2009 Apple Inc. All Rights Reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+/***********************************************************************
+* objc-class-old.mm
+* Support for old-ABI classes, methods, and categories.
+**********************************************************************/
+
+#if !__OBJC2__
+
+#include "objc-private.h"
+#include "objc-runtime-old.h"
+#include "objc-file-old.h"
+#include "objc-cache-old.h"
+
+static Method _class_getMethod(Class cls, SEL sel);
+static Method _class_getMethodNoSuper(Class cls, SEL sel);
+static Method _class_getMethodNoSuper_nolock(Class cls, SEL sel);
+static void flush_caches(Class cls, bool flush_meta);
+
+
+// Freed objects have their isa set to point to this dummy class.
+// This avoids the need to check for Nil classes in the messenger.
+static const void* freedObjectClass[12] =
+{
+ Nil, // isa
+ Nil, // superclass
+ "FREED(id)", // name
+ 0, // version
+ 0, // info
+ 0, // instance_size
+ nil, // ivars
+ nil, // methodLists
+ (Cache) &_objc_empty_cache, // cache
+ nil, // protocols
+ nil, // ivar_layout;
+ nil // ext
+};
+
+
+/***********************************************************************
+* _class_getFreedObjectClass. Return a pointer to the dummy freed
+* object class. Freed objects get their isa pointers replaced with
+* a pointer to the freedObjectClass, so that we can catch usages of
+* the freed object.
+**********************************************************************/
+static Class _class_getFreedObjectClass(void)
+{
+ return (Class)freedObjectClass;
+}
+
+
+/***********************************************************************
+* _objc_getFreedObjectClass. Return a pointer to the dummy freed
+* object class. Freed objects get their isa pointers replaced with
+* a pointer to the freedObjectClass, so that we can catch usages of
+* the freed object.
+**********************************************************************/
+Class _objc_getFreedObjectClass(void)
+{
+ return _class_getFreedObjectClass();
+}
+
+
+static void allocateExt(Class cls)
+{
+ if (! (cls->info & CLS_EXT)) {
+ _objc_inform("class '%s' needs to be recompiled", cls->name);
+ return;
+ }
+ if (!cls->ext) {
+ uint32_t size = (uint32_t)sizeof(old_class_ext);
+ cls->ext = (old_class_ext *)_calloc_internal(size, 1);
+ cls->ext->size = size;
+ }
+}
+
+
+static inline old_method *_findNamedMethodInList(old_method_list * mlist, const char *meth_name) {
+ int i;
+ if (!mlist) return nil;
+ if (ignoreSelectorNamed(meth_name)) return nil;
+ for (i = 0; i < mlist->method_count; i++) {
+ old_method *m = &mlist->method_list[i];
+ if (0 == strcmp((const char *)(m->method_name), meth_name)) {
+ return m;
+ }
+ }
+ return nil;
+}
+
+
+/***********************************************************************
+* Method list fixup markers.
+* mlist->obsolete == fixed_up_method_list marks method lists with real SELs
+* versus method lists with un-uniqued char*.
+* PREOPTIMIZED VERSION:
+* Fixed-up method lists get mlist->obsolete == OBJC_FIXED_UP
+* dyld shared cache sets this for method lists it preoptimizes.
+* UN-PREOPTIMIZED VERSION
+* Fixed-up method lists get mlist->obsolete == OBJC_FIXED_UP_outside_dyld
+* dyld shared cache uses OBJC_FIXED_UP, but those aren't trusted.
+**********************************************************************/
+#define OBJC_FIXED_UP ((void *)1771)
+#define OBJC_FIXED_UP_outside_dyld ((void *)1773)
+static void *fixed_up_method_list = OBJC_FIXED_UP;
+
+// sel_init() decided that selectors in the dyld shared cache are untrustworthy
+void disableSharedCacheOptimizations(void)
+{
+ fixed_up_method_list = OBJC_FIXED_UP_outside_dyld;
+}
+
+/***********************************************************************
+* fixupSelectorsInMethodList
+* Uniques selectors in the given method list.
+* Also replaces imps for GC-ignored selectors
+* The given method list must be non-nil and not already fixed-up.
+* If the class was loaded from a bundle:
+* fixes up the given list in place with heap-allocated selector strings
+* If the class was not from a bundle:
+* allocates a copy of the method list, fixes up the copy, and returns
+* the copy. The given list is unmodified.
+*
+* If cls is already in use, methodListLock must be held by the caller.
+**********************************************************************/
+static old_method_list *fixupSelectorsInMethodList(Class cls, old_method_list *mlist)
+{
+ int i;
+ size_t size;
+ old_method *method;
+ old_method_list *old_mlist;
+
+ if ( ! mlist ) return nil;
+ if ( mlist->obsolete == fixed_up_method_list ) {
+ // method list OK
+ } else {
+ BOOL isBundle = (cls->info & CLS_FROM_BUNDLE) ? YES : NO;
+ if (!isBundle) {
+ old_mlist = mlist;
+ size = sizeof(old_method_list) - sizeof(old_method) + old_mlist->method_count * sizeof(old_method);
+ mlist = (old_method_list *)_malloc_internal(size);
+ memmove(mlist, old_mlist, size);
+ } else {
+ // Mach-O bundles are fixed up in place.
+ // This prevents leaks when a bundle is unloaded.
+ }
+ sel_lock();
+ for ( i = 0; i < mlist->method_count; i += 1 ) {
+ method = &mlist->method_list[i];
+ method->method_name =
+ sel_registerNameNoLock((const char *)method->method_name, isBundle); // Always copy selector data from bundles.
+
+ if (ignoreSelector(method->method_name)) {
+ method->method_imp = (IMP)&_objc_ignored_method;
+ }
+ }
+ sel_unlock();
+ mlist->obsolete = fixed_up_method_list;
+ }
+ return mlist;
+}
+
+
+/***********************************************************************
+* nextMethodList
+* Returns successive method lists from the given class.
+* Method lists are returned in method search order (i.e. highest-priority
+* implementations first).
+* All necessary method list fixups are performed, so the
+* returned method list is fully-constructed.
+*
+* If cls is already in use, methodListLock must be held by the caller.
+* For full thread-safety, methodListLock must be continuously held by the
+* caller across all calls to nextMethodList(). If the lock is released,
+* the bad results listed in class_nextMethodList() may occur.
+*
+* void *iterator = nil;
+* old_method_list *mlist;
+* mutex_lock(&methodListLock);
+* while ((mlist = nextMethodList(cls, &iterator))) {
+* // do something with mlist
+* }
+* mutex_unlock(&methodListLock);
+**********************************************************************/
+static old_method_list *nextMethodList(Class cls,
+ void **it)
+{
+ uintptr_t index = *(uintptr_t *)it;
+ old_method_list **resultp;
+
+ if (index == 0) {
+ // First call to nextMethodList.
+ if (!cls->methodLists) {
+ resultp = nil;
+ } else if (cls->info & CLS_NO_METHOD_ARRAY) {
+ resultp = (old_method_list **)&cls->methodLists;
+ } else {
+ resultp = &cls->methodLists[0];
+ if (!*resultp || *resultp == END_OF_METHODS_LIST) {
+ resultp = nil;
+ }
+ }
+ } else {
+ // Subsequent call to nextMethodList.
+ if (!cls->methodLists) {
+ resultp = nil;
+ } else if (cls->info & CLS_NO_METHOD_ARRAY) {
+ resultp = nil;
+ } else {
+ resultp = &cls->methodLists[index];
+ if (!*resultp || *resultp == END_OF_METHODS_LIST) {
+ resultp = nil;
+ }
+ }
+ }
+
+ // resultp now is nil, meaning there are no more method lists,
+ // OR the address of the method list pointer to fix up and return.
+
+ if (resultp) {
+ if (*resultp) {
+ *resultp = fixupSelectorsInMethodList(cls, *resultp);
+ }
+ *it = (void *)(index + 1);
+ return *resultp;
+ } else {
+ *it = 0;
+ return nil;
+ }
+}
+
+
+/* These next three functions are the heart of ObjC method lookup.
+ * If the class is currently in use, methodListLock must be held by the caller.
+ */
+static inline old_method *_findMethodInList(old_method_list * mlist, SEL sel) {
+ int i;
+ if (!mlist) return nil;
+ for (i = 0; i < mlist->method_count; i++) {
+ old_method *m = &mlist->method_list[i];
+ if (m->method_name == sel) {
+ return m;
+ }
+ }
+ return nil;
+}
+
+static inline old_method * _findMethodInClass(Class cls, SEL sel) __attribute__((always_inline));
+static inline old_method * _findMethodInClass(Class cls, SEL sel) {
+ // Flattened version of nextMethodList(). The optimizer doesn't
+ // do a good job with hoisting the conditionals out of the loop.
+ // Conceptually, this looks like:
+ // while ((mlist = nextMethodList(cls, &iterator))) {
+ // old_method *m = _findMethodInList(mlist, sel);
+ // if (m) return m;
+ // }
+
+ if (!cls->methodLists) {
+ // No method lists.
+ return nil;
+ }
+ else if (cls->info & CLS_NO_METHOD_ARRAY) {
+ // One method list.
+ old_method_list **mlistp;
+ mlistp = (old_method_list **)&cls->methodLists;
+ *mlistp = fixupSelectorsInMethodList(cls, *mlistp);
+ return _findMethodInList(*mlistp, sel);
+ }
+ else {
+ // Multiple method lists.
+ old_method_list **mlistp;
+ for (mlistp = cls->methodLists;
+ *mlistp != nil && *mlistp != END_OF_METHODS_LIST;
+ mlistp++)
+ {
+ old_method *m;
+ *mlistp = fixupSelectorsInMethodList(cls, *mlistp);
+ m = _findMethodInList(*mlistp, sel);
+ if (m) return m;
+ }
+ return nil;
+ }
+}
+
+static inline old_method * _getMethod(Class cls, SEL sel) {
+ for (; cls; cls = cls->superclass) {
+ old_method *m;
+ m = _findMethodInClass(cls, sel);
+ if (m) return m;
+ }
+ return nil;
+}
+
+
+// fixme for gc debugging temporary use
+IMP findIMPInClass(Class cls, SEL sel)
+{
+ old_method *m = _findMethodInClass(cls, sel);
+ if (m) return m->method_imp;
+ else return nil;
+}
+
+
+/***********************************************************************
+* _freedHandler.
+**********************************************************************/
+static void _freedHandler(id obj, SEL sel)
+{
+ __objc_error (obj, "message %s sent to freed object=%p",
+ sel_getName(sel), (void*)obj);
+}
+
+
+/***********************************************************************
+* log_and_fill_cache
+* Log this method call. If the logger permits it, fill the method cache.
+* cls is the class whose cache should be filled.
+* implementer is the class that owns the implementation in question.
+**********************************************************************/
+static void
+log_and_fill_cache(Class cls, Class implementer, Method meth, SEL sel)
+{
+#if SUPPORT_MESSAGE_LOGGING
+ if (objcMsgLogEnabled) {
+ bool cacheIt = logMessageSend(implementer->isMetaClass(),
+ cls->getName(),
+ implementer->getName(),
+ sel);
+ if (!cacheIt) return;
+ }
+#endif
+ _cache_fill (cls, meth, sel);
+}
+
+
+/***********************************************************************
+* _class_lookupMethodAndLoadCache.
+* Method lookup for dispatchers ONLY. OTHER CODE SHOULD USE lookUpImpOrForward() or lookUpImpOrNil().
+* This lookup avoids optimistic cache scan because the dispatcher
+* already tried that.
+**********************************************************************/
+IMP _class_lookupMethodAndLoadCache3(id obj, SEL sel, Class cls)
+{
+ return lookUpImpOrForward(cls, sel, obj,
+ YES/*initialize*/, NO/*cache*/, YES/*resolver*/);
+}
+
+
+/***********************************************************************
+* lookUpImpOrForward.
+* The standard IMP lookup.
+* initialize==NO tries to avoid +initialize (but sometimes fails)
+* cache==NO skips optimistic unlocked lookup (but uses cache elsewhere)
+* Most callers should use initialize==YES and cache==YES.
+* inst is an instance of cls or a subclass thereof, or nil if none is known.
+* If cls is an un-initialized metaclass then a non-nil inst is faster.
+* May return _objc_msgForward_impcache. IMPs destined for external use
+* must be converted to _objc_msgForward or _objc_msgForward_stret.
+* If you don't want forwarding at all, use lookUpImpOrNil() instead.
+**********************************************************************/
+IMP lookUpImpOrForward(Class cls, SEL sel, id inst,
+ bool initialize, bool cache, bool resolver)
+{
+ Class curClass;
+ IMP methodPC = nil;
+ Method meth;
+ bool triedResolver = NO;
+
+ mutex_assert_unlocked(&methodListLock);
+
+ // Optimistic cache lookup
+ if (cache) {
+ methodPC = _cache_getImp(cls, sel);
+ if (methodPC) return methodPC;
+ }
+
+ // Check for freed class
+ if (cls == _class_getFreedObjectClass())
+ return (IMP) _freedHandler;
+
+ // Check for +initialize
+ if (initialize && !cls->isInitialized()) {
+ _class_initialize (_class_getNonMetaClass(cls, inst));
+ // If sel == initialize, _class_initialize will send +initialize and
+ // then the messenger will send +initialize again after this
+ // procedure finishes. Of course, if this is not being called
+ // from the messenger then it won't happen. 2778172
+ }
+
+ // The lock is held to make method-lookup + cache-fill atomic
+ // with respect to method addition. Otherwise, a category could
+ // be added but ignored indefinitely because the cache was re-filled
+ // with the old value after the cache flush on behalf of the category.
+ retry:
+ mutex_lock(&methodListLock);
+
+ // Ignore GC selectors
+ if (ignoreSelector(sel)) {
+ methodPC = _cache_addIgnoredEntry(cls, sel);
+ goto done;
+ }
+
+ // Try this class's cache.
+
+ methodPC = _cache_getImp(cls, sel);
+ if (methodPC) goto done;
+
+ // Try this class's method lists.
+
+ meth = _class_getMethodNoSuper_nolock(cls, sel);
+ if (meth) {
+ log_and_fill_cache(cls, cls, meth, sel);
+ methodPC = method_getImplementation(meth);
+ goto done;
+ }
+
+ // Try superclass caches and method lists.
+
+ curClass = cls;
+ while ((curClass = curClass->superclass)) {
+ // Superclass cache.
+ meth = _cache_getMethod(curClass, sel, _objc_msgForward_impcache);
+ if (meth) {
+ if (meth != (Method)1) {
+ // Found the method in a superclass. Cache it in this class.
+ log_and_fill_cache(cls, curClass, meth, sel);
+ methodPC = method_getImplementation(meth);
+ goto done;
+ }
+ else {
+ // Found a forward:: entry in a superclass.
+ // Stop searching, but don't cache yet; call method
+ // resolver for this class first.
+ break;
+ }
+ }
+
+ // Superclass method list.
+ meth = _class_getMethodNoSuper_nolock(curClass, sel);
+ if (meth) {
+ log_and_fill_cache(cls, curClass, meth, sel);
+ methodPC = method_getImplementation(meth);
+ goto done;
+ }
+ }
+
+ // No implementation found. Try method resolver once.
+
+ if (resolver && !triedResolver) {
+ mutex_unlock(&methodListLock);
+ _class_resolveMethod(cls, sel, inst);
+ triedResolver = YES;
+ goto retry;
+ }
+
+ // No implementation found, and method resolver didn't help.
+ // Use forwarding.
+
+ _cache_addForwardEntry(cls, sel);
+ methodPC = _objc_msgForward_impcache;
+
+ done:
+ mutex_unlock(&methodListLock);
+
+ // paranoia: look for ignored selectors with non-ignored implementations
+ assert(!(ignoreSelector(sel) && methodPC != (IMP)&_objc_ignored_method));
+
+ return methodPC;
+}
+
+
+/***********************************************************************
+* lookUpImpOrNil.
+* Like lookUpImpOrForward, but returns nil instead of _objc_msgForward_impcache
+**********************************************************************/
+IMP lookUpImpOrNil(Class cls, SEL sel, id inst,
+ bool initialize, bool cache, bool resolver)
+{
+ IMP imp = lookUpImpOrForward(cls, sel, inst, initialize, cache, resolver);
+ if (imp == _objc_msgForward_impcache) return nil;
+ else return imp;
+}
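+
+// Illustrative usage sketch (hypothetical helper, not runtime code): a caller
+// can use lookUpImpOrNil() to test whether a class implements a selector
+// without ever seeing the forwarding IMP.
+//
+//     static bool classHasImp(Class cls, SEL sel) {
+//         // initialize==NO avoids triggering +initialize;
+//         // resolver==YES still lets +resolveInstanceMethod: supply an IMP.
+//         return lookUpImpOrNil(cls, sel, nil,
+//                               NO/*initialize*/, YES/*cache*/, YES/*resolver*/) != nil;
+//     }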
+
+
+/***********************************************************************
+* lookupMethodInClassAndLoadCache.
+* Like _class_lookupMethodAndLoadCache, but does not search superclasses.
+* Caches and returns objc_msgForward if the method is not found in the class.
+**********************************************************************/
+IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel)
+{
+ Method meth;
+ IMP imp;
+
+ // fixme this still has the method list vs method cache race
+ // because it doesn't hold a lock across lookup+cache_fill,
+ // but it's only used for .cxx_construct/destruct and we assume
+ // categories don't change them.
+
+ // Search cache first.
+ imp = _cache_getImp(cls, sel);
+ if (imp) return imp;
+
+ // Cache miss. Search method list.
+
+ meth = _class_getMethodNoSuper(cls, sel);
+
+ if (meth) {
+ // Hit in method list. Cache it.
+ _cache_fill(cls, meth, sel);
+ return method_getImplementation(meth);
+ } else {
+ // Miss in method list. Cache objc_msgForward.
+ _cache_addForwardEntry(cls, sel);
+ return _objc_msgForward_impcache;
+ }
+}
+
+
+/***********************************************************************
+* class_getVariable. Return the named instance variable.
+**********************************************************************/
+
+Ivar _class_getVariable(Class cls, const char *name, Class *memberOf)
+{
+ for (; cls != Nil; cls = cls->superclass) {
+ int i;
+
+        // Skip classes that have no ivars
+ if (!cls->ivars) continue;
+
+ for (i = 0; i < cls->ivars->ivar_count; i++) {
+ // Check this ivar's name. Be careful because the
+ // compiler generates ivar entries with nil ivar_name
+ // (e.g. for anonymous bit fields).
+ old_ivar *ivar = &cls->ivars->ivar_list[i];
+ if (ivar->ivar_name && 0 == strcmp(name, ivar->ivar_name)) {
+ if (memberOf) *memberOf = cls;
+ return (Ivar)ivar;
+ }
+ }
+ }
+
+ // Not found
+ return nil;
+}
+
+
+old_property *
+property_list_nth(const old_property_list *plist, uint32_t i)
+{
+ return (old_property *)(i*plist->entsize + (char *)&plist->first);
+}
+
+old_property **
+copyPropertyList(old_property_list *plist, unsigned int *outCount)
+{
+ old_property **result = nil;
+ unsigned int count = 0;
+
+ if (plist) {
+ count = plist->count;
+ }
+
+ if (count > 0) {
+ unsigned int i;
+ result = (old_property **)malloc((count+1) * sizeof(old_property *));
+
+ for (i = 0; i < count; i++) {
+ result[i] = property_list_nth(plist, i);
+ }
+ result[i] = nil;
+ }
+
+ if (outCount) *outCount = count;
+ return result;
+}
+
+
+static old_property_list *
+nextPropertyList(Class cls, uintptr_t *indexp)
+{
+ old_property_list *result = nil;
+
+ mutex_assert_locked(&classLock);
+ if (! ((cls->info & CLS_EXT) && cls->ext)) {
+ // No class ext
+ result = nil;
+ } else if (!cls->ext->propertyLists) {
+ // No property lists
+ result = nil;
+ } else if (cls->info & CLS_NO_PROPERTY_ARRAY) {
+ // Only one property list
+ if (*indexp == 0) {
+ result = (old_property_list *)cls->ext->propertyLists;
+ } else {
+ result = nil;
+ }
+ } else {
+ // More than one property list
+ result = cls->ext->propertyLists[*indexp];
+ }
+
+ if (result) {
+ ++*indexp;
+ return result;
+ } else {
+ *indexp = 0;
+ return nil;
+ }
+}
+
+
+/***********************************************************************
+* class_getIvarLayout
+* nil means all-scanned. "" means non-scanned.
+**********************************************************************/
+const uint8_t *
+class_getIvarLayout(Class cls)
+{
+ if (cls && (cls->info & CLS_EXT)) {
+ return cls->ivar_layout;
+ } else {
+ return nil; // conservative scan
+ }
+}
+
+
+/***********************************************************************
+* class_getWeakIvarLayout
+* nil means no weak ivars.
+**********************************************************************/
+const uint8_t *
+class_getWeakIvarLayout(Class cls)
+{
+ if (cls && (cls->info & CLS_EXT) && cls->ext) {
+ return cls->ext->weak_ivar_layout;
+ } else {
+ return nil; // no weak ivars
+ }
+}
+
+
+/***********************************************************************
+* class_setIvarLayout
+* nil means all-scanned. "" means non-scanned.
+**********************************************************************/
+void class_setIvarLayout(Class cls, const uint8_t *layout)
+{
+ if (!cls) return;
+
+ if (! (cls->info & CLS_EXT)) {
+ _objc_inform("class '%s' needs to be recompiled", cls->name);
+ return;
+ }
+
+ // fixme leak
+ cls->ivar_layout = _ustrdup_internal(layout);
+}
+
+// SPI: Instance-specific object layout.
+
+void _class_setIvarLayoutAccessor(Class cls, const uint8_t* (*accessor) (id object)) {
+ if (!cls) return;
+
+ if (! (cls->info & CLS_EXT)) {
+ _objc_inform("class '%s' needs to be recompiled", cls->name);
+ return;
+ }
+
+ // fixme leak
+ cls->ivar_layout = (const uint8_t *)accessor;
+ cls->setInfo(CLS_HAS_INSTANCE_SPECIFIC_LAYOUT);
+}
+
+const uint8_t *_object_getIvarLayout(Class cls, id object) {
+ if (cls && (cls->info & CLS_EXT)) {
+ const uint8_t* layout = cls->ivar_layout;
+ if (cls->info & CLS_HAS_INSTANCE_SPECIFIC_LAYOUT) {
+ const uint8_t* (*accessor) (id object) = (const uint8_t* (*)(id))layout;
+ layout = accessor(object);
+ }
+ return layout;
+ } else {
+ return nil;
+ }
+}
+
+/***********************************************************************
+* class_setWeakIvarLayout
+* nil means no weak ivars.
+**********************************************************************/
+void class_setWeakIvarLayout(Class cls, const uint8_t *layout)
+{
+ if (!cls) return;
+
+ mutex_lock(&classLock);
+
+ allocateExt(cls);
+
+ // fixme leak
+ cls->ext->weak_ivar_layout = _ustrdup_internal(layout);
+
+ mutex_unlock(&classLock);
+}
+
+
+/***********************************************************************
+* class_setVersion. Record the specified version with the class.
+**********************************************************************/
+void class_setVersion(Class cls, int version)
+{
+ if (!cls) return;
+ cls->version = version;
+}
+
+/***********************************************************************
+* class_getVersion. Return the version recorded with the class.
+**********************************************************************/
+int class_getVersion(Class cls)
+{
+ if (!cls) return 0;
+ return (int)cls->version;
+}
+
+
+/***********************************************************************
+* _class_getNonMetaClass.
+* Return the ordinary class for this class or metaclass.
+* Used by +initialize.
+**********************************************************************/
+Class _class_getNonMetaClass(Class cls, id obj __unused)
+{
+ // fixme ick
+ if (cls->isMetaClass()) {
+ if (strncmp(cls->name, "_%", 2) == 0) {
+ // Posee's meta's name is smashed and isn't in the class_hash,
+ // so objc_getClass doesn't work.
+ const char *baseName = strchr(cls->name, '%'); // get posee's real name
+ cls = objc_getClass(baseName);
+ } else {
+ cls = objc_getClass(cls->name);
+ }
+ assert(cls);
+ }
+
+ return cls;
+}
+
+
+Cache _class_getCache(Class cls)
+{
+ return cls->cache;
+}
+
+void _class_setCache(Class cls, Cache cache)
+{
+ cls->cache = cache;
+}
+
+const char *_category_getName(Category cat)
+{
+ return oldcategory(cat)->category_name;
+}
+
+const char *_category_getClassName(Category cat)
+{
+ return oldcategory(cat)->class_name;
+}
+
+Class _category_getClass(Category cat)
+{
+ return objc_getClass(oldcategory(cat)->class_name);
+}
+
+IMP _category_getLoadMethod(Category cat)
+{
+ old_method_list *mlist = oldcategory(cat)->class_methods;
+ if (mlist) {
+ return lookupNamedMethodInMethodList(mlist, "load");
+ } else {
+ return nil;
+ }
+}
+
+
+
+/***********************************************************************
+* class_nextMethodList.
+* External version of nextMethodList().
+*
+* This function is not fully thread-safe. A series of calls to
+* class_nextMethodList() may fail if methods are added to or removed
+* from the class between calls.
+* If methods are added between calls to class_nextMethodList(), it may
+* return previously-returned method lists again, and may fail to return
+* newly-added lists.
+* If methods are removed between calls to class_nextMethodList(), it may
+* omit surviving method lists or simply crash.
+**********************************************************************/
+OBJC_EXPORT struct objc_method_list *class_nextMethodList(Class cls, void **it)
+{
+ old_method_list *result;
+
+ OBJC_WARN_DEPRECATED;
+
+ mutex_lock(&methodListLock);
+ result = nextMethodList(cls, it);
+ mutex_unlock(&methodListLock);
+ return (struct objc_method_list *)result;
+}
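+
+// Illustrative usage sketch (hypothetical caller, not runtime code): the
+// deprecated iterator is driven by handing the same opaque cursor back in
+// until nil is returned.
+//
+//     void *iterator = nil;
+//     struct objc_method_list *mlist;
+//     while ((mlist = class_nextMethodList(cls, &iterator))) {
+//         // inspect mlist->method_count entries; see the caveats above
+//         // about methods being added or removed between calls.
+//     }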
+
+
+/***********************************************************************
+* class_addMethods.
+*
+* Formerly class_addInstanceMethods ()
+**********************************************************************/
+OBJC_EXPORT void class_addMethods(Class cls, struct objc_method_list *meths)
+{
+ OBJC_WARN_DEPRECATED;
+
+ // Add the methods.
+ mutex_lock(&methodListLock);
+ _objc_insertMethods(cls, (old_method_list *)meths, nil);
+ mutex_unlock(&methodListLock);
+
+ // Must flush when dynamically adding methods. No need to flush
+ // all the class method caches. If cls is a meta class, though,
+ // this will still flush it and any of its sub-meta classes.
+ flush_caches (cls, NO);
+}
+
+
+/***********************************************************************
+* class_removeMethods.
+**********************************************************************/
+OBJC_EXPORT void class_removeMethods(Class cls, struct objc_method_list *meths)
+{
+ OBJC_WARN_DEPRECATED;
+
+ // Remove the methods
+ mutex_lock(&methodListLock);
+ _objc_removeMethods(cls, (old_method_list *)meths);
+ mutex_unlock(&methodListLock);
+
+ // Must flush when dynamically removing methods. No need to flush
+ // all the class method caches. If cls is a meta class, though,
+ // this will still flush it and any of its sub-meta classes.
+ flush_caches (cls, NO);
+}
+
+/***********************************************************************
+* lookupNamedMethodInMethodList
+* Only called to find +load/-.cxx_construct/-.cxx_destruct methods,
+* without fixing up the entire method list.
+* The class is not yet in use, so methodListLock is not taken.
+**********************************************************************/
+IMP lookupNamedMethodInMethodList(old_method_list *mlist, const char *meth_name)
+{
+ old_method *m;
+ m = meth_name ? _findNamedMethodInList(mlist, meth_name) : nil;
+ return (m ? m->method_imp : nil);
+}
+
+static Method _class_getMethod(Class cls, SEL sel)
+{
+ Method result;
+
+ mutex_lock(&methodListLock);
+ result = (Method)_getMethod(cls, sel);
+ mutex_unlock(&methodListLock);
+
+ return result;
+}
+
+static Method _class_getMethodNoSuper(Class cls, SEL sel)
+{
+ Method result;
+
+ mutex_lock(&methodListLock);
+ result = (Method)_findMethodInClass(cls, sel);
+ mutex_unlock(&methodListLock);
+
+ return result;
+}
+
+static Method _class_getMethodNoSuper_nolock(Class cls, SEL sel)
+{
+ mutex_assert_locked(&methodListLock);
+ return (Method)_findMethodInClass(cls, sel);
+}
+
+
+/***********************************************************************
+* class_getInstanceMethod. Return the instance method for the
+* specified class and selector.
+**********************************************************************/
+Method class_getInstanceMethod(Class cls, SEL sel)
+{
+ if (!cls || !sel) return nil;
+
+ // This deliberately avoids +initialize because it historically did so.
+
+ // This implementation is a bit weird because it's the only place that
+ // wants a Method instead of an IMP.
+
+ Method meth;
+ meth = _cache_getMethod(cls, sel, _objc_msgForward_impcache);
+ if (meth == (Method)1) {
+ // Cache contains forward:: . Stop searching.
+ return nil;
+ } else if (meth) {
+ return meth;
+ }
+
+ // Search method lists, try method resolver, etc.
+ lookUpImpOrNil(cls, sel, nil,
+ NO/*initialize*/, NO/*cache*/, YES/*resolver*/);
+
+ meth = _cache_getMethod(cls, sel, _objc_msgForward_impcache);
+ if (meth == (Method)1) {
+ // Cache contains forward:: . Stop searching.
+ return nil;
+ } else if (meth) {
+ return meth;
+ }
+
+ return _class_getMethod(cls, sel);
+}
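+
+// Illustrative usage sketch (hypothetical caller, not runtime code):
+//
+//     Method m = class_getInstanceMethod(cls, sel_registerName("description"));
+//     if (m) {
+//         IMP imp = method_getImplementation(m);
+//         const char *types = method_getTypeEncoding(m);
+//         // imp/types describe the method that would handle the message,
+//         // found in cls or one of its superclasses.
+//     }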
+
+
+BOOL class_conformsToProtocol(Class cls, Protocol *proto_gen)
+{
+ old_protocol *proto = oldprotocol(proto_gen);
+
+ if (!cls) return NO;
+ if (!proto) return NO;
+
+ if (cls->ISA()->version >= 3) {
+ old_protocol_list *list;
+ for (list = cls->protocols; list != nil; list = list->next) {
+ int i;
+ for (i = 0; i < list->count; i++) {
+ if (list->list[i] == proto) return YES;
+ if (protocol_conformsToProtocol((Protocol *)list->list[i], proto_gen)) return YES;
+ }
+ if (cls->ISA()->version <= 4) break;
+ }
+ }
+ return NO;
+}
+
+
+static NXMapTable * posed_class_hash = nil;
+
+/***********************************************************************
+* objc_getOrigClass.
+**********************************************************************/
+extern "C"
+Class _objc_getOrigClass(const char *name)
+{
+ Class ret;
+
+ // Look for class among the posers
+ ret = Nil;
+ mutex_lock(&classLock);
+ if (posed_class_hash)
+ ret = (Class) NXMapGet (posed_class_hash, name);
+ mutex_unlock(&classLock);
+ if (ret)
+ return ret;
+
+ // Not a poser. Do a normal lookup.
+ ret = objc_getClass (name);
+ if (!ret)
+ _objc_inform ("class `%s' not linked into application", name);
+
+ return ret;
+}
+
+Class objc_getOrigClass(const char *name)
+{
+ OBJC_WARN_DEPRECATED;
+ return _objc_getOrigClass(name);
+}
+
+/***********************************************************************
+* _objc_addOrigClass. This function is only used from class_poseAs.
+* Registers the original class names, before they get obscured by
+* posing, so that [super ..] will work correctly from categories
+* in posing classes and in categories in classes being posed for.
+**********************************************************************/
+static void _objc_addOrigClass (Class origClass)
+{
+ mutex_lock(&classLock);
+
+ // Create the poser's hash table on first use
+ if (!posed_class_hash)
+ {
+ posed_class_hash = NXCreateMapTableFromZone (NXStrValueMapPrototype,
+ 8,
+ _objc_internal_zone ());
+ }
+
+ // Add the named class iff it is not already there (or collides?)
+ if (NXMapGet (posed_class_hash, origClass->name) == 0)
+ NXMapInsert (posed_class_hash, origClass->name, origClass);
+
+ mutex_unlock(&classLock);
+}
+
+
+/***********************************************************************
+* change_class_references
+* Change classrefs and superclass pointers from original to imposter
+* But if copy!=nil, don't change copy->superclass.
+* If changeSuperRefs==YES, also change [super message] classrefs.
+* Used by class_poseAs and objc_setFutureClass
+* classLock must be locked.
+**********************************************************************/
+void change_class_references(Class imposter,
+ Class original,
+ Class copy,
+ BOOL changeSuperRefs)
+{
+ header_info *hInfo;
+ Class clsObject;
+ NXHashState state;
+
+ // Change all subclasses of the original to point to the imposter.
+ state = NXInitHashState (class_hash);
+ while (NXNextHashState (class_hash, &state, (void **) &clsObject))
+ {
+ while ((clsObject) && (clsObject != imposter) &&
+ (clsObject != copy))
+ {
+ if (clsObject->superclass == original)
+ {
+ clsObject->superclass = imposter;
+ clsObject->ISA()->superclass = imposter->ISA();
+ // We must flush caches here!
+ break;
+ }
+
+ clsObject = clsObject->superclass;
+ }
+ }
+
+ // Replace the original with the imposter in all class refs
+ // Major loop - process all headers
+ for (hInfo = FirstHeader; hInfo != nil; hInfo = hInfo->next)
+ {
+ Class *cls_refs;
+ size_t refCount;
+ unsigned int index;
+
+ // Fix class refs associated with this header
+ cls_refs = _getObjcClassRefs(hInfo, &refCount);
+ if (cls_refs) {
+ for (index = 0; index < refCount; index += 1) {
+ if (cls_refs[index] == original) {
+ cls_refs[index] = imposter;
+ }
+ }
+ }
+ }
+}
+
+
+/***********************************************************************
+* class_poseAs.
+*
+* !!! class_poseAs () does not currently flush any caches.
+**********************************************************************/
+Class class_poseAs(Class imposter, Class original)
+{
+ char * imposterNamePtr;
+ Class copy;
+
+ OBJC_WARN_DEPRECATED;
+
+ // Trivial case is easy
+ if (imposter == original)
+ return imposter;
+
+ // Imposter must be an immediate subclass of the original
+ if (imposter->superclass != original) {
+ __objc_error(imposter,
+ "[%s poseAs:%s]: target not immediate superclass",
+ imposter->name, original->name);
+ }
+
+ // Can't pose when you have instance variables (how could it work?)
+ if (imposter->ivars) {
+ __objc_error(imposter,
+ "[%s poseAs:%s]: %s defines new instance variables",
+ imposter->name, original->name, imposter->name);
+ }
+
+ // Build a string to use to replace the name of the original class.
+#if TARGET_OS_WIN32
+# define imposterNamePrefix "_%"
+ imposterNamePtr = _malloc_internal(strlen(original->name) + strlen(imposterNamePrefix) + 1);
+ strcpy(imposterNamePtr, imposterNamePrefix);
+ strcat(imposterNamePtr, original->name);
+# undef imposterNamePrefix
+#else
+ asprintf(&imposterNamePtr, "_%%%s", original->name);
+#endif
+
+ // We lock the class hashtable, so we are thread safe with respect to
+ // calls to objc_getClass (). However, the class names are not
+ // changed atomically, nor are all of the subclasses updated
+ // atomically. I have ordered the operations so that you will
+ // never crash, but you may get inconsistent results....
+
+ // Register the original class so that [super ..] knows
+ // exactly which classes are the "original" classes.
+ _objc_addOrigClass (original);
+ _objc_addOrigClass (imposter);
+
+ // Copy the imposter, so that the imposter can continue
+ // its normal life in addition to changing the behavior of
+ // the original. As a hack we don't bother to copy the metaclass.
+ // For some reason we modify the original rather than the copy.
+ copy = (Class)_malloc_internal(sizeof(objc_class));
+ memmove(copy, imposter, sizeof(objc_class));
+
+ mutex_lock(&classLock);
+
+ // Remove both the imposter and the original class.
+ NXHashRemove (class_hash, imposter);
+ NXHashRemove (class_hash, original);
+
+ NXHashInsert (class_hash, copy);
+ objc_addRegisteredClass(copy); // imposter & original will rejoin later, just track the new guy
+
+ // Mark the imposter as such
+ imposter->setInfo(CLS_POSING);
+ imposter->ISA()->setInfo(CLS_POSING);
+
+ // Change the name of the imposter to that of the original class.
+ imposter->name = original->name;
+ imposter->ISA()->name = original->ISA()->name;
+
+ // Also copy the version field to avoid archiving problems.
+ imposter->version = original->version;
+
+ // Change classrefs and superclass pointers
+ // Don't change copy->superclass
+ // Don't change [super ...] messages
+ change_class_references(imposter, original, copy, NO);
+
+ // Change the name of the original class.
+ original->name = imposterNamePtr + 1;
+ original->ISA()->name = imposterNamePtr;
+
+ // Restore the imposter and the original class with their new names.
+ NXHashInsert (class_hash, imposter);
+ NXHashInsert (class_hash, original);
+
+ mutex_unlock(&classLock);
+
+ return imposter;
+}
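+
+// Illustrative usage sketch (hypothetical MyString class, not runtime code):
+// a poser is an ivar-less immediate subclass substituted for its superclass
+// everywhere.
+//
+//     class_poseAs(objc_getClass("MyString"), objc_getClass("NSString"));
+//     // objc_getClass("NSString") now returns MyString;
+//     // _objc_getOrigClass("NSString") still finds the original class.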
+
+
+/***********************************************************************
+* _objc_flush_caches. Flush the instance and class method caches
+* of cls and all its subclasses.
+*
+* Specifying Nil for the class flushes all classes.
+**********************************************************************/
+static void flush_caches(Class target, bool flush_meta)
+{
+ bool collectALot = (target == nil);
+ NXHashState state;
+ Class clsObject;
+#ifdef OBJC_INSTRUMENTED
+ unsigned int classesVisited;
+ unsigned int subclassCount;
+#endif
+
+ mutex_lock(&classLock);
+ mutex_lock(&cacheUpdateLock);
+
+ // Leaf classes are fastest because there are no subclass caches to flush.
+ // fixme instrument
+ if (target && (target->info & CLS_LEAF)) {
+ _cache_flush (target);
+
+ if (target->ISA() && (target->ISA()->info & CLS_LEAF)) {
+ _cache_flush (target->ISA());
+ mutex_unlock(&cacheUpdateLock);
+ mutex_unlock(&classLock);
+ return; // done
+ } else {
+ // Reset target and handle it by one of the methods below.
+ target = target->ISA();
+ flush_meta = NO;
+ // NOT done
+ }
+ }
+
+ state = NXInitHashState(class_hash);
+
+ // Handle nil and root instance class specially: flush all
+ // instance and class method caches. Nice that this
+ // loop is linear vs the N-squared loop just below.
+ if (!target || !target->superclass)
+ {
+#ifdef OBJC_INSTRUMENTED
+ LinearFlushCachesCount += 1;
+ classesVisited = 0;
+ subclassCount = 0;
+#endif
+ // Traverse all classes in the hash table
+ while (NXNextHashState(class_hash, &state, (void**)&clsObject))
+ {
+ Class metaClsObject;
+#ifdef OBJC_INSTRUMENTED
+ classesVisited += 1;
+#endif
+
+            // Skip classes that are known not to be subclasses of this root
+            // (the isa pointer of any metaclass points to the metaclass
+            // of the root).
+            // NOTE: When is the isa pointer of a hash-tabled class ever nil?
+ metaClsObject = clsObject->ISA();
+ if (target && metaClsObject && target->ISA() != metaClsObject->ISA()) {
+ continue;
+ }
+
+#ifdef OBJC_INSTRUMENTED
+ subclassCount += 1;
+#endif
+
+ _cache_flush (clsObject);
+ if (flush_meta && metaClsObject != nil) {
+ _cache_flush (metaClsObject);
+ }
+ }
+#ifdef OBJC_INSTRUMENTED
+ LinearFlushCachesVisitedCount += classesVisited;
+ if (classesVisited > MaxLinearFlushCachesVisitedCount)
+ MaxLinearFlushCachesVisitedCount = classesVisited;
+ IdealFlushCachesCount += subclassCount;
+ if (subclassCount > MaxIdealFlushCachesCount)
+ MaxIdealFlushCachesCount = subclassCount;
+#endif
+
+ goto done;
+ }
+
+    // Outer loop - flush any cache that could now get a method from
+    // target (i.e. the cache associated with target and any of its subclasses).
+#ifdef OBJC_INSTRUMENTED
+ NonlinearFlushCachesCount += 1;
+ classesVisited = 0;
+ subclassCount = 0;
+#endif
+ while (NXNextHashState(class_hash, &state, (void**)&clsObject))
+ {
+ Class clsIter;
+
+#ifdef OBJC_INSTRUMENTED
+ NonlinearFlushCachesClassCount += 1;
+#endif
+
+ // Inner loop - Process a given class
+ clsIter = clsObject;
+ while (clsIter)
+ {
+
+#ifdef OBJC_INSTRUMENTED
+ classesVisited += 1;
+#endif
+            // Flush clsObject's instance method cache if
+            // clsObject is a subclass of target, or is target itself.
+            // Flush the class method cache too if that was asked for.
+ if (clsIter == target)
+ {
+#ifdef OBJC_INSTRUMENTED
+ subclassCount += 1;
+#endif
+ _cache_flush (clsObject);
+ if (flush_meta)
+ _cache_flush (clsObject->ISA());
+
+ break;
+
+ }
+
+            // Flush clsObject's class method cache if target is
+            // the metaclass of clsObject or of one
+            // of clsObject's superclasses.
+ else if (clsIter->ISA() == target)
+ {
+#ifdef OBJC_INSTRUMENTED
+ subclassCount += 1;
+#endif
+ _cache_flush (clsObject->ISA());
+ break;
+ }
+
+ // Move up superclass chain
+ // else if (clsIter->isInitialized())
+ clsIter = clsIter->superclass;
+
+ // clsIter is not initialized, so its cache
+ // must be empty. This happens only when
+ // clsIter == clsObject, because
+ // superclasses are initialized before
+ // subclasses, and this loop traverses
+ // from sub- to super- classes.
+ // else
+ // break;
+ }
+ }
+#ifdef OBJC_INSTRUMENTED
+ NonlinearFlushCachesVisitedCount += classesVisited;
+ if (classesVisited > MaxNonlinearFlushCachesVisitedCount)
+ MaxNonlinearFlushCachesVisitedCount = classesVisited;
+ IdealFlushCachesCount += subclassCount;
+ if (subclassCount > MaxIdealFlushCachesCount)
+ MaxIdealFlushCachesCount = subclassCount;
+#endif
+
+
+ done:
+ if (collectALot) {
+ _cache_collect(true);
+ }
+
+ mutex_unlock(&cacheUpdateLock);
+ mutex_unlock(&classLock);
+}
+
+
+void _objc_flush_caches(Class target)
+{
+ flush_caches(target, YES);
+}
+
+
+
+/***********************************************************************
+* flush_marked_caches. Flush the method cache of any class marked
+* CLS_FLUSH_CACHE (and all subclasses thereof)
+* fixme instrument
+**********************************************************************/
+void flush_marked_caches(void)
+{
+ Class cls;
+ Class supercls;
+ NXHashState state;
+
+ mutex_lock(&classLock);
+ mutex_lock(&cacheUpdateLock);
+
+ state = NXInitHashState(class_hash);
+ while (NXNextHashState(class_hash, &state, (void**)&cls)) {
+ for (supercls = cls; supercls; supercls = supercls->superclass) {
+ if (supercls->info & CLS_FLUSH_CACHE) {
+ _cache_flush(cls);
+ break;
+ }
+ }
+
+ for (supercls = cls->ISA(); supercls; supercls = supercls->superclass) {
+ if (supercls->info & CLS_FLUSH_CACHE) {
+ _cache_flush(cls->ISA());
+ break;
+ }
+ }
+ }
+
+ state = NXInitHashState(class_hash);
+ while (NXNextHashState(class_hash, &state, (void**)&cls)) {
+ if (cls->info & CLS_FLUSH_CACHE) {
+ cls->clearInfo(CLS_FLUSH_CACHE);
+ }
+ if (cls->ISA()->info & CLS_FLUSH_CACHE) {
+ cls->ISA()->clearInfo(CLS_FLUSH_CACHE);
+ }
+ }
+
+ mutex_unlock(&cacheUpdateLock);
+ mutex_unlock(&classLock);
+}
+
+
+/***********************************************************************
+* get_base_method_list
+* Returns the method list containing the class's own methods,
+* ignoring any method lists added by categories or class_addMethods.
+* Called only by add_class_to_loadable_list.
+* Does not hold methodListLock because add_class_to_loadable_list
+* does not manipulate in-use classes.
+**********************************************************************/
+static old_method_list *get_base_method_list(Class cls)
+{
+ old_method_list **ptr;
+
+ if (!cls->methodLists) return nil;
+ if (cls->info & CLS_NO_METHOD_ARRAY) return (old_method_list *)cls->methodLists;
+ ptr = cls->methodLists;
+ if (!*ptr || *ptr == END_OF_METHODS_LIST) return nil;
+ while ( *ptr != 0 && *ptr != END_OF_METHODS_LIST ) { ptr++; }
+ --ptr;
+ return *ptr;
+}
+
+
+static IMP _class_getLoadMethod_nocheck(Class cls)
+{
+ old_method_list *mlist;
+ mlist = get_base_method_list(cls->ISA());
+ if (mlist) {
+ return lookupNamedMethodInMethodList (mlist, "load");
+ }
+ return nil;
+}
+
+
+BOOL _class_hasLoadMethod(Class cls)
+{
+ if (cls->ISA()->info & CLS_HAS_LOAD_METHOD) return YES;
+ return (_class_getLoadMethod_nocheck(cls) ? YES : NO);
+}
+
+
+/***********************************************************************
+* objc_class::getLoadMethod
+* Returns cls's +load implementation, or nil if it doesn't have one.
+**********************************************************************/
+IMP objc_class::getLoadMethod()
+{
+ if (ISA()->info & CLS_HAS_LOAD_METHOD) {
+ return _class_getLoadMethod_nocheck((Class)this);
+ }
+ return nil;
+}
+
+BOOL _class_usesAutomaticRetainRelease(Class cls)
+{
+ return NO;
+}
+
+uint32_t _class_getInstanceStart(Class cls)
+{
+ _objc_fatal("_class_getInstanceStart() unimplemented for fragile instance variables");
+    return 0; // PCB: never used, just provided for ARR consistency.
+}
+
+ptrdiff_t ivar_getOffset(Ivar ivar)
+{
+ return oldivar(ivar)->ivar_offset;
+}
+
+const char *ivar_getName(Ivar ivar)
+{
+ return oldivar(ivar)->ivar_name;
+}
+
+const char *ivar_getTypeEncoding(Ivar ivar)
+{
+ return oldivar(ivar)->ivar_type;
+}
+
+
+IMP method_getImplementation(Method m)
+{
+ if (!m) return nil;
+ return oldmethod(m)->method_imp;
+}
+
+SEL method_getName(Method m)
+{
+ if (!m) return nil;
+ return oldmethod(m)->method_name;
+}
+
+const char *method_getTypeEncoding(Method m)
+{
+ if (!m) return nil;
+ return oldmethod(m)->method_types;
+}
+
+unsigned int method_getSizeOfArguments(Method m)
+{
+ OBJC_WARN_DEPRECATED;
+ if (!m) return 0;
+ return encoding_getSizeOfArguments(method_getTypeEncoding(m));
+}
+
+unsigned int method_getArgumentInfo(Method m, int arg,
+ const char **type, int *offset)
+{
+ OBJC_WARN_DEPRECATED;
+ if (!m) return 0;
+ return encoding_getArgumentInfo(method_getTypeEncoding(m),
+ arg, type, offset);
+}
+
+
+static spinlock_t impLock = SPINLOCK_INITIALIZER;
+
+IMP method_setImplementation(Method m_gen, IMP imp)
+{
+ IMP old;
+ old_method *m = oldmethod(m_gen);
+ if (!m) return nil;
+ if (!imp) return nil;
+
+ if (ignoreSelector(m->method_name)) {
+ // Ignored methods stay ignored
+ return m->method_imp;
+ }
+
+ spinlock_lock(&impLock);
+ old = m->method_imp;
+ m->method_imp = imp;
+ spinlock_unlock(&impLock);
+ return old;
+}
+
+
+void method_exchangeImplementations(Method m1_gen, Method m2_gen)
+{
+ IMP m1_imp;
+ old_method *m1 = oldmethod(m1_gen);
+ old_method *m2 = oldmethod(m2_gen);
+ if (!m1 || !m2) return;
+
+ if (ignoreSelector(m1->method_name) || ignoreSelector(m2->method_name)) {
+ // Ignored methods stay ignored. Now they're both ignored.
+ m1->method_imp = (IMP)&_objc_ignored_method;
+ m2->method_imp = (IMP)&_objc_ignored_method;
+ return;
+ }
+
+ spinlock_lock(&impLock);
+ m1_imp = m1->method_imp;
+ m1->method_imp = m2->method_imp;
+ m2->method_imp = m1_imp;
+ spinlock_unlock(&impLock);
+}
+
+
+struct objc_method_description * method_getDescription(Method m)
+{
+ if (!m) return nil;
+ return (struct objc_method_description *)oldmethod(m);
+}
+
+
+const char *property_getName(objc_property_t prop)
+{
+ return oldproperty(prop)->name;
+}
+
+const char *property_getAttributes(objc_property_t prop)
+{
+ return oldproperty(prop)->attributes;
+}
+
+objc_property_attribute_t *property_copyAttributeList(objc_property_t prop,
+ unsigned int *outCount)
+{
+ if (!prop) {
+ if (outCount) *outCount = 0;
+ return nil;
+ }
+
+ objc_property_attribute_t *result;
+ mutex_lock(&classLock);
+ result = copyPropertyAttributeList(oldproperty(prop)->attributes,outCount);
+ mutex_unlock(&classLock);
+ return result;
+}
+
+char * property_copyAttributeValue(objc_property_t prop, const char *name)
+{
+ if (!prop || !name || *name == '\0') return nil;
+
+ char *result;
+ mutex_lock(&classLock);
+ result = copyPropertyAttributeValue(oldproperty(prop)->attributes, name);
+ mutex_unlock(&classLock);
+ return result;
+}
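+
+// Illustrative usage sketch (hypothetical property named "title", not
+// runtime code): attribute values are returned as malloc'd strings that the
+// caller frees.
+//
+//     objc_property_t prop = class_getProperty(cls, "title");
+//     char *type = property_copyAttributeValue(prop, "T");   // type encoding
+//     char *getter = property_copyAttributeValue(prop, "G"); // custom getter, if any
+//     free(type);
+//     free(getter);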
+
+
+/***********************************************************************
+* class_addMethod
+**********************************************************************/
+static IMP _class_addMethod(Class cls, SEL name, IMP imp,
+ const char *types, BOOL replace)
+{
+ old_method *m;
+ IMP result = nil;
+
+ if (!types) types = "";
+
+ mutex_lock(&methodListLock);
+
+ if ((m = _findMethodInClass(cls, name))) {
+ // already exists
+ // fixme atomic
+ result = method_getImplementation((Method)m);
+ if (replace) {
+ method_setImplementation((Method)m, imp);
+ }
+ } else {
+ // fixme could be faster
+ old_method_list *mlist =
+ (old_method_list *)_calloc_internal(sizeof(old_method_list), 1);
+ mlist->obsolete = fixed_up_method_list;
+ mlist->method_count = 1;
+ mlist->method_list[0].method_name = name;
+ mlist->method_list[0].method_types = _strdup_internal(types);
+ if (!ignoreSelector(name)) {
+ mlist->method_list[0].method_imp = imp;
+ } else {
+ mlist->method_list[0].method_imp = (IMP)&_objc_ignored_method;
+ }
+
+ _objc_insertMethods(cls, mlist, nil);
+ if (!(cls->info & CLS_CONSTRUCTING)) {
+ flush_caches(cls, NO);
+ } else {
+ // in-construction class has no subclasses
+ flush_cache(cls);
+ }
+ result = nil;
+ }
+
+ mutex_unlock(&methodListLock);
+
+ return result;
+}
+
+
+/***********************************************************************
+* class_addMethod
+**********************************************************************/
+BOOL class_addMethod(Class cls, SEL name, IMP imp, const char *types)
+{
+ IMP old;
+ if (!cls) return NO;
+
+ old = _class_addMethod(cls, name, imp, types, NO);
+ return old ? NO : YES;
+}
+
+
+/***********************************************************************
+* class_replaceMethod
+**********************************************************************/
+IMP class_replaceMethod(Class cls, SEL name, IMP imp, const char *types)
+{
+ if (!cls) return nil;
+
+ return _class_addMethod(cls, name, imp, types, YES);
+}
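+
+// Illustrative usage sketch (hypothetical myDescription IMP, not runtime code):
+//
+//     static id myDescription(id self, SEL _cmd) { return nil; }
+//
+//     if (!class_addMethod(cls, sel_registerName("description"),
+//                          (IMP)myDescription, "@@:")) {
+//         // cls already implements it locally; overwrite instead
+//         class_replaceMethod(cls, sel_registerName("description"),
+//                             (IMP)myDescription, "@@:");
+//     }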
+
+
+/***********************************************************************
+* class_addIvar
+**********************************************************************/
+BOOL class_addIvar(Class cls, const char *name, size_t size,
+ uint8_t alignment, const char *type)
+{
+ BOOL result = YES;
+
+ if (!cls) return NO;
+ if (ISMETA(cls)) return NO;
+ if (!(cls->info & CLS_CONSTRUCTING)) return NO;
+
+ if (!type) type = "";
+ if (name && 0 == strcmp(name, "")) name = nil;
+
+ mutex_lock(&classLock);
+
+ // Check for existing ivar with this name
+ // fixme check superclasses?
+ if (cls->ivars) {
+ int i;
+ for (i = 0; i < cls->ivars->ivar_count; i++) {
+ if (0 == strcmp(cls->ivars->ivar_list[i].ivar_name, name)) {
+ result = NO;
+ break;
+ }
+ }
+ }
+
+ if (result) {
+ old_ivar_list *old = cls->ivars;
+ size_t oldSize;
+ int newCount;
+ old_ivar *ivar;
+ size_t alignBytes;
+ size_t misalign;
+
+ if (old) {
+ oldSize = sizeof(old_ivar_list) +
+ (old->ivar_count - 1) * sizeof(old_ivar);
+ newCount = 1 + old->ivar_count;
+ } else {
+ oldSize = sizeof(old_ivar_list) - sizeof(old_ivar);
+ newCount = 1;
+ }
+
+ // allocate new ivar list
+ cls->ivars = (old_ivar_list *)
+ _calloc_internal(oldSize+sizeof(old_ivar), 1);
+ if (old) memcpy(cls->ivars, old, oldSize);
+ if (old && malloc_size(old)) free(old);
+ cls->ivars->ivar_count = newCount;
+ ivar = &cls->ivars->ivar_list[newCount-1];
+
+ // set ivar name and type
+ ivar->ivar_name = _strdup_internal(name);
+ ivar->ivar_type = _strdup_internal(type);
+
+ // align if necessary
+ alignBytes = 1 << alignment;
+ misalign = cls->instance_size % alignBytes;
+ if (misalign) cls->instance_size += (long)(alignBytes - misalign);
+
+ // set ivar offset and increase instance size
+ ivar->ivar_offset = (int)cls->instance_size;
+ cls->instance_size += (long)size;
+ }
+
+ mutex_unlock(&classLock);
+
+ return result;
+}
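+
+// Illustrative usage sketch (hypothetical MyBox class, not runtime code):
+// ivars can only be added while the class is still CLS_CONSTRUCTING, i.e.
+// between objc_allocateClassPair() and objc_registerClassPair().
+//
+//     Class cls = objc_allocateClassPair(objc_getClass("NSObject"), "MyBox", 0);
+//     // alignment is expressed as log2: 3 means 8-byte alignment (64-bit id)
+//     class_addIvar(cls, "_value", sizeof(id), 3, "@");
+//     objc_registerClassPair(cls);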
+
+
+/***********************************************************************
+* class_addProtocol
+**********************************************************************/
+BOOL class_addProtocol(Class cls, Protocol *protocol_gen)
+{
+ old_protocol *protocol = oldprotocol(protocol_gen);
+ old_protocol_list *plist;
+
+ if (!cls) return NO;
+ if (class_conformsToProtocol(cls, protocol_gen)) return NO;
+
+ mutex_lock(&classLock);
+
+ // fixme optimize - protocol list doesn't escape?
+ plist = (old_protocol_list*)_calloc_internal(sizeof(old_protocol_list), 1);
+ plist->count = 1;
+ plist->list[0] = protocol;
+ plist->next = cls->protocols;
+ cls->protocols = plist;
+
+ // fixme metaclass?
+
+ mutex_unlock(&classLock);
+
+ return YES;
+}
+
+
+/***********************************************************************
+* _class_addProperties
+* Internal helper to add properties to a class.
+* Used by category attachment and class_addProperty()
+* Locking: acquires classLock
+**********************************************************************/
+BOOL
+_class_addProperties(Class cls,
+ old_property_list *additions)
+{
+ old_property_list *newlist;
+
+ if (!(cls->info & CLS_EXT)) return NO;
+
+ newlist = (old_property_list *)
+ _memdup_internal(additions, sizeof(*newlist) - sizeof(newlist->first)
+ + (additions->entsize * additions->count));
+
+ mutex_lock(&classLock);
+
+ allocateExt(cls);
+ if (!cls->ext->propertyLists) {
+ // cls has no properties - simply use this list
+ cls->ext->propertyLists = (old_property_list **)newlist;
+ cls->setInfo(CLS_NO_PROPERTY_ARRAY);
+ }
+ else if (cls->info & CLS_NO_PROPERTY_ARRAY) {
+ // cls has one property list - make a new array
+ old_property_list **newarray = (old_property_list **)
+ _malloc_internal(3 * sizeof(*newarray));
+ newarray[0] = newlist;
+ newarray[1] = (old_property_list *)cls->ext->propertyLists;
+ newarray[2] = nil;
+ cls->ext->propertyLists = newarray;
+ cls->clearInfo(CLS_NO_PROPERTY_ARRAY);
+ }
+ else {
+ // cls has a property array - make a bigger one
+ old_property_list **newarray;
+ int count = 0;
+ while (cls->ext->propertyLists[count]) count++;
+ newarray = (old_property_list **)
+ _malloc_internal((count+2) * sizeof(*newarray));
+ newarray[0] = newlist;
+ memcpy(&newarray[1], &cls->ext->propertyLists[0],
+ count * sizeof(*newarray));
+ newarray[count+1] = nil;
+ free(cls->ext->propertyLists);
+ cls->ext->propertyLists = newarray;
+ }
+
+ mutex_unlock(&classLock);
+
+ return YES;
+}
+
+
+/***********************************************************************
+* class_addProperty
+* Adds a property to a class. Returns NO if the property already exists.
+* Locking: acquires classLock
+**********************************************************************/
+static BOOL
+_class_addProperty(Class cls, const char *name,
+ const objc_property_attribute_t *attrs, unsigned int count,
+ BOOL replace)
+{
+ if (!cls) return NO;
+ if (!name) return NO;
+
+ old_property *prop = oldproperty(class_getProperty(cls, name));
+ if (prop && !replace) {
+ // already exists, refuse to replace
+ return NO;
+ }
+ else if (prop) {
+ // replace existing
+ mutex_lock(&classLock);
+ try_free(prop->attributes);
+ prop->attributes = copyPropertyAttributeString(attrs, count);
+ mutex_unlock(&classLock);
+ return YES;
+ }
+ else {
+ // add new
+ old_property_list proplist;
+ proplist.entsize = sizeof(old_property);
+ proplist.count = 1;
+ proplist.first.name = _strdup_internal(name);
+ proplist.first.attributes = copyPropertyAttributeString(attrs, count);
+
+ return _class_addProperties(cls, &proplist);
+ }
+}
+
+BOOL
+class_addProperty(Class cls, const char *name,
+ const objc_property_attribute_t *attrs, unsigned int n)
+{
+ return _class_addProperty(cls, name, attrs, n, NO);
+}
+
+void
+class_replaceProperty(Class cls, const char *name,
+ const objc_property_attribute_t *attrs, unsigned int n)
+{
+ _class_addProperty(cls, name, attrs, n, YES);
+}
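+
+// Illustrative usage sketch (hypothetical readonly, copied NSString property
+// named "title"; not runtime code):
+//
+//     objc_property_attribute_t attrs[] = {
+//         { "T", "@\"NSString\"" },  // type encoding
+//         { "C", "" },               // copy
+//         { "R", "" },               // readonly
+//     };
+//     class_addProperty(cls, "title", attrs, 3);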
+
+
+/***********************************************************************
+* class_copyProtocolList. Returns a heap block containing the
+* protocols implemented by the class, or nil if the class
+* implements no protocols. Caller must free the block.
+* Does not copy any superclass's protocols.
+**********************************************************************/
+Protocol * __unsafe_unretained *
+class_copyProtocolList(Class cls, unsigned int *outCount)
+{
+ old_protocol_list *plist;
+ Protocol **result = nil;
+ unsigned int count = 0;
+ unsigned int p;
+
+ if (!cls) {
+ if (outCount) *outCount = 0;
+ return nil;
+ }
+
+ mutex_lock(&classLock);
+
+ for (plist = cls->protocols; plist != nil; plist = plist->next) {
+ count += (int)plist->count;
+ }
+
+ if (count > 0) {
+ result = (Protocol **)malloc((count+1) * sizeof(Protocol *));
+
+ for (p = 0, plist = cls->protocols;
+ plist != nil;
+ plist = plist->next)
+ {
+ int i;
+ for (i = 0; i < plist->count; i++) {
+ result[p++] = (Protocol *)plist->list[i];
+ }
+ }
+ result[p] = nil;
+ }
+
+ mutex_unlock(&classLock);
+
+ if (outCount) *outCount = count;
+ return result;
+}
+
+
+/***********************************************************************
+* class_getProperty. Return the named property.
+**********************************************************************/
+objc_property_t class_getProperty(Class cls, const char *name)
+{
+ old_property *result;
+ if (!cls || !name) return nil;
+
+ mutex_lock(&classLock);
+
+ for (result = nil; cls && !result; cls = cls->superclass) {
+ uintptr_t iterator = 0;
+ old_property_list *plist;
+ while ((plist = nextPropertyList(cls, &iterator))) {
+ uint32_t i;
+ for (i = 0; i < plist->count; i++) {
+ old_property *p = property_list_nth(plist, i);
+ if (0 == strcmp(name, p->name)) {
+ result = p;
+ goto done;
+ }
+ }
+ }
+ }
+
+ done:
+ mutex_unlock(&classLock);
+
+ return (objc_property_t)result;
+}
+
+
+/***********************************************************************
+* class_copyPropertyList. Returns a heap block containing the
+* properties declared in the class, or nil if the class
+* declares no properties. Caller must free the block.
+* Does not copy any superclass's properties.
+**********************************************************************/
+objc_property_t *class_copyPropertyList(Class cls, unsigned int *outCount)
+{
+ old_property_list *plist;
+ uintptr_t iterator = 0;
+ old_property **result = nil;
+ unsigned int count = 0;
+ unsigned int p, i;
+
+ if (!cls) {
+ if (outCount) *outCount = 0;
+ return nil;
+ }
+
+ mutex_lock(&classLock);
+
+ iterator = 0;
+ while ((plist = nextPropertyList(cls, &iterator))) {
+ count += plist->count;
+ }
+
+ if (count > 0) {
+ result = (old_property **)malloc((count+1) * sizeof(old_property *));
+
+ p = 0;
+ iterator = 0;
+ while ((plist = nextPropertyList(cls, &iterator))) {
+ for (i = 0; i < plist->count; i++) {
+ result[p++] = property_list_nth(plist, i);
+ }
+ }
+ result[p] = nil;
+ }
+
+ mutex_unlock(&classLock);
+
+ if (outCount) *outCount = count;
+ return (objc_property_t *)result;
+}
+
+
+/***********************************************************************
+* class_copyMethodList. Returns a heap block containing the
+* methods implemented by the class, or nil if the class
+* implements no methods. Caller must free the block.
+* Does not copy any superclass's methods.
+**********************************************************************/
+Method *class_copyMethodList(Class cls, unsigned int *outCount)
+{
+ old_method_list *mlist;
+ void *iterator = nil;
+ Method *result = nil;
+ unsigned int count = 0;
+ unsigned int m;
+
+ if (!cls) {
+ if (outCount) *outCount = 0;
+ return nil;
+ }
+
+ mutex_lock(&methodListLock);
+
+ iterator = nil;
+ while ((mlist = nextMethodList(cls, &iterator))) {
+ count += mlist->method_count;
+ }
+
+ if (count > 0) {
+ result = (Method *)malloc((count+1) * sizeof(Method));
+
+ m = 0;
+ iterator = nil;
+ while ((mlist = nextMethodList(cls, &iterator))) {
+ int i;
+ for (i = 0; i < mlist->method_count; i++) {
+ Method aMethod = (Method)&mlist->method_list[i];
+ if (ignoreSelector(method_getName(aMethod))) {
+ count--;
+ continue;
+ }
+ result[m++] = aMethod;
+ }
+ }
+ result[m] = nil;
+ }
+
+ mutex_unlock(&methodListLock);
+
+ if (outCount) *outCount = count;
+ return result;
+}
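+
+// Illustrative usage sketch (hypothetical caller, not runtime code): the
+// copy* functions return a nil-terminated, malloc'd block that the caller
+// must free.
+//
+//     unsigned int count;
+//     Method *methods = class_copyMethodList(cls, &count);
+//     for (unsigned int i = 0; i < count; i++) {
+//         const char *name = sel_getName(method_getName(methods[i]));
+//         // ...
+//     }
+//     free(methods);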
+
+
+/***********************************************************************
+* class_copyIvarList. Returns a heap block containing the
+* ivars declared in the class, or nil if the class
+* declares no ivars. Caller must free the block.
+* Does not copy any superclass's ivars.
+**********************************************************************/
+Ivar *class_copyIvarList(Class cls, unsigned int *outCount)
+{
+ Ivar *result = nil;
+ unsigned int count = 0;
+ int i;
+
+ if (!cls) {
+ if (outCount) *outCount = 0;
+ return nil;
+ }
+
+ if (cls->ivars) {
+ count = cls->ivars->ivar_count;
+ }
+
+ if (count > 0) {
+ result = (Ivar *)malloc((count+1) * sizeof(Ivar));
+
+ for (i = 0; i < cls->ivars->ivar_count; i++) {
+ result[i] = (Ivar)&cls->ivars->ivar_list[i];
+ }
+ result[i] = nil;
+ }
+
+ if (outCount) *outCount = count;
+ return result;
+}
+
+
+/***********************************************************************
+* objc_allocateClass.
+**********************************************************************/
+
+void set_superclass(Class cls, Class supercls, BOOL cls_is_new)
+{
+ Class meta = cls->ISA();
+
+ if (supercls) {
+ cls->superclass = supercls;
+ meta->superclass = supercls->ISA();
+ meta->initIsa(supercls->ISA()->ISA());
+
+ // Propagate C++ cdtors from superclass.
+ if (supercls->info & CLS_HAS_CXX_STRUCTORS) {
+ if (cls_is_new) cls->info |= CLS_HAS_CXX_STRUCTORS;
+ else cls->setInfo(CLS_HAS_CXX_STRUCTORS);
+ }
+
+ // Superclass is no longer a leaf for cache flushing
+ if (supercls->info & CLS_LEAF) {
+ supercls->clearInfo(CLS_LEAF);
+ supercls->ISA()->clearInfo(CLS_LEAF);
+ }
+ } else {
+ cls->superclass = Nil; // superclass of root class is nil
+ meta->superclass = cls; // superclass of root metaclass is root class
+ meta->initIsa(meta); // metaclass of root metaclass is root metaclass
+
+ // Root class is never a leaf for cache flushing, because the
+ // root metaclass is a subclass. (This could be optimized, but
+ // is too uncommon to bother.)
+ cls->clearInfo(CLS_LEAF);
+ meta->clearInfo(CLS_LEAF);
+ }
+}
+
+// &UnsetLayout is the default ivar layout during class construction
+static const uint8_t UnsetLayout = 0;
+
+Class objc_initializeClassPair(Class supercls, const char *name, Class cls, Class meta)
+{
+ // Connect to superclasses and metaclasses
+ cls->initIsa(meta);
+ set_superclass(cls, supercls, YES);
+
+ // Set basic info
+ cls->name = _strdup_internal(name);
+ meta->name = _strdup_internal(name);
+ cls->version = 0;
+ meta->version = 7;
+ cls->info = CLS_CLASS | CLS_CONSTRUCTING | CLS_EXT | CLS_LEAF;
+ meta->info = CLS_META | CLS_CONSTRUCTING | CLS_EXT | CLS_LEAF;
+
+ // Set instance size based on superclass.
+ if (supercls) {
+ cls->instance_size = supercls->instance_size;
+ meta->instance_size = supercls->ISA()->instance_size;
+ } else {
+ cls->instance_size = sizeof(Class); // just an isa
+ meta->instance_size = sizeof(objc_class);
+ }
+
+ // No ivars. No methods. Empty cache. No protocols. No layout. Empty ext.
+ cls->ivars = nil;
+ cls->methodLists = nil;
+ cls->cache = (Cache)&_objc_empty_cache;
+ cls->protocols = nil;
+ cls->ivar_layout = &UnsetLayout;
+ cls->ext = nil;
+ allocateExt(cls);
+ cls->ext->weak_ivar_layout = &UnsetLayout;
+
+ meta->ivars = nil;
+ meta->methodLists = nil;
+ meta->cache = (Cache)&_objc_empty_cache;
+ meta->protocols = nil;
+ meta->ext = nil;
+
+ return cls;
+}
+
+Class objc_allocateClassPair(Class supercls, const char *name,
+ size_t extraBytes)
+{
+ Class cls, meta;
+
+ if (objc_getClass(name)) return nil;
+ // fixme reserve class name against simultaneous allocation
+
+ if (supercls && (supercls->info & CLS_CONSTRUCTING)) {
+ // Can't make subclass of an in-construction class
+ return nil;
+ }
+
+ // Allocate new classes.
+ if (supercls) {
+ cls = _calloc_class(supercls->ISA()->alignedInstanceSize() + extraBytes);
+ meta = _calloc_class(supercls->ISA()->ISA()->alignedInstanceSize() + extraBytes);
+ } else {
+ cls = _calloc_class(sizeof(objc_class) + extraBytes);
+ meta = _calloc_class(sizeof(objc_class) + extraBytes);
+ }
+
+
+ objc_initializeClassPair(supercls, name, cls, meta);
+
+ return cls;
+}
+
+
+void objc_registerClassPair(Class cls)
+{
+ if ((cls->info & CLS_CONSTRUCTED) ||
+ (cls->ISA()->info & CLS_CONSTRUCTED))
+ {
+ _objc_inform("objc_registerClassPair: class '%s' was already "
+ "registered!", cls->name);
+ return;
+ }
+
+ if (!(cls->info & CLS_CONSTRUCTING) ||
+ !(cls->ISA()->info & CLS_CONSTRUCTING))
+ {
+ _objc_inform("objc_registerClassPair: class '%s' was not "
+ "allocated with objc_allocateClassPair!", cls->name);
+ return;
+ }
+
+ if (ISMETA(cls)) {
+ _objc_inform("objc_registerClassPair: class '%s' is a metaclass, "
+ "not a class!", cls->name);
+ return;
+ }
+
+ mutex_lock(&classLock);
+
+ // Build ivar layouts
+ if (UseGC) {
+ if (cls->ivar_layout != &UnsetLayout) {
+ // Class builder already called class_setIvarLayout.
+ }
+ else if (!cls->superclass) {
+ // Root class. Scan conservatively (should be isa ivar only).
+ cls->ivar_layout = nil;
+ }
+ else if (cls->ivars == nil) {
+ // No local ivars. Use superclass's layout.
+ cls->ivar_layout =
+ _ustrdup_internal(cls->superclass->ivar_layout);
+ }
+ else {
+ // Has local ivars. Build layout based on superclass.
+ Class supercls = cls->superclass;
+ const uint8_t *superlayout =
+ class_getIvarLayout(supercls);
+ layout_bitmap bitmap =
+ layout_bitmap_create(superlayout, supercls->instance_size,
+ cls->instance_size, NO);
+ int i;
+ for (i = 0; i < cls->ivars->ivar_count; i++) {
+ old_ivar *iv = &cls->ivars->ivar_list[i];
+ layout_bitmap_set_ivar(bitmap, iv->ivar_type, iv->ivar_offset);
+ }
+ cls->ivar_layout = layout_string_create(bitmap);
+ layout_bitmap_free(bitmap);
+ }
+
+ if (cls->ext->weak_ivar_layout != &UnsetLayout) {
+ // Class builder already called class_setWeakIvarLayout.
+ }
+ else if (!cls->superclass) {
+ // Root class. No weak ivars (should be isa ivar only)
+ cls->ext->weak_ivar_layout = nil;
+ }
+ else if (cls->ivars == nil) {
+ // No local ivars. Use superclass's layout.
+ const uint8_t *weak =
+ class_getWeakIvarLayout(cls->superclass);
+ if (weak) {
+ cls->ext->weak_ivar_layout = _ustrdup_internal(weak);
+ } else {
+ cls->ext->weak_ivar_layout = nil;
+ }
+ }
+ else {
+ // Has local ivars. Build layout based on superclass.
+ // No way to add weak ivars yet.
+ const uint8_t *weak =
+ class_getWeakIvarLayout(cls->superclass);
+ if (weak) {
+ cls->ext->weak_ivar_layout = _ustrdup_internal(weak);
+ } else {
+ cls->ext->weak_ivar_layout = nil;
+ }
+ }
+ }
+
+ // Clear "under construction" bit, set "done constructing" bit
+ cls->info &= ~CLS_CONSTRUCTING;
+ cls->ISA()->info &= ~CLS_CONSTRUCTING;
+ cls->info |= CLS_CONSTRUCTED;
+ cls->ISA()->info |= CLS_CONSTRUCTED;
+
+ NXHashInsertIfAbsent(class_hash, cls);
+ objc_addRegisteredClass(cls);
+ //objc_addRegisteredClass(cls->ISA()); if we ever allocate classes from GC
+
+ mutex_unlock(&classLock);
+}
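+
+// Illustrative usage sketch (hypothetical MyDynamic class, not runtime code):
+// typical lifecycle of a runtime-built class.
+//
+//     Class cls = objc_allocateClassPair(objc_getClass("NSObject"), "MyDynamic", 0);
+//     // add ivars, methods, protocols, and properties here, while the
+//     // class is still CLS_CONSTRUCTING
+//     objc_registerClassPair(cls);          // clears CLS_CONSTRUCTING
+//     id obj = class_createInstance(cls, 0);
+//     // ... later, once all instances are gone:
+//     objc_disposeClassPair(cls);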
+
+
+Class objc_duplicateClass(Class original, const char *name, size_t extraBytes)
+{
+ unsigned int count, i;
+ old_method **originalMethods;
+ old_method_list *duplicateMethods;
+ // Don't use sizeof(objc_class) here because
+ // instance_size has historically contained two extra words,
+ // and instance_size is what objc_getIndexedIvars() actually uses.
+ Class duplicate =
+ _calloc_class(original->ISA()->alignedInstanceSize() + extraBytes);
+
+ duplicate->initIsa(original->ISA());
+ duplicate->superclass = original->superclass;
+ duplicate->name = strdup(name);
+ duplicate->version = original->version;
+ duplicate->info = original->info & (CLS_CLASS|CLS_META|CLS_INITIALIZED|CLS_JAVA_HYBRID|CLS_JAVA_CLASS|CLS_HAS_CXX_STRUCTORS|CLS_HAS_LOAD_METHOD);
+ duplicate->instance_size = original->instance_size;
+ duplicate->ivars = original->ivars;
+ // methodLists handled below
+ duplicate->cache = (Cache)&_objc_empty_cache;
+ duplicate->protocols = original->protocols;
+ if (original->info & CLS_EXT) {
+ duplicate->info |= original->info & (CLS_EXT|CLS_NO_PROPERTY_ARRAY);
+ duplicate->ivar_layout = original->ivar_layout;
+ if (original->ext) {
+ duplicate->ext = (old_class_ext *)_malloc_internal(original->ext->size);
+ memcpy(duplicate->ext, original->ext, original->ext->size);
+ } else {
+ duplicate->ext = nil;
+ }
+ }
+
+ // Method lists are deep-copied so they can be stomped.
+ originalMethods = (old_method **)class_copyMethodList(original, &count);
+ if (originalMethods) {
+ duplicateMethods = (old_method_list *)
+ calloc(sizeof(old_method_list) +
+ (count-1)*sizeof(old_method), 1);
+ duplicateMethods->obsolete = fixed_up_method_list;
+ duplicateMethods->method_count = count;
+ for (i = 0; i < count; i++) {
+ duplicateMethods->method_list[i] = *(originalMethods[i]);
+ }
+ duplicate->methodLists = (old_method_list **)duplicateMethods;
+ duplicate->info |= CLS_NO_METHOD_ARRAY;
+ free(originalMethods);
+ }
+
+ mutex_lock(&classLock);
+ NXHashInsert(class_hash, duplicate);
+ objc_addRegisteredClass(duplicate);
+ mutex_unlock(&classLock);
+
+ return duplicate;
+}
+
+
+void objc_disposeClassPair(Class cls)
+{
+ if (!(cls->info & (CLS_CONSTRUCTED|CLS_CONSTRUCTING)) ||
+ !(cls->ISA()->info & (CLS_CONSTRUCTED|CLS_CONSTRUCTING)))
+ {
+ // class not allocated with objc_allocateClassPair
+        // disposing of a still-unregistered class is OK!
+ _objc_inform("objc_disposeClassPair: class '%s' was not "
+ "allocated with objc_allocateClassPair!", cls->name);
+ return;
+ }
+
+ if (ISMETA(cls)) {
+ _objc_inform("objc_disposeClassPair: class '%s' is a metaclass, "
+ "not a class!", cls->name);
+ return;
+ }
+
+ mutex_lock(&classLock);
+ NXHashRemove(class_hash, cls);
+ objc_removeRegisteredClass(cls);
+ unload_class(cls->ISA());
+ unload_class(cls);
+ mutex_unlock(&classLock);
+}
+
+
+
+/***********************************************************************
+* _class_createInstanceFromZone. Allocate an instance of the
+* specified class with the specified number of bytes for indexed
+* variables, in the specified zone. The isa field is set to the
+* class, C++ default constructors are called, and all other fields are zeroed.
+**********************************************************************/
+id
+_class_createInstanceFromZone(Class cls, size_t extraBytes, void *zone)
+{
+ id obj;
+ size_t size;
+
+ // Can't create something for nothing
+ if (!cls) return nil;
+
+ // Allocate and initialize
+ size = cls->alignedInstanceSize() + extraBytes;
+
+ // CF requires all objects be at least 16 bytes.
+ if (size < 16) size = 16;
+
+#if SUPPORT_GC
+ if (UseGC) {
+ obj = (id)auto_zone_allocate_object(gc_zone, size,
+ AUTO_OBJECT_SCANNED, 0, 1);
+ } else
+#endif
+ if (zone) {
+ obj = (id)malloc_zone_calloc((malloc_zone_t *)zone, 1, size);
+ } else {
+ obj = (id)calloc(1, size);
+ }
+ if (!obj) return nil;
+
+ obj->initIsa(cls);
+
+ if (cls->hasCxxCtor()) {
+ obj = _objc_constructOrFree(cls, obj);
+ }
+
+ return obj;
+}
+
+
+/***********************************************************************
+* _class_createInstance. Allocate an instance of the specified
+* class with the specified number of bytes for indexed variables, in
+* the default zone, using _class_createInstanceFromZone.
+**********************************************************************/
+static id _class_createInstance(Class cls, size_t extraBytes)
+{
+ return _class_createInstanceFromZone (cls, extraBytes, nil);
+}
+
+
+static id _object_copyFromZone(id oldObj, size_t extraBytes, void *zone)
+{
+ id obj;
+ size_t size;
+
+ if (!oldObj) return nil;
+
+ obj = (*_zoneAlloc)(oldObj->ISA(), extraBytes, zone);
+ size = oldObj->ISA()->alignedInstanceSize() + extraBytes;
+
+ // fixme need C++ copy constructor
+ objc_memmove_collectable(obj, oldObj, size);
+
+#if SUPPORT_GC
+ if (UseGC) gc_fixup_weakreferences(obj, oldObj);
+#endif
+
+ return obj;
+}
+
+
+/***********************************************************************
+* objc_destructInstance
+* Destroys an instance without freeing memory.
+* Calls C++ destructors.
+* Removes associative references.
+* Returns `obj`. Does nothing if `obj` is nil.
+* Be warned that GC DOES NOT CALL THIS. If you edit this, also edit finalize.
+* CoreFoundation and other clients do call this under GC.
+**********************************************************************/
+void *objc_destructInstance(id obj)
+{
+ if (obj) {
+ Class isa = obj->getIsa();
+
+ if (isa->hasCxxDtor()) {
+ object_cxxDestruct(obj);
+ }
+
+ if (isa->instancesHaveAssociatedObjects()) {
+ _object_remove_assocations(obj);
+ }
+
+ if (!UseGC) objc_clear_deallocating(obj);
+ }
+
+ return obj;
+}
+
+static id
+_object_dispose(id anObject)
+{
+ if (anObject==nil) return nil;
+
+ objc_destructInstance(anObject);
+
+#if SUPPORT_GC
+ if (UseGC) {
+ auto_zone_retain(gc_zone, anObject); // gc free expects rc==1
+ } else
+#endif
+ {
+ // only clobber isa for non-gc
+ anObject->initIsa(_objc_getFreedObjectClass ());
+ }
+ free(anObject);
+ return nil;
+}
+
+static id _object_copy(id oldObj, size_t extraBytes)
+{
+ void *z = malloc_zone_from_ptr(oldObj);
+ return _object_copyFromZone(oldObj, extraBytes,
+ z ? z : malloc_default_zone());
+}
+
+static id _object_reallocFromZone(id anObject, size_t nBytes, void *zone)
+{
+ id newObject;
+ Class tmp;
+
+ if (anObject == nil)
+ __objc_error(nil, "reallocating nil object");
+
+ if (anObject->ISA() == _objc_getFreedObjectClass ())
+ __objc_error(anObject, "reallocating freed object");
+
+ if (nBytes < anObject->ISA()->alignedInstanceSize())
+ __objc_error(anObject, "(%s, %zu) requested size too small",
+ object_getClassName(anObject), nBytes);
+
+ // fixme need C++ copy constructor
+ // fixme GC copy
+ // Make sure not to modify space that has been declared free
+ tmp = anObject->ISA();
+ anObject->initIsa(_objc_getFreedObjectClass ());
+ newObject = (id)malloc_zone_realloc((malloc_zone_t *)zone, anObject, nBytes);
+ if (newObject) {
+ newObject->initIsa(tmp);
+ } else {
+ // realloc failed, anObject is still alive
+ anObject->initIsa(tmp);
+ }
+ return newObject;
+}
+
+
+static id _object_realloc(id anObject, size_t nBytes)
+{
+ void *z = malloc_zone_from_ptr(anObject);
+ return _object_reallocFromZone(anObject,
+ nBytes,
+ z ? z : malloc_default_zone());
+}
+
+id (*_alloc)(Class, size_t) = _class_createInstance;
+id (*_copy)(id, size_t) = _object_copy;
+id (*_realloc)(id, size_t) = _object_realloc;
+id (*_dealloc)(id) = _object_dispose;
+id (*_zoneAlloc)(Class, size_t, void *) = _class_createInstanceFromZone;
+id (*_zoneCopy)(id, size_t, void *) = _object_copyFromZone;
+id (*_zoneRealloc)(id, size_t, void *) = _object_reallocFromZone;
+void (*_error)(id, const char *, va_list) = _objc_error;
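
// Illustrative sketch (not part of the patch): interposing the legacy
// allocation hooks above. The extern declaration mirrors the OBJC_EXPORT
// declarations for these hooks elsewhere in this file; the wrapper names are
// made up. Under GC the hook is bypassed, and class_createInstances() above
// returns 0 once _alloc is no longer &_class_createInstance.
extern id (*_alloc)(Class, size_t);

static id (*DemoOriginalAlloc)(Class, size_t);

static id DemoCountingAlloc(Class cls, size_t extraBytes)
{
    // ... count or log the allocation here ...
    return (*DemoOriginalAlloc)(cls, extraBytes);
}

static void DemoInstallCountingAlloc(void)
{
    DemoOriginalAlloc = _alloc;
    _alloc = &DemoCountingAlloc;    // class_createInstance() now routes through us
}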
+
+
+id class_createInstance(Class cls, size_t extraBytes)
+{
+ if (UseGC) {
+ return _class_createInstance(cls, extraBytes);
+ } else {
+ return (*_alloc)(cls, extraBytes);
+ }
+}
+
+id class_createInstanceFromZone(Class cls, size_t extraBytes, void *z)
+{
+ OBJC_WARN_DEPRECATED;
+ if (UseGC) {
+ return _class_createInstanceFromZone(cls, extraBytes, z);
+ } else {
+ return (*_zoneAlloc)(cls, extraBytes, z);
+ }
+}
+
+unsigned class_createInstances(Class cls, size_t extraBytes,
+ id *results, unsigned num_requested)
+{
+ if (UseGC || _alloc == &_class_createInstance) {
+ return _class_createInstancesFromZone(cls, extraBytes, nil,
+ results, num_requested);
+ } else {
+ // _alloc in use, which isn't understood by the batch allocator
+ return 0;
+ }
+}
+
+id object_copy(id obj, size_t extraBytes)
+{
+ if (UseGC) return _object_copy(obj, extraBytes);
+ else return (*_copy)(obj, extraBytes);
+}
+
+id object_copyFromZone(id obj, size_t extraBytes, void *z)
+{
+ OBJC_WARN_DEPRECATED;
+ if (UseGC) return _object_copyFromZone(obj, extraBytes, z);
+ else return (*_zoneCopy)(obj, extraBytes, z);
+}
+
+id object_dispose(id obj)
+{
+ if (UseGC) return _object_dispose(obj);
+ else return (*_dealloc)(obj);
+}
+
+id object_realloc(id obj, size_t nBytes)
+{
+ OBJC_WARN_DEPRECATED;
+ if (UseGC) return _object_realloc(obj, nBytes);
+ else return (*_realloc)(obj, nBytes);
+}
+
+id object_reallocFromZone(id obj, size_t nBytes, void *z)
+{
+ OBJC_WARN_DEPRECATED;
+ if (UseGC) return _object_reallocFromZone(obj, nBytes, z);
+ else return (*_zoneRealloc)(obj, nBytes, z);
+}
+
+
+// ProKit SPI
+Class class_setSuperclass(Class cls, Class newSuper)
+{
+ Class oldSuper = cls->superclass;
+ set_superclass(cls, newSuper, NO);
+ flush_caches(cls, YES);
+ return oldSuper;
+}
+#endif
* Lazy method list arrays and method list locking (2004-10-19)
*
* cls->methodLists may be in one of three forms:
- * 1. NULL: The class has no methods.
- * 2. non-NULL, with CLS_NO_METHOD_ARRAY set: cls->methodLists points
+ * 1. nil: The class has no methods.
+ * 2. non-nil, with CLS_NO_METHOD_ARRAY set: cls->methodLists points
* to a single method list, which is the class's only method list.
- * 3. non-NULL, with CLS_NO_METHOD_ARRAY clear: cls->methodLists points to
+ * 3. non-nil, with CLS_NO_METHOD_ARRAY clear: cls->methodLists points to
* an array of method list pointers. The end of the array's block
* is set to -1. If the actual number of method lists is smaller
- * than that, the rest of the array is NULL.
+ * than that, the rest of the array is nil.
*
* Attaching categories and adding and removing classes may change
* the form of the class list. In addition, individual method lists
* synchronized to prevent races.
*
* Three thread-safe modification functions are provided:
- * _class_setInfo() // atomically sets some bits
- * _class_clearInfo() // atomically clears some bits
- * _class_changeInfo() // atomically sets some bits and clears others
+ * cls->setInfo() // atomically sets some bits
+ * cls->clearInfo() // atomically clears some bits
+ * cls->changeInfo() // atomically sets some bits and clears others
* These replace CLS_SETINFO() for the multithreaded cases.
*
* Three modification windows are defined:
OBJC_EXPORT id (*_zoneCopy)(id, size_t, void *);
-/***********************************************************************
-* Function prototypes internal to this module.
-**********************************************************************/
-
-static IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel);
-static Method look_up_method(Class cls, SEL sel, BOOL withCache, BOOL withResolver);
-
-
-/***********************************************************************
-* Static data internal to this module.
-**********************************************************************/
-
-#if !TARGET_OS_WIN32 && !defined(__arm__)
-# define MESSAGE_LOGGING
-#endif
-
-#if defined(MESSAGE_LOGGING)
-// Method call logging
-static int LogObjCMessageSend (BOOL isClassMethod, const char * objectsClass, const char * implementingClass, SEL selector);
-typedef int (*ObjCLogProc)(BOOL, const char *, const char *, SEL);
-
-static int objcMsgLogFD = (-1);
-static ObjCLogProc objcMsgLogProc = &LogObjCMessageSend;
-static int objcMsgLogEnabled = 0;
-#endif
-
-
/***********************************************************************
* Information about multi-thread support:
*
**********************************************************************/
Class object_getClass(id obj)
{
- return _object_getClass(obj);
+ if (obj) return obj->getIsa();
+ else return Nil;
}
**********************************************************************/
Class object_setClass(id obj, Class cls)
{
- if (obj) {
- Class old;
- do {
- old = obj->isa;
- } while (! OSAtomicCompareAndSwapPtrBarrier(old, cls, (void * volatile *)&obj->isa));
-
- if (old && _class_instancesHaveAssociatedObjects(old)) {
- _class_setInstancesHaveAssociatedObjects(cls);
- }
-
- return old;
- }
+ if (obj) return obj->changeIsa(cls);
else return Nil;
}
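
// Illustrative sketch (not part of the patch): object_setClass() above is the
// supported way to swizzle an object's isa. Both class names are placeholders,
// and the replacement class must not change the instance layout.
static void DemoIsaSwizzle(void)
{
    id obj = class_createInstance(objc_getClass("DemoBase"), 0);
    Class old = object_setClass(obj, objc_getClass("DemoSubclass"));
    // obj now dispatches through DemoSubclass; `old` is DemoBase.
    object_setClass(obj, old);      // restore the original isa
    object_dispose(obj);
}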
**********************************************************************/
const char *object_getClassName(id obj)
{
- Class isa = _object_getClass(obj);
- if (isa) return _class_getName(isa);
- else return "nil";
+ return class_getName(obj ? obj->getIsa() : nil);
+}
+
+
+/***********************************************************************
+ * object_getMethodImplementation.
+ **********************************************************************/
+IMP object_getMethodImplementation(id obj, SEL name)
+{
+ Class cls = (obj ? obj->getIsa() : nil);
+ return class_getMethodImplementation(cls, name);
+}
+
+
+/***********************************************************************
+ * object_getMethodImplementation_stret.
+ **********************************************************************/
+IMP object_getMethodImplementation_stret(id obj, SEL name)
+{
+ Class cls = (obj ? obj->getIsa() : nil);
+ return class_getMethodImplementation_stret(cls, name);
}
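
// Illustrative sketch (not part of the patch): typical use of the new
// object_getMethodImplementation() entry point above, assuming a selector
// that takes no arguments and returns an object.
static id DemoCallDescription(id obj)
{
    SEL sel = sel_registerName("description");
    IMP imp = object_getMethodImplementation(obj, sel);
    return ((id (*)(id, SEL))imp)(obj, sel);    // equivalent to [obj description]
}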
+
/***********************************************************************
* object_getIndexedIvars.
**********************************************************************/
void *object_getIndexedIvars(id obj)
{
// ivars are tacked onto the end of the object
- if (obj) return ((char *) obj) + _class_getInstanceSize(_object_getClass(obj));
- else return NULL;
+ if (!obj) return nil;
+ if (obj->isTaggedPointer()) return nil;
+ return ((char *) obj) + obj->ISA()->alignedInstanceSize();
}
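
// Illustrative sketch (not part of the patch): pairing
// class_createInstance(cls, extraBytes) with object_getIndexedIvars() above.
// The payload struct is made up.
typedef struct { unsigned length; char bytes[64]; } DemoPayload;

static id DemoIndexedIvars(void)
{
    id obj = class_createInstance(objc_getClass("NSObject"), sizeof(DemoPayload));
    DemoPayload *payload = (DemoPayload *)object_getIndexedIvars(obj);
    payload->length = 0;    // extra storage is zero-filled, right after the ivars
    // Tagged pointers carry no indexed ivars; the function returns nil for them.
    return obj;
}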
Ivar object_setInstanceVariable(id obj, const char *name, void *value)
{
- Ivar ivar = NULL;
+ Ivar ivar = nil;
- if (obj && name) {
- if ((ivar = class_getInstanceVariable(_object_getClass(obj), name))) {
+ if (obj && name && !obj->isTaggedPointer()) {
+ if ((ivar = class_getInstanceVariable(obj->ISA(), name))) {
object_setIvar(obj, ivar, (id)value);
}
}
Ivar object_getInstanceVariable(id obj, const char *name, void **value)
{
- if (obj && name) {
+ if (obj && name && !obj->isTaggedPointer()) {
Ivar ivar;
- if ((ivar = class_getInstanceVariable(_object_getClass(obj), name))) {
+ if ((ivar = class_getInstanceVariable(obj->ISA(), name))) {
if (value) *value = (void *)object_getIvar(obj, ivar);
return ivar;
}
}
- if (value) *value = NULL;
- return NULL;
+ if (value) *value = nil;
+ return nil;
}
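
// Illustrative sketch (not part of the patch): the raw ivar accessors above.
// "DemoPerson" and its "_name" ivar are placeholders, and these functions are
// unavailable under ARC.
static void DemoIvarAccess(id person)
{
    object_setInstanceVariable(person, "_name", (void *)@"Alice");

    void *value = NULL;
    Ivar ivar = object_getInstanceVariable(person, "_name", &value);
    // `ivar` describes _name (or is nil if absent); `value` holds the pointer.
    (void)ivar; (void)value;
}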
static BOOL is_scanned_offset(ptrdiff_t ivar_offset, const uint8_t *layout) {
// FIXME: this could be optimized.
static Class _ivar_getClass(Class cls, Ivar ivar) {
- Class ivar_class = NULL;
+ Class ivar_class = nil;
const char *ivar_name = ivar_getName(ivar);
Ivar named_ivar = _class_getVariable(cls, ivar_name, &ivar_class);
if (named_ivar) {
// the same ivar name can appear multiple times along the superclass chain.
- while (named_ivar != ivar && ivar_class != NULL) {
- ivar_class = class_getSuperclass(ivar_class);
+ while (named_ivar != ivar && ivar_class != nil) {
+ ivar_class = ivar_class->superclass;
named_ivar = _class_getVariable(cls, ivar_getName(ivar), &ivar_class);
}
}
void object_setIvar(id obj, Ivar ivar, id value)
{
- if (obj && ivar) {
- Class cls = _ivar_getClass(object_getClass(obj), ivar);
+ if (obj && ivar && !obj->isTaggedPointer()) {
+ Class cls = _ivar_getClass(obj->ISA(), ivar);
ptrdiff_t ivar_offset = ivar_getOffset(ivar);
id *location = (id *)((char *)obj + ivar_offset);
// if this ivar is a member of an ARR compiled class, then issue the correct barrier according to the layout.
objc_assign_weak(value, location);
}
}
- objc_assign_ivar_internal(value, obj, ivar_offset);
+ objc_assign_ivar(value, obj, ivar_offset);
#else
*location = value;
#endif
id object_getIvar(id obj, Ivar ivar)
{
- if (obj && ivar) {
- Class cls = _object_getClass(obj);
+ if (obj && ivar && !obj->isTaggedPointer()) {
+ Class cls = obj->ISA();
ptrdiff_t ivar_offset = ivar_getOffset(ivar);
if (_class_usesAutomaticRetainRelease(cls)) {
// for ARR, layout strings are relative to the instance start.
#endif
return *idx;
}
- return NULL;
+ return nil;
}
// Call cls's dtor first, then superclasses' dtors.
- for ( ; cls != NULL; cls = _class_getSuperclass(cls)) {
- if (!_class_hasCxxStructors(cls)) return;
+ for ( ; cls; cls = cls->superclass) {
+ if (!cls->hasCxxDtor()) return;
dtor = (void(*)(id))
lookupMethodInClassAndLoadCache(cls, SEL_cxx_destruct);
- if (dtor != (void(*)(id))_objc_msgForward_internal) {
+ if (dtor != (void(*)(id))_objc_msgForward_impcache) {
if (PrintCxxCtors) {
_objc_inform("CXX: calling C++ destructors for class %s",
- _class_getName(cls));
+ cls->getName());
}
(*dtor)(obj);
}
void object_cxxDestruct(id obj)
{
if (!obj) return;
- if (OBJC_IS_TAGGED_PTR(obj)) return;
- object_cxxDestructFromClass(obj, obj->isa); // need not be object_getClass
+ if (obj->isTaggedPointer()) return;
+ object_cxxDestructFromClass(obj, obj->ISA());
}
Class supercls;
// Stop if neither this class nor any superclass has ctors.
- if (!_class_hasCxxStructors(cls)) return YES; // no ctor - ok
+ if (!cls->hasCxxCtor()) return YES; // no ctor - ok
- supercls = _class_getSuperclass(cls);
+ supercls = cls->superclass;
// Call superclasses' ctors first, if any.
if (supercls) {
// Find this class's ctor, if any.
ctor = (id(*)(id))lookupMethodInClassAndLoadCache(cls, SEL_cxx_construct);
- if (ctor == (id(*)(id))_objc_msgForward_internal) return YES; // no ctor - ok
+ if (ctor == (id(*)(id))_objc_msgForward_impcache) return YES; // no ctor - ok
// Call this class's ctor.
if (PrintCxxCtors) {
- _objc_inform("CXX: calling C++ constructors for class %s", _class_getName(cls));
+ _objc_inform("CXX: calling C++ constructors for class %s", cls->getName());
}
if ((*ctor)(obj)) return YES; // ctor called and succeeded - ok
BOOL object_cxxConstruct(id obj)
{
if (!obj) return YES;
- if (OBJC_IS_TAGGED_PTR(obj)) return YES;
- return object_cxxConstructFromClass(obj, obj->isa); // need not be object_getClass
+ if (obj->isTaggedPointer()) return YES;
+ return object_cxxConstructFromClass(obj, obj->ISA());
}
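
// Illustrative sketch (not part of the patch): the kind of class whose hidden
// .cxx_construct / .cxx_destruct methods the two functions above invoke. The
// compiler emits those selectors for any Objective-C++ class with non-trivial
// C++ ivars; the class name here is a placeholder.
#import <Foundation/Foundation.h>
#include <string>

@interface DemoCxxIvars : NSObject {
    std::string _buffer;    // non-trivial constructor and destructor
}
@end

@implementation DemoCxxIvars
@end
// +alloc runs object_cxxConstruct() after the zero-fill; -dealloc (or
// object_dispose) runs object_cxxDestruct() before the memory is freed.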
/***********************************************************************
* _class_resolveClassMethod
-* Call +resolveClassMethod and return the method added or NULL.
+* Call +resolveClassMethod, looking for a method to be added to class cls.
* cls should be a metaclass.
-* Assumes the method doesn't exist already.
+* Does not check if the method already exists.
**********************************************************************/
-static Method _class_resolveClassMethod(Class cls, SEL sel)
+static void _class_resolveClassMethod(Class cls, SEL sel, id inst)
{
- BOOL resolved;
- Method meth = NULL;
- Class clsInstance;
+ assert(cls->isMetaClass());
- if (!look_up_method(cls, SEL_resolveClassMethod,
- YES /*cache*/, NO /*resolver*/))
+ if (! lookUpImpOrNil(cls, SEL_resolveClassMethod, inst,
+ NO/*initialize*/, YES/*cache*/, NO/*resolver*/))
{
- return NULL;
- }
-
- // GrP fixme same hack as +initialize
- if (strncmp(_class_getName(cls), "_%", 2) == 0) {
- // Posee's meta's name is smashed and isn't in the class_hash,
- // so objc_getClass doesn't work.
- const char *baseName = strchr(_class_getName(cls), '%'); // get posee's real name
- clsInstance = (Class)objc_getClass(baseName);
- } else {
- clsInstance = (Class)objc_getClass(_class_getName(cls));
+ // Resolver not implemented.
+ return;
}
-
- resolved = ((BOOL(*)(id, SEL, SEL))objc_msgSend)((id)clsInstance, SEL_resolveClassMethod, sel);
- if (resolved) {
- // +resolveClassMethod adds to self->isa
- meth = look_up_method(cls, sel, YES/*cache*/, NO/*resolver*/);
-
- if (!meth) {
+ BOOL (*msg)(Class, SEL, SEL) = (typeof(msg))objc_msgSend;
+ BOOL resolved = msg(_class_getNonMetaClass(cls, inst),
+ SEL_resolveClassMethod, sel);
+
+ // Cache the result (good or bad) so the resolver doesn't fire next time.
+ // +resolveClassMethod adds to self->ISA() a.k.a. cls
+ IMP imp = lookUpImpOrNil(cls, sel, inst,
+ NO/*initialize*/, YES/*cache*/, NO/*resolver*/);
+
+ if (resolved && PrintResolving) {
+ if (imp) {
+ _objc_inform("RESOLVE: method %c[%s %s] "
+ "dynamically resolved to %p",
+ cls->isMetaClass() ? '+' : '-',
+ cls->getName(), sel_getName(sel), imp);
+ }
+ else {
// Method resolver didn't add anything?
- _objc_inform("+[%s resolveClassMethod:%s] returned YES, but "
- "no new implementation of +[%s %s] was found",
- class_getName(cls),
- sel_getName(sel),
- class_getName(cls),
- sel_getName(sel));
- return NULL;
+ _objc_inform("RESOLVE: +[%s resolveClassMethod:%s] returned YES"
+ ", but no new implementation of %c[%s %s] was found",
+ cls->getName(), sel_getName(sel),
+ cls->isMetaClass() ? '+' : '-',
+ cls->getName(), sel_getName(sel));
}
}
-
- return meth;
}
/***********************************************************************
* _class_resolveInstanceMethod
-* Call +resolveInstanceMethod and return the method added or NULL.
-* cls should be a non-meta class.
-* Assumes the method doesn't exist already.
+* Call +resolveInstanceMethod, looking for a method to be added to class cls.
+* cls may be a metaclass or a non-meta class.
+* Does not check if the method already exists.
**********************************************************************/
-static Method _class_resolveInstanceMethod(Class cls, SEL sel)
+static void _class_resolveInstanceMethod(Class cls, SEL sel, id inst)
{
- BOOL resolved;
- Method meth = NULL;
-
- if (!look_up_method(((id)cls)->isa, SEL_resolveInstanceMethod,
- YES /*cache*/, NO /*resolver*/))
+ if (! lookUpImpOrNil(cls->ISA(), SEL_resolveInstanceMethod, cls,
+ NO/*initialize*/, YES/*cache*/, NO/*resolver*/))
{
- return NULL;
+ // Resolver not implemented.
+ return;
}
- resolved = ((BOOL(*)(id, SEL, SEL))objc_msgSend)((id)cls, SEL_resolveInstanceMethod, sel);
+ BOOL (*msg)(Class, SEL, SEL) = (typeof(msg))objc_msgSend;
+ BOOL resolved = msg(cls, SEL_resolveInstanceMethod, sel);
- if (resolved) {
- // +resolveClassMethod adds to self
- meth = look_up_method(cls, sel, YES/*cache*/, NO/*resolver*/);
+ // Cache the result (good or bad) so the resolver doesn't fire next time.
+ // +resolveInstanceMethod adds to self a.k.a. cls
+ IMP imp = lookUpImpOrNil(cls, sel, inst,
+ NO/*initialize*/, YES/*cache*/, NO/*resolver*/);
- if (!meth) {
+ if (resolved && PrintResolving) {
+ if (imp) {
+ _objc_inform("RESOLVE: method %c[%s %s] "
+ "dynamically resolved to %p",
+ cls->isMetaClass() ? '+' : '-',
+ cls->getName(), sel_getName(sel), imp);
+ }
+ else {
// Method resolver didn't add anything?
- _objc_inform("+[%s resolveInstanceMethod:%s] returned YES, but "
- "no new implementation of %c[%s %s] was found",
- class_getName(cls),
- sel_getName(sel),
- class_isMetaClass(cls) ? '+' : '-',
- class_getName(cls),
- sel_getName(sel));
- return NULL;
+ _objc_inform("RESOLVE: +[%s resolveInstanceMethod:%s] returned YES"
+ ", but no new implementation of %c[%s %s] was found",
+ cls->getName(), sel_getName(sel),
+ cls->isMetaClass() ? '+' : '-',
+ cls->getName(), sel_getName(sel));
}
}
-
- return meth;
}
/***********************************************************************
* _class_resolveMethod
-* Call +resolveClassMethod or +resolveInstanceMethod and return
-* the method added or NULL.
-* Assumes the method doesn't exist already.
-**********************************************************************/
-Method _class_resolveMethod(Class cls, SEL sel)
-{
- Method meth = NULL;
-
- if (_class_isMetaClass(cls)) {
- meth = _class_resolveClassMethod(cls, sel);
- }
- if (!meth) {
- meth = _class_resolveInstanceMethod(cls, sel);
- }
-
- if (PrintResolving && meth) {
- _objc_inform("RESOLVE: method %c[%s %s] dynamically resolved to %p",
- class_isMetaClass(cls) ? '+' : '-',
- class_getName(cls), sel_getName(sel),
- method_getImplementation(meth));
- }
-
- return meth;
-}
-
-
-/***********************************************************************
-* look_up_method
-* Look up a method in the given class and its superclasses.
-* If withCache==YES, look in the class's method cache too.
-* If withResolver==YES, call +resolveClass/InstanceMethod too.
-* Returns NULL if the method is not found.
-* +forward:: entries are not returned.
+* Call +resolveClassMethod or +resolveInstanceMethod.
+* Returns nothing; any result would be potentially out-of-date already.
+* Does not check if the method already exists.
**********************************************************************/
-static Method look_up_method(Class cls, SEL sel,
- BOOL withCache, BOOL withResolver)
+void _class_resolveMethod(Class cls, SEL sel, id inst)
{
- Method meth = NULL;
-
- if (withCache) {
- meth = _cache_getMethod(cls, sel, _objc_msgForward_internal);
- if (meth == (Method)1) {
- // Cache contains forward:: . Stop searching.
- return NULL;
+ if (! cls->isMetaClass()) {
+ // try [cls resolveInstanceMethod:sel]
+ _class_resolveInstanceMethod(cls, sel, inst);
+ }
+ else {
+ // try [nonMetaClass resolveClassMethod:sel]
+ // and [cls resolveInstanceMethod:sel]
+ _class_resolveClassMethod(cls, sel, inst);
+ if (!lookUpImpOrNil(cls, sel, inst,
+ NO/*initialize*/, YES/*cache*/, NO/*resolver*/))
+ {
+ _class_resolveInstanceMethod(cls, sel, inst);
}
}
-
- if (!meth) meth = _class_getMethod(cls, sel);
-
- if (!meth && withResolver) meth = _class_resolveMethod(cls, sel);
-
- return meth;
}
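
// Illustrative sketch (not part of the patch): the resolver that
// _class_resolveInstanceMethod() above ends up invoking. Class, selector, and
// function names are placeholders.
#import <Foundation/Foundation.h>
#include <objc/runtime.h>

static id DemoDynamicIMP(id self, SEL _cmd)
{
    return @"resolved at runtime";
}

@interface DemoResolver : NSObject
@end

@implementation DemoResolver
+ (BOOL)resolveInstanceMethod:(SEL)sel
{
    if (sel == sel_registerName("demoMessage")) {
        class_addMethod(self, sel, (IMP)DemoDynamicIMP, "@@:");
        return YES;     // logged when OBJC_PRINT_RESOLVED_METHODS is set
    }
    return [super resolveInstanceMethod:sel];
}
@end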
-/***********************************************************************
-* class_getInstanceMethod. Return the instance method for the
-* specified class and selector.
-**********************************************************************/
-Method class_getInstanceMethod(Class cls, SEL sel)
-{
- if (!cls || !sel) return NULL;
-
- return look_up_method(cls, sel, YES/*cache*/, YES/*resolver*/);
-}
-
/***********************************************************************
* class_getClassMethod. Return the class method for the specified
* class and selector.
**********************************************************************/
Method class_getClassMethod(Class cls, SEL sel)
{
- if (!cls || !sel) return NULL;
+ if (!cls || !sel) return nil;
- return class_getInstanceMethod(_class_getMeta(cls), sel);
+ return class_getInstanceMethod(cls->getMeta(), sel);
}
**********************************************************************/
Ivar class_getInstanceVariable(Class cls, const char *name)
{
- if (!cls || !name) return NULL;
+ if (!cls || !name) return nil;
- return _class_getVariable(cls, name, NULL);
+ return _class_getVariable(cls, name, nil);
}
**********************************************************************/
Ivar class_getClassVariable(Class cls, const char *name)
{
- if (!cls) return NULL;
+ if (!cls) return nil;
- return class_getInstanceVariable(((id)cls)->isa, name);
+ return class_getInstanceVariable(cls->ISA(), name);
}
);
-/***********************************************************************
-* _objc_flush_caches. Flush the caches of the specified class and any
-* of its subclasses. If cls is a meta-class, only meta-class (i.e.
-* class method) caches are flushed. If cls is an instance-class, both
-* instance-class and meta-class caches are flushed.
-**********************************************************************/
-void _objc_flush_caches(Class cls)
-{
- flush_caches (cls, YES);
-
- if (!cls) {
- // collectALot if cls==nil
- mutex_lock(&cacheUpdateLock);
- _cache_collect(true);
- mutex_unlock(&cacheUpdateLock);
- }
-}
-
-
/***********************************************************************
* class_respondsToSelector.
**********************************************************************/
// Avoids +initialize because it historically did so.
// We're not returning a callable IMP anyway.
- imp = lookUpMethod(cls, sel, NO/*initialize*/, YES/*cache*/, nil);
- return (imp != (IMP)_objc_msgForward_internal) ? YES : NO;
+ imp = lookUpImpOrNil(cls, sel, nil,
+ NO/*initialize*/, YES/*cache*/, YES/*resolver*/);
+ return imp ? YES : NO;
}
// No one responds to zero!
if (!sel) {
- __objc_error((id)cls, "invalid selector (null)");
+ __objc_error(cls, "invalid selector (null)");
}
return class_getMethodImplementation(cls, sel);
{
IMP imp;
- if (!cls || !sel) return NULL;
+ if (!cls || !sel) return nil;
- imp = lookUpMethod(cls, sel, YES/*initialize*/, YES/*cache*/, nil);
+ imp = lookUpImpOrNil(cls, sel, nil,
+ YES/*initialize*/, YES/*cache*/, YES/*resolver*/);
// Translate forwarding function to C-callable external version
- if (imp == _objc_msgForward_internal) {
+ if (!imp) {
return _objc_msgForward;
}
/***********************************************************************
-* instrumentObjcMessageSends/logObjcMessageSends.
+* instrumentObjcMessageSends
**********************************************************************/
-#if !defined(MESSAGE_LOGGING) && defined(__arm__)
-void instrumentObjcMessageSends (BOOL flag)
+#if !SUPPORT_MESSAGE_LOGGING
+
+void instrumentObjcMessageSends(BOOL flag)
{
}
-#elif defined(MESSAGE_LOGGING)
-static int LogObjCMessageSend (BOOL isClassMethod,
- const char * objectsClass,
- const char * implementingClass,
- SEL selector)
+
+#else
+
+bool objcMsgLogEnabled = false;
+static int objcMsgLogFD = -1;
+
+bool logMessageSend(bool isClassMethod,
+ const char *objectsClass,
+ const char *implementingClass,
+ SEL selector)
{
char buf[ 1024 ];
objcMsgLogFD = secure_open (buf, O_WRONLY | O_CREAT, geteuid());
if (objcMsgLogFD < 0) {
// no log file - disable logging
- objcMsgLogEnabled = 0;
+ objcMsgLogEnabled = false;
objcMsgLogFD = -1;
- return 1;
+ return true;
}
}
implementingClass,
sel_getName(selector));
- static OSSpinLock lock = OS_SPINLOCK_INIT;
- OSSpinLockLock(&lock);
+ static spinlock_t lock = SPINLOCK_INITIALIZER;
+ spinlock_lock(&lock);
write (objcMsgLogFD, buf, strlen(buf));
- OSSpinLockUnlock(&lock);
+ spinlock_unlock(&lock);
// Tell caller to not cache the method
- return 0;
+ return false;
}
-void instrumentObjcMessageSends (BOOL flag)
+void instrumentObjcMessageSends(BOOL flag)
{
- int enabledValue = (flag) ? 1 : 0;
+ bool enable = flag;
// Shortcut NOP
- if (objcMsgLogEnabled == enabledValue)
+ if (objcMsgLogEnabled == enable)
return;
// If enabling, flush all method caches so we get some traces
- if (flag)
- flush_caches (Nil, YES);
+ if (enable)
+ _objc_flush_caches(Nil);
// Sync our log file
- if (objcMsgLogFD != (-1))
+ if (objcMsgLogFD != -1)
fsync (objcMsgLogFD);
- objcMsgLogEnabled = enabledValue;
+ objcMsgLogEnabled = enable;
}
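
// Illustrative sketch (not part of the patch): toggling the message logger
// above around a region of interest. instrumentObjcMessageSends() is SPI; the
// log file path is built by logMessageSend()/secure_open() above.
extern void instrumentObjcMessageSends(BOOL flag);

static void DemoMessageTrace(void)
{
    instrumentObjcMessageSends(YES);    // flushes caches so sends reach the logger
    // ... exercise the code whose message traffic should be recorded ...
    instrumentObjcMessageSends(NO);     // fsync()s the log file and stops logging
}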
-void logObjcMessageSends (ObjCLogProc logProc)
-{
- if (logProc)
- {
- objcMsgLogProc = logProc;
- objcMsgLogEnabled = 1;
- }
- else
- {
- objcMsgLogProc = logProc;
- objcMsgLogEnabled = 0;
- }
-
- if (objcMsgLogFD != (-1))
- fsync (objcMsgLogFD);
-}
-#endif
-
-/***********************************************************************
-* log_and_fill_cache
-* Log this method call. If the logger permits it, fill the method cache.
-* cls is the method whose cache should be filled.
-* implementer is the class that owns the implementation in question.
-**********************************************************************/
-void
-log_and_fill_cache(Class cls, Class implementer, Method meth, SEL sel)
-{
-#if defined(MESSAGE_LOGGING)
- BOOL cacheIt = YES;
-
- if (objcMsgLogEnabled) {
- cacheIt = objcMsgLogProc (_class_isMetaClass(implementer) ? YES : NO,
- _class_getName(cls),
- _class_getName(implementer),
- sel);
- }
- if (cacheIt)
+// SUPPORT_MESSAGE_LOGGING
#endif
- _cache_fill (cls, meth, sel);
-}
-
-
-/***********************************************************************
-* _class_lookupMethodAndLoadCache.
-* Method lookup for dispatchers ONLY. OTHER CODE SHOULD USE lookUpMethod().
-* This lookup avoids optimistic cache scan because the dispatcher
-* already tried that.
-**********************************************************************/
-IMP _class_lookupMethodAndLoadCache3(id obj, SEL sel, Class cls)
-{
- return lookUpMethod(cls, sel, YES/*initialize*/, NO/*cache*/, obj);
-}
-
-
-/***********************************************************************
-* lookUpMethod.
-* The standard method lookup.
-* initialize==NO tries to avoid +initialize (but sometimes fails)
-* cache==NO skips optimistic unlocked lookup (but uses cache elsewhere)
-* Most callers should use initialize==YES and cache==YES.
-* inst is an instance of cls or a subclass thereof, or nil if none is known.
-* If cls is an un-initialized metaclass then a non-nil inst is faster.
-* May return _objc_msgForward_internal. IMPs destined for external use
-* must be converted to _objc_msgForward or _objc_msgForward_stret.
-**********************************************************************/
-IMP lookUpMethod(Class cls, SEL sel, BOOL initialize, BOOL cache, id inst)
-{
- Class curClass;
- IMP methodPC = NULL;
- Method meth;
- BOOL triedResolver = NO;
-
- // Optimistic cache lookup
- if (cache) {
- methodPC = _cache_getImp(cls, sel);
- if (methodPC) return methodPC;
- }
-
- // realize, +initialize, and any special early exit
- methodPC = prepareForMethodLookup(cls, sel, initialize, inst);
- if (methodPC) return methodPC;
-
-
- // The lock is held to make method-lookup + cache-fill atomic
- // with respect to method addition. Otherwise, a category could
- // be added but ignored indefinitely because the cache was re-filled
- // with the old value after the cache flush on behalf of the category.
- retry:
- lockForMethodLookup();
-
- // Ignore GC selectors
- if (ignoreSelector(sel)) {
- methodPC = _cache_addIgnoredEntry(cls, sel);
- goto done;
- }
-
- // Try this class's cache.
-
- methodPC = _cache_getImp(cls, sel);
- if (methodPC) goto done;
-
- // Try this class's method lists.
-
- meth = _class_getMethodNoSuper_nolock(cls, sel);
- if (meth) {
- log_and_fill_cache(cls, cls, meth, sel);
- methodPC = method_getImplementation(meth);
- goto done;
- }
-
- // Try superclass caches and method lists.
-
- curClass = cls;
- while ((curClass = _class_getSuperclass(curClass))) {
- // Superclass cache.
- meth = _cache_getMethod(curClass, sel, _objc_msgForward_internal);
- if (meth) {
- if (meth != (Method)1) {
- // Found the method in a superclass. Cache it in this class.
- log_and_fill_cache(cls, curClass, meth, sel);
- methodPC = method_getImplementation(meth);
- goto done;
- }
- else {
- // Found a forward:: entry in a superclass.
- // Stop searching, but don't cache yet; call method
- // resolver for this class first.
- break;
- }
- }
-
- // Superclass method list.
- meth = _class_getMethodNoSuper_nolock(curClass, sel);
- if (meth) {
- log_and_fill_cache(cls, curClass, meth, sel);
- methodPC = method_getImplementation(meth);
- goto done;
- }
- }
-
- // No implementation found. Try method resolver once.
-
- if (!triedResolver) {
- unlockForMethodLookup();
- _class_resolveMethod(cls, sel);
- // Don't cache the result; we don't hold the lock so it may have
- // changed already. Re-do the search from scratch instead.
- triedResolver = YES;
- goto retry;
- }
-
- // No implementation found, and method resolver didn't help.
- // Use forwarding.
-
- _cache_addForwardEntry(cls, sel);
- methodPC = _objc_msgForward_internal;
-
- done:
- unlockForMethodLookup();
-
- // paranoia: look for ignored selectors with non-ignored implementations
- assert(!(ignoreSelector(sel) && methodPC != (IMP)&_objc_ignored_method));
-
- return methodPC;
-}
-
-
-/***********************************************************************
-* lookupMethodInClassAndLoadCache.
-* Like _class_lookupMethodAndLoadCache, but does not search superclasses.
-* Caches and returns objc_msgForward if the method is not found in the class.
-**********************************************************************/
-static IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel)
-{
- Method meth;
- IMP imp;
-
- // fixme this still has the method list vs method cache race
- // because it doesn't hold a lock across lookup+cache_fill,
- // but it's only used for .cxx_construct/destruct and we assume
- // categories don't change them.
-
- // Search cache first.
- imp = _cache_getImp(cls, sel);
- if (imp) return imp;
-
- // Cache miss. Search method list.
-
- meth = _class_getMethodNoSuper(cls, sel);
-
- if (meth) {
- // Hit in method list. Cache it.
- _cache_fill(cls, meth, sel);
- return method_getImplementation(meth);
- } else {
- // Miss in method list. Cache objc_msgForward.
- _cache_addForwardEntry(cls, sel);
- return _objc_msgForward_internal;
- }
-}
/***********************************************************************
{
size_t len;
char *dup;
- if (!str) return NULL;
+ if (!str) return nil;
len = strlen(str);
dup = (char *)malloc_zone_malloc(_objc_internal_zone(), len + 1);
memcpy(dup, str, len + 1);
const char *class_getName(Class cls)
{
- return _class_getName(cls);
+ if (!cls) return "nil";
+ else return cls->getName();
}
Class class_getSuperclass(Class cls)
{
- return _class_getSuperclass(cls);
+ if (!cls) return nil;
+ return cls->superclass;
}
BOOL class_isMetaClass(Class cls)
{
- return _class_isMetaClass(cls);
+ if (!cls) return NO;
+ return cls->isMetaClass();
}
size_t class_getInstanceSize(Class cls)
{
- return _class_getInstanceSize(cls);
+ if (!cls) return 0;
+ return cls->alignedInstanceSize();
}
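
// Illustrative sketch (not part of the patch): the nil-safe introspection
// wrappers above.
static void DemoIntrospection(void)
{
    Class cls = objc_getClass("NSObject");
    const char *name = class_getName(cls);          // "NSObject"
    Class sup        = class_getSuperclass(cls);    // nil: NSObject is a root class
    BOOL meta        = class_isMetaClass(cls);      // NO
    size_t size      = class_getInstanceSize(cls);  // word-aligned size of the isa
    // Passing nil yields "nil", nil, NO, and 0 respectively.
    (void)name; (void)sup; (void)meta; (void)size;
}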
* well-aligned zero-filled memory.
* The new object's isa is set. Any C++ constructors are called.
* Returns `bytes` if successful. Returns nil if `cls` or `bytes` is
-* NULL, or if C++ constructors fail.
+* nil, or if C++ constructors fail.
* Note: class_createInstance() and class_createInstances() preflight this.
**********************************************************************/
static id
id obj = (id)bytes;
// Set the isa pointer
- obj->isa = cls; // need not be object_setClass
+ obj->initIsa(cls);
// Call C++ constructors, if any.
if (!object_cxxConstruct(obj)) {
unsigned num_allocated;
if (!cls) return 0;
- size_t size = _class_getInstanceSize(cls) + extraBytes;
+ size_t size = cls->alignedInstanceSize() + extraBytes;
// CF requires all objects be at least 16 bytes.
if (size < 16) size = 16;
unsigned shift = 0;
unsigned i;
- BOOL ctor = _class_hasCxxStructors(cls);
+ bool ctor = cls->hasCxxCtor();
for (i = 0; i < num_allocated; i++) {
id obj = results[i];
if (ctor) obj = _objc_constructOrFree(cls, obj);
- else if (obj) obj->isa = cls; // need not be object_setClass
+ else if (obj) obj->initIsa(cls);
if (obj) {
results[i-shift] = obj;
#endif
}
-#if SUPPORT_TAGGED_POINTERS
-/***********************************************************************
- * _objc_insert_tagged_isa
- * Insert an isa into a particular slot in the tagged isa table.
- * Will error & abort if slot already has an isa that is different.
- **********************************************************************/
-void _objc_insert_tagged_isa(unsigned char slotNumber, Class isa) {
- unsigned char actualSlotNumber = (slotNumber << 1) + 1;
- Class previousIsa = _objc_tagged_isa_table[actualSlotNumber];
-
- if (actualSlotNumber & 0xF0) {
- _objc_fatal("%s -- Slot number %uc is too large. Aborting.", __FUNCTION__, slotNumber);
- }
-
- if (actualSlotNumber == 0) {
- _objc_fatal("%s -- Slot number 0 doesn't make sense. Aborting.", __FUNCTION__);
- }
-
- if (isa && previousIsa && (previousIsa != isa)) {
- _objc_fatal("%s -- Tagged pointer table already had an item in that slot (%s). "
- "Not putting (%s) in table. Aborting instead",
- __FUNCTION__, class_getName(previousIsa), class_getName(isa));
- }
- _objc_tagged_isa_table[actualSlotNumber] = isa;
-}
-#endif
-
const char *
copyPropertyAttributeString(const objc_property_attribute_t *attrs,
{
if (!attrs) {
if (outCount) *outCount = 0;
- return NULL;
+ return nil;
}
// Result size:
if (attrcount == 0) {
free(result);
- result = NULL;
+ result = nil;
}
if (outCount) *outCount = attrcount;
char *copyPropertyAttributeValue(const char *attrs, const char *name)
{
- char *result = NULL;
+ char *result = nil;
iteratePropertyAttributes(attrs, findOneAttribute, (void*)name, &result);
# define SUPPORT_GC 1
#endif
-// Define SUPPORT_ENVIRON=1 to enable getenv().
-#if ((TARGET_OS_EMBEDDED || TARGET_OS_IPHONE) && !TARGET_IPHONE_SIMULATOR) && defined(NDEBUG)
-# define SUPPORT_ENVIRON 0
-#else
-# define SUPPORT_ENVIRON 1
-#endif
-
// Define SUPPORT_ZONES=1 to enable malloc zone support in NXHashTable.
#if TARGET_OS_EMBEDDED || TARGET_OS_IPHONE
# define SUPPORT_ZONES 0
# define SUPPORT_PREOPT 1
#endif
-// Define SUPPORT_DEBUGGER_MODE=1 to enable lock-avoiding execution for debuggers
-#if TARGET_OS_WIN32
-# define SUPPORT_DEBUGGER_MODE 0
-#else
-# define SUPPORT_DEBUGGER_MODE 1
-#endif
-
// Define SUPPORT_TAGGED_POINTERS=1 to enable tagged pointer objects
-// Be sure to edit objc-internal.h as well (_objc_insert_tagged_isa)
+// Be sure to edit tagged pointer SPI in objc-internal.h as well.
#if !(__OBJC2__ && __LP64__)
# define SUPPORT_TAGGED_POINTERS 0
#else
# define SUPPORT_TAGGED_POINTERS 1
#endif
-// Define SUPPORT_FIXUP=1 to use call-site fixup messaging for OBJC2.
+// Define SUPPORT_FIXUP=1 to repair calls sites for fixup dispatch.
+// Fixup messaging itself is no longer supported.
// Be sure to edit objc-abi.h as well (objc_msgSend*_fixup)
#if !__OBJC2__ || !defined(__x86_64__)
# define SUPPORT_FIXUP 0
# define SUPPORT_FIXUP 1
#endif
-// Define SUPPORT_VTABLE=1 to enable vtable dispatch for OBJC2.
-// Be sure to edit objc-gdb.h as well (gdb_objc_trampolines)
-#if !SUPPORT_FIXUP
-# define SUPPORT_VTABLE 0
-#else
-# define SUPPORT_VTABLE 1
-#endif
-
// Define SUPPORT_IGNORED_SELECTOR_CONSTANT to remap GC-ignored selectors.
// Good: fast ignore in objc_msgSend. Bad: disable shared cache optimizations
-// Non-GC does not remap. Fixup dispatch does not remap.
-#if !SUPPORT_GC || SUPPORT_FIXUP
+// Now used only for old-ABI GC.
+// This is required for binary compatibility on 32-bit Mac: rdar://13757938
+#if __OBJC2__ || !SUPPORT_GC
# define SUPPORT_IGNORED_SELECTOR_CONSTANT 0
#else
# define SUPPORT_IGNORED_SELECTOR_CONSTANT 1
# define SUPPORT_RETURN_AUTORELEASE 1
#endif
+// Define SUPPORT_MESSAGE_LOGGING to enable NSObjCMessageLoggingEnabled
+#if TARGET_OS_WIN32 || TARGET_OS_EMBEDDED
+# define SUPPORT_MESSAGE_LOGGING 0
+#else
+# define SUPPORT_MESSAGE_LOGGING 1
+#endif
// OBJC_INSTRUMENTED controls whether message dispatching is dynamically
// monitored. Monitoring introduces substantial overhead.
--- /dev/null
+// -*- truncate-lines: t; -*-
+
+// OPTION(var, env, help)
+
+OPTION( PrintImages, OBJC_PRINT_IMAGES, "log image and library names as they are loaded")
+OPTION( PrintLoading, OBJC_PRINT_LOAD_METHODS, "log calls to class and category +load methods")
+OPTION( PrintInitializing, OBJC_PRINT_INITIALIZE_METHODS, "log calls to class +initialize methods")
+OPTION( PrintResolving, OBJC_PRINT_RESOLVED_METHODS, "log methods created by +resolveClassMethod: and +resolveInstanceMethod:")
+OPTION( PrintConnecting, OBJC_PRINT_CLASS_SETUP, "log progress of class and category setup")
+OPTION( PrintProtocols, OBJC_PRINT_PROTOCOL_SETUP, "log progress of protocol setup")
+OPTION( PrintIvars, OBJC_PRINT_IVAR_SETUP, "log processing of non-fragile ivars")
+OPTION( PrintVtables, OBJC_PRINT_VTABLE_SETUP, "log processing of class vtables")
+OPTION( PrintVtableImages, OBJC_PRINT_VTABLE_IMAGES, "print vtable images showing overridden methods")
+OPTION( PrintCaches, OBJC_PRINT_CACHE_SETUP, "log processing of method caches")
+OPTION( PrintFuture, OBJC_PRINT_FUTURE_CLASSES, "log use of future classes for toll-free bridging")
+OPTION( PrintGC, OBJC_PRINT_GC, "log some GC operations")
+OPTION( PrintPreopt, OBJC_PRINT_PREOPTIMIZATION, "log preoptimization courtesy of dyld shared cache")
+OPTION( PrintCxxCtors, OBJC_PRINT_CXX_CTORS, "log calls to C++ ctors and dtors for instance variables")
+OPTION( PrintExceptions, OBJC_PRINT_EXCEPTIONS, "log exception handling")
+OPTION( PrintExceptionThrow, OBJC_PRINT_EXCEPTION_THROW, "log backtrace of every objc_exception_throw()")
+OPTION( PrintAltHandlers, OBJC_PRINT_ALT_HANDLERS, "log processing of exception alt handlers")
+OPTION( PrintReplacedMethods, OBJC_PRINT_REPLACED_METHODS, "log methods replaced by category implementations")
+OPTION( PrintDeprecation, OBJC_PRINT_DEPRECATION_WARNINGS, "warn about calls to deprecated runtime functions")
+OPTION( PrintPoolHiwat, OBJC_PRINT_POOL_HIGHWATER, "log high-water marks for autorelease pools")
+OPTION( PrintCustomRR, OBJC_PRINT_CUSTOM_RR, "log classes with un-optimized custom retain/release methods")
+OPTION( PrintCustomAWZ, OBJC_PRINT_CUSTOM_AWZ, "log classes with un-optimized custom allocWithZone methods")
+
+OPTION( DebugUnload, OBJC_DEBUG_UNLOAD, "warn about poorly-behaving bundles when unloaded")
+OPTION( DebugFragileSuperclasses, OBJC_DEBUG_FRAGILE_SUPERCLASSES, "warn about subclasses that may have been broken by subsequent changes to superclasses")
+OPTION( DebugFinalizers, OBJC_DEBUG_FINALIZERS, "warn about classes that implement -dealloc but not -finalize")
+OPTION( DebugNilSync, OBJC_DEBUG_NIL_SYNC, "warn about @synchronized(nil), which does no synchronization")
+OPTION( DebugNonFragileIvars, OBJC_DEBUG_NONFRAGILE_IVARS, "capriciously rearrange non-fragile ivars")
+OPTION( DebugAltHandlers, OBJC_DEBUG_ALT_HANDLERS, "record more info about bad alt handler use")
+OPTION( DebugMissingPools, OBJC_DEBUG_MISSING_POOLS, "warn about autorelease with no pool in place, which may be a leak")
+
+OPTION( UseInternalZone, OBJC_USE_INTERNAL_ZONE, "allocate runtime data in a dedicated malloc zone")
+
+OPTION( DisableGC, OBJC_DISABLE_GC, "force GC OFF, even if the executable wants it on")
+OPTION( DisableVtables, OBJC_DISABLE_VTABLES, "disable vtable dispatch")
+OPTION( DisablePreopt, OBJC_DISABLE_PREOPTIMIZATION, "disable preoptimization courtesy of dyld shared cache")
+OPTION( DisableTaggedPointers, OBJC_DISABLE_TAGGED_POINTERS, "disable tagged pointer optimization of NSNumber et al.")
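
// Illustrative sketch (not part of the patch): one conventional way an
// OPTION(var, env, help) list like the one above can be consumed as an
// X-macro. The struct, the array, and the "objc-env.h" file name are
// assumptions made for this sketch, not the runtime's actual consumer.
struct option_t {
    const char *var;    // e.g. "PrintImages"
    const char *env;    // e.g. "OBJC_PRINT_IMAGES"
    const char *help;
    bool        set;    // filled in from getenv(env) at startup
};

#define OPTION(var, env, help)  { #var, #env, help, false },
static struct option_t Settings[] = {
#include "objc-env.h"   // assumed name of the option list above
};
#undef OPTION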
#else
#include <vproc_priv.h>
+#include <_simple.h>
OBJC_EXPORT void (*_error)(id, const char *, va_list);
// Print "message" to the console.
static void _objc_syslog(const char *message)
{
- syslog(LOG_ERR, "%s", message);
+ _simple_asl_log(ASL_LEVEL_ERR, nil, message);
if (also_do_stderr()) {
write(STDERR_FILENO, message, strlen(message));
typedef void (*objc_uncaught_exception_handler)(id exception);
typedef void (*objc_exception_handler)(id unused, void *context);
+/**
+ * Throw a runtime exception. This function is inserted by the compiler
+ * where \c @throw would otherwise be.
+ *
+ * @param exception The exception to be thrown.
+ */
OBJC_EXPORT void objc_exception_throw(id exception)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
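
// Illustrative sketch (not part of the patch): the source-level construct the
// compiler lowers to the objc_exception_throw() declared above.
#import <Foundation/Foundation.h>

static void DemoThrowCatch(void)
{
    @try {
        // @throw becomes a call to objc_exception_throw(exception).
        @throw [NSException exceptionWithName:NSGenericException
                                       reason:@"demo"
                                     userInfo:nil];
    } @catch (NSException *e) {
        NSLog(@"caught %@", e);
    }
}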
OBJC_EXPORT void objc_exception_rethrow(void)
if (PrintExceptionThrow) {
_objc_inform("EXCEPTIONS: throwing %p (%s)",
- exception, object_getClassName(exception));
+ (void*)exception, object_getClassName(exception));
void* callstack[500];
int frameCount = backtrace(callstack, 500);
backtrace_symbols_fd(callstack, frameCount, fileno(stderr));
objc_thread_t self = thread_self();
ThreadChainLink_t *walker = &ThreadChainLink;
while (walker->perThreadID != self) {
- if (walker->next != NULL) {
+ if (walker->next != nil) {
walker = walker->next;
continue;
}
// XXX Also, we don't register to deallocate on thread death
walker->next = (ThreadChainLink_t *)malloc(sizeof(ThreadChainLink_t));
walker = walker->next;
- walker->next = NULL;
- walker->topHandler = NULL;
+ walker->next = nil;
+ walker->topHandler = nil;
walker->perThreadID = self;
}
return walker;
if (PrintExceptions) _objc_inform("EXCEPTIONS: objc_exception_throw with nil value\n");
return;
}
- if (chainLink == NULL) {
+ if (chainLink == nil) {
if (PrintExceptions) _objc_inform("EXCEPTIONS: No handler in place!\n");
return;
}
static int default_match(Class exceptionClass, id exception) {
//return [exception isKindOfClass:exceptionClass];
Class cls;
- for (cls = _object_getClass(exception); nil != cls; cls = _class_getSuperclass(cls))
+ for (cls = exception->getIsa(); nil != cls; cls = cls->superclass)
if (cls == exceptionClass) return 1;
return 0;
}
static void _objc_exception_noop(void) { }
static bool _objc_exception_false(void) { return 0; }
// static bool _objc_exception_true(void) { return 1; }
+static void _objc_exception_abort1(void) {
+ _objc_fatal("unexpected call into objc exception typeinfo vtable %d", 1);
+}
+static void _objc_exception_abort2(void) {
+ _objc_fatal("unexpected call into objc exception typeinfo vtable %d", 2);
+}
+static void _objc_exception_abort3(void) {
+ _objc_fatal("unexpected call into objc exception typeinfo vtable %d", 3);
+}
+static void _objc_exception_abort4(void) {
+ _objc_fatal("unexpected call into objc exception typeinfo vtable %d", 4);
+}
+
static bool _objc_exception_do_catch(struct objc_typeinfo *catch_tinfo,
struct objc_typeinfo *throw_tinfo,
void **throw_obj_p,
OBJC_EXPORT
const void *objc_ehtype_vtable[] = {
- NULL, // typeinfo's vtable? - fixme
+ nil, // typeinfo's vtable? - fixme
(void*)&OBJC_EHTYPE_id, // typeinfo's typeinfo - hack
(void*)_objc_exception_noop, // in-place destructor?
(void*)_objc_exception_noop, // destructor?
(void*)_objc_exception_false, // OLD __is_pointer_p
(void*)_objc_exception_false, // OLD __is_function_p
- (void*)_objc_exception_do_catch, // OLD __do_catch, NEW can_catch
- (void*)_objc_exception_false, // OLD __do_upcast
+ (void*)_objc_exception_do_catch, // OLD __do_catch, NEW can_catch
+ (void*)_objc_exception_false, // OLD __do_upcast, NEW search_above_dst
+ (void*)_objc_exception_false, // NEW search_below_dst
+ (void*)_objc_exception_abort1, // paranoia: blow up if libc++abi
+ (void*)_objc_exception_abort2, // adds something new
+ (void*)_objc_exception_abort3,
+ (void*)_objc_exception_abort4,
};
OBJC_EXPORT
struct objc_typeinfo OBJC_EHTYPE_id = {
objc_ehtype_vtable+2,
"id",
- NULL
+ nil
};
static int _objc_default_exception_matcher(Class catch_cls, id exception)
{
Class cls;
- for (cls = _object_getClass(exception);
- cls != NULL;
- cls = class_getSuperclass(cls))
+ for (cls = exception->getIsa();
+ cls != nil;
+ cls = cls->superclass)
{
if (cls == catch_cls) return 1;
}
* Compiler ABI
**********************************************************************/
-static void _objc_exception_destructor(void *exc_gen) {
-#if SUPPORT_GC
+static void _objc_exception_destructor(void *exc_gen)
+{
+ // Release the retain from objc_exception_throw().
+
struct objc_exception *exc = (struct objc_exception *)exc_gen;
- if (UseGC && auto_zone_is_valid_pointer(gc_zone, exc->obj)) {
- // retained by objc_exception_throw
- auto_zone_release(gc_zone, exc->obj);
+ id obj = exc->obj;
+
+ if (PrintExceptions) {
+ _objc_inform("EXCEPTIONS: releasing completed exception %p (object %p, a %s)",
+ exc, obj, object_getClassName(obj));
+ }
+
+#if SUPPORT_GC
+ if (UseGC) {
+ if (auto_zone_is_valid_pointer(gc_zone, obj)) {
+ auto_zone_release(gc_zone, exc->obj);
+ }
}
+ else
#endif
+ {
+ [obj release];
+ }
}
struct objc_exception *exc = (struct objc_exception *)
__cxa_allocate_exception(sizeof(struct objc_exception));
- exc->obj = (*exception_preprocessor)(obj);
+ obj = (*exception_preprocessor)(obj);
+
+ // Retain the exception object during unwinding.
+ // GC: because `exc` is unscanned memory
+ // Non-GC: because otherwise an autorelease pool pop can cause a crash
#if SUPPORT_GC
- if (UseGC && auto_zone_is_valid_pointer(gc_zone, obj)) {
- // exc is non-scanned memory. Retain the object for the duration.
- auto_zone_retain(gc_zone, obj);
+ if (UseGC) {
+ if (auto_zone_is_valid_pointer(gc_zone, obj)) {
+ auto_zone_retain(gc_zone, obj);
+ }
}
+ else
#endif
+ {
+ [obj retain];
+ }
+ exc->obj = obj;
exc->tinfo.vtable = objc_ehtype_vtable+2;
exc->tinfo.name = object_getClassName(obj);
- exc->tinfo.cls_unremapped = obj ? _object_getClass(obj) : Nil;
+ exc->tinfo.cls_unremapped = obj ? obj->getIsa() : Nil;
if (PrintExceptions) {
_objc_inform("EXCEPTIONS: throwing %p (object %p, a %s)",
- exc, obj, object_getClassName(obj));
+ exc, (void*)obj, object_getClassName(obj));
}
if (PrintExceptionThrow) {
if (!PrintExceptions)
_objc_inform("EXCEPTIONS: throwing %p (object %p, a %s)",
- exc, obj, object_getClassName(obj));
+ exc, (void*)obj, object_getClassName(obj));
void* callstack[500];
int frameCount = backtrace(callstack, 500);
backtrace_symbols_fd(callstack, frameCount, fileno(stderr));
}
else if ((*exception_matcher)(handler_cls, exception)) {
if (PrintExceptions) _objc_inform("EXCEPTIONS: catch(%s)",
- class_getName(handler_cls));
+ handler_cls->getName());
return true;
}
if (PrintExceptions) _objc_inform("EXCEPTIONS: skipping catch(%s)",
- class_getName(handler_cls));
+ handler_cls->getName());
return false;
}
* 3. If so, call our registered callback with the object.
* 4. Finally, call the previous terminate handler.
**********************************************************************/
-static void (*old_terminate)(void) = NULL;
+static void (*old_terminate)(void) = nil;
static void _objc_terminate(void)
{
if (PrintExceptions) {
__cxa_rethrow();
} @catch (id e) {
// It's an objc object. Call Foundation's handler, if any.
- (*uncaught_handler)(e);
+ (*uncaught_handler)((id)e);
(*old_terminate)();
} @catch (...) {
// It's not an objc object. Continue to C++ terminate.
uintptr_t ip_start;
uintptr_t ip_end;
uintptr_t cfa;
- // precise ranges within ip_start..ip_end; NULL or {0,0} terminated
+ // precise ranges within ip_start..ip_end; nil or {0,0} terminated
frame_ips *ips;
};
if (range_count == 1) {
// No other source ranges with the same landing pad. We're done here.
- frame->ips = NULL;
+ frame->ips = nil;
}
else {
// Record all ranges with the same landing pad as our match.
fetch_handler_list(BOOL create)
{
_objc_pthread_data *data = _objc_fetch_pthread_data(create);
- if (!data) return NULL;
+ if (!data) return nil;
struct alt_handler_list *list = data->handlerList;
if (!list) {
- if (!create) return NULL;
+ if (!create) return nil;
list = (struct alt_handler_list *)_calloc_internal(1, sizeof(*list));
data->handlerList = list;
* @APPLE_LICENSE_HEADER_END@
*/
+#include "objc-private.h"
+
#include <malloc/malloc.h>
#include <assert.h>
#include "runtime.h"
#include "objc-os.h"
-#include "objc-private.h"
#include "message.h"
#if SUPPORT_GC
#include "auto_zone.h"
// create a GC external reference
-OBJC_EXTERN
objc_xref_t _object_addExternalReference_gc(id obj, objc_xref_type_t ref_type) {
_initialize_gc();
__block size_t index;
return xref;
}
-OBJC_EXTERN
+
id _object_readExternalReference_gc(objc_xref_t ref) {
_initialize_gc();
__block id result;
return result;
}
-OBJC_EXTERN
+
void _object_removeExternalReference_gc(objc_xref_t ref) {
_initialize_gc();
objc_xref_type_t ref_type = decode_type(ref);
}
}
+
// SUPPORT_GC
#endif
-OBJC_EXTERN
-objc_xref_t _object_addExternalReference_rr(id obj, objc_xref_type_t ref_type) {
+
+objc_xref_t _object_addExternalReference_non_gc(id obj, objc_xref_type_t ref_type) {
switch (ref_type) {
case OBJC_XREF_STRONG:
((id(*)(id, SEL))objc_msgSend)(obj, SEL_retain);
return encode_pointer_and_type(obj, ref_type);
}
-OBJC_EXTERN
-id _object_readExternalReference_rr(objc_xref_t ref) {
+
+id _object_readExternalReference_non_gc(objc_xref_t ref) {
id obj = decode_pointer(ref);
return obj;
}
-OBJC_EXTERN
-void _object_removeExternalReference_rr(objc_xref_t ref) {
+
+void _object_removeExternalReference_non_gc(objc_xref_t ref) {
id obj = decode_pointer(ref);
objc_xref_type_t ref_type = decode_type(ref);
switch (ref_type) {
}
}
-objc_xref_t _object_addExternalReference(id obj, objc_xref_t type) {
-#if SUPPORT_GC
- if (UseGC)
- return _object_addExternalReference_gc(obj, type);
- else
-#endif
- return _object_addExternalReference_rr(obj, type);
+
+uintptr_t _object_getExternalHash(id object) {
+ return (uintptr_t)object;
}
-id _object_readExternalReference(objc_xref_t ref) {
+
#if SUPPORT_GC
- if (UseGC)
- return _object_readExternalReference_gc(ref);
- else
-#endif
- return _object_readExternalReference_rr(ref);
+
+// These functions are resolver functions in objc-auto.mm.
+
+#else
+
+objc_xref_t
+_object_addExternalReference(id obj, objc_xref_t type)
+{
+ return _object_addExternalReference_non_gc(obj, type);
}
-void _object_removeExternalReference(objc_xref_t ref) {
-#if SUPPORT_GC
- if (UseGC)
- _object_removeExternalReference_gc(ref);
- else
-#endif
- _object_removeExternalReference_rr(ref);
+
+id
+_object_readExternalReference(objc_xref_t ref)
+{
+ return _object_readExternalReference_non_gc(ref);
}
-uintptr_t _object_getExternalHash(id object) {
-#if SUPPORT_GC
- if (UseCompaction)
- return auto_zone_get_associative_hash(gc_zone, object);
- else
-#endif
- return (uintptr_t)object;
+
+void
+_object_removeExternalReference(objc_xref_t ref)
+{
+ _object_removeExternalReference_non_gc(ref);
}
+
+#endif
#ifndef _OBJC_FILE_OLD_H
#define _OBJC_FILE_OLD_H
+#if !__OBJC2__
+
#include "objc-os.h"
struct objc_module;
extern struct objc_module *_getObjcModules(const header_info *hi, size_t *nmodules);
extern SEL *_getObjcSelectorRefs(const header_info *hi, size_t *nmess);
-extern BOOL _hasObjcContents(const header_info *hi);
extern struct old_protocol **_getObjcProtocols(const header_info *hi, size_t *nprotos);
-extern struct old_class **_getObjcClassRefs(const header_info *hi, size_t *nclasses);
+extern Class *_getObjcClassRefs(const header_info *hi, size_t *nclasses);
extern const char *_getObjcClassNames(const header_info *hi, size_t *size);
__END_DECLS
#endif
+
+#endif
+++ /dev/null
-/*
- * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-// Copyright 1988-1996 NeXT Software, Inc.
-
-#if !__OBJC2__
-
-#include "objc-private.h"
-#include "objc-runtime-old.h"
-
-
-#if TARGET_OS_WIN32
-
-/*
-Module
-_getObjcModules(const header_info *hi, size_t *nmodules)
-{
- if (nmodules) *nmodules = hi->moduleCount;
- return hi->modules;
-}
-*/
-SEL *
-_getObjcSelectorRefs(const header_info *hi, size_t *nmess)
-{
- if (nmess) *nmess = hi->selrefCount;
- return hi->selrefs;
-}
-
-struct old_protocol **
-_getObjcProtocols(const header_info *hi, size_t *nprotos)
-{
- if (nprotos) *nprotos = hi->protocolCount;
- return hi->protocols;
-}
-
-struct old_class **
-_getObjcClassRefs(const header_info *hi, size_t *nclasses)
-{
- if (nclasses) *nclasses = hi->clsrefCount;
- return (struct old_class **)hi->clsrefs;
-}
-
-// __OBJC,__class_names section only emitted by CodeWarrior rdar://4951638
-const char *
-_getObjcClassNames(const header_info *hi, size_t *size)
-{
- if (size) *size = 0;
- return NULL;
-}
-
-#else
-
-#define GETSECT(name, type, sectname) \
- type *name(const header_info *hi, size_t *outCount) \
- { \
- unsigned long byteCount = 0; \
- type *data = (type *) \
- getsectiondata(hi->mhdr, SEG_OBJC, sectname, &byteCount); \
- *outCount = byteCount / sizeof(type); \
- return data; \
- }
-
-GETSECT(_getObjcModules, struct objc_module, "__module_info");
-GETSECT(_getObjcSelectorRefs, SEL, "__message_refs");
-GETSECT(_getObjcClassRefs, struct old_class *, "__cls_refs");
-GETSECT(_getObjcClassNames, const char, "__class_names");
-// __OBJC,__class_names section only emitted by CodeWarrior rdar://4951638
-
-
-objc_image_info *
-_getObjcImageInfo(const headerType *mhdr, size_t *outBytes)
-{
- unsigned long byteCount = 0;
- objc_image_info *info = (objc_image_info *)
- getsectiondata(mhdr, SEG_OBJC, "__image_info", &byteCount);
- *outBytes = byteCount;
- return info;
-}
-
-
-struct old_protocol **
-_getObjcProtocols(const header_info *hi, size_t *nprotos)
-{
- unsigned long size = 0;
- struct old_protocol *protos = (struct old_protocol *)
- getsectiondata(hi->mhdr, SEG_OBJC, "__protocol", &size);
- *nprotos = size / sizeof(struct old_protocol);
-
- if (!hi->proto_refs && *nprotos) {
- size_t i;
- header_info *whi = (header_info *)hi;
- whi->proto_refs = (struct old_protocol **)
- malloc(*nprotos * sizeof(*hi->proto_refs));
- for (i = 0; i < *nprotos; i++) {
- hi->proto_refs[i] = protos+i;
- }
- }
-
- return hi->proto_refs;
-}
-
-
-static const segmentType *
-getsegbynamefromheader(const headerType *head, const char *segname)
-{
- const segmentType *sgp;
- unsigned long i;
-
- sgp = (const segmentType *) (head + 1);
- for (i = 0; i < head->ncmds; i++){
- if (sgp->cmd == SEGMENT_CMD) {
- if (strncmp(sgp->segname, segname, sizeof(sgp->segname)) == 0) {
- return sgp;
- }
- }
- sgp = (const segmentType *)((char *)sgp + sgp->cmdsize);
- }
- return NULL;
-}
-
-BOOL
-_hasObjcContents(const header_info *hi)
-{
- // Look for an __OBJC,* section other than __OBJC,__image_info
- const segmentType *seg = getsegbynamefromheader(hi->mhdr, "__OBJC");
- const sectionType *sect;
- uint32_t i;
- for (i = 0; i < seg->nsects; i++) {
- sect = ((const sectionType *)(seg+1))+i;
- if (0 != strncmp(sect->sectname, "__image_info", 12)) {
- return YES;
- }
- }
-
- return NO;
-}
-
-
-#endif
-
-#endif
--- /dev/null
+/*
+ * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+// Copyright 1988-1996 NeXT Software, Inc.
+
+#if !__OBJC2__
+
+#include "objc-private.h"
+#include "objc-runtime-old.h"
+#include "objc-file-old.h"
+
+#if TARGET_OS_WIN32
+
+/*
+Module
+_getObjcModules(const header_info *hi, size_t *nmodules)
+{
+ if (nmodules) *nmodules = hi->moduleCount;
+ return hi->modules;
+}
+*/
+SEL *
+_getObjcSelectorRefs(const header_info *hi, size_t *nmess)
+{
+ if (nmess) *nmess = hi->selrefCount;
+ return hi->selrefs;
+}
+
+struct old_protocol **
+_getObjcProtocols(const header_info *hi, size_t *nprotos)
+{
+ if (nprotos) *nprotos = hi->protocolCount;
+ return hi->protocols;
+}
+
+Class*
+_getObjcClassRefs(const header_info *hi, size_t *nclasses)
+{
+ if (nclasses) *nclasses = hi->clsrefCount;
+ return (Class*)hi->clsrefs;
+}
+
+// __OBJC,__class_names section only emitted by CodeWarrior rdar://4951638
+const char *
+_getObjcClassNames(const header_info *hi, size_t *size)
+{
+ if (size) *size = 0;
+ return NULL;
+}
+
+#else
+
+#define GETSECT(name, type, sectname) \
+ type *name(const header_info *hi, size_t *outCount) \
+ { \
+ unsigned long byteCount = 0; \
+ type *data = (type *) \
+ getsectiondata(hi->mhdr, SEG_OBJC, sectname, &byteCount); \
+ *outCount = byteCount / sizeof(type); \
+ return data; \
+ }
+
+GETSECT(_getObjcModules, struct objc_module, "__module_info");
+GETSECT(_getObjcSelectorRefs, SEL, "__message_refs");
+GETSECT(_getObjcClassRefs, Class, "__cls_refs");
+GETSECT(_getObjcClassNames, const char, "__class_names");
+// __OBJC,__class_names section only emitted by CodeWarrior rdar://4951638
+
+
+objc_image_info *
+_getObjcImageInfo(const headerType *mhdr, size_t *outBytes)
+{
+ unsigned long byteCount = 0;
+ objc_image_info *info = (objc_image_info *)
+ getsectiondata(mhdr, SEG_OBJC, "__image_info", &byteCount);
+ *outBytes = byteCount;
+ return info;
+}
+
+
+struct old_protocol **
+_getObjcProtocols(const header_info *hi, size_t *nprotos)
+{
+ unsigned long size = 0;
+ struct old_protocol *protos = (struct old_protocol *)
+ getsectiondata(hi->mhdr, SEG_OBJC, "__protocol", &size);
+ *nprotos = size / sizeof(struct old_protocol);
+
+ if (!hi->proto_refs && *nprotos) {
+ size_t i;
+ header_info *whi = (header_info *)hi;
+ whi->proto_refs = (struct old_protocol **)
+ malloc(*nprotos * sizeof(*hi->proto_refs));
+ for (i = 0; i < *nprotos; i++) {
+ hi->proto_refs[i] = protos+i;
+ }
+ }
+
+ return hi->proto_refs;
+}
+
+
+static const segmentType *
+getsegbynamefromheader(const headerType *head, const char *segname)
+{
+ const segmentType *sgp;
+ unsigned long i;
+
+ sgp = (const segmentType *) (head + 1);
+ for (i = 0; i < head->ncmds; i++){
+ if (sgp->cmd == SEGMENT_CMD) {
+ if (strncmp(sgp->segname, segname, sizeof(sgp->segname)) == 0) {
+ return sgp;
+ }
+ }
+ sgp = (const segmentType *)((char *)sgp + sgp->cmdsize);
+ }
+ return NULL;
+}
+
+BOOL
+_hasObjcContents(const header_info *hi)
+{
+ // Look for an __OBJC,* section other than __OBJC,__image_info
+ const segmentType *seg = getsegbynamefromheader(hi->mhdr, "__OBJC");
+ const sectionType *sect;
+ uint32_t i;
+ for (i = 0; i < seg->nsects; i++) {
+ sect = ((const sectionType *)(seg+1))+i;
+ if (0 != strncmp(sect->sectname, "__image_info", 12)) {
+ return YES;
+ }
+ }
+
+ return NO;
+}
+
+
+#endif
+
+#endif
#ifndef _OBJC_FILE_NEW_H
#define _OBJC_FILE_NEW_H
+#if __OBJC2__
+
#include "objc-runtime-new.h"
extern SEL *_getObjc2SelectorRefs(const header_info *hi, size_t *count);
extern message_ref_t *_getObjc2MessageRefs(const header_info *hi, size_t *count);
-extern class_t **_getObjc2ClassRefs(const header_info *hi, size_t *count);
-extern class_t **_getObjc2SuperRefs(const header_info *hi, size_t *count);
+extern Class*_getObjc2ClassRefs(const header_info *hi, size_t *count);
+extern Class*_getObjc2SuperRefs(const header_info *hi, size_t *count);
extern classref_t *_getObjc2ClassList(const header_info *hi, size_t *count);
extern classref_t *_getObjc2NonlazyClassList(const header_info *hi, size_t *count);
extern category_t **_getObjc2CategoryList(const header_info *hi, size_t *count);
__END_DECLS
#endif
+
+#endif
#include "objc-private.h"
#include "objc-file.h"
-#if TARGET_IPHONE_SIMULATOR
-// getsectiondata() not yet available
-
-// 1. Find segment with file offset == 0 and file size != 0. This segment's
-// contents span the Mach-O header. (File size of 0 is .bss, for example)
-// 2. Slide is header's address - segment's preferred address
-static ptrdiff_t
-objc_getImageSlide(const struct mach_header *header)
-{
- unsigned long i;
- const struct segment_command *sgp = (const struct segment_command *)(header + 1);
-
- for (i = 0; i < header->ncmds; i++){
- if (sgp->cmd == LC_SEGMENT) {
- if (sgp->fileoff == 0 && sgp->filesize != 0) {
- return (uintptr_t)header - (uintptr_t)sgp->vmaddr;
- }
- }
- sgp = (const struct segment_command *)((char *)sgp + sgp->cmdsize);
- }
-
- // uh-oh
- _objc_fatal("could not calculate VM slide for image");
- return 0; // not reached
-}
-
-uint8_t *
-objc_getsectiondata(const struct mach_header *mh, const char *segname, const char *sectname, unsigned long *outSize)
-{
- uint32_t size = 0;
-
- char *data = getsectdatafromheader(mh, segname, sectname, &size);
- if (data) {
- *outSize = size;
- return (uint8_t *)data + objc_getImageSlide(mh);
- } else {
- *outSize = 0;
- return NULL;
- }
-}
-
-static const struct segment_command *
-objc_getsegbynamefromheader(const mach_header *head, const char *segname)
-{
- const struct segment_command *sgp;
- unsigned long i;
-
- sgp = (const struct segment_command *) (head + 1);
- for (i = 0; i < head->ncmds; i++){
- if (sgp->cmd == LC_SEGMENT) {
- if (strncmp(sgp->segname, segname, sizeof(sgp->segname)) == 0) {
- return sgp;
- }
- }
- sgp = (const struct segment_command *)((char *)sgp + sgp->cmdsize);
- }
- return NULL;
-}
-
-uint8_t *
-objc_getsegmentdata(const struct mach_header *mh, const char *segname, unsigned long *outSize)
-{
- const struct segment_command *seg;
-
- seg = objc_getsegbynamefromheader(mh, segname);
- if (seg) {
- *outSize = seg->vmsize;
- return (uint8_t *)seg->vmaddr + objc_getImageSlide(mh);
- } else {
- *outSize = 0;
- return NULL;
- }
-}
-
-// TARGET_IPHONE_SIMULATOR
-#endif
-
#define GETSECT(name, type, sectname) \
type *name(const header_info *hi, size_t *outCount) \
{ \
// function name                      content type     section name
GETSECT(_getObjc2SelectorRefs, SEL, "__objc_selrefs");
GETSECT(_getObjc2MessageRefs, message_ref_t, "__objc_msgrefs");
-GETSECT(_getObjc2ClassRefs, class_t *, "__objc_classrefs");
-GETSECT(_getObjc2SuperRefs, class_t *, "__objc_superrefs");
+GETSECT(_getObjc2ClassRefs, Class, "__objc_classrefs");
+GETSECT(_getObjc2SuperRefs, Class, "__objc_superrefs");
GETSECT(_getObjc2ClassList, classref_t, "__objc_classlist");
GETSECT(_getObjc2NonlazyClassList, classref_t, "__objc_nlclslist");
GETSECT(_getObjc2CategoryList, category_t *, "__objc_catlist");
__BEGIN_DECLS
-/***********************************************************************
-* Trampoline descriptors for gdb.
-**********************************************************************/
-
-#if __OBJC2__ && defined(__x86_64__)
-
-typedef struct {
- uint32_t offset; // 0 = unused, else code = (uintptr_t)desc + desc->offset
- uint32_t flags;
-} objc_trampoline_descriptor;
-#define OBJC_TRAMPOLINE_MESSAGE (1<<0) // trampoline acts like objc_msgSend
-#define OBJC_TRAMPOLINE_STRET (1<<1) // trampoline is struct-returning
-#define OBJC_TRAMPOLINE_VTABLE (1<<2) // trampoline is vtable dispatcher
-
-typedef struct objc_trampoline_header {
- uint16_t headerSize; // sizeof(objc_trampoline_header)
- uint16_t descSize; // sizeof(objc_trampoline_descriptor)
- uint32_t descCount; // number of descriptors following this header
- struct objc_trampoline_header *next;
-} objc_trampoline_header;
-
-OBJC_EXPORT objc_trampoline_header *gdb_objc_trampolines
- __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_NA);
-
-OBJC_EXPORT void gdb_objc_trampolines_changed(objc_trampoline_header *thdr)
- __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_NA);
-// Notify gdb that gdb_objc_trampolines has changed.
-// thdr itself includes the new descriptors; thdr->next is not new.
-
-#endif
-
/***********************************************************************
-* Debugger mode.
+* Class pointer preflighting
**********************************************************************/
-// Start debugger mode.
-// Returns non-zero if debugger mode was successfully started.
-// In debugger mode, you can try to use the runtime without deadlocking
-// on other threads. All other threads must be stopped during debugger mode.
-// OBJC_DEBUGMODE_FULL requires more locks so later operations are less
-// likely to fail.
-#define OBJC_DEBUGMODE_FULL (1<<0)
-OBJC_EXPORT int gdb_objc_startDebuggerMode(uint32_t flags)
- __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_1);
-
-// Stop debugger mode. Do not call if startDebuggerMode returned zero.
-OBJC_EXPORT void gdb_objc_endDebuggerMode(void)
- __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_1);
-
-// Failure hook when debugger mode tries something that would block.
-// Set a breakpoint here to handle it before the runtime causes a trap.
-// Debugger mode is still active; call endDebuggerMode to end it.
-OBJC_EXPORT void gdb_objc_debuggerModeFailure(void)
- __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_1);
-
-// Older debugger-mode mechanism. Too simplistic.
-OBJC_EXPORT BOOL gdb_objc_isRuntimeLocked(void)
- __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_1);
-
// Return cls if it's a valid class, or crash.
OBJC_EXPORT Class gdb_class_getClass(Class cls)
#if __OBJC2__
#endif
+/***********************************************************************
+* Tagged pointer decoding
+**********************************************************************/
+#if __OBJC2__
+
+// if (obj & mask) obj is a tagged pointer object
+OBJC_EXPORT uintptr_t objc_debug_taggedpointer_mask
+ __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_NA);
+
+// tag_slot = (obj >> slot_shift) & slot_mask
+OBJC_EXPORT unsigned int objc_debug_taggedpointer_slot_shift
+ __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_NA);
+OBJC_EXPORT uintptr_t objc_debug_taggedpointer_slot_mask
+ __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_NA);
+
+// class = classes[tag_slot]
+OBJC_EXPORT Class objc_debug_taggedpointer_classes[]
+ __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_NA);
+
+// payload = (obj << payload_lshift) >> payload_rshift
+// Payload signedness is determined by the signedness of the right-shift.
+OBJC_EXPORT unsigned int objc_debug_taggedpointer_payload_lshift
+ __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_NA);
+OBJC_EXPORT unsigned int objc_debug_taggedpointer_payload_rshift
+ __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_NA);
+
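+// A hedged sketch (illustration only, not part of the exported interface):
+// a debugger-side helper could combine the variables above exactly as the
+// formulas in the comments describe. The helper names are hypothetical.
+static inline Class _gdb_decode_taggedpointer_class(uintptr_t obj)
+{
+    if (!(obj & objc_debug_taggedpointer_mask)) return Nil;   // ordinary object
+    unsigned int slot = (unsigned int)
+        ((obj >> objc_debug_taggedpointer_slot_shift) & objc_debug_taggedpointer_slot_mask);
+    return objc_debug_taggedpointer_classes[slot];            // class = classes[tag_slot]
+}
+
+static inline uintptr_t _gdb_decode_taggedpointer_payload(uintptr_t obj)
+{
+    return (obj << objc_debug_taggedpointer_payload_lshift)
+           >> objc_debug_taggedpointer_payload_rshift;        // unsigned payload
+}
+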
+#endif
+
+
+/***********************************************************************
+* Breakpoints in objc_msgSend for debugger stepping.
+* The array is a {0,0} terminated list of addresses.
+* Each address is one of the following:
+* OBJC_MESSENGER_START: Address is the start of a messenger function.
+* OBJC_MESSENGER_END_FAST: Address is a jump insn that calls an IMP.
+* OBJC_MESSENGER_END_SLOW: Address is some insn in the slow lookup path.
+* OBJC_MESSENGER_END_NIL: Address is a return insn for messages to nil.
+*
+* Every path from OBJC_MESSENGER_START should reach some OBJC_MESSENGER_END.
+* At all ENDs, the stack and parameter register state is the same as START.
+*
+* In some cases, the END_FAST case jumps to something other than the
+* method's implementation. In those cases the jump's destination will
+* be another function that is marked OBJC_MESSENGER_START.
+**********************************************************************/
+#if __OBJC2__
+
+#define OBJC_MESSENGER_START 1
+#define OBJC_MESSENGER_END_FAST 2
+#define OBJC_MESSENGER_END_SLOW 3
+#define OBJC_MESSENGER_END_NIL 4
+
+struct objc_messenger_breakpoint {
+ uintptr_t address;
+ uintptr_t kind;
+};
+
+OBJC_EXPORT struct objc_messenger_breakpoint
+gdb_objc_messenger_breakpoints[]
+ __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0);
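+
+// A hedged sketch (illustration only): a debugger would walk the {0,0}-
+// terminated list above and plant breakpoints at the START entries.
+// Both the function and its set_breakpoint callback are hypothetical.
+static inline void _gdb_walk_messenger_breakpoints(void (*set_breakpoint)(uintptr_t))
+{
+    const struct objc_messenger_breakpoint *bp = gdb_objc_messenger_breakpoints;
+    for ( ; bp->address != 0  ||  bp->kind != 0; bp++) {
+        if (bp->kind == OBJC_MESSENGER_START) {
+            set_breakpoint(bp->address);  // every path from here reaches some END
+        }
+    }
+}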
+
+#endif
+
+
#ifndef OBJC_NO_GC
/***********************************************************************
#ifndef _OBJC_INITIALIZE_H
#define _OBJC_INITIALIZE_H
-#include "objc.h"
+#include "objc-private.h"
__BEGIN_DECLS
struct _objc_initializing_classes;
-extern BOOL _class_isInitializing(Class cls);
-BOOL _class_isInitialized(Class cls);
-
extern void _class_initialize(Class cls);
extern void _destroyInitializingClassList(struct _objc_initializing_classes *list);
* _fetchInitializingClassList
* Return the list of classes being initialized by this thread.
* If create == YES, create the list when no classes are being initialized by this thread.
-* If create == NO, return NULL when no classes are being initialized by this thread.
+* If create == NO, return nil when no classes are being initialized by this thread.
**********************************************************************/
static _objc_initializing_classes *_fetchInitializingClassList(BOOL create)
{
Class *classes;
data = _objc_fetch_pthread_data(create);
- if (data == NULL) return NULL;
+ if (data == nil) return nil;
list = data->initializingClasses;
- if (list == NULL) {
+ if (list == nil) {
if (!create) {
- return NULL;
+ return nil;
} else {
list = (_objc_initializing_classes *)
_calloc_internal(1, sizeof(_objc_initializing_classes));
}
classes = list->metaclasses;
- if (classes == NULL) {
+ if (classes == nil) {
// If _objc_initializing_classes exists, allocate metaclass array,
// even if create == NO.
// Allow 4 simultaneous class inits on this thread before realloc.
/***********************************************************************
* _destroyInitializingClassList
* Deallocate memory used by the given initialization list.
-* Any part of the list may be NULL.
+* Any part of the list may be nil.
* Called from _objc_pthread_destroyspecific().
**********************************************************************/
void _destroyInitializingClassList(struct _objc_initializing_classes *list)
{
- if (list != NULL) {
- if (list->metaclasses != NULL) {
+ if (list != nil) {
+ if (list->metaclasses != nil) {
_free_internal(list->metaclasses);
}
_free_internal(list);
_objc_initializing_classes *list = _fetchInitializingClassList(NO);
if (list) {
- cls = _class_getMeta(cls);
+ cls = cls->getMeta();
for (i = 0; i < list->classesAllocated; i++) {
if (cls == list->metaclasses[i]) return YES;
}
{
int i;
_objc_initializing_classes *list = _fetchInitializingClassList(YES);
- cls = _class_getMeta(cls);
+ cls = cls->getMeta();
// paranoia: explicitly disallow duplicates
for (i = 0; i < list->classesAllocated; i++) {
}
for (i = 0; i < list->classesAllocated; i++) {
- if (0 == list->metaclasses[i]) {
+ if (! list->metaclasses[i]) {
list->metaclasses[i] = cls;
return;
}
// zero out the new entries
list->metaclasses[i++] = cls;
for ( ; i < list->classesAllocated; i++) {
- list->metaclasses[i] = NULL;
+ list->metaclasses[i] = nil;
}
}
_objc_initializing_classes *list = _fetchInitializingClassList(NO);
if (list) {
- cls = _class_getMeta(cls);
+ cls = cls->getMeta();
for (i = 0; i < list->classesAllocated; i++) {
if (cls == list->metaclasses[i]) {
- list->metaclasses[i] = NULL;
+ list->metaclasses[i] = nil;
return;
}
}
PendingInitialize *pending;
monitor_assert_locked(&classInitLock);
- assert(!supercls || _class_isInitialized(supercls));
+ assert(!supercls || supercls->isInitialized());
if (PrintInitializing) {
_objc_inform("INITIALIZE: %s is fully +initialized",
- _class_getName(cls));
+ cls->getName());
}
// propagate finalization affinity.
- if (UseGC && supercls && _class_shouldFinalizeOnMainThread(supercls)) {
- _class_setFinalizeOnMainThread(cls);
+ if (UseGC && supercls && supercls->shouldFinalizeOnMainThread()) {
+ cls->setShouldFinalizeOnMainThread();
}
// mark this class as fully +initialized
- _class_setInitialized(cls);
+ cls->setInitialized();
monitor_notifyAll(&classInitLock);
_setThisThreadIsNotInitializingClass(cls);
// Destroy the pending table if it's now empty, to save memory.
if (NXCountMapTable(pendingInitializeMap) == 0) {
NXFreeMapTable(pendingInitializeMap);
- pendingInitializeMap = NULL;
+ pendingInitializeMap = nil;
}
while (pending) {
if (PrintInitializing) {
_objc_inform("INITIALIZE: %s waiting for superclass +[%s initialize]",
- _class_getName(cls), _class_getName(supercls));
+ cls->getName(), supercls->getName());
}
if (!pendingInitializeMap) {
**********************************************************************/
void _class_initialize(Class cls)
{
- assert(!_class_isMetaClass(cls));
+ assert(!cls->isMetaClass());
Class supercls;
BOOL reallyInitialize = NO;
// Make sure super is done initializing BEFORE beginning to initialize cls.
// See note about deadlock above.
- supercls = _class_getSuperclass(cls);
- if (supercls && !_class_isInitialized(supercls)) {
+ supercls = cls->superclass;
+ if (supercls && !supercls->isInitialized()) {
_class_initialize(supercls);
}
// Try to atomically set CLS_INITIALIZING.
monitor_enter(&classInitLock);
- if (!_class_isInitialized(cls) && !_class_isInitializing(cls)) {
- _class_setInitializing(cls);
+ if (!cls->isInitialized() && !cls->isInitializing()) {
+ cls->setInitializing();
reallyInitialize = YES;
}
monitor_exit(&classInitLock);
// this class doesn't implement +initialize. 2157218
if (PrintInitializing) {
_objc_inform("INITIALIZE: calling +[%s initialize]",
- _class_getName(cls));
+ cls->getName());
}
((void(*)(Class, SEL))objc_msgSend)(cls, SEL_initialize);
if (PrintInitializing) {
_objc_inform("INITIALIZE: finished +[%s initialize]",
- _class_getName(cls));
+ cls->getName());
}
// Done initializing.
// was itself triggered from inside a superclass +initialize.)
monitor_enter(&classInitLock);
- if (!supercls || _class_isInitialized(supercls)) {
+ if (!supercls || supercls->isInitialized()) {
_finishInitializing(cls, supercls);
} else {
_finishInitializingAfter(cls, supercls);
return;
}
- else if (_class_isInitializing(cls)) {
+ else if (cls->isInitializing()) {
// We couldn't set INITIALIZING because INITIALIZING was already set.
// If this thread set it earlier, continue normally.
// If some other thread set it, block until initialize is done.
return;
} else {
monitor_enter(&classInitLock);
- while (!_class_isInitialized(cls)) {
+ while (!cls->isInitialized()) {
monitor_wait(&classInitLock);
}
monitor_exit(&classInitLock);
}
}
- else if (_class_isInitialized(cls)) {
+ else if (cls->isInitialized()) {
// Set CLS_INITIALIZING failed because someone else already
// initialized the class. Continue normally.
// NOTE this check must come AFTER the ISINITIALIZING case.
*/
#include <objc/objc.h>
+#include <objc/runtime.h>
#include <Availability.h>
#include <malloc/malloc.h>
#include <dispatch/dispatch.h>
OBJC_EXPORT Class objc_initializeClassPair(Class superclass_gen, const char *name, Class cls_gen, Class meta_gen)
__OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_0);
-#if __OBJC2__ && __LP64__
-// Register a tagged pointer class.
-OBJC_EXPORT void _objc_insert_tagged_isa(unsigned char slotNumber, Class isa)
- __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3);
-#endif
-
// Batch object allocation using malloc_zone_batch_malloc().
OBJC_EXPORT unsigned class_createInstances(Class cls, size_t extraBytes,
id *results, unsigned num_requested)
OBJC_EXPORT id _objc_getNilReceiver(void)
__OSX_AVAILABLE_STARTING(__MAC_10_3, __IPHONE_NA);
-// Return NO if no instance of `cls` has ever owned an associative reference.
-OBJC_EXPORT BOOL class_instancesHaveAssociatedObjects(Class cls)
- __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_0);
-
// Return YES if GC is on and `object` is a GC allocation.
OBJC_EXPORT BOOL objc_isAuto(id object)
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_NA);
#endif
+
+// Tagged pointer objects.
+
+#if __LP64__
+#define OBJC_HAVE_TAGGED_POINTERS 1
+#endif
+
+#if OBJC_HAVE_TAGGED_POINTERS
+
+// Tagged pointer layout and usage are subject to change
+// across OS versions. The current layout is:
+// (MSB)
+// 60 bits payload
+// 3 bits tag index
+// 1 bit 1 for tagged pointer objects, 0 for ordinary objects
+// (LSB)
+
+#if __has_feature(objc_fixed_enum) || __cplusplus >= 201103L
+enum objc_tag_index_t : uint8_t
+#else
+typedef uint8_t objc_tag_index_t;
+enum
+#endif
+{
+ OBJC_TAG_NSAtom = 0,
+ OBJC_TAG_1 = 1,
+ OBJC_TAG_NSString = 2,
+ OBJC_TAG_NSNumber = 3,
+ OBJC_TAG_NSIndexPath = 4,
+ OBJC_TAG_NSManagedObjectID = 5,
+ OBJC_TAG_NSDate = 6,
+ OBJC_TAG_7 = 7
+};
+#if __has_feature(objc_fixed_enum) && !defined(__cplusplus)
+typedef enum objc_tag_index_t objc_tag_index_t;
+#endif
+
+OBJC_EXPORT void _objc_registerTaggedPointerClass(objc_tag_index_t tag, Class cls)
+ __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_NA);
+
+OBJC_EXPORT Class _objc_getClassForTag(objc_tag_index_t tag)
+ __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_NA);
+
+static inline bool
+_objc_taggedPointersEnabled(void)
+{
+ extern uintptr_t objc_debug_taggedpointer_mask;
+ return (objc_debug_taggedpointer_mask != 0);
+}
+
+static inline void *
+_objc_makeTaggedPointer(objc_tag_index_t tag, uintptr_t value)
+{
+ // assert(_objc_taggedPointersEnabled());
+ // assert((unsigned int)tag < 8);
+ // assert(((value << 4) >> 4) == value);
+ return (void *)((value << 4) | ((uintptr_t)tag << 1) | 1);
+}
+
+static inline bool
+_objc_isTaggedPointer(const void *ptr)
+{
+ return (uintptr_t)ptr & 1;
+}
+
+static inline objc_tag_index_t
+_objc_getTaggedPointerTag(const void *ptr)
+{
+ // assert(_objc_isTaggedPointer(ptr));
+ return (objc_tag_index_t)(((uintptr_t)ptr & 0xe) >> 1);
+}
+
+static inline uintptr_t
+_objc_getTaggedPointerValue(const void *ptr)
+{
+ // assert(_objc_isTaggedPointer(ptr));
+ return (uintptr_t)ptr >> 4;
+}
+
+static inline intptr_t
+_objc_getTaggedPointerSignedValue(const void *ptr)
+{
+ // assert(_objc_isTaggedPointer(ptr));
+ return (intptr_t)ptr >> 4;
+}
+
+
+OBJC_EXPORT void _objc_insert_tagged_isa(unsigned char slotNumber, Class isa)
+ __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_7,__MAC_10_9, __IPHONE_4_3,__IPHONE_NA);
+
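+// A hedged usage sketch (illustration only, never called): register a class
+// for one of the tag slots above, then build and unpack a payload.
+// The function name is hypothetical.
+static inline void _objc_taggedPointerUsageExample(Class cls)
+{
+    _objc_registerTaggedPointerClass(OBJC_TAG_NSNumber, cls);
+
+    void *obj = _objc_makeTaggedPointer(OBJC_TAG_NSNumber, 42);
+    // _objc_isTaggedPointer(obj)        is true
+    // _objc_getTaggedPointerTag(obj)    == OBJC_TAG_NSNumber
+    // _objc_getTaggedPointerValue(obj)  == 42
+    (void)obj;
+}
+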
+#endif
+
+
// External Reference support. Used to support compaction.
enum {
OBJC_EXPORT uintptr_t _object_getExternalHash(id object)
__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_5_0);
+/**
+ * Returns the method implementation of an object.
+ *
+ * @param obj An Objective-C object.
+ * @param name An Objective-C selector.
+ *
+ * @return The IMP corresponding to the instance method implemented by
+ * the class of \e obj.
+ *
+ * @note Equivalent to:
+ *
+ * class_getMethodImplementation(object_getClass(obj), name);
+ */
+OBJC_EXPORT IMP object_getMethodImplementation(id obj, SEL name)
+ __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0);
+
+OBJC_EXPORT IMP object_getMethodImplementation_stret(id obj, SEL name)
+ __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0);
+
+
// Instance-specific instance variable layout.
OBJC_EXPORT void _class_setIvarLayoutAccessor(Class cls_gen, const uint8_t* (*accessor) (id object))
OBJC_EXPORT BOOL _class_usesAutomaticRetainRelease(Class cls)
__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_5_0);
+OBJC_EXPORT BOOL _class_isFutureClass(Class cls)
+ __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0);
+
+
// Obsolete ARC conversions.
// hack - remove and reinstate objc.h's definitions
__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_5_0);
+OBJC_EXPORT id objc_alloc(Class cls)
+ __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0);
+
+OBJC_EXPORT id objc_allocWithZone(Class cls)
+ __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0);
+
OBJC_EXPORT id objc_retain(id obj)
__asm__("_objc_retain")
__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_5_0);
#include <objc/objc-class.h>
#include <mach-o/loader.h>
-#include <AvailabilityMacros.h>
/* dynamically loading Mach-O object files that contain Objective-C code */
typedef void(*load_method_t)(id, SEL);
struct loadable_class {
- Class cls; // may be NULL
+ Class cls; // may be nil
IMP method;
};
struct loadable_category {
- Category cat; // may be NULL
+ Category cat; // may be nil
IMP method;
};
// List of classes that need +load called (pending superclass +load)
// This list always has superclasses first because of the way it is constructed
-static struct loadable_class *loadable_classes = NULL;
+static struct loadable_class *loadable_classes = nil;
static int loadable_classes_used = 0;
static int loadable_classes_allocated = 0;
// List of categories that need +load called (pending parent class +load)
-static struct loadable_category *loadable_categories = NULL;
+static struct loadable_category *loadable_categories = nil;
static int loadable_categories_used = 0;
static int loadable_categories_allocated = 0;
recursive_mutex_assert_locked(&loadMethodLock);
- method = _class_getLoadMethod(cls);
+ method = cls->getLoadMethod();
if (!method) return; // Don't bother if cls has no +load method
if (PrintLoading) {
- _objc_inform("LOAD: class '%s' scheduled for +load", _class_getName(cls));
+ _objc_inform("LOAD: class '%s' scheduled for +load", cls->getName());
}
if (loadable_classes_used == loadable_classes_allocated) {
int i;
for (i = 0; i < loadable_classes_used; i++) {
if (loadable_classes[i].cls == cls) {
- loadable_classes[i].cls = NULL;
+ loadable_classes[i].cls = nil;
if (PrintLoading) {
- _objc_inform("LOAD: class '%s' unscheduled for +load", _class_getName(cls));
+ _objc_inform("LOAD: class '%s' unscheduled for +load", cls->getName());
}
return;
}
int i;
for (i = 0; i < loadable_categories_used; i++) {
if (loadable_categories[i].cat == cat) {
- loadable_categories[i].cat = NULL;
+ loadable_categories[i].cat = nil;
if (PrintLoading) {
_objc_inform("LOAD: category '%s(%s)' unscheduled for +load",
_category_getClassName(cat),
// Detach current loadable list.
struct loadable_class *classes = loadable_classes;
int used = loadable_classes_used;
- loadable_classes = NULL;
+ loadable_classes = nil;
loadable_classes_allocated = 0;
loadable_classes_used = 0;
if (!cls) continue;
if (PrintLoading) {
- _objc_inform("LOAD: +[%s load]\n", _class_getName(cls));
+ _objc_inform("LOAD: +[%s load]\n", cls->getName());
}
(*load_method)(cls, SEL_load);
}
struct loadable_category *cats = loadable_categories;
int used = loadable_categories_used;
int allocated = loadable_categories_allocated;
- loadable_categories = NULL;
+ loadable_categories = nil;
loadable_categories_allocated = 0;
loadable_categories_used = 0;
if (!cat) continue;
cls = _category_getClass(cat);
- if (cls && _class_isLoadable(cls)) {
+ if (cls && cls->isLoadable()) {
if (PrintLoading) {
_objc_inform("LOAD: +[%s(%s) load]\n",
- _class_getName(cls),
+ cls->getName(),
_category_getName(cat));
}
(*load_method)(cls, SEL_load);
- cats[i].cat = NULL;
+ cats[i].cat = nil;
}
}
loadable_categories_allocated = allocated;
} else {
if (cats) _free_internal(cats);
- loadable_categories = NULL;
+ loadable_categories = nil;
loadable_categories_used = 0;
loadable_categories_allocated = 0;
}
{
_objc_lock_list *locks = getLocks(YES);
- if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
- if (hasLock(locks, lock, MUTEX)) {
- _objc_fatal("deadlock: relocking mutex %s\n", name+1);
- }
- setLock(locks, lock, MUTEX);
+ if (hasLock(locks, lock, MUTEX)) {
+ _objc_fatal("deadlock: relocking mutex %s\n", name+1);
}
+ setLock(locks, lock, MUTEX);
return _mutex_lock_nodebug(lock);
}
// attempting to relock in try_lock is OK
int result = _mutex_try_lock_nodebug(lock);
- if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
- if (result) {
- setLock(locks, lock, MUTEX);
- }
+ if (result) {
+ setLock(locks, lock, MUTEX);
}
+
return result;
}
{
_objc_lock_list *locks = getLocks(NO);
- if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
- if (!hasLock(locks, lock, MUTEX)) {
- _objc_fatal("unlocking unowned mutex %s\n", name+1);
- }
- clearLock(locks, lock, MUTEX);
+ if (!hasLock(locks, lock, MUTEX)) {
+ _objc_fatal("unlocking unowned mutex %s\n", name+1);
}
+ clearLock(locks, lock, MUTEX);
return _mutex_unlock_nodebug(lock);
}
{
_objc_lock_list *locks = getLocks(NO);
- if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
- if (!hasLock(locks, lock, MUTEX)) {
- _objc_fatal("mutex %s incorrectly not held\n",name+1);
- }
+ if (!hasLock(locks, lock, MUTEX)) {
+ _objc_fatal("mutex %s incorrectly not held\n",name+1);
}
}
{
_objc_lock_list *locks = getLocks(NO);
- if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
- if (hasLock(locks, lock, MUTEX)) {
- _objc_fatal("mutex %s incorrectly held\n", name+1);
- }
+ if (hasLock(locks, lock, MUTEX)) {
+ _objc_fatal("mutex %s incorrectly held\n", name+1);
}
}
{
_objc_lock_list *locks = getLocks(YES);
- if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
- setLock(locks, lock, RECURSIVE);
- }
+ setLock(locks, lock, RECURSIVE);
return _recursive_mutex_lock_nodebug(lock);
}
int result = _recursive_mutex_try_lock_nodebug(lock);
- if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
- if (result) {
- setLock(locks, lock, RECURSIVE);
- }
+ if (result) {
+ setLock(locks, lock, RECURSIVE);
}
+
return result;
}
{
_objc_lock_list *locks = getLocks(NO);
- if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
- if (!hasLock(locks, lock, RECURSIVE)) {
- _objc_fatal("unlocking unowned recursive mutex %s\n", name+1);
- }
- clearLock(locks, lock, RECURSIVE);
+ if (!hasLock(locks, lock, RECURSIVE)) {
+ _objc_fatal("unlocking unowned recursive mutex %s\n", name+1);
}
+ clearLock(locks, lock, RECURSIVE);
return _recursive_mutex_unlock_nodebug(lock);
}
{
_objc_lock_list *locks = getLocks(NO);
- if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
- if (!hasLock(locks, lock, RECURSIVE)) {
- _objc_fatal("recursive mutex %s incorrectly not held\n",name+1);
- }
+ if (!hasLock(locks, lock, RECURSIVE)) {
+ _objc_fatal("recursive mutex %s incorrectly not held\n",name+1);
}
}
{
_objc_lock_list *locks = getLocks(NO);
- if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
- if (hasLock(locks, lock, RECURSIVE)) {
- _objc_fatal("recursive mutex %s incorrectly held\n", name+1);
- }
+ if (hasLock(locks, lock, RECURSIVE)) {
+ _objc_fatal("recursive mutex %s incorrectly held\n", name+1);
}
}
{
_objc_lock_list *locks = getLocks(YES);
- if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
- if (hasLock(locks, lock, MONITOR)) {
- _objc_fatal("deadlock: relocking monitor %s\n", name+1);
- }
- setLock(locks, lock, MONITOR);
+ if (hasLock(locks, lock, MONITOR)) {
+ _objc_fatal("deadlock: relocking monitor %s\n", name+1);
}
+ setLock(locks, lock, MONITOR);
return _monitor_enter_nodebug(lock);
}
{
_objc_lock_list *locks = getLocks(NO);
- if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
- if (!hasLock(locks, lock, MONITOR)) {
- _objc_fatal("unlocking unowned monitor%s\n", name+1);
- }
- clearLock(locks, lock, MONITOR);
+ if (!hasLock(locks, lock, MONITOR)) {
+        _objc_fatal("unlocking unowned monitor %s\n", name+1);
}
+ clearLock(locks, lock, MONITOR);
return _monitor_exit_nodebug(lock);
}
{
_objc_lock_list *locks = getLocks(NO);
- if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
- if (!hasLock(locks, lock, MONITOR)) {
- _objc_fatal("waiting in unowned monitor%s\n", name+1);
- }
+ if (!hasLock(locks, lock, MONITOR)) {
+        _objc_fatal("waiting in unowned monitor %s\n", name+1);
}
return _monitor_wait_nodebug(lock);
{
_objc_lock_list *locks = getLocks(NO);
- if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
- if (!hasLock(locks, lock, MONITOR)) {
- _objc_fatal("monitor %s incorrectly not held\n",name+1);
- }
+ if (!hasLock(locks, lock, MONITOR)) {
+ _objc_fatal("monitor %s incorrectly not held\n",name+1);
}
}
{
_objc_lock_list *locks = getLocks(NO);
- if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
- if (hasLock(locks, lock, MONITOR)) {
- _objc_fatal("monitor %s incorrectly held\n", name+1);
- }
+ if (hasLock(locks, lock, MONITOR)) {
+ _objc_fatal("monitor %s incorrectly held\n", name+1);
}
}
{
_objc_lock_list *locks = getLocks(YES);
- if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
- if (hasLock(locks, lock, RDLOCK)) {
- // Recursive rwlock read is bad (may deadlock vs pending writer)
- _objc_fatal("recursive rwlock read %s\n", name+1);
- }
- if (hasLock(locks, lock, WRLOCK)) {
- _objc_fatal("deadlock: read after write for rwlock %s\n", name+1);
- }
- setLock(locks, lock, RDLOCK);
+ if (hasLock(locks, lock, RDLOCK)) {
+ // Recursive rwlock read is bad (may deadlock vs pending writer)
+ _objc_fatal("recursive rwlock read %s\n", name+1);
}
+ if (hasLock(locks, lock, WRLOCK)) {
+ _objc_fatal("deadlock: read after write for rwlock %s\n", name+1);
+ }
+ setLock(locks, lock, RDLOCK);
_rwlock_read_nodebug(lock);
}
// try-read when already writing is OK (will fail)
int result = _rwlock_try_read_nodebug(lock);
- if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
- if (result) {
- setLock(locks, lock, RDLOCK);
- }
+ if (result) {
+ setLock(locks, lock, RDLOCK);
}
+
return result;
}
{
_objc_lock_list *locks = getLocks(NO);
- if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
- if (!hasLock(locks, lock, RDLOCK)) {
- _objc_fatal("un-reading unowned rwlock %s\n", name+1);
- }
- clearLock(locks, lock, RDLOCK);
+ if (!hasLock(locks, lock, RDLOCK)) {
+ _objc_fatal("un-reading unowned rwlock %s\n", name+1);
}
+ clearLock(locks, lock, RDLOCK);
_rwlock_unlock_read_nodebug(lock);
}
{
_objc_lock_list *locks = getLocks(YES);
- if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
- if (hasLock(locks, lock, RDLOCK)) {
- // Lock promotion not allowed (may deadlock)
- _objc_fatal("deadlock: write after read for rwlock %s\n", name+1);
- }
- if (hasLock(locks, lock, WRLOCK)) {
- _objc_fatal("recursive rwlock write %s\n", name+1);
- }
- setLock(locks, lock, WRLOCK);
+ if (hasLock(locks, lock, RDLOCK)) {
+ // Lock promotion not allowed (may deadlock)
+ _objc_fatal("deadlock: write after read for rwlock %s\n", name+1);
+ }
+ if (hasLock(locks, lock, WRLOCK)) {
+ _objc_fatal("recursive rwlock write %s\n", name+1);
}
+ setLock(locks, lock, WRLOCK);
_rwlock_write_nodebug(lock);
}
// try-write when already writing is OK (will fail)
int result = _rwlock_try_write_nodebug(lock);
- if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
- if (result) {
- setLock(locks, lock, WRLOCK);
- }
+ if (result) {
+ setLock(locks, lock, WRLOCK);
}
+
return result;
}
{
_objc_lock_list *locks = getLocks(NO);
- if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
- if (!hasLock(locks, lock, WRLOCK)) {
- _objc_fatal("un-writing unowned rwlock %s\n", name+1);
- }
- clearLock(locks, lock, WRLOCK);
+ if (!hasLock(locks, lock, WRLOCK)) {
+ _objc_fatal("un-writing unowned rwlock %s\n", name+1);
}
+ clearLock(locks, lock, WRLOCK);
_rwlock_unlock_write_nodebug(lock);
}
{
_objc_lock_list *locks = getLocks(NO);
- if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
- if (!hasLock(locks, lock, RDLOCK)) {
- _objc_fatal("rwlock %s incorrectly not reading\n", name+1);
- }
+ if (!hasLock(locks, lock, RDLOCK)) {
+ _objc_fatal("rwlock %s incorrectly not reading\n", name+1);
}
}
{
_objc_lock_list *locks = getLocks(NO);
- if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
- if (!hasLock(locks, lock, WRLOCK)) {
- _objc_fatal("rwlock %s incorrectly not writing\n", name+1);
- }
+ if (!hasLock(locks, lock, WRLOCK)) {
+ _objc_fatal("rwlock %s incorrectly not writing\n", name+1);
}
}
{
_objc_lock_list *locks = getLocks(NO);
- if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
- if (!hasLock(locks, lock, RDLOCK) && !hasLock(locks, lock, WRLOCK)) {
- _objc_fatal("rwlock %s incorrectly neither reading nor writing\n",
- name+1);
- }
+ if (!hasLock(locks, lock, RDLOCK) && !hasLock(locks, lock, WRLOCK)) {
+ _objc_fatal("rwlock %s incorrectly neither reading nor writing\n",
+ name+1);
}
}
{
_objc_lock_list *locks = getLocks(NO);
- if (! (DebuggerMode && isManagedDuringDebugger(lock))) {
- if (hasLock(locks, lock, RDLOCK) || hasLock(locks, lock, WRLOCK)) {
- _objc_fatal("rwlock %s incorrectly not unlocked\n", name+1);
- }
+ if (hasLock(locks, lock, RDLOCK) || hasLock(locks, lock, WRLOCK)) {
+ _objc_fatal("rwlock %s incorrectly not unlocked\n", name+1);
}
}
Management of optimizations in the dyld shared cache
*/
-#include "objc.h"
#include "objc-private.h"
using namespace objc_opt;
const objc_selopt_t *preoptimizedSelectors(void)
{
- return NULL;
+ return nil;
}
-struct class_t * getPreoptimizedClass(const char *name)
+Class getPreoptimizedClass(const char *name)
{
- return NULL;
+ return nil;
}
header_info *preoptimizedHinfoForHeader(const headerType *mhdr)
{
- return NULL;
+ return nil;
}
void preopt_init(void)
// _objc_opt_data: opt data possibly written by dyld
// empty_opt_data: empty data to use if dyld didn't cooperate or DisablePreopt
-static const objc_opt_t *opt = NULL;
+static const objc_opt_t *opt = nil;
static bool preoptimized;
extern const objc_opt_t _objc_opt_data; // in __TEXT, __objc_opt_ro
return opt->selopt();
}
-struct class_t * getPreoptimizedClass(const char *name)
+Class getPreoptimizedClass(const char *name)
{
assert(opt);
objc_clsopt_t *classes = opt->clsopt();
- if (!classes) return NULL;
+ if (!classes) return nil;
void *cls;
void *hi;
uint32_t count = classes->getClassAndHeader(name, cls, hi);
if (count == 1 && ((header_info *)hi)->loaded) {
// exactly one matching class, and it's image is loaded
- return (struct class_t *)cls;
+ return (Class)cls;
}
- if (count == 2) {
+ else if (count > 1) {
// more than one matching class - find one that is loaded
void *clslist[count];
void *hilist[count];
classes->getClassesAndHeaders(name, clslist, hilist);
for (uint32_t i = 0; i < count; i++) {
if (((header_info *)hilist[i])->loaded) {
- return (struct class_t *)clslist[i];
+ return (Class)clslist[i];
}
}
}
// no match that is loaded
- return NULL;
+ return nil;
}
namespace objc_opt {
}
#endif
- return NULL;
+ return nil;
}
};
};
assert(opt);
objc_headeropt_t *hinfos = opt->headeropt();
if (hinfos) return hinfos->get(mhdr);
- else return NULL;
+ else return nil;
}
void preopt_init(void)
{
// `opt` not set at compile time in order to detect too-early usage
- const char *failure = NULL;
+ const char *failure = nil;
opt = &_objc_opt_data;
if (DisablePreopt) {
#define _OBJC_OS_H
#include <TargetConditionals.h>
+#include "objc-config.h"
+
+#ifdef __LP64__
+# define WORD_SHIFT 3UL
+# define WORD_MASK 7UL
+#else
+# define WORD_SHIFT 2UL
+# define WORD_MASK 3UL
+#endif
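+
+// For illustration (a sketch, not from the original source): these constants
+// are typically used to word-align byte counts, e.g.
+//   size_t aligned = (size + WORD_MASK) & ~WORD_MASK;   // round up to 4 or 8 bytes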
#if TARGET_OS_MAC
# include <crt_externs.h>
# include <AssertMacros.h>
# undef check
-# include <AvailabilityMacros.h>
+# include <Availability.h>
# include <TargetConditionals.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <sys/stat.h>
# include <sys/param.h>
# include <mach/mach.h>
+# include <mach/vm_param.h>
# include <mach-o/dyld.h>
# include <mach-o/ldsyms.h>
# include <mach-o/loader.h>
# include <mach-o/getsect.h>
# include <mach-o/dyld_priv.h>
# include <malloc/malloc.h>
+# include <os/lock_private.h>
# include <libkern/OSAtomic.h>
# include <libkern/OSCacheControl.h>
# include <System/pthread_machdep.h>
# include "objc-probes.h" // generated dtrace probe definitions.
+// Some libc functions call objc_msgSend()
+// so we can't use them without deadlocks.
+void syslog(int, const char *, ...) UNAVAILABLE_ATTRIBUTE;
+void vsyslog(int, const char *, va_list) UNAVAILABLE_ATTRIBUTE;
-#if defined(__i386__) || defined(__x86_64__)
-// Inlined spinlock.
-// Not for arm on iOS because it hurts uniprocessor performance.
-
-#define ARR_SPINLOCK_INIT 0
-// XXX -- Careful: OSSpinLock isn't volatile, but should be
-typedef volatile int ARRSpinLock;
-__attribute__((always_inline))
-static inline void ARRSpinLockLock(ARRSpinLock *l)
-{
- unsigned y;
-again:
- if (__builtin_expect(__sync_lock_test_and_set(l, 1), 0) == 0) {
- return;
- }
- for (y = 1000; y; y--) {
-#if defined(__i386__) || defined(__x86_64__)
- asm("pause");
-#endif
- if (*l == 0) goto again;
- }
- thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1);
- goto again;
-}
-__attribute__((always_inline))
-static inline void ARRSpinLockUnlock(ARRSpinLock *l)
-{
- __sync_lock_release(l);
-}
-__attribute__((always_inline))
-static inline int ARRSpinLockTry(ARRSpinLock *l)
-{
- return __sync_bool_compare_and_swap(l, 0, 1);
-}
-
-#define OSSpinLock ARRSpinLock
-#define OSSpinLockTry(l) ARRSpinLockTry(l)
-#define OSSpinLockLock(l) ARRSpinLockLock(l)
-#define OSSpinLockUnlock(l) ARRSpinLockUnlock(l)
-#undef OS_SPINLOCK_INIT
-#define OS_SPINLOCK_INIT ARR_SPINLOCK_INIT
-
-#endif
+#define spinlock_t os_lock_handoff_s
+#define spinlock_trylock(l) os_lock_trylock(l)
+#define spinlock_lock(l) os_lock_lock(l)
+#define spinlock_unlock(l) os_lock_unlock(l)
+#define SPINLOCK_INITIALIZER OS_LOCK_HANDOFF_INIT
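+
+// Usage sketch in terms of the macros above (illustration only; the lock
+// name is hypothetical):
+//   static spinlock_t exampleLock = SPINLOCK_INITIALIZER;
+//   spinlock_lock(&exampleLock);
+//   /* ... critical section ... */
+//   spinlock_unlock(&exampleLock);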
#if !TARGET_OS_IPHONE
__END_DECLS
#endif
-#if TARGET_IPHONE_SIMULATOR
- // getsectiondata() and getsegmentdata() are unavailable
- __BEGIN_DECLS
-# define getsectiondata(m, s, n, c) objc_getsectiondata(m, s, n, c)
-# define getsegmentdata(m, s, c) objc_getsegmentdata(m, s, c)
- extern uint8_t *objc_getsectiondata(const struct mach_header *mh, const char *segname, const char *sectname, unsigned long *outSize);
- extern uint8_t * objc_getsegmentdata(const struct mach_header *mh, const char *segname, unsigned long *outSize);
- __END_DECLS
-#endif
-
# if __cplusplus
# include <vector>
# include <algorithm>
# include <string.h>
# include <assert.h>
# include <malloc.h>
-# include <AvailabilityMacros.h>
+# include <Availability.h>
# if __cplusplus
# include <vector>
}
-typedef mutex_t OSSpinLock;
-#define OSSpinLockLock(l) mutex_lock(l)
-#define OSSpinLockUnlock(l) mutex_unlock(l)
-#define OS_SPINLOCK_INIT MUTEX_INITIALIZER
+typedef mutex_t spinlock_t;
+#define spinlock_lock(l) mutex_lock(l)
+#define spinlock_unlock(l) mutex_unlock(l)
+#define SPINLOCK_INITIALIZER MUTEX_INITIALIZER
typedef struct {
typedef pthread_mutex_t mutex_t;
#define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER;
-extern int DebuggerMode;
-extern void gdb_objc_debuggerModeFailure(void);
-extern BOOL isManagedDuringDebugger(void *lock);
-extern BOOL isLockedDuringDebugger(void *lock);
-
static inline int _mutex_lock_nodebug(mutex_t *m) {
- if (DebuggerMode && isManagedDuringDebugger(m)) {
- if (! isLockedDuringDebugger(m)) {
- gdb_objc_debuggerModeFailure();
- }
- return 0;
- }
return pthread_mutex_lock(m);
}
static inline bool _mutex_try_lock_nodebug(mutex_t *m) {
- if (DebuggerMode && isManagedDuringDebugger(m)) {
- if (! isLockedDuringDebugger(m)) {
- gdb_objc_debuggerModeFailure();
- }
- return true;
- }
return !pthread_mutex_trylock(m);
}
static inline int _mutex_unlock_nodebug(mutex_t *m) {
- if (DebuggerMode && isManagedDuringDebugger(m)) {
- return 0;
- }
return pthread_mutex_unlock(m);
}
static inline int _recursive_mutex_lock_nodebug(recursive_mutex_t *m) {
assert(m->mutex);
- if (DebuggerMode && isManagedDuringDebugger(m)) {
- if (! isLockedDuringDebugger((mutex_t *)m)) {
- gdb_objc_debuggerModeFailure();
- }
- return 0;
- }
return pthread_mutex_lock(m->mutex);
}
static inline bool _recursive_mutex_try_lock_nodebug(recursive_mutex_t *m) {
assert(m->mutex);
- if (DebuggerMode && isManagedDuringDebugger(m)) {
- if (! isLockedDuringDebugger((mutex_t *)m)) {
- gdb_objc_debuggerModeFailure();
- }
- return true;
- }
return !pthread_mutex_trylock(m->mutex);
}
static inline int _recursive_mutex_unlock_nodebug(recursive_mutex_t *m) {
assert(m->mutex);
- if (DebuggerMode && isManagedDuringDebugger(m)) {
- return 0;
- }
return pthread_mutex_unlock(m->mutex);
}
return 0;
}
static inline int _monitor_enter_nodebug(monitor_t *c) {
- assert(!isManagedDuringDebugger(c));
return pthread_mutex_lock(&c->mutex);
}
static inline int _monitor_exit_nodebug(monitor_t *c) {
pthread_rwlock_t rwl;
} rwlock_t;
-extern BOOL isReadingDuringDebugger(rwlock_t *lock);
-extern BOOL isWritingDuringDebugger(rwlock_t *lock);
-
static inline void rwlock_init(rwlock_t *l)
{
int err __unused = pthread_rwlock_init(&l->rwl, NULL);
static inline void _rwlock_read_nodebug(rwlock_t *l)
{
- if (DebuggerMode && isManagedDuringDebugger(l)) {
- if (! isReadingDuringDebugger(l)) {
- gdb_objc_debuggerModeFailure();
- }
- return;
- }
int err __unused = pthread_rwlock_rdlock(&l->rwl);
assert(err == 0);
}
static inline void _rwlock_unlock_read_nodebug(rwlock_t *l)
{
- if (DebuggerMode && isManagedDuringDebugger(l)) {
- return;
- }
int err __unused = pthread_rwlock_unlock(&l->rwl);
assert(err == 0);
}
static inline bool _rwlock_try_read_nodebug(rwlock_t *l)
{
- if (DebuggerMode && isManagedDuringDebugger(l)) {
- if (! isReadingDuringDebugger(l)) {
- gdb_objc_debuggerModeFailure();
- }
- return true;
- }
int err = pthread_rwlock_tryrdlock(&l->rwl);
assert(err == 0 || err == EBUSY);
return (err == 0);
static inline void _rwlock_write_nodebug(rwlock_t *l)
{
- if (DebuggerMode && isManagedDuringDebugger(l)) {
- if (! isWritingDuringDebugger(l)) {
- gdb_objc_debuggerModeFailure();
- }
- return;
- }
int err __unused = pthread_rwlock_wrlock(&l->rwl);
assert(err == 0);
}
static inline void _rwlock_unlock_write_nodebug(rwlock_t *l)
{
- if (DebuggerMode && isManagedDuringDebugger(l)) {
- return;
- }
int err __unused = pthread_rwlock_unlock(&l->rwl);
assert(err == 0);
}
static inline bool _rwlock_try_write_nodebug(rwlock_t *l)
{
- if (DebuggerMode && isManagedDuringDebugger(l)) {
- if (! isWritingDuringDebugger(l)) {
- gdb_objc_debuggerModeFailure();
- }
- return true;
- }
int err = pthread_rwlock_trywrlock(&l->rwl);
assert(err == 0 || err == EBUSY);
return (err == 0);
* OS portability layer.
**********************************************************************/
-#include "objc-os.h"
#include "objc-private.h"
#include "objc-loadmethod.h"
// TARGET_OS_WIN32
#elif TARGET_OS_MAC
-#if !__OBJC2__
#include "objc-file-old.h"
-#endif
+#include "objc-file.h"
void mutex_init(mutex_t *m)
{
const char *_gcForHInfo(const header_info *hinfo)
{
if (_objcHeaderRequiresGC(hinfo)) {
- if (_objcHeaderSupportsCompaction(hinfo))
- return "requires GC, supports compaction";
- else
- return "requires GC";
+ return "requires GC";
} else if (_objcHeaderSupportsGC(hinfo)) {
- if (_objcHeaderSupportsCompaction(hinfo))
- return "supports GC, supports compaction";
- else
- return "supports GC";
+ return "supports GC";
} else {
return "does not support GC";
}
const char *_gcForHInfo2(const header_info *hinfo)
{
if (_objcHeaderRequiresGC(hinfo)) {
- if (_objcHeaderSupportsCompaction(hinfo))
- return "(requires GC) (supports compaction)";
- else
- return "(requires GC)";
+ return "(requires GC)";
} else if (_objcHeaderSupportsGC(hinfo)) {
- if (_objcHeaderSupportsCompaction(hinfo))
- return "(supports GC) (supports compaction)";
- else
- return "(supports GC)";
+ return "(supports GC)";
}
return "";
}
+/***********************************************************************
+* linksToLibrary
+* Returns true if the image links directly to a dylib whose install name
+* is exactly the given name.
+**********************************************************************/
+bool
+linksToLibrary(const header_info *hi, const char *name)
+{
+ const struct dylib_command *cmd;
+ unsigned long i;
+
+ cmd = (const struct dylib_command *) (hi->mhdr + 1);
+ for (i = 0; i < hi->mhdr->ncmds; i++) {
+ if (cmd->cmd == LC_LOAD_DYLIB || cmd->cmd == LC_LOAD_UPWARD_DYLIB ||
+ cmd->cmd == LC_LOAD_WEAK_DYLIB || cmd->cmd == LC_REEXPORT_DYLIB)
+ {
+ const char *dylib = cmd->dylib.name.offset + (const char *)cmd;
+ if (0 == strcmp(dylib, name)) return true;
+ }
+ cmd = (const struct dylib_command *)((char *)cmd + cmd->cmdsize);
+ }
+
+ return false;
+}
+
+
/***********************************************************************
* check_wants_gc
* Check whether the executable supports or requires GC, and make sure
* all already-loaded libraries support the executable's GC mode.
* Sets *appWantsGC to YES if the executable wants GC on.
**********************************************************************/
-static void check_wants_gc(BOOL *appWantsGC, BOOL *appSupportsCompaction)
+static void check_wants_gc(BOOL *appWantsGC)
{
const header_info *hi;
if (DisableGC) {
_objc_inform_on_crash("GC: forcing GC OFF because OBJC_DISABLE_GC is set");
*appWantsGC = NO;
- *appSupportsCompaction = NO;
}
else {
// Find the executable and check its GC bits.
// (The executable will not be found if the executable contains
// no Objective-C code.)
*appWantsGC = NO;
- *appSupportsCompaction = NO;
for (hi = FirstHeader; hi != NULL; hi = hi->next) {
if (hi->mhdr->filetype == MH_EXECUTE) {
*appWantsGC = _objcHeaderSupportsGC(hi) ? YES : NO;
- *appSupportsCompaction = (*appWantsGC && _objcHeaderSupportsCompaction(hi)) ? YES : NO;
+
if (PrintGC) {
_objc_inform("GC: executable '%s' %s",
hi->fname, _gcForHInfo(hi));
}
+
+ if (*appWantsGC) {
+ // Exception: AppleScriptObjC apps run without GC in 10.9+
+ // 1. executable defines no classes
+ // 2. executable references NSBundle only
+ // 3. executable links to AppleScriptObjC.framework
+ size_t classcount = 0;
+ size_t refcount = 0;
+#if __OBJC2__
+ _getObjc2ClassList(hi, &classcount);
+ _getObjc2ClassRefs(hi, &refcount);
+#else
+ if (hi->mod_count == 0 || (hi->mod_count == 1 && !hi->mod_ptr[0].symtab)) classcount = 0;
+ else classcount = 1;
+ _getObjcClassRefs(hi, &refcount);
+#endif
+ if (classcount == 0 && refcount == 1 &&
+ linksToLibrary(hi, "/System/Library/Frameworks"
+ "/AppleScriptObjC.framework/Versions/A"
+ "/AppleScriptObjC"))
+ {
+ *appWantsGC = NO;
+ if (PrintGC) {
+ _objc_inform("GC: forcing GC OFF because this is "
+ "a trivial AppleScriptObjC app");
+ }
+ }
+ }
}
}
}
* If we want GC, verify that every header describes files compiled
* and presumably ready for GC.
************************************************************************/
-static void verify_gc_readiness(BOOL wantsGC, BOOL *wantsCompaction,
+static void verify_gc_readiness(BOOL wantsGC,
header_info **hList, uint32_t hCount)
{
BOOL busted = NO;
busted = YES;
}
- if (*wantsCompaction && !_objcHeaderSupportsCompaction(hi)) {
- // App supports compaction, but library doesn't.
- _objc_inform_now_and_on_crash
- ("'%s' was not linked with -Xlinker -objc_gc_compaction, "
- "but the application wants compaction.",
- hi->fname);
- // Simply warn for now until radars are filed. Eventually,
- // objc_disableCompaction() will block until any current compaction completes.
- objc_disableCompaction();
- *wantsCompaction = NO;
- }
-
if (PrintGC) {
_objc_inform("GC: library '%s' %s",
hi->fname, _gcForHInfo(hi));
{
static BOOL firstTime = YES;
static BOOL wantsGC = NO;
- static BOOL wantsCompaction = NO;
uint32_t i;
header_info *hi;
header_info *hList[infoCount];
// will do the right thing.)
#if SUPPORT_GC
if (firstTime) {
- check_wants_gc(&wantsGC, &wantsCompaction);
+ check_wants_gc(&wantsGC);
- verify_gc_readiness(wantsGC, &wantsCompaction, hList, hCount);
+ verify_gc_readiness(wantsGC, hList, hCount);
- gc_init(wantsGC, wantsCompaction); // needs executable for GC decision
+ gc_init(wantsGC); // needs executable for GC decision
} else {
- verify_gc_readiness(wantsGC, &wantsCompaction, hList, hCount);
+ verify_gc_readiness(wantsGC, hList, hCount);
}
if (wantsGC) {
// used as associated reference values (rdar://6953570)
}
}
-
- // Need to fixup barriers in all libraries that call into libobjc, whether GC is on or not.
- for (i = 0; i < infoCount; ++i) {
- gc_fixup_barrier_stubs(&infoList[i]);
- }
#endif
if (firstTime) {
#ifndef _OBJC_PRIVATE_H_
#define _OBJC_PRIVATE_H_
-#include "objc-os.h"
+#include "objc-config.h"
+
+/* Isolate ourselves from the definitions of id and Class in the compiler
+ * and public headers.
+ */
+
+#ifdef _OBJC_OBJC_H_
+#error include objc-private.h before other headers
+#endif
+
+#define OBJC_TYPES_DEFINED 1
+#define OBJC_OLD_DISPATCH_PROTOTYPES 0
+
+#include <cstddef> // for nullptr_t
+#include <stdint.h>
+#include <assert.h>
+
+struct objc_class;
+struct objc_object;
+
+typedef struct objc_class *Class;
+typedef struct objc_object *id;
+
+#if SUPPORT_TAGGED_POINTERS
+
+#define TAG_COUNT 8
+#define TAG_MASK 1
+#define TAG_SLOT_SHIFT 0
+#define TAG_SLOT_MASK 0xf
+#define TAG_PAYLOAD_LSHIFT 0
+#define TAG_PAYLOAD_RSHIFT 4
+
+extern "C" { extern Class objc_debug_taggedpointer_classes[TAG_COUNT*2]; }
+#define objc_tag_classes objc_debug_taggedpointer_classes
+
+#endif
+
+
+struct objc_object {
+private:
+ uintptr_t isa;
+
+public:
+
+ // ISA() assumes this is NOT a tagged pointer object
+ Class ISA()
+ {
+ assert(!isTaggedPointer());
+ return (Class)isa;
+ }
+
+ // getIsa() allows this to be a tagged pointer object
+ Class getIsa()
+ {
+#if SUPPORT_TAGGED_POINTERS
+ if (isTaggedPointer()) {
+ uintptr_t slot =
+ ((uintptr_t)this >> TAG_SLOT_SHIFT) & TAG_SLOT_MASK;
+ return objc_tag_classes[slot];
+ }
+#endif
+ return ISA();
+ }
+
+ // changeIsa() should be used to change the isa of existing objects.
+ // If this is a new object, use initIsa() for performance.
+ Class changeIsa(Class cls);
+
+ // initIsa() should be used to init the isa of new objects only.
+ // If this object already has an isa, use changeIsa() for correctness.
+ void initIsa(Class cls)
+ {
+ assert(!isTaggedPointer());
+ isa = (uintptr_t)cls;
+ }
+
+ bool isTaggedPointer()
+ {
+#if SUPPORT_TAGGED_POINTERS
+ return ((uintptr_t)this & TAG_MASK);
+#else
+ return false;
+#endif
+ }
+};
+
+#if __OBJC2__
+typedef struct method_t *Method;
+typedef struct ivar_t *Ivar;
+typedef struct category_t *Category;
+typedef struct property_t *objc_property_t;
+#else
+typedef struct old_method *Method;
+typedef struct old_ivar *Ivar;
+typedef struct old_category *Category;
+typedef struct old_property *objc_property_t;
+#endif
#include "objc.h"
#include "runtime.h"
+#include "objc-os.h"
+
+#if __OBJC2__
+#include "objc-runtime-new.h"
+#else
+#include "objc-runtime-old.h"
+#endif
+
#include "maptable.h"
#include "hashtable2.h"
#include "objc-api.h"
__BEGIN_DECLS
-#ifdef __LP64__
-# define WORD_SHIFT 3UL
-# define WORD_MASK 7UL
-#else
-# define WORD_SHIFT 2UL
-# define WORD_MASK 3UL
-#endif
+inline Class objc_object::changeIsa(Class cls)
+{
+ assert(!isTaggedPointer());
+
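+    // Swap in the new isa with a compare-and-swap loop, retrying if another
+    // thread changes the isa underneath us.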
+ Class old;
+ do {
+ old = (Class)isa;
+ } while (!OSAtomicCompareAndSwapPtr(old, cls, (void**)&isa));
+
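+    // Propagate the "instances have associated objects" flag from the old
+    // class to the new one so such instances are still cleaned up correctly.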
+ if (old && old->instancesHaveAssociatedObjects()) {
+ cls->setInstancesHaveAssociatedObjects();
+ }
+
+ return old;
+}
+
#if (defined(OBJC_NO_GC) && SUPPORT_GC) || \
(!defined(OBJC_NO_GC) && !SUPPORT_GC)
#if SUPPORT_GC
# include <auto_zone.h>
- // PRIVATE_EXTERN is needed to help the compiler know "how" extern these are
- PRIVATE_EXTERN extern BOOL UseGC; // equivalent to calling objc_collecting_enabled()
- PRIVATE_EXTERN extern BOOL UseCompaction; // if binary has opted-in for compaction.
+ // PRIVATE_EXTERN is needed to help the compiler know "how" extern these are
+ PRIVATE_EXTERN extern int8_t UseGC; // equivalent to calling objc_collecting_enabled()
PRIVATE_EXTERN extern auto_zone_t *gc_zone; // the GC zone, or NULL if no GC
extern void objc_addRegisteredClass(Class c);
extern void objc_removeRegisteredClass(Class c);
- extern void objc_disableCompaction();
#else
# define UseGC NO
-# define UseCompaction NO
# define gc_zone NULL
# define objc_addRegisteredClass(c) do {} while(0)
# define objc_removeRegisteredClass(c) do {} while(0)
# define AUTO_OBJECT_SCANNED 0
#endif
-#if __OBJC2__
-typedef struct objc_cache *Cache;
-#else
-// definition in runtime.h
-#endif
-
typedef struct {
uint32_t version; // currently 0
#define OBJC_IMAGE_SUPPORTS_GC (1<<1)
#define OBJC_IMAGE_REQUIRES_GC (1<<2)
#define OBJC_IMAGE_OPTIMIZED_BY_DYLD (1<<3)
-#define OBJC_IMAGE_SUPPORTS_COMPACTION (1<<4)
+#define OBJC_IMAGE_SUPPORTS_COMPACTION (1<<4) // might be re-assignable
#define _objcHeaderIsReplacement(h) ((h)->info && ((h)->info->flags & OBJC_IMAGE_IS_REPLACEMENT))
Do fix up selector refs (@selector points to them)
Do fix up class refs (@class and objc_msgSend points to them)
Do fix up protocols (@protocol points to them)
- Do fix up super_class pointers in classes ([super ...] points to them)
+ Do fix up superclass pointers in classes ([super ...] points to them)
Future: do load new classes?
Future: do load new categories?
Future: do insert new methods on existing classes?
#define _objcInfoSupportsGC(info) (((info)->flags & OBJC_IMAGE_SUPPORTS_GC) ? 1 : 0)
#define _objcInfoRequiresGC(info) (((info)->flags & OBJC_IMAGE_REQUIRES_GC) ? 1 : 0)
-#define _objcInfoSupportsCompaction(info) (((info)->flags & OBJC_IMAGE_SUPPORTS_COMPACTION) ? 1 : 0)
#define _objcHeaderSupportsGC(h) ((h)->info && _objcInfoSupportsGC((h)->info))
#define _objcHeaderRequiresGC(h) ((h)->info && _objcInfoRequiresGC((h)->info))
-#define _objcHeaderSupportsCompaction(h) ((h)->info && _objcInfoSupportsCompaction((h)->info))
/* OBJC_IMAGE_SUPPORTS_GC:
was compiled with -fobjc-gc flag, regardless of whether write-barriers were issued
extern void sel_lock(void);
extern void sel_unlock(void);
extern BOOL sel_preoptimizationValid(const header_info *hi);
+extern void sel_nuke_nolock(void);
extern SEL SEL_load;
extern SEL SEL_initialize;
#if __cplusplus
namespace objc_opt { struct objc_selopt_t; };
extern const struct objc_opt::objc_selopt_t *preoptimizedSelectors(void);
-extern struct class_t * getPreoptimizedClass(const char *name);
+extern Class getPreoptimizedClass(const char *name);
#endif
extern Class _calloc_class(size_t size);
-extern IMP lookUpMethod(Class, SEL, BOOL initialize, BOOL cache, id obj);
-extern void lockForMethodLookup(void);
-extern void unlockForMethodLookup(void);
-extern IMP prepareForMethodLookup(Class cls, SEL sel, BOOL initialize, id obj);
+/* method lookup */
+extern IMP lookUpImpOrNil(Class, SEL, id obj, bool initialize, bool cache, bool resolver);
+extern IMP lookUpImpOrForward(Class, SEL, id obj, bool initialize, bool cache, bool resolver);
+
+extern IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel);
-extern IMP _cache_getImp(Class cls, SEL sel);
-extern Method _cache_getMethod(Class cls, SEL sel, IMP objc_msgForward_internal_imp);
+extern bool objcMsgLogEnabled;
+extern bool logMessageSend(bool isClassMethod,
+ const char *objectsClass,
+ const char *implementingClass,
+ SEL selector);
/* message dispatcher */
extern IMP _class_lookupMethodAndLoadCache3(id, SEL, Class);
#if !OBJC_OLD_DISPATCH_PROTOTYPES
-extern void _objc_msgForward_internal(void);
+extern void _objc_msgForward_impcache(void);
extern void _objc_ignored_method(void);
+extern void _objc_msgSend_uncached_impcache(void);
#else
-extern id _objc_msgForward_internal(id, SEL, ...);
+extern id _objc_msgForward_impcache(id, SEL, ...);
extern id _objc_ignored_method(id, SEL, ...);
+extern id _objc_msgSend_uncached_impcache(id, SEL, ...);
#endif
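// Illustrative only: because OBJC_OLD_DISPATCH_PROTOTYPES is defined to 0 in
// this file, the dispatch entry points above are declared without argument
// prototypes, so callers must cast them to a concrete function type before
// invoking them, e.g.:
//
//     id result = ((id (*)(id, SEL))objc_msgSend)(receiver,
//                                                 sel_registerName("self"));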
/* errors */
extern objc_property_attribute_t *copyPropertyAttributeList(const char *attrs, unsigned int *outCount);
extern char *copyPropertyAttributeValue(const char *attrs, const char *name);
-
/* locking */
-/* Every lock used anywhere must be declared here.
- * Locks not declared here may cause gdb deadlocks. */
extern void lock_init(void);
extern rwlock_t selLock;
extern mutex_t cacheUpdateLock;
extern mutex_t methodListLock;
#endif
-/* Debugger mode for gdb */
-#define DEBUGGER_OFF 0
-#define DEBUGGER_PARTIAL 1
-#define DEBUGGER_FULL 2
-extern int startDebuggerMode(void);
-extern void endDebuggerMode(void);
-
+/* Lock debugging */
#if defined(NDEBUG) || TARGET_OS_WIN32
#define mutex_lock(m) _mutex_lock_nodebug(m)
#endif
-extern bool noSideTableLocksHeld(void);
-
#define rwlock_unlock(m, s) \
do { \
if ((s) == RDONLY) rwlock_unlock_read(m); \
} while (0)
-extern NXHashTable *class_hash;
-
#if !TARGET_OS_WIN32
/* nil handler object */
extern id _objc_nilReceiver;
extern void *_objc_forward_handler;
extern void *_objc_forward_stret_handler;
-/* tagged pointer support */
-#if SUPPORT_TAGGED_POINTERS
-
-#define OBJC_IS_TAGGED_PTR(PTR) ((uintptr_t)(PTR) & 0x1)
-extern Class _objc_tagged_isa_table[16];
-
-#else
-
-#define OBJC_IS_TAGGED_PTR(PTR) 0
-
-#endif
-
/* ignored selector support */
/* Non-GC: no ignored selectors
- GC without fixup dispatch: some selectors ignored, remapped to kIgnore
- GC with fixup dispatch: some selectors ignored, but not remapped
+ GC (i386 Mac): some selectors ignored, remapped to kIgnore
+ GC (others): some selectors ignored, but not remapped
*/
static inline int ignoreSelector(SEL sel)
#endif
}
-/* Protocol implementation */
-#if !__OBJC2__
-struct old_protocol;
-struct objc_method_description * lookup_protocol_method(struct old_protocol *proto, SEL aSel, BOOL isRequiredMethod, BOOL isInstanceMethod, BOOL recursive);
-#else
-Method _protocol_getMethod(Protocol *p, SEL sel, BOOL isRequiredMethod, BOOL isInstanceMethod, BOOL recursive);
-#endif
-
/* GC startup */
-extern void gc_init(BOOL wantsGC, BOOL wantsCompaction);
+extern void gc_init(BOOL wantsGC);
extern void gc_init2(void);
/* Exceptions */
extern void gdb_objc_class_changed(Class cls, unsigned long changes, const char *classname)
__attribute__((noinline));
-/* Write barrier implementations */
-extern id objc_assign_strongCast_non_gc(id value, id *dest);
-extern id objc_assign_global_non_gc(id value, id *dest);
-extern id objc_assign_threadlocal_non_gc(id value, id *dest);
-extern id objc_assign_ivar_non_gc(id value, id dest, ptrdiff_t offset);
-extern id objc_assign_strongCast_gc(id val, id *dest);
-extern id objc_assign_global_gc(id val, id *dest);
-extern id objc_assign_threadlocal_gc(id val, id *dest);
-extern id objc_assign_ivar_gc(id value, id dest, ptrdiff_t offset);
+#if SUPPORT_GC
+/* Write barrier implementations */
extern id objc_getAssociatedObject_non_gc(id object, const void *key);
extern void objc_setAssociatedObject_non_gc(id object, const void *key, id value, objc_AssociationPolicy policy);
+
extern id objc_getAssociatedObject_gc(id object, const void *key);
extern void objc_setAssociatedObject_gc(id object, const void *key, id value, objc_AssociationPolicy policy);
-#if SUPPORT_GC
+/* xrefs */
+extern objc_xref_t _object_addExternalReference_non_gc(id obj, objc_xref_t type);
+extern id _object_readExternalReference_non_gc(objc_xref_t ref);
+extern void _object_removeExternalReference_non_gc(objc_xref_t ref);
+
+extern objc_xref_t _object_addExternalReference_gc(id obj, objc_xref_t type);
+extern id _object_readExternalReference_gc(objc_xref_t ref);
+extern void _object_removeExternalReference_gc(objc_xref_t ref);
/* GC weak reference fixup. */
extern void gc_fixup_weakreferences(id newObject, id oldObject);
/* GC datasegment registration. */
extern void gc_register_datasegment(uintptr_t base, size_t size);
extern void gc_unregister_datasegment(uintptr_t base, size_t size);
-extern void gc_fixup_barrier_stubs(const struct dyld_image_info *info);
/* objc_dumpHeap implementation */
extern BOOL _objc_dumpHeap(auto_zone_t *zone, const char *filename);
-/*
- objc_assign_ivar, objc_assign_global, objc_assign_threadlocal, and objc_assign_strongCast MUST NOT be called directly
- from inside libobjc. They live in the data segment, and must be called through the
- following pointer(s) for libobjc to exist in the shared cache.
-
- Note: If we build with GC enabled, gcc will emit calls to the original functions, which will break this.
-*/
-
-extern id (*objc_assign_ivar_internal)(id, id, ptrdiff_t);
-
-#endif
-
-/* Code modification */
-extern size_t objc_branch_size(void *entry, void *target);
-extern size_t objc_write_branch(void *entry, void *target);
-extern size_t objc_cond_branch_size(void *entry, void *target, unsigned cond);
-extern size_t objc_write_cond_branch(void *entry, void *target, unsigned cond);
-#if defined(__i386__) || defined(__x86_64__)
-#define COND_ALWAYS 0xE9 /* JMP rel32 */
-#define COND_NE 0x85 /* JNE rel32 (0F 85) */
#endif
// Settings from environment variables
-#if !SUPPORT_ENVIRON
-# define ENV(x) enum { x = 0 }
-#else
-# define ENV(x) extern int x
-#endif
-ENV(PrintImages); // env OBJC_PRINT_IMAGES
-ENV(PrintLoading); // env OBJC_PRINT_LOAD_METHODS
-ENV(PrintInitializing); // env OBJC_PRINT_INITIALIZE_METHODS
-ENV(PrintResolving); // env OBJC_PRINT_RESOLVED_METHODS
-ENV(PrintConnecting); // env OBJC_PRINT_CLASS_SETUP
-ENV(PrintProtocols); // env OBJC_PRINT_PROTOCOL_SETUP
-ENV(PrintIvars); // env OBJC_PRINT_IVAR_SETUP
-ENV(PrintVtables); // env OBJC_PRINT_VTABLE_SETUP
-ENV(PrintVtableImages); // env OBJC_PRINT_VTABLE_IMAGES
-ENV(PrintFuture); // env OBJC_PRINT_FUTURE_CLASSES
-ENV(PrintGC); // env OBJC_PRINT_GC
-ENV(PrintPreopt); // env OBJC_PRINT_PREOPTIMIZATION
-ENV(PrintCxxCtors); // env OBJC_PRINT_CXX_CTORS
-ENV(PrintExceptions); // env OBJC_PRINT_EXCEPTIONS
-ENV(PrintExceptionThrow); // env OBJC_PRINT_EXCEPTION_THROW
-ENV(PrintAltHandlers); // env OBJC_PRINT_ALT_HANDLERS
-ENV(PrintDeprecation); // env OBJC_PRINT_DEPRECATION_WARNINGS
-ENV(PrintReplacedMethods); // env OBJC_PRINT_REPLACED_METHODS
-ENV(PrintCaches); // env OBJC_PRINT_CACHE_SETUP
-ENV(PrintPoolHiwat); // env OBJC_PRINT_POOL_HIGHWATER
-ENV(PrintCustomRR); // env OBJC_PRINT_CUSTOM_RR
-ENV(PrintCustomAWZ); // env OBJC_PRINT_CUSTOM_AWZ
-ENV(UseInternalZone); // env OBJC_USE_INTERNAL_ZONE
-
-ENV(DebugUnload); // env OBJC_DEBUG_UNLOAD
-ENV(DebugFragileSuperclasses); // env OBJC_DEBUG_FRAGILE_SUPERCLASSES
-ENV(DebugFinalizers); // env OBJC_DEBUG_FINALIZERS
-ENV(DebugNilSync); // env OBJC_DEBUG_NIL_SYNC
-ENV(DebugNonFragileIvars); // env OBJC_DEBUG_NONFRAGILE_IVARS
-ENV(DebugAltHandlers); // env OBJC_DEBUG_ALT_HANDLERS
-
-ENV(DisableGC); // env OBJC_DISABLE_GC
-ENV(DisableVtables); // env OBJC_DISABLE_VTABLES
-ENV(DisablePreopt); // env OBJC_DISABLE_PREOPTIMIZATION
-
-#undef ENV
+#define OPTION(var, env, help) extern bool var;
+#include "objc-env.h"
+#undef OPTION
extern void environ_init(void);
extern _objc_pthread_data *_objc_fetch_pthread_data(BOOL create);
extern void tls_init(void);
-
-// cache.h
-#if TARGET_OS_WIN32
-
-#else
-static inline int isPowerOf2(unsigned long l) { return 1 == __builtin_popcountl(l); }
-#endif
-extern void flush_caches(Class cls, BOOL flush_meta);
-extern void flush_cache(Class cls);
-extern BOOL _cache_fill(Class cls, Method smt, SEL sel);
-extern void _cache_addForwardEntry(Class cls, SEL sel);
-extern IMP _cache_addIgnoredEntry(Class cls, SEL sel);
-extern void _cache_free(Cache cache);
-extern void _cache_collect(bool collectALot);
-
-extern mutex_t cacheUpdateLock;
-
// encoding.h
extern unsigned int encoding_getNumberOfArguments(const char *typedesc);
extern unsigned int encoding_getSizeOfArguments(const char *typedesc);
extern void _destroySyncCache(struct SyncCache *cache);
// arr
-extern void (^objc_arr_log)(const char *, id param);
extern void arr_init(void);
extern id objc_autoreleaseReturnValue(id obj);
// fixme runtime
-extern id look_up_class(const char *aClassName, BOOL includeUnconnected, BOOL includeClassHandler);
+extern Class look_up_class(const char *aClassName, BOOL includeUnconnected, BOOL includeClassHandler);
extern const char *map_images(enum dyld_image_states state, uint32_t infoCount, const struct dyld_image_info infoList[]);
extern const char *map_images_nolock(enum dyld_image_states state, uint32_t infoCount, const struct dyld_image_info infoList[]);
extern const char * load_images(enum dyld_image_states state, uint32_t infoCount, const struct dyld_image_info infoList[]);
extern const header_info *_headerForClass(Class cls);
-extern Class _class_getSuperclass(Class cls);
extern Class _class_remap(Class cls);
-extern BOOL _class_getInfo(Class cls, int info);
-extern const char *_class_getName(Class cls);
-extern size_t _class_getInstanceSize(Class cls);
-extern Class _class_getMeta(Class cls);
-extern BOOL _class_isMetaClass(Class cls);
-extern Cache _class_getCache(Class cls);
-extern void _class_setCache(Class cls, Cache cache);
-extern BOOL _class_isInitializing(Class cls);
-extern BOOL _class_isInitialized(Class cls);
-extern void _class_setInitializing(Class cls);
-extern void _class_setInitialized(Class cls);
extern Class _class_getNonMetaClass(Class cls, id obj);
-extern Method _class_getMethod(Class cls, SEL sel);
-extern Method _class_getMethodNoSuper(Class cls, SEL sel);
-extern Method _class_getMethodNoSuper_nolock(Class cls, SEL sel);
-extern BOOL _class_isLoadable(Class cls);
-extern IMP _class_getLoadMethod(Class cls);
-extern BOOL _class_hasLoadMethod(Class cls);
-extern BOOL _class_hasCxxStructors(Class cls);
-extern BOOL _class_shouldFinalizeOnMainThread(Class cls);
-extern void _class_setFinalizeOnMainThread(Class cls);
-extern BOOL _class_instancesHaveAssociatedObjects(Class cls);
-extern void _class_setInstancesHaveAssociatedObjects(Class cls);
-extern BOOL _class_shouldGrowCache(Class cls);
-extern void _class_setGrowCache(Class cls, BOOL grow);
extern Ivar _class_getVariable(Class cls, const char *name, Class *memberOf);
extern BOOL _class_usesAutomaticRetainRelease(Class cls);
extern uint32_t _class_getInstanceStart(Class cls);
extern BOOL object_cxxConstruct(id obj);
extern void object_cxxDestruct(id obj);
-extern Method _class_resolveMethod(Class cls, SEL sel);
-extern void log_and_fill_cache(Class cls, Class implementer, Method meth, SEL sel);
+extern void _class_resolveMethod(Class cls, SEL sel, id inst);
#define OBJC_WARN_DEPRECATED \
do { \
#endif
-/***********************************************************************
-* object_getClass.
-* Locking: None. If you add locking, tell gdb (rdar://7516456).
-**********************************************************************/
-static inline Class _object_getClass(id obj)
-{
-#if SUPPORT_TAGGED_POINTERS
- if (OBJC_IS_TAGGED_PTR(obj)) {
- uint8_t slotNumber = ((uint8_t) (uint64_t) obj) & 0x0F;
- Class isa = _objc_tagged_isa_table[slotNumber];
- return isa;
- }
-#endif
- if (obj) return obj->isa;
- else return Nil;
-}
-
-
// Global operator new and delete. We must not use any app overrides.
// This ALSO REQUIRES each of these be in libobjc's unexported symbol list.
#if __cplusplus
// lazily allocates it.
class AssociationsManager {
- static OSSpinLock _lock;
+ static spinlock_t _lock;
static AssociationsHashMap *_map; // associative references: object pointer -> PtrPtrHashMap.
public:
- AssociationsManager() { OSSpinLockLock(&_lock); }
- ~AssociationsManager() { OSSpinLockUnlock(&_lock); }
+ AssociationsManager() { spinlock_lock(&_lock); }
+ ~AssociationsManager() { spinlock_unlock(&_lock); }
AssociationsHashMap &associations() {
if (_map == NULL)
}
};
-OSSpinLock AssociationsManager::_lock = OS_SPINLOCK_INIT;
+spinlock_t AssociationsManager::_lock = SPINLOCK_INITIALIZER;
AssociationsHashMap *AssociationsManager::_map = NULL;
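// Usage sketch (not part of the patch): the spinlock-guarded map above backs
// the public associated-object API, reached from client code as:
//
//     static char assocKey;   // the key is just a stable address
//     objc_setAssociatedObject(obj, &assocKey, value,
//                              OBJC_ASSOCIATION_RETAIN_NONATOMIC);
//     id stored = objc_getAssociatedObject(obj, &assocKey);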
// expanded policy bits.
ObjectAssociationMap *refs = new ObjectAssociationMap;
associations[disguised_object] = refs;
(*refs)[key] = ObjcAssociation(policy, new_value);
- _class_setInstancesHaveAssociatedObjects(_object_getClass(object));
+ Class cls = object->getIsa();
+ cls->setInstancesHaveAssociatedObjects();
}
} else {
// setting the association to nil breaks the association.
__BEGIN_DECLS
+// SEL points to characters
+// struct objc_cache is stored in class object
+
+typedef uintptr_t cache_key_t;
+
+#if __LP64__
+ typedef uint32_t mask_t;
+# define MASK_SHIFT ((mask_t)0)
+#else
+ typedef uint16_t mask_t;
+# define MASK_SHIFT ((mask_t)0)
+#endif
+
+struct cache_t {
+ struct bucket_t *buckets;
+ mask_t shiftmask;
+ mask_t occupied;
+
+ mask_t mask() {
+ return shiftmask >> MASK_SHIFT;
+ }
+ mask_t capacity() {
+ return shiftmask ? (shiftmask >> MASK_SHIFT) + 1 : 0;
+ }
+ void setCapacity(uint32_t capacity) {
+ uint32_t newmask = (capacity - 1) << MASK_SHIFT;
+ assert(newmask == (uint32_t)(mask_t)newmask);
+ shiftmask = newmask;
+ }
+
+ void expand();
+ void reallocate(mask_t oldCapacity, mask_t newCapacity);
+ struct bucket_t * find(cache_key_t key);
+
+ static void bad_cache(id receiver, SEL sel, Class isa, bucket_t *bucket) __attribute__((noreturn));
+};
+
+
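// Illustrative sketch: a cache probe under this layout hashes the selector
// key into the bucket array with mask(), wrapping on collisions (bucket_t is
// assumed here to hold a key/imp pair):
//
//     mask_t index = (mask_t)(key & cache->mask());   // initial slot
//     bucket_t *b = &cache->buckets[index];
//     // on a key mismatch, probing continues to neighboring slots,
//     // still wrapped by mask(), until a hit or an empty bucket is found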
// We cannot store flags in the low bits of the 'data' field until we work with
// the 'leaks' team to not think that objc is leaking memory. See radar 8955342
// for more info.
// #define RO_REUSE_ME (1<<6)
// class compiled with -fobjc-arc (automatic retain/release)
#define RO_IS_ARR (1<<7)
+// class has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS)
+#define RO_HAS_CXX_DTOR_ONLY (1<<8)
// class is in an unloadable bundle - must never be set by compiler
#define RO_FROM_BUNDLE (1<<29)
#define RW_SPECIALIZED_VTABLE (1<<22)
// class instances may have associative references
#define RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS (1<<21)
-// class or superclass has .cxx_construct/destruct implementations
-#define RW_HAS_CXX_STRUCTORS (1<<20)
+// class or superclass has .cxx_construct implementation
+#define RW_HAS_CXX_CTOR (1<<20)
+// class or superclass has .cxx_destruct implementation
+#define RW_HAS_CXX_DTOR (1<<19)
// class has instance-specific GC layout
-#define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1 << 19)
+#define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1 << 18)
// class's method list is an array of method lists
-#define RW_METHOD_ARRAY (1<<18)
-
-#if !CLASS_FAST_FLAGS_VIA_RW_DATA
+#define RW_METHOD_ARRAY (1<<17)
+// class or superclass has custom allocWithZone: implementation
+#define RW_HAS_CUSTOM_AWZ (1<<16)
+// class or superclass has custom retain/release/autorelease/retainCount
+#define RW_HAS_CUSTOM_RR (1<<15)
+
+// Flags may be stored in low bits of rw->data_NEVER_USE for fastest access
+#define CLASS_FAST_FLAG_MASK 3
+#if CLASS_FAST_FLAGS_VIA_RW_DATA
+ // reserved for future expansion
+# define CLASS_FAST_FLAG_RESERVED (1<<0)
// class or superclass has custom retain/release/autorelease/retainCount
-# define RW_HAS_CUSTOM_RR (1<<17)
- // class or superclass has custom allocWithZone: implementation
-# define RW_HAS_CUSTOM_AWZ (1<<16)
+# define CLASS_FAST_FLAG_HAS_CUSTOM_RR (1<<1)
+# undef RW_HAS_CUSTOM_RR
#endif
// classref_t is unremapped class_t*
};
};
-typedef struct method_list_t {
+struct method_list_t {
uint32_t entsize_NEVER_USE; // high bits used for fixup markers
uint32_t count;
method_t first;
uint32_t getCount() const {
return count;
}
- method_t& get(uint32_t i) const {
+ method_t& getOrEnd(uint32_t i) const {
+ assert(i <= count);
return *(method_t *)((uint8_t *)&first + i*getEntsize());
}
+ method_t& get(uint32_t i) const {
+ assert(i < count);
+ return getOrEnd(i);
+ }
// iterate methods, taking entsize into account
// fixme need a proper const_iterator
method_iterator(const method_list_t& mlist, uint32_t start = 0)
: entsize(mlist.getEntsize())
, index(start)
- , method(&mlist.get(start))
+ , method(&mlist.getOrEnd(start))
{ }
const method_iterator& operator += (ptrdiff_t delta) {
method_iterator begin() const { return method_iterator(*this, 0); }
method_iterator end() const { return method_iterator(*this, getCount()); }
-} method_list_t;
+};
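// Illustrative sketch: with the bounds-checked accessors above, a method list
// can be walked by index without reading past count:
//
//     for (uint32_t i = 0; i < mlist->getCount(); i++) {
//         method_t &m = mlist->get(i);   // entsize-aware element access
//         // ... inspect m ...
//     }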
-typedef struct ivar_t {
- // *offset is 64-bit by accident even though other
- // fields restrict total instance size to 32-bit.
- uintptr_t *offset;
+struct ivar_t {
+#if __x86_64__
+ // *offset was originally 64-bit on some x86_64 platforms.
+ // We read and write only 32 bits of it.
+ // Some metadata provides all 64 bits. This is harmless for unsigned
+ // little-endian values.
+ // Some code uses all 64 bits. class_addIvar() over-allocates the
+ // offset for their benefit.
+#endif
+ int32_t *offset;
const char *name;
const char *type;
- // alignment is sometimes -1; use ivar_alignment() instead
- uint32_t alignment __attribute__((deprecated));
+ // alignment is sometimes -1; use alignment() instead
+ uint32_t alignment_raw;
uint32_t size;
-} ivar_t;
-typedef struct ivar_list_t {
+ uint32_t alignment() {
+ if (alignment_raw == ~(uint32_t)0) return 1U << WORD_SHIFT;
+ return 1 << alignment_raw;
+ }
+};
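// Worked example: alignment_raw == 3 yields 1 << 3 == 8-byte alignment; the
// legacy sentinel ~0 falls back to pointer alignment (1 << WORD_SHIFT).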
+
+struct ivar_list_t {
uint32_t entsize;
uint32_t count;
ivar_t first;
-} ivar_list_t;
+};
-typedef struct objc_property {
+struct property_t {
const char *name;
const char *attributes;
-} property_t;
+};
-typedef struct property_list_t {
+struct property_list_t {
uint32_t entsize;
uint32_t count;
property_t first;
-} property_list_t;
+};
typedef uintptr_t protocol_ref_t; // protocol_t *, but unremapped
-typedef struct protocol_t {
- id isa;
+#define PROTOCOL_FIXED_UP (1<<31) // must never be set by compiler
+
+struct protocol_t : objc_object {
const char *name;
struct protocol_list_t *protocols;
method_list_t *instanceMethods;
uint32_t flags;
const char **extendedMethodTypes;
+ bool isFixedUp() const {
+ return flags & PROTOCOL_FIXED_UP;
+ }
+
bool hasExtendedMethodTypesField() const {
return size >= (offsetof(protocol_t, extendedMethodTypes)
+ sizeof(extendedMethodTypes));
bool hasExtendedMethodTypes() const {
return hasExtendedMethodTypesField() && extendedMethodTypes;
}
-} protocol_t;
+};
-typedef struct protocol_list_t {
+struct protocol_list_t {
// count is 64-bit by accident.
uintptr_t count;
protocol_ref_t list[0]; // variable-size
-} protocol_list_t;
+};
-typedef struct class_ro_t {
+struct class_ro_t {
uint32_t flags;
uint32_t instanceStart;
uint32_t instanceSize;
const uint8_t * weakIvarLayout;
const property_list_t *baseProperties;
-} class_ro_t;
+};
-typedef struct class_rw_t {
+struct class_rw_t {
uint32_t flags;
uint32_t version;
struct chained_property_list *properties;
const protocol_list_t ** protocols;
- struct class_t *firstSubclass;
- struct class_t *nextSiblingClass;
-} class_rw_t;
+ Class firstSubclass;
+ Class nextSiblingClass;
+};
-typedef struct class_t {
- struct class_t *isa;
- struct class_t *superclass;
- Cache cache;
- IMP *vtable;
+struct objc_class : objc_object {
+ // Class ISA;
+ Class superclass;
+ cache_t cache;
uintptr_t data_NEVER_USE; // class_rw_t * plus custom rr/alloc flags
- class_rw_t *data() const {
- return (class_rw_t *)(data_NEVER_USE & ~(uintptr_t)3);
+ class_rw_t *data() {
+ return (class_rw_t *)(data_NEVER_USE & ~CLASS_FAST_FLAG_MASK);
}
void setData(class_rw_t *newData) {
- uintptr_t flags = (uintptr_t)data_NEVER_USE & (uintptr_t)3;
+ uintptr_t flags = (uintptr_t)data_NEVER_USE & CLASS_FAST_FLAG_MASK;
data_NEVER_USE = (uintptr_t)newData | flags;
}
- bool hasCustomRR() const {
+ void setInfo(uint32_t set) {
+ assert(isFuture() || isRealized());
+ OSAtomicOr32Barrier(set, (volatile uint32_t *)&data()->flags);
+ }
+
+ void clearInfo(uint32_t clear) {
+ assert(isFuture() || isRealized());
+ OSAtomicXor32Barrier(clear, (volatile uint32_t *)&data()->flags);
+ }
+
+ // set and clear must not overlap
+ void changeInfo(uint32_t set, uint32_t clear) {
+ assert(isFuture() || isRealized());
+ assert((set & clear) == 0);
+
+ uint32_t oldf, newf;
+ do {
+ oldf = data()->flags;
+ newf = (oldf | set) & ~clear;
+ } while (!OSAtomicCompareAndSwap32Barrier(oldf, newf, (volatile int32_t *)&data()->flags));
+ }
+
+ bool hasCustomRR() {
#if CLASS_FAST_FLAGS_VIA_RW_DATA
- return data_NEVER_USE & (uintptr_t)1;
+ return data_NEVER_USE & CLASS_FAST_FLAG_HAS_CUSTOM_RR;
#else
return data()->flags & RW_HAS_CUSTOM_RR;
#endif
}
void setHasCustomRR(bool inherited = false);
- bool hasCustomAWZ() const {
-#if CLASS_FAST_FLAGS_VIA_RW_DATA
- return data_NEVER_USE & (uintptr_t)2;
-#else
- return data()->flags & RW_HAS_CUSTOM_AWZ;
-#endif
+ bool hasCustomAWZ() {
+ return true;
+ // return data()->flags & RW_HAS_CUSTOM_AWZ;
}
void setHasCustomAWZ(bool inherited = false);
- bool isRootClass() const {
- return superclass == NULL;
+ bool hasCxxCtor() {
+ // addSubclass() propagates this flag from the superclass.
+ assert(isRealized());
+ return data()->flags & RW_HAS_CXX_CTOR;
}
- bool isRootMetaclass() const {
- return isa == this;
+
+ bool hasCxxDtor() {
+ // addSubclass() propagates this flag from the superclass.
+ assert(isRealized());
+ return data()->flags & RW_HAS_CXX_DTOR;
+ }
+
+ bool instancesHaveAssociatedObjects() {
+ // this may be an unrealized future class in the CF-bridged case
+ assert(isFuture() || isRealized());
+ return data()->flags & RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS;
}
-} class_t;
-typedef struct category_t {
+ void setInstancesHaveAssociatedObjects() {
+ // this may be an unrealized future class in the CF-bridged case
+ assert(isFuture() || isRealized());
+ setInfo(RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS);
+ }
+
+ bool shouldGrowCache() {
+ return true;
+ }
+
+ void setShouldGrowCache(bool) {
+ // fixme good or bad for memory use?
+ }
+
+ bool shouldFinalizeOnMainThread() {
+ // finishInitializing() propagates this flag from the superclass.
+ assert(isRealized());
+ return data()->flags & RW_FINALIZE_ON_MAIN_THREAD;
+ }
+
+ void setShouldFinalizeOnMainThread() {
+ assert(isRealized());
+ setInfo(RW_FINALIZE_ON_MAIN_THREAD);
+ }
+
+ bool isInitializing() {
+ return getMeta()->data()->flags & RW_INITIALIZING;
+ }
+
+ void setInitializing() {
+ assert(!isMetaClass());
+ ISA()->setInfo(RW_INITIALIZING);
+ }
+
+ bool isInitialized() {
+ return getMeta()->data()->flags & RW_INITIALIZED;
+ }
+
+ // assumes this is a metaclass already
+ bool isInitialized_meta() {
+ return (data()->flags & RW_INITIALIZED);
+ }
+
+ void setInitialized();
+
+ bool isLoadable() {
+ assert(isRealized());
+ return true; // any class registered for +load is definitely loadable
+ }
+
+ IMP getLoadMethod();
+
+ // Locking: To prevent concurrent realization, hold runtimeLock.
+ bool isRealized() {
+ return data()->flags & RW_REALIZED;
+ }
+
+ // Returns true if this is an unrealized future class.
+ // Locking: To prevent concurrent realization, hold runtimeLock.
+ bool isFuture() {
+ return data()->flags & RW_FUTURE;
+ }
+
+ bool isMetaClass() {
+ assert(this);
+ assert(isRealized());
+ return data()->ro->flags & RO_META;
+ }
+
+ // NOT identical to this->ISA when this is a metaclass
+ Class getMeta() {
+ if (isMetaClass()) return (Class)this;
+ else return this->ISA();
+ }
+
+ bool isRootClass() {
+ return superclass == nil;
+ }
+ bool isRootMetaclass() {
+ return ISA() == (Class)this;
+ }
+
+ const char *getName() { return name(); }
+ const char *name() {
+ // fixme can't assert locks here
+ assert(this);
+
+ if (isRealized() || isFuture()) {
+ return data()->ro->name;
+ } else {
+ return ((const class_ro_t *)data())->name;
+ }
+ }
+
+ // May be unaligned depending on class's ivars.
+ uint32_t unalignedInstanceSize() {
+ assert(isRealized());
+ return data()->ro->instanceSize;
+ }
+
+ // Class's ivar size rounded up to a pointer-size boundary.
+ uint32_t alignedInstanceSize() {
+ return (unalignedInstanceSize() + WORD_MASK) & ~WORD_MASK;
+ }
+};
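// Worked example: on LP64 (WORD_MASK == 7), an unalignedInstanceSize() of 13
// rounds up as (13 + 7) & ~7 == 16, the next pointer-size boundary.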
+
+struct category_t {
const char *name;
classref_t cls;
struct method_list_t *instanceMethods;
struct method_list_t *classMethods;
struct protocol_list_t *protocols;
struct property_list_t *instanceProperties;
-} category_t;
+};
struct objc_super2 {
id receiver;
Class current_class;
};
-typedef struct {
+struct message_ref_t {
IMP imp;
SEL sel;
-} message_ref_t;
+};
+
+
+extern Method protocol_getMethod(protocol_t *p, SEL sel, bool isRequiredMethod, bool isInstanceMethod, bool recursive);
+
+
+#define FOREACH_REALIZED_CLASS_AND_SUBCLASS(_c, _cls, code) \
+ do { \
+ rwlock_assert_writing(&runtimeLock); \
+ assert(_cls); \
+ Class _top = _cls; \
+ Class _c = _top; \
+ while (1) { \
+ code \
+ if (_c->data()->firstSubclass) { \
+ _c = _c->data()->firstSubclass; \
+ } else { \
+ while (!_c->data()->nextSiblingClass && _c != _top) { \
+ _c = _c->superclass; \
+ } \
+ if (_c == _top) break; \
+ _c = _c->data()->nextSiblingClass; \
+ } \
+ } \
+ } while (0)
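// Usage sketch (hypothetical helper name): apply an operation to a realized
// class and every realized subclass while holding runtimeLock for writing:
//
//     FOREACH_REALIZED_CLASS_AND_SUBCLASS(c, cls, {
//         markClassDirty(c);   // one statement executed per visited class
//     });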
__END_DECLS
#include "objc-private.h"
#include "objc-runtime-new.h"
#include "objc-file.h"
+#include "objc-cache.h"
#include <objc/message.h>
#include <mach/shared_region.h>
-#define newcls(cls) ((class_t *)cls)
-#define newmethod(meth) ((method_t *)meth)
-#define newivar(ivar) ((ivar_t *)ivar)
-#define newcategory(cat) ((category_t *)cat)
#define newprotocol(p) ((protocol_t *)p)
-#define newproperty(p) ((property_t *)p)
-
-static const char *getName(class_t *cls);
-static uint32_t unalignedInstanceSize(class_t *cls);
-static uint32_t alignedInstanceSize(class_t *cls);
-static BOOL isMetaClass(class_t *cls);
-static class_t *getSuperclass(class_t *cls);
-static void detach_class(class_t *cls, BOOL isMeta);
-static void free_class(class_t *cls);
-static class_t *setSuperclass(class_t *cls, class_t *newSuper);
-static class_t *realizeClass(class_t *cls);
-static void flushCaches(class_t *cls);
-static void flushVtables(class_t *cls);
-static method_t *getMethodNoSuper_nolock(class_t *cls, SEL sel);
-static method_t *getMethod_nolock(class_t *cls, SEL sel);
-static void changeInfo(class_t *cls, unsigned int set, unsigned int clear);
+
+static void disableTaggedPointers();
+static void detach_class(Class cls, BOOL isMeta);
+static void free_class(Class cls);
+static Class setSuperclass(Class cls, Class newSuper);
+static Class realizeClass(Class cls);
+static method_t *getMethodNoSuper_nolock(Class cls, SEL sel);
+static method_t *getMethod_nolock(Class cls, SEL sel);
static IMP _method_getImplementation(method_t *m);
-static BOOL hasCxxStructors(class_t *cls);
-static IMP addMethod(class_t *cls, SEL name, IMP imp, const char *types, BOOL replace);
+static IMP addMethod(Class cls, SEL name, IMP imp, const char *types, BOOL replace);
static NXHashTable *realizedClasses(void);
static bool isRRSelector(SEL sel);
static bool isAWZSelector(SEL sel);
-static void updateCustomRR_AWZ(class_t *cls, method_t *meth);
+static void updateCustomRR_AWZ(Class cls, method_t *meth);
static method_t *search_method_list(const method_list_t *mlist, SEL sel);
+#if SUPPORT_FIXUP
+static void fixupMessageRef(message_ref_t *msg);
+#endif
id objc_noop_imp(id self, SEL _cmd __unused) {
return self;
}
+
/***********************************************************************
* Lock management
-* Every lock used anywhere must be managed here.
-* Locks not managed here may cause gdb deadlocks.
**********************************************************************/
rwlock_t runtimeLock;
rwlock_t selLock;
mutex_t cacheUpdateLock = MUTEX_INITIALIZER;
recursive_mutex_t loadMethodLock = RECURSIVE_MUTEX_INITIALIZER;
-static int debugger_runtimeLock;
-static int debugger_selLock;
-static int debugger_cacheUpdateLock;
-static int debugger_loadMethodLock;
-#define RDONLY 1
-#define RDWR 2
void lock_init(void)
{
}
-/***********************************************************************
-* startDebuggerMode
-* Attempt to acquire some locks for debugger mode.
-* Returns 0 if debugger mode failed because too many locks are unavailable.
-*
-* Locks successfully acquired are held until endDebuggerMode().
-* Locks not acquired are off-limits until endDebuggerMode(); any
-* attempt to manipulate them will cause a trap.
-* Locks not handled here may cause deadlocks in gdb.
-**********************************************************************/
-int startDebuggerMode(void)
-{
- int result = DEBUGGER_FULL;
-
- // runtimeLock is required (can't do much without it)
- if (rwlock_try_write(&runtimeLock)) {
- debugger_runtimeLock = RDWR;
- } else if (rwlock_try_read(&runtimeLock)) {
- debugger_runtimeLock = RDONLY;
- result = DEBUGGER_PARTIAL;
- } else {
- return DEBUGGER_OFF;
- }
-
- // cacheUpdateLock is required (must not fail a necessary cache flush)
- // must be AFTER runtimeLock to avoid lock inversion
- if (mutex_try_lock(&cacheUpdateLock)) {
- debugger_cacheUpdateLock = RDWR;
- } else {
- rwlock_unlock(&runtimeLock, debugger_runtimeLock);
- debugger_runtimeLock = 0;
- return DEBUGGER_OFF;
- }
-
- // side table locks are not optional
- if (!noSideTableLocksHeld()) {
- rwlock_unlock(&runtimeLock, debugger_runtimeLock);
- mutex_unlock(&cacheUpdateLock);
- debugger_runtimeLock = 0;
- return DEBUGGER_OFF;
- }
-
- // selLock is optional
- if (rwlock_try_write(&selLock)) {
- debugger_selLock = RDWR;
- } else if (rwlock_try_read(&selLock)) {
- debugger_selLock = RDONLY;
- result = DEBUGGER_PARTIAL;
- } else {
- debugger_selLock = 0;
- result = DEBUGGER_PARTIAL;
- }
-
- // loadMethodLock is optional
- if (recursive_mutex_try_lock(&loadMethodLock)) {
- debugger_loadMethodLock = RDWR;
- } else {
- debugger_loadMethodLock = 0;
- result = DEBUGGER_PARTIAL;
- }
-
- return result;
-}
-
-/***********************************************************************
-* endDebuggerMode
-* Relinquish locks acquired in startDebuggerMode().
-**********************************************************************/
-void endDebuggerMode(void)
-{
- assert(debugger_runtimeLock != 0);
-
- rwlock_unlock(&runtimeLock, debugger_runtimeLock);
- debugger_runtimeLock = 0;
-
- rwlock_unlock(&selLock, debugger_selLock);
- debugger_selLock = 0;
-
- assert(debugger_cacheUpdateLock == RDWR);
- mutex_unlock(&cacheUpdateLock);
- debugger_cacheUpdateLock = 0;
-
- if (debugger_loadMethodLock) {
- recursive_mutex_unlock(&loadMethodLock);
- debugger_loadMethodLock = 0;
- }
-}
-
-/***********************************************************************
-* isManagedDuringDebugger
-* Returns YES if the given lock is handled specially during debugger
-* mode (i.e. debugger mode tries to acquire it).
-**********************************************************************/
-BOOL isManagedDuringDebugger(void *lock)
-{
- if (lock == &selLock) return YES;
- if (lock == &cacheUpdateLock) return YES;
- if (lock == &runtimeLock) return YES;
- if (lock == &loadMethodLock) return YES;
- return NO;
-}
-
-/***********************************************************************
-* isLockedDuringDebugger
-* Returns YES if the given mutex was acquired by debugger mode.
-* Locking a managed mutex during debugger mode causes a trap unless
-* this returns YES.
-**********************************************************************/
-BOOL isLockedDuringDebugger(void *lock)
-{
- assert(DebuggerMode);
-
- if (lock == &cacheUpdateLock) return YES;
- if (lock == (mutex_t *)&loadMethodLock) return YES;
- return NO;
-}
-
-/***********************************************************************
-* isReadingDuringDebugger
-* Returns YES if the given rwlock was read-locked by debugger mode.
-* Read-locking a managed rwlock during debugger mode causes a trap unless
-* this returns YES.
-**********************************************************************/
-BOOL isReadingDuringDebugger(rwlock_t *lock)
-{
- assert(DebuggerMode);
-
- // read-lock is allowed even if debugger mode actually write-locked it
- if (debugger_runtimeLock && lock == &runtimeLock) return YES;
- if (debugger_selLock && lock == &selLock) return YES;
-
- return NO;
-}
-
-/***********************************************************************
-* isWritingDuringDebugger
-* Returns YES if the given rwlock was write-locked by debugger mode.
-* Write-locking a managed rwlock during debugger mode causes a trap unless
-* this returns YES.
-**********************************************************************/
-BOOL isWritingDuringDebugger(rwlock_t *lock)
-{
- assert(DebuggerMode);
-
- if (debugger_runtimeLock == RDWR && lock == &runtimeLock) return YES;
- if (debugger_selLock == RDWR && lock == &selLock) return YES;
-
- return NO;
-}
-
-
-/***********************************************************************
-* vtable dispatch
-*
-* Every class gets a vtable pointer. The vtable is an array of IMPs.
-* The selectors represented in the vtable are the same for all classes
-* (i.e. no class has a bigger or smaller vtable).
-* Each vtable index has an associated trampoline which dispatches to
-* the IMP at that index for the receiver class's vtable (after
-* checking for NULL). Dispatch fixup uses these trampolines instead
-* of objc_msgSend.
-* Fragility: The vtable size and list of selectors is chosen at launch
-* time. No compiler-generated code depends on any particular vtable
-* configuration, or even the use of vtable dispatch at all.
-* Memory size: If a class's vtable is identical to its superclass's
-* (i.e. the class overrides none of the vtable selectors), then
-* the class points directly to its superclass's vtable. This means
-* selectors to be included in the vtable should be chosen so they are
-* (1) frequently called, but (2) not too frequently overridden. In
-* particular, -dealloc is a bad choice.
-* Forwarding: If a class doesn't implement some vtable selector, that
-* selector's IMP is set to objc_msgSend in that class's vtable.
-* +initialize: Each class keeps the default vtable (which always
-* redirects to objc_msgSend) until its +initialize is completed.
-* Otherwise, the first message to a class could be a vtable dispatch,
-* and the vtable trampoline doesn't include +initialize checking.
-* Changes: Categories, addMethod, and setImplementation all force vtable
-* reconstruction for the class and all of its subclasses, if the
-* vtable selectors are affected.
-**********************************************************************/
-
-/***********************************************************************
-* ABI WARNING ABI WARNING ABI WARNING ABI WARNING ABI WARNING
-* vtable_prototype on x86_64 steals %rax and does not clear %rdx on return
-* This means vtable dispatch must never be used for vararg calls
-* or very large return values.
-* ABI WARNING ABI WARNING ABI WARNING ABI WARNING ABI WARNING
-**********************************************************************/
-
-#define X8(x) \
- x x x x x x x x
-#define X64(x) \
- X8(x) X8(x) X8(x) X8(x) X8(x) X8(x) X8(x) X8(x)
-#define X128(x) \
- X64(x) X64(x)
-
-#define vtableMax 128
-
-// hack to avoid conflicts with compiler's internal declaration
-asm("\n .data"
- "\n .globl __objc_empty_vtable "
- "\n __objc_empty_vtable:"
-#if __LP64__
- X128("\n .quad _objc_msgSend")
-#else
- X128("\n .long _objc_msgSend")
-#endif
- );
-
-#if SUPPORT_VTABLE
-
-// Trampoline descriptors for gdb.
-
-objc_trampoline_header *gdb_objc_trampolines = NULL;
-
-void gdb_objc_trampolines_changed(objc_trampoline_header *thdr) __attribute__((noinline));
-void gdb_objc_trampolines_changed(objc_trampoline_header *thdr)
-{
- rwlock_assert_writing(&runtimeLock);
- assert(thdr == gdb_objc_trampolines);
-
- if (PrintVtables) {
- _objc_inform("VTABLES: gdb_objc_trampolines_changed(%p)", thdr);
- }
-}
-
-// fixme workaround for rdar://6667753
-static void appendTrampolines(objc_trampoline_header *thdr) __attribute__((noinline));
-
-static void appendTrampolines(objc_trampoline_header *thdr)
-{
- rwlock_assert_writing(&runtimeLock);
- assert(thdr->next == NULL);
-
- if (gdb_objc_trampolines != thdr->next) {
- thdr->next = gdb_objc_trampolines;
- }
- gdb_objc_trampolines = thdr;
-
- gdb_objc_trampolines_changed(thdr);
-}
-
-// Vtable management.
-
-static size_t vtableStrlen;
-static size_t vtableCount;
-static SEL *vtableSelectors;
-static IMP *vtableTrampolines;
-static const char * const defaultVtable[] = {
- "allocWithZone:",
- "alloc",
- "class",
- "self",
- "isKindOfClass:",
- "respondsToSelector:",
- "isFlipped",
- "length",
- "objectForKey:",
- "count",
- "objectAtIndex:",
- "isEqualToString:",
- "isEqual:",
- "retain",
- "release",
- "autorelease",
-};
-static const char * const defaultVtableGC[] = {
- "allocWithZone:",
- "alloc",
- "class",
- "self",
- "isKindOfClass:",
- "respondsToSelector:",
- "isFlipped",
- "length",
- "objectForKey:",
- "count",
- "objectAtIndex:",
- "isEqualToString:",
- "isEqual:",
- "hash",
- "addObject:",
- "countByEnumeratingWithState:objects:count:",
-};
-
-OBJC_EXTERN void objc_msgSend_vtable0(void);
-OBJC_EXTERN void objc_msgSend_vtable1(void);
-OBJC_EXTERN void objc_msgSend_vtable2(void);
-OBJC_EXTERN void objc_msgSend_vtable3(void);
-OBJC_EXTERN void objc_msgSend_vtable4(void);
-OBJC_EXTERN void objc_msgSend_vtable5(void);
-OBJC_EXTERN void objc_msgSend_vtable6(void);
-OBJC_EXTERN void objc_msgSend_vtable7(void);
-OBJC_EXTERN void objc_msgSend_vtable8(void);
-OBJC_EXTERN void objc_msgSend_vtable9(void);
-OBJC_EXTERN void objc_msgSend_vtable10(void);
-OBJC_EXTERN void objc_msgSend_vtable11(void);
-OBJC_EXTERN void objc_msgSend_vtable12(void);
-OBJC_EXTERN void objc_msgSend_vtable13(void);
-OBJC_EXTERN void objc_msgSend_vtable14(void);
-OBJC_EXTERN void objc_msgSend_vtable15(void);
-
-static IMP const defaultVtableTrampolines[] = {
- (IMP)objc_msgSend_vtable0,
- (IMP)objc_msgSend_vtable1,
- (IMP)objc_msgSend_vtable2,
- (IMP)objc_msgSend_vtable3,
- (IMP)objc_msgSend_vtable4,
- (IMP)objc_msgSend_vtable5,
- (IMP)objc_msgSend_vtable6,
- (IMP)objc_msgSend_vtable7,
- (IMP)objc_msgSend_vtable8,
- (IMP)objc_msgSend_vtable9,
- (IMP)objc_msgSend_vtable10,
- (IMP)objc_msgSend_vtable11,
- (IMP)objc_msgSend_vtable12,
- (IMP)objc_msgSend_vtable13,
- (IMP)objc_msgSend_vtable14,
- (IMP)objc_msgSend_vtable15,
-};
-extern objc_trampoline_header defaultVtableTrampolineDescriptors;
-
-static void check_vtable_size(void) __unused;
-static void check_vtable_size(void)
-{
- // Fail to compile if vtable sizes don't match.
- int c1[sizeof(defaultVtableTrampolines)-sizeof(defaultVtable)] __unused;
- int c2[sizeof(defaultVtable)-sizeof(defaultVtableTrampolines)] __unused;
- int c3[sizeof(defaultVtableTrampolines)-sizeof(defaultVtableGC)] __unused;
- int c4[sizeof(defaultVtableGC)-sizeof(defaultVtableTrampolines)] __unused;
-
- // Fail to compile if vtableMax is too small
- int c5[vtableMax - sizeof(defaultVtable)] __unused;
- int c6[vtableMax - sizeof(defaultVtableGC)] __unused;
-}
-
-
-extern uint8_t vtable_prototype;
-extern uint8_t vtable_ignored;
-extern int vtable_prototype_size;
-extern int vtable_prototype_index_offset;
-extern int vtable_prototype_index2_offset;
-extern int vtable_prototype_tagtable_offset;
-extern int vtable_prototype_tagtable_size;
-static size_t makeVtableTrampoline(uint8_t *dst, size_t index)
-{
- // copy boilerplate
- memcpy(dst, &vtable_prototype, vtable_prototype_size);
-
- // insert indexes
-#if defined(__x86_64__)
- if (index > 255) _objc_fatal("vtable_prototype busted");
- {
- // `jmpq *0x7fff(%rax)` ff a0 ff 7f
- uint16_t *p = (uint16_t *)(dst + vtable_prototype_index_offset + 2);
- if (*p != 0x7fff) _objc_fatal("vtable_prototype busted");
- *p = index * 8;
- }
- {
- uint16_t *p = (uint16_t *)(dst + vtable_prototype_index2_offset + 2);
- if (*p != 0x7fff) _objc_fatal("vtable_prototype busted");
- *p = index * 8;
- }
-#else
-# warning unknown architecture
-#endif
-
- // insert tagged isa table
-#if defined(__x86_64__)
- {
- // `movq $0x1122334455667788, %r10` 49 ba 88 77 66 55 44 33 22 11
- if (vtable_prototype_tagtable_size != 10) {
- _objc_fatal("vtable_prototype busted");
- }
- uint8_t *p = (uint8_t *)(dst + vtable_prototype_tagtable_offset);
- if (*p++ != 0x49) _objc_fatal("vtable_prototype busted");
- if (*p++ != 0xba) _objc_fatal("vtable_prototype busted");
- if (*(uintptr_t *)p != 0x1122334455667788) {
- _objc_fatal("vtable_prototype busted");
- }
- uintptr_t addr = (uintptr_t)_objc_tagged_isa_table;
- memcpy(p, &addr, sizeof(addr));
- }
-#else
-# warning unknown architecture
-#endif
-
- return vtable_prototype_size;
-}
-
-
-static void initVtables(void)
-{
- if (DisableVtables) {
- if (PrintVtables) {
- _objc_inform("VTABLES: vtable dispatch disabled by OBJC_DISABLE_VTABLES");
- }
- vtableCount = 0;
- vtableSelectors = NULL;
- vtableTrampolines = NULL;
- return;
- }
-
- const char * const *names;
- size_t i;
-
- if (UseGC) {
- names = defaultVtableGC;
- vtableCount = sizeof(defaultVtableGC) / sizeof(defaultVtableGC[0]);
- } else {
- names = defaultVtable;
- vtableCount = sizeof(defaultVtable) / sizeof(defaultVtable[0]);
- }
- if (vtableCount > vtableMax) vtableCount = vtableMax;
-
- vtableSelectors = (SEL*)_malloc_internal(vtableCount * sizeof(SEL));
- vtableTrampolines = (IMP*)_malloc_internal(vtableCount * sizeof(IMP));
-
- // Built-in trampolines and their descriptors
-
- size_t defaultVtableTrampolineCount =
- sizeof(defaultVtableTrampolines) / sizeof(defaultVtableTrampolines[0]);
-#ifndef NDEBUG
- // debug: use generated code for 3/4 of the table
- // Disabled even in Debug builds to avoid breaking backtrace symbol names.
- // defaultVtableTrampolineCount /= 4;
-#endif
-
- for (i = 0; i < defaultVtableTrampolineCount && i < vtableCount; i++) {
- vtableSelectors[i] = sel_registerName(names[i]);
- vtableTrampolines[i] = defaultVtableTrampolines[i];
- }
- appendTrampolines(&defaultVtableTrampolineDescriptors);
-
-
- // Generated trampolines and their descriptors
-
- if (vtableCount > defaultVtableTrampolineCount) {
- // Memory for trampoline code
- size_t generatedCount =
- vtableCount - defaultVtableTrampolineCount;
-
- const int align = 16;
- size_t codeSize =
- round_page(sizeof(objc_trampoline_header) + align +
- generatedCount * (sizeof(objc_trampoline_descriptor)
- + vtable_prototype_size + align));
- void *codeAddr = mmap(0, codeSize, PROT_READ|PROT_WRITE,
- MAP_PRIVATE|MAP_ANON,
- VM_MAKE_TAG(VM_MEMORY_OBJC_DISPATCHERS), 0);
- uint8_t *t = (uint8_t *)codeAddr;
-
- // Trampoline header
- objc_trampoline_header *thdr = (objc_trampoline_header *)t;
- thdr->headerSize = sizeof(objc_trampoline_header);
- thdr->descSize = sizeof(objc_trampoline_descriptor);
- thdr->descCount = (uint32_t)generatedCount;
- thdr->next = NULL;
-
- // Trampoline descriptors
- objc_trampoline_descriptor *tdesc = (objc_trampoline_descriptor *)(thdr+1);
- t = (uint8_t *)&tdesc[generatedCount];
- t += align - ((uintptr_t)t % align);
-
- // Dispatch code
- size_t tdi;
- for (i = defaultVtableTrampolineCount, tdi = 0;
- i < vtableCount;
- i++, tdi++)
- {
- vtableSelectors[i] = sel_registerName(names[i]);
- if (ignoreSelector(vtableSelectors[i])) {
- vtableTrampolines[i] = (IMP)&vtable_ignored;
- tdesc[tdi].offset = 0;
- tdesc[tdi].flags = 0;
- } else {
- vtableTrampolines[i] = (IMP)t;
- tdesc[tdi].offset =
- (uint32_t)((uintptr_t)t - (uintptr_t)&tdesc[tdi]);
- tdesc[tdi].flags =
- OBJC_TRAMPOLINE_MESSAGE|OBJC_TRAMPOLINE_VTABLE;
-
- t += makeVtableTrampoline(t, i);
- t += align - ((uintptr_t)t % align);
- }
- }
-
- appendTrampolines(thdr);
- sys_icache_invalidate(codeAddr, codeSize);
- mprotect(codeAddr, codeSize, PROT_READ|PROT_EXEC);
- }
-
-
- if (PrintVtables) {
- for (i = 0; i < vtableCount; i++) {
- _objc_inform("VTABLES: vtable[%zu] %p %s",
- i, vtableTrampolines[i],
- sel_getName(vtableSelectors[i]));
- }
- }
-
- if (PrintVtableImages) {
- _objc_inform("VTABLE IMAGES: '#' implemented by class");
- _objc_inform("VTABLE IMAGES: '-' inherited from superclass");
- _objc_inform("VTABLE IMAGES: ' ' not implemented");
- for (i = 0; i <= vtableCount; i++) {
- char spaces[vtableCount+1+1];
- size_t j;
- for (j = 0; j < i; j++) {
- spaces[j] = '|';
- }
- spaces[j] = '\0';
- _objc_inform("VTABLE IMAGES: %s%s", spaces,
- i<vtableCount ? sel_getName(vtableSelectors[i]) : "");
- }
- }
-
- if (PrintVtables || PrintVtableImages) {
- vtableStrlen = 0;
- for (i = 0; i < vtableCount; i++) {
- vtableStrlen += strlen(sel_getName(vtableSelectors[i]));
- }
- }
-}
-
-
-static int vtable_getIndex(SEL sel)
-{
- unsigned int i;
- for (i = 0; i < vtableCount; i++) {
- if (vtableSelectors[i] == sel) return i;
- }
- return -1;
-}
-
-static BOOL vtable_containsSelector(SEL sel)
-{
- return (vtable_getIndex(sel) < 0) ? NO : YES;
-}
-
-static void printVtableOverrides(class_t *cls, class_t *supercls)
-{
- char overrideMap[vtableCount+1];
- unsigned int i;
-
- if (supercls) {
- size_t overridesBufferSize = vtableStrlen + 2*vtableCount + 1;
- char *overrides =
- (char *)_calloc_internal(overridesBufferSize, 1);
- for (i = 0; i < vtableCount; i++) {
- if (ignoreSelector(vtableSelectors[i])) {
- overrideMap[i] = '-';
- continue;
- }
- if (getMethodNoSuper_nolock(cls, vtableSelectors[i])) {
- strlcat(overrides, sel_getName(vtableSelectors[i]), overridesBufferSize);
- strlcat(overrides, ", ", overridesBufferSize);
- overrideMap[i] = '#';
- } else if (getMethod_nolock(cls, vtableSelectors[i])) {
- overrideMap[i] = '-';
- } else {
- overrideMap[i] = ' ';
- }
- }
- if (PrintVtables) {
- _objc_inform("VTABLES: %s%s implements %s",
- getName(cls), isMetaClass(cls) ? "(meta)" : "",
- overrides);
- }
- _free_internal(overrides);
- }
- else {
- for (i = 0; i < vtableCount; i++) {
- overrideMap[i] = '#';
- }
- }
-
- if (PrintVtableImages) {
- overrideMap[vtableCount] = '\0';
- _objc_inform("VTABLE IMAGES: %s %s%s", overrideMap,
- getName(cls), isMetaClass(cls) ? "(meta)" : "");
- }
-}
-
-/***********************************************************************
-* updateVtable
-* Rebuilds vtable for cls, using superclass's vtable if appropriate.
-* Assumes superclass's vtable is up to date.
-* Does nothing to subclass vtables.
-* Locking: runtimeLock must be held by the caller.
-**********************************************************************/
-static void updateVtable(class_t *cls, BOOL force)
-{
- rwlock_assert_writing(&runtimeLock);
-
- // Keep default vtable until +initialize is complete.
- // Default vtable redirects to objc_msgSend, which
- // enforces +initialize locking.
- if (!force && !_class_isInitialized((Class)cls)) {
- /*
- if (PrintVtables) {
- _objc_inform("VTABLES: KEEPING DEFAULT vtable for "
- "uninitialized class %s%s",
- getName(cls), isMetaClass(cls) ? "(meta)" : "");
- }
- */
- return;
- }
-
- // Decide whether this class can share its superclass's vtable.
-
- class_t *supercls = getSuperclass(cls);
- BOOL needVtable = NO;
- unsigned int i;
- if (!supercls) {
- // Root classes always need a vtable
- needVtable = YES;
- }
- else if (cls->data()->flags & RW_SPECIALIZED_VTABLE) {
- // Once you have your own vtable, you never go back
- needVtable = YES;
- }
- else {
- for (i = 0; i < vtableCount; i++) {
- if (ignoreSelector(vtableSelectors[i])) continue;
- method_t *m = getMethodNoSuper_nolock(cls, vtableSelectors[i]);
- // assume any local implementation differs from super's
- if (m) {
- needVtable = YES;
- break;
- }
- }
- }
-
- // Build a vtable for this class, or not.
-
- if (!needVtable) {
- if (PrintVtables) {
- _objc_inform("VTABLES: USING SUPERCLASS vtable for class %s%s %p",
- getName(cls), isMetaClass(cls) ? "(meta)" : "", cls);
- }
- cls->vtable = supercls->vtable;
- }
- else {
- if (PrintVtables) {
- _objc_inform("VTABLES: %s vtable for class %s%s %p",
- (cls->data()->flags & RW_SPECIALIZED_VTABLE) ?
- "UPDATING SPECIALIZED" : "CREATING SPECIALIZED",
- getName(cls), isMetaClass(cls) ? "(meta)" : "", cls);
- }
- if (PrintVtables || PrintVtableImages) {
- printVtableOverrides(cls, supercls);
- }
-
- IMP *new_vtable;
- IMP *super_vtable = supercls ? supercls->vtable : &_objc_empty_vtable;
- // fixme use msgForward (instead of msgSend from empty vtable) ?
-
- if (cls->data()->flags & RW_SPECIALIZED_VTABLE) {
- // update cls->vtable in place
- new_vtable = cls->vtable;
- if (new_vtable == &_objc_empty_vtable) {
- // oops - our vtable is not as specialized as we thought
- // This is probably the broken memcpy of __NSCFConstantString.
- // rdar://8770551
- new_vtable = (IMP*)malloc(vtableCount * sizeof(IMP));
- }
- assert(new_vtable != &_objc_empty_vtable);
- } else {
- // make new vtable
- new_vtable = (IMP*)malloc(vtableCount * sizeof(IMP));
- changeInfo(cls, RW_SPECIALIZED_VTABLE, 0);
- }
-
- for (i = 0; i < vtableCount; i++) {
- if (ignoreSelector(vtableSelectors[i])) {
- new_vtable[i] = (IMP)&vtable_ignored;
- } else {
- method_t *m = getMethodNoSuper_nolock(cls, vtableSelectors[i]);
- if (m) new_vtable[i] = _method_getImplementation(m);
- else new_vtable[i] = super_vtable[i];
- }
- }
-
- if (cls->vtable != new_vtable) {
- // don't let other threads see uninitialized parts of new_vtable
- OSMemoryBarrier();
- cls->vtable = new_vtable;
- }
- }
-}
-
-// SUPPORT_VTABLE
-#else
-// !SUPPORT_VTABLE
-
-static void initVtables(void)
-{
- if (PrintVtables) {
- _objc_inform("VTABLES: no vtables on this architecture");
- }
-}
-
-static BOOL vtable_containsSelector(SEL sel)
-{
- return NO;
-}
-
-static void updateVtable(class_t *cls, BOOL force)
-{
-}
-
-// !SUPPORT_VTABLE
-#endif
-
typedef struct {
category_t *cat;
BOOL fromBundle;
} \
} while (0)
-#define FOREACH_REALIZED_CLASS_AND_SUBCLASS(_c, _cls, code) \
- do { \
- rwlock_assert_writing(&runtimeLock); \
- class_t *_top = _cls; \
- class_t *_c = _top; \
- if (_c) { \
- while (1) { \
- code \
- if (_c->data()->firstSubclass) { \
- _c = _c->data()->firstSubclass; \
- } else { \
- while (!_c->data()->nextSiblingClass && _c != _top) { \
- _c = getSuperclass(_c); \
- } \
- if (_c == _top) break; \
- _c = _c->data()->nextSiblingClass; \
- } \
- } \
- } else { \
- /* nil means all realized classes */ \
- NXHashTable *_classes = realizedClasses(); \
- NXHashTable *_metaclasses = realizedMetaclasses(); \
- NXHashState _state; \
- _state = NXInitHashState(_classes); \
- while (NXNextHashState(_classes, &_state, (void**)&_c)) \
- { \
- code \
- } \
- _state = NXInitHashState(_metaclasses); \
- while (NXNextHashState(_metaclasses, &_state, (void**)&_c)) \
- { \
- code \
- } \
- } \
- } while (0)
-
/*
Low two bits of mlist->entsize is used as the fixed-up marker.
static method_t *method_list_nth(const method_list_t *mlist, uint32_t i)
{
- assert(i < mlist->count);
- return (method_t *)(i*method_list_entsize(mlist) + (char *)&mlist->first);
+ return &mlist->get(i);
}
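// Illustrative sketch of the marker mentioned above: the true entry size is
// the entsize field with its low two bits masked off, and those two bits
// record whether (and how) the method list has been fixed up:
//
//     uint32_t entsize = mlist->entsize_NEVER_USE & ~(uint32_t)3;
//     uint32_t fixupMarker = mlist->entsize_NEVER_USE & 3;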
static uint32_t method_list_count(const method_list_t *mlist)
}
-// part of ivar_t, with non-deprecated alignment
-typedef struct {
- uintptr_t *offset;
- const char *name;
- const char *type;
- uint32_t alignment;
-} ivar_alignment_t;
-
-static uint32_t ivar_alignment(const ivar_t *ivar)
-{
- uint32_t alignment = ((ivar_alignment_t *)ivar)->alignment;
- if (alignment == (uint32_t)-1) alignment = (uint32_t)WORD_SHIFT;
- return 1<<alignment;
-}
-
-
static method_list_t *cat_method_list(const category_t *cat, BOOL isMeta)
{
- if (!cat) return NULL;
+ if (!cat) return nil;
if (isMeta) return cat->classMethods;
else return cat->instanceMethods;
static method_t *cat_method_nth(const category_t *cat, BOOL isMeta, uint32_t i)
{
method_list_t *cmlist = cat_method_list(cat, isMeta);
- if (!cmlist) return NULL;
+ if (!cmlist) return nil;
return method_list_nth(cmlist, i);
}
{
rwlock_assert_writing(&runtimeLock);
- static NXMapTable *category_map = NULL;
+ static NXMapTable *category_map = nil;
if (category_map) return category_map;
* Records an unattached category.
* Locking: runtimeLock must be held by the caller.
**********************************************************************/
-static void addUnattachedCategoryForClass(category_t *cat, class_t *cls,
+static void addUnattachedCategoryForClass(category_t *cat, Class cls,
header_info *catHeader)
{
rwlock_assert_writing(&runtimeLock);
* Removes an unattached category.
* Locking: runtimeLock must be held by the caller.
**********************************************************************/
-static void removeUnattachedCategoryForClass(category_t *cat, class_t *cls)
+static void removeUnattachedCategoryForClass(category_t *cat, Class cls)
{
rwlock_assert_writing(&runtimeLock);
* The result must be freed by the caller.
* Locking: runtimeLock must be held by the caller.
**********************************************************************/
-static category_list *unattachedCategoriesForClass(class_t *cls)
+static category_list *unattachedCategoriesForClass(Class cls)
{
rwlock_assert_writing(&runtimeLock);
return (category_list *)NXMapRemove(unattachedCategories(), cls);
}
-/***********************************************************************
-* isRealized
-* Returns YES if class cls has been realized.
-* Locking: To prevent concurrent realization, hold runtimeLock.
-**********************************************************************/
-static BOOL isRealized(class_t *cls)
-{
- return (cls->data()->flags & RW_REALIZED) ? YES : NO;
-}
-
-
-/***********************************************************************
-* isFuture
-* Returns YES if class cls is an unrealized future class.
-* Locking: To prevent concurrent realization, hold runtimeLock.
-**********************************************************************/
-#ifndef NDEBUG
-// currently used in asserts only
-static BOOL isFuture(class_t *cls)
-{
- return (cls->data()->flags & RW_FUTURE) ? YES : NO;
-}
-#endif
-
-
/***********************************************************************
* classNSObject
* Returns class NSObject.
* Locking: none
**********************************************************************/
-static class_t *classNSObject(void)
+static Class classNSObject(void)
{
- extern class_t OBJC_CLASS_$_NSObject;
- return &OBJC_CLASS_$_NSObject;
+ extern objc_class OBJC_CLASS_$_NSObject;
+ return (Class)&OBJC_CLASS_$_NSObject;
}
* Warn about methods from cats that override other methods in cats or cls.
* Assumes no methods from cats have been added to cls yet.
**********************************************************************/
-static void printReplacements(class_t *cls, category_list *cats)
+static void printReplacements(Class cls, category_list *cats)
{
uint32_t c;
- BOOL isMeta = isMetaClass(cls);
+ BOOL isMeta = cls->isMetaClass();
if (!cats) return;
uint32_t m;
for (m = 0; m < cmCount; m++) {
uint32_t c2, m2;
- method_t *meth2 = NULL;
+ method_t *meth2 = nil;
method_t *meth = cat_method_nth(cat, isMeta, m);
SEL s = sel_registerName((const char *)meth->name);
whine:
// Found an override.
- logReplacedMethod(getName(cls), s, isMetaClass(cls), cat->name,
+ logReplacedMethod(cls->name(), s, cls->isMetaClass(), cat->name,
_method_getImplementation(meth2),
_method_getImplementation(meth));
}
}
-static BOOL isBundleClass(class_t *cls)
+static BOOL isBundleClass(Class cls)
{
return (cls->data()->ro->flags & RO_FROM_BUNDLE) ? YES : NO;
}
static method_list_t *
fixupMethodList(method_list_t *mlist, bool bundleCopy, bool sort)
{
+ rwlock_assert_writing(&runtimeLock);
assert(!isMethodListFixedUp(mlist));
mlist = (method_list_t *)
static void
-attachMethodLists(class_t *cls, method_list_t **addedLists, int addedCount,
- BOOL baseMethods, BOOL methodsFromBundle,
- BOOL *inoutVtablesAffected)
+attachMethodLists(Class cls, method_list_t **addedLists, int addedCount,
+ bool baseMethods, bool methodsFromBundle,
+ bool flushCaches)
{
rwlock_assert_writing(&runtimeLock);
bool scanForCustomAWZ = !UseGC && !cls->hasCustomAWZ();
// RR special cases:
+ // GC is custom RR.
// NSObject's base instance methods are not custom RR.
// All other root classes are custom RR.
// updateCustomRR_AWZ also knows about these cases.
+ if (UseGC) {
+ cls->setHasCustomRR();
+ scanForCustomRR = false;
+ }
if (baseMethods && scanForCustomRR && cls->isRootClass()) {
if (cls != classNSObject()) {
cls->setHasCustomRR();
// All other root metaclasses are custom AWZ.
// updateCustomRR_AWZ also knows about these cases.
if (baseMethods && scanForCustomAWZ && cls->isRootMetaclass()) {
- if (cls != classNSObject()->isa) {
+ if (cls != classNSObject()->ISA()) {
cls->setHasCustomAWZ();
}
scanForCustomAWZ = false;
}
- // Method list array is NULL-terminated.
- // Some elements of lists are NULL; we must filter them out.
+ // Method list array is nil-terminated.
+ // Some elements of lists are nil; we must filter them out.
method_list_t *oldBuf[2];
method_list_t **oldLists;
oldLists = cls->data()->method_lists;
} else {
oldBuf[0] = cls->data()->method_list;
- oldBuf[1] = NULL;
+ oldBuf[1] = nil;
oldLists = oldBuf;
}
if (oldLists) {
int newCount = oldCount;
for (int i = 0; i < addedCount; i++) {
- if (addedLists[i]) newCount++; // only non-NULL entries get added
+ if (addedLists[i]) newCount++; // only non-nil entries get added
}
method_list_t *newBuf[2];
mlist = fixupMethodList(mlist, methodsFromBundle, true/*sort*/);
}
- // Scan for vtable updates
- if (inoutVtablesAffected && !*inoutVtablesAffected) {
- uint32_t m;
- for (m = 0; m < mlist->count; m++) {
- SEL sel = method_list_nth(mlist, m)->name;
- if (vtable_containsSelector(sel)) {
- *inoutVtablesAffected = YES;
- break;
- }
- }
- }
-
// Scan for method implementations tracked by the class's flags
for (uint32_t m = 0;
(scanForCustomRR || scanForCustomAWZ) && m < mlist->count;
scanForCustomAWZ = false;
}
}
+
+ // Update method caches
+ if (flushCaches) {
+ cache_eraseMethods(cls, mlist);
+ }
// Fill method list array
newLists[newCount++] = mlist;
}
if (oldLists && oldLists != oldBuf) free(oldLists);
- // NULL-terminate
- newLists[newCount] = NULL;
+ // nil-terminate
+ newLists[newCount] = nil;
if (newCount > 1) {
assert(newLists != newBuf);
cls->data()->method_lists = newLists;
- changeInfo(cls, RW_METHOD_ARRAY, 0);
+ cls->setInfo(RW_METHOD_ARRAY);
} else {
assert(newLists == newBuf);
cls->data()->method_list = newLists[0];
}
static void
-attachCategoryMethods(class_t *cls, category_list *cats,
- BOOL *inoutVtablesAffected)
+attachCategoryMethods(Class cls, category_list *cats, bool flushCaches)
{
if (!cats) return;
if (PrintReplacedMethods) printReplacements(cls, cats);
- BOOL isMeta = isMetaClass(cls);
+ bool isMeta = cls->isMetaClass();
method_list_t **mlists = (method_list_t **)
_malloc_internal(cats->count * sizeof(*mlists));
}
}
- attachMethodLists(cls, mlists, mcount, NO, fromBundle, inoutVtablesAffected);
+ attachMethodLists(cls, mlists, mcount, NO, fromBundle, flushCaches);
_free_internal(mlists);
}
}
- if (count == 0) return NULL;
+ if (count == 0) return nil;
// Allocate new list.
newlist = (chained_property_list *)
_malloc_internal(sizeof(*newlist) + count * sizeof(property_t));
newlist->count = 0;
- newlist->next = NULL;
+ newlist->next = nil;
// Copy properties; newest categories first, then ordinary properties
if (cats) {
count++;
}
- if (count == 0) return NULL;
+ if (count == 0) return nil;
newprotos = (const protocol_list_t **)
_malloc_internal((count+1) * sizeof(protocol_list_t *));
}
}
- *newp = NULL;
+ *newp = nil;
return newprotos;
}
* methodizeClass
* Fixes up cls's method list, protocol list, and property list.
* Attaches any outstanding categories.
-* Builds vtable.
* Locking: runtimeLock must be held by the caller
**********************************************************************/
-static void methodizeClass(class_t *cls)
+static void methodizeClass(Class cls)
{
category_list *cats;
BOOL isMeta;
rwlock_assert_writing(&runtimeLock);
- isMeta = isMetaClass(cls);
+ isMeta = cls->isMetaClass();
// Methodizing for the first time
if (PrintConnecting) {
_objc_inform("CLASS: methodizing class '%s' %s",
- getName(cls), isMeta ? "(meta)" : "");
+ cls->name(), isMeta ? "(meta)" : "");
}
// Build method and protocol and property lists.
// Include methods and protocols and properties from categories, if any
attachMethodLists(cls, (method_list_t **)&cls->data()->ro->baseMethods, 1,
- YES, isBundleClass(cls), NULL);
+ YES, isBundleClass(cls), NO);
// Root classes get bonus method implementations if they don't have
// them already. These apply before category replacements.
}
cats = unattachedCategoriesForClass(cls);
- attachCategoryMethods(cls, cats, NULL);
+ attachCategoryMethods(cls, cats, NO);
if (cats || cls->data()->ro->baseProperties) {
cls->data()->properties =
if (cats || cls->data()->ro->baseProtocols) {
cls->data()->protocols =
- buildProtocolList(cats, cls->data()->ro->baseProtocols, NULL);
+ buildProtocolList(cats, cls->data()->ro->baseProtocols, nil);
}
if (PrintConnecting) {
for (i = 0; i < cats->count; i++) {
_objc_inform("CLASS: attached category %c%s(%s)",
isMeta ? '+' : '-',
- getName(cls), cats->list[i].cat->name);
+ cls->name(), cats->list[i].cat->name);
}
}
}
if (cats) _free_internal(cats);
- // No vtable until +initialize completes
- assert(cls->vtable == &_objc_empty_vtable);
-
#ifndef NDEBUG
// Debug: sanity-check all SELs; log method list contents
FOREACH_METHOD_LIST(mlist, cls, {
for ( ; iter != end; ++iter) {
if (PrintConnecting) {
_objc_inform("METHOD %c[%s %s]", isMeta ? '+' : '-',
- getName(cls), sel_getName(iter->name));
+ cls->name(), sel_getName(iter->name));
}
assert(ignoreSelector(iter->name) || sel_registerName(sel_getName(iter->name))==iter->name);
}
* remethodizeClass
* Attach outstanding categories to an existing class.
* Fixes up cls's method list, protocol list, and property list.
-* Updates method caches and vtables for cls and its subclasses.
+* Updates method caches for cls and its subclasses.
* Locking: runtimeLock must be held by the caller
**********************************************************************/
-static void remethodizeClass(class_t *cls)
+static void remethodizeClass(Class cls)
{
category_list *cats;
BOOL isMeta;
rwlock_assert_writing(&runtimeLock);
- isMeta = isMetaClass(cls);
+ isMeta = cls->isMetaClass();
// Re-methodizing: check for more categories
if ((cats = unattachedCategoriesForClass(cls))) {
if (PrintConnecting) {
_objc_inform("CLASS: attaching categories to class '%s' %s",
- getName(cls), isMeta ? "(meta)" : "");
+ cls->name(), isMeta ? "(meta)" : "");
}
// Update methods, properties, protocols
- BOOL vtableAffected = NO;
- attachCategoryMethods(cls, cats, &vtableAffected);
+ attachCategoryMethods(cls, cats, YES);
- newproperties = buildPropertyList(NULL, cats, isMeta);
+ newproperties = buildPropertyList(nil, cats, isMeta);
if (newproperties) {
newproperties->next = cls->data()->properties;
cls->data()->properties = newproperties;
}
- newprotos = buildProtocolList(cats, NULL, cls->data()->protocols);
+ newprotos = buildProtocolList(cats, nil, cls->data()->protocols);
if (cls->data()->protocols && cls->data()->protocols != newprotos) {
_free_internal(cls->data()->protocols);
}
cls->data()->protocols = newprotos;
_free_internal(cats);
-
- // Update method caches and vtables
- flushCaches(cls);
- if (vtableAffected) flushVtables(cls);
}
}
-/***********************************************************************
-* changeInfo
-* Atomically sets and clears some bits in cls's info field.
-* set and clear must not overlap.
-**********************************************************************/
-static void changeInfo(class_t *cls, unsigned int set, unsigned int clear)
-{
- uint32_t oldf, newf;
-
- assert(isFuture(cls) || isRealized(cls));
-
- do {
- oldf = cls->data()->flags;
- newf = (oldf | set) & ~clear;
- } while (!OSAtomicCompareAndSwap32Barrier(oldf, newf, (volatile int32_t *)&cls->data()->flags));
-}
-
-
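// Editorial sketch of where the deleted changeInfo() logic moves: the new
// call sites read cls->setInfo(RW_METHOD_ARRAY) and
// cls->changeInfo(RW_REALIZED, RW_FUTURE), so the same compare-and-swap loop
// plausibly becomes a member of objc_class. The member placement and
// signatures below are assumptions based only on those call sites.
void objc_class::changeInfo(uint32_t set, uint32_t clear)
{
    assert(isFuture()  ||  isRealized());

    uint32_t oldf, newf;
    do {
        oldf = data()->flags;
        newf = (oldf | set) & ~clear;
    } while (!OSAtomicCompareAndSwap32Barrier(oldf, newf,
                 (volatile int32_t *)&data()->flags));
}

void objc_class::setInfo(uint32_t set)     { changeInfo(set, 0); }
void objc_class::clearInfo(uint32_t clear) { changeInfo(0, clear); }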
/***********************************************************************
* getClass
* Looks up a class by name. The class MIGHT NOT be realized.
// named classes not in the dyld shared cache, whether realized or not.
NXMapTable *gdb_objc_realized_classes; // exported for debuggers in objc-gdb.h
-static class_t *getClass(const char *name)
+static Class getClass(const char *name)
{
rwlock_assert_locked(&runtimeLock);
assert(gdb_objc_realized_classes);
// Try runtime-allocated table
- class_t *result = (class_t *)NXMapGet(gdb_objc_realized_classes, name);
+ Class result = (Class)NXMapGet(gdb_objc_realized_classes, name);
if (result) return result;
// Try table from dyld shared cache
* Warns about duplicate class names and keeps the old mapping.
* Locking: runtimeLock must be held by the caller
**********************************************************************/
-static void addNamedClass(class_t *cls, const char *name)
+static void addNamedClass(Class cls, const char *name)
{
rwlock_assert_writing(&runtimeLock);
- class_t *old;
+ Class old;
if ((old = getClass(name))) {
- inform_duplicate(name, (Class)old, (Class)cls);
+ inform_duplicate(name, old, cls);
} else {
NXMapInsert(gdb_objc_realized_classes, name, cls);
}
assert(!(cls->data()->flags & RO_META));
// wrong: constructed classes are already realized when they get here
- // assert(!isRealized(cls));
+ // assert(!cls->isRealized());
}
* Removes cls from the name => cls map.
* Locking: runtimeLock must be held by the caller
**********************************************************************/
-static void removeNamedClass(class_t *cls, const char *name)
+static void removeNamedClass(Class cls, const char *name)
{
rwlock_assert_writing(&runtimeLock);
assert(!(cls->data()->flags & RO_META));
* Returns the class list for realized non-meta classes.
* Locking: runtimeLock must be read- or write-locked by the caller
**********************************************************************/
-static NXHashTable *realized_class_hash = NULL;
+static NXHashTable *realized_class_hash = nil;
static NXHashTable *realizedClasses(void)
-{
+{
rwlock_assert_locked(&runtimeLock);
// allocated in _read_images
* Returns the class list for realized metaclasses.
* Locking: runtimeLock must be read- or write-locked by the caller
**********************************************************************/
-static NXHashTable *realized_metaclass_hash = NULL;
+static NXHashTable *realized_metaclass_hash = nil;
static NXHashTable *realizedMetaclasses(void)
{
rwlock_assert_locked(&runtimeLock);
* Adds cls to the realized non-meta class hash.
* Locking: runtimeLock must be held by the caller
**********************************************************************/
-static void addRealizedClass(class_t *cls)
+static void addRealizedClass(Class cls)
{
rwlock_assert_writing(&runtimeLock);
void *old;
old = NXHashInsert(realizedClasses(), cls);
- objc_addRegisteredClass((Class)cls);
- assert(!isMetaClass(cls));
+ objc_addRegisteredClass(cls);
+ assert(!cls->isMetaClass());
assert(!old);
}
* Removes cls from the realized non-meta class hash.
* Locking: runtimeLock must be held by the caller
**********************************************************************/
-static void removeRealizedClass(class_t *cls)
+static void removeRealizedClass(Class cls)
{
rwlock_assert_writing(&runtimeLock);
- if (isRealized(cls)) {
- assert(!isMetaClass(cls));
+ if (cls->isRealized()) {
+ assert(!cls->isMetaClass());
NXHashRemove(realizedClasses(), cls);
- objc_removeRegisteredClass((Class)cls);
+ objc_removeRegisteredClass(cls);
}
}
* Adds cls to the realized metaclass hash.
* Locking: runtimeLock must be held by the caller
**********************************************************************/
-static void addRealizedMetaclass(class_t *cls)
+static void addRealizedMetaclass(Class cls)
{
rwlock_assert_writing(&runtimeLock);
void *old;
old = NXHashInsert(realizedMetaclasses(), cls);
- assert(isMetaClass(cls));
+ assert(cls->isMetaClass());
assert(!old);
}
* Removes cls from the realized metaclass hash.
* Locking: runtimeLock must be held by the caller
**********************************************************************/
-static void removeRealizedMetaclass(class_t *cls)
+static void removeRealizedMetaclass(Class cls)
{
rwlock_assert_writing(&runtimeLock);
- if (isRealized(cls)) {
- assert(isMetaClass(cls));
+ if (cls->isRealized()) {
+ assert(cls->isMetaClass());
NXHashRemove(realizedMetaclasses(), cls);
}
}
{
rwlock_assert_writing(&runtimeLock);
- static NXMapTable *future_named_class_map = NULL;
+ static NXMapTable *future_named_class_map = nil;
if (future_named_class_map) return future_named_class_map;
* Installs cls as the class structure to use for the named class if it appears.
* Locking: runtimeLock must be held by the caller
**********************************************************************/
-static void addFutureNamedClass(const char *name, class_t *cls)
+static void addFutureNamedClass(const char *name, Class cls)
{
void *old;
rwlock_assert_writing(&runtimeLock);
if (PrintFuture) {
- _objc_inform("FUTURE: reserving %p for %s", cls, name);
+ _objc_inform("FUTURE: reserving %p for %s", (void*)cls, name);
}
- cls->setData((class_rw_t *)_calloc_internal(sizeof(*cls->data()), 1));
+ class_rw_t *rw = (class_rw_t *)_calloc_internal(sizeof(class_rw_t), 1);
+ class_ro_t *ro = (class_ro_t *)_calloc_internal(sizeof(class_ro_t), 1);
+ ro->name = _strdup_internal(name);
+ rw->ro = ro;
+ cls->setData(rw);
cls->data()->flags = RO_FUTURE;
old = NXMapKeyCopyingInsert(futureNamedClasses(), name, cls);
/***********************************************************************
* remappedClasses
* Returns the oldClass => newClass map for realized future classes.
-* Returns the oldClass => NULL map for ignored weak-linked classes.
+* Returns the oldClass => nil map for ignored weak-linked classes.
* Locking: runtimeLock must be read- or write-locked by the caller
**********************************************************************/
static NXMapTable *remappedClasses(BOOL create)
{
- static NXMapTable *remapped_class_map = NULL;
+ static NXMapTable *remapped_class_map = nil;
rwlock_assert_locked(&runtimeLock);
if (remapped_class_map) return remapped_class_map;
- if (!create) return NULL;
+ if (!create) return nil;
// remapped_class_map is big enough to hold CF's classes and a few others
INIT_ONCE_PTR(remapped_class_map,
{
rwlock_assert_locked(&runtimeLock);
- BOOL result = (remappedClasses(NO) == NULL);
+ BOOL result = (remappedClasses(NO) == nil);
return result;
}
/***********************************************************************
* addRemappedClass
* newcls is a realized future class, replacing oldcls.
-* OR newcls is NULL, replacing ignored weak-linked class oldcls.
+* OR newcls is nil, replacing ignored weak-linked class oldcls.
* Locking: runtimeLock must be write-locked by the caller
**********************************************************************/
-static void addRemappedClass(class_t *oldcls, class_t *newcls)
+static void addRemappedClass(Class oldcls, Class newcls)
{
rwlock_assert_writing(&runtimeLock);
if (PrintFuture) {
_objc_inform("FUTURE: using %p instead of %p for %s",
- oldcls, newcls, getName(oldcls));
+ (void*)oldcls, (void*)newcls, oldcls->name());
}
void *old;
* remapClass
* Returns the live class pointer for cls, which may be pointing to
* a class struct that has been reallocated.
-* Returns NULL if cls is ignored because of weak linking.
+* Returns nil if cls is ignored because of weak linking.
* Locking: runtimeLock must be read- or write-locked by the caller
**********************************************************************/
-static class_t *remapClass(class_t *cls)
+static Class remapClass(Class cls)
{
rwlock_assert_locked(&runtimeLock);
- class_t *c2;
+ Class c2;
- if (!cls) return NULL;
+ if (!cls) return nil;
if (NXMapMember(remappedClasses(YES), cls, (void**)&c2) == NX_MAPNOTAKEY) {
return cls;
}
}
-static class_t *remapClass(classref_t cls)
+static Class remapClass(classref_t cls)
{
- return remapClass((class_t *)cls);
+ return remapClass((Class)cls);
}
-Class _class_remap(Class cls_gen)
+Class _class_remap(Class cls)
{
rwlock_read(&runtimeLock);
- Class result = (Class)remapClass(newcls(cls_gen));
+ Class result = remapClass(cls);
rwlock_unlock_read(&runtimeLock);
return result;
}
* or is an ignored weak-linked class.
* Locking: runtimeLock must be read- or write-locked by the caller
**********************************************************************/
-static void remapClassRef(class_t **clsref)
+static void remapClassRef(Class *clsref)
{
rwlock_assert_locked(&runtimeLock);
- class_t *newcls = remapClass(*clsref);
+ Class newcls = remapClass(*clsref);
if (*clsref != newcls) *clsref = newcls;
}
* and classes that the slow path can't find (like objc_registerClassPair).
* Locking: runtimeLock must be read- or write-locked by the caller
**********************************************************************/
-static NXMapTable *nonmeta_class_map = NULL;
+static NXMapTable *nonmeta_class_map = nil;
static NXMapTable *nonMetaClasses(void)
{
rwlock_assert_locked(&runtimeLock);
* Adds metacls => cls to the memoized metaclass map
* Locking: runtimeLock must be held by the caller
**********************************************************************/
-static void addNonMetaClass(class_t *cls)
+static void addNonMetaClass(Class cls)
{
rwlock_assert_writing(&runtimeLock);
void *old;
- old = NXMapInsert(nonMetaClasses(), cls->isa, cls);
+ old = NXMapInsert(nonMetaClasses(), cls->ISA(), cls);
- assert(isRealized(cls));
- assert(isRealized(cls->isa));
- assert(!isMetaClass(cls));
- assert(isMetaClass(cls->isa));
+ assert(cls->isRealized());
+ assert(cls->ISA()->isRealized());
+ assert(!cls->isMetaClass());
+ assert(cls->ISA()->isMetaClass());
assert(!old);
}
-static void removeNonMetaClass(class_t *cls)
+static void removeNonMetaClass(Class cls)
{
rwlock_assert_writing(&runtimeLock);
- NXMapRemove(nonMetaClasses(), cls->isa);
+ NXMapRemove(nonMetaClasses(), cls->ISA());
}
* Used by +initialize.
* Locking: runtimeLock must be read- or write-locked by the caller
**********************************************************************/
-static class_t *getNonMetaClass(class_t *metacls, id inst)
+static Class getNonMetaClass(Class metacls, id inst)
{
static int total, slow, memo;
rwlock_assert_locked(&runtimeLock);
total++;
// return cls itself if it's already a non-meta class
- if (!isMetaClass(metacls)) return metacls;
+ if (!metacls->isMetaClass()) return metacls;
// metacls really is a metaclass
// special case for root metaclass
- // where inst == inst->isa == metacls is possible
- if (metacls->isa == metacls) {
- class_t *cls = metacls->superclass;
- assert(isRealized(cls));
- assert(!isMetaClass(cls));
- assert(cls->isa == metacls);
- if (cls->isa == metacls) return cls;
+ // where inst == inst->ISA() == metacls is possible
+ if (metacls->ISA() == metacls) {
+ Class cls = metacls->superclass;
+ assert(cls->isRealized());
+ assert(!cls->isMetaClass());
+ assert(cls->ISA() == metacls);
+ if (cls->ISA() == metacls) return cls;
}
// use inst if available
if (inst) {
- class_t *cls = (class_t *)inst;
+ Class cls = (Class)inst;
realizeClass(cls);
// cls may be a subclass - find the real class for metacls
- while (cls && cls->isa != metacls) {
+ while (cls && cls->ISA() != metacls) {
cls = cls->superclass;
realizeClass(cls);
}
if (cls) {
- assert(!isMetaClass(cls));
- assert(cls->isa == metacls);
+ assert(!cls->isMetaClass());
+ assert(cls->ISA() == metacls);
return cls;
}
#if !NDEBUG
}
// try memoized table
- class_t *cls = (class_t *)NXMapGet(nonMetaClasses(), metacls);
+ Class cls = (Class)NXMapGet(nonMetaClasses(), metacls);
if (cls) {
memo++;
if (PrintInitializing) {
memo, total, memo*100.0/total);
}
- assert(isRealized(cls));
- assert(!isMetaClass(cls));
- assert(cls->isa == metacls);
+ assert(cls->isRealized());
+ assert(!cls->isMetaClass());
+ assert(cls->ISA() == metacls);
return cls;
}
classref_t *classlist = _getObjc2ClassList(hi, &count);
for (size_t i = 0; i < count; i++) {
cls = remapClass(classlist[i]);
- if (cls && cls->isa == metacls) {
+ if (cls && cls->ISA() == metacls) {
// memoize result
realizeClass(cls);
addNonMetaClass(cls);
}
}
- _objc_fatal("no class for metaclass %p", metacls);
+ _objc_fatal("no class for metaclass %p", (void*)metacls);
return cls;
}
* Used by +initialize.
* Locking: acquires runtimeLock
**********************************************************************/
-Class _class_getNonMetaClass(Class cls_gen, id obj)
+Class _class_getNonMetaClass(Class cls, id obj)
{
- class_t *cls = newcls(cls_gen);
rwlock_write(&runtimeLock);
cls = getNonMetaClass(cls, obj);
- assert(isRealized(cls));
+ assert(cls->isRealized());
rwlock_unlock_write(&runtimeLock);
- return (Class)cls;
+ return cls;
}
* Adds subcls as a subclass of supercls.
* Locking: runtimeLock must be held by the caller.
**********************************************************************/
-static void addSubclass(class_t *supercls, class_t *subcls)
+static void addSubclass(Class supercls, Class subcls)
{
rwlock_assert_writing(&runtimeLock);
if (supercls && subcls) {
- assert(isRealized(supercls));
- assert(isRealized(subcls));
+ assert(supercls->isRealized());
+ assert(subcls->isRealized());
subcls->data()->nextSiblingClass = supercls->data()->firstSubclass;
supercls->data()->firstSubclass = subcls;
- if (supercls->data()->flags & RW_HAS_CXX_STRUCTORS) {
- subcls->data()->flags |= RW_HAS_CXX_STRUCTORS;
+ if (supercls->data()->flags & RW_HAS_CXX_CTOR) {
+ subcls->data()->flags |= RW_HAS_CXX_CTOR;
+ }
+
+ if (supercls->data()->flags & RW_HAS_CXX_DTOR) {
+ subcls->data()->flags |= RW_HAS_CXX_DTOR;
}
if (supercls->hasCustomRR()) {
* Removes subcls as a subclass of supercls.
* Locking: runtimeLock must be held by the caller.
**********************************************************************/
-static void removeSubclass(class_t *supercls, class_t *subcls)
+static void removeSubclass(Class supercls, Class subcls)
{
rwlock_assert_writing(&runtimeLock);
- assert(isRealized(supercls));
- assert(isRealized(subcls));
- assert(getSuperclass(subcls) == supercls);
+ assert(supercls->isRealized());
+ assert(subcls->isRealized());
+ assert(subcls->superclass == supercls);
- class_t **cp;
+ Class *cp;
for (cp = &supercls->data()->firstSubclass;
*cp && *cp != subcls;
cp = &(*cp)->data()->nextSiblingClass)
**********************************************************************/
static NXMapTable *protocols(void)
{
- static NXMapTable *protocol_map = NULL;
+ static NXMapTable *protocol_map = nil;
rwlock_assert_locked(&runtimeLock);
ivar_t *ivar = ivar_list_nth(ro->ivars, i);
if (!ivar->offset) continue; // anonymous bitfield
- uint32_t alignment = ivar_alignment(ivar);
+ uint32_t alignment = ivar->alignment();
if (alignment > maxAlignment) maxAlignment = alignment;
}
if (PrintIvars) {
_objc_inform("IVARS: offset %u -> %u for %s (size %u, align %u)",
oldOffset, newOffset, ivar->name,
- ivar->size, ivar_alignment(ivar));
+ ivar->size, ivar->alignment());
}
}
* Look up an ivar by name.
* Locking: runtimeLock must be read- or write-locked by the caller.
**********************************************************************/
-static ivar_t *getIvar(class_t *cls, const char *name)
+static ivar_t *getIvar(Class cls, const char *name)
{
rwlock_assert_locked(&runtimeLock);
const ivar_list_t *ivars;
- assert(isRealized(cls));
+ assert(cls->isRealized());
if ((ivars = cls->data()->ro->ivars)) {
uint32_t i;
for (i = 0; i < ivars->count; i++) {
ivar_t *ivar = ivar_list_nth(ivars, i);
if (!ivar->offset) continue; // anonymous bitfield
- // ivar->name may be NULL for anonymous bitfields etc.
+ // ivar->name may be nil for anonymous bitfields etc.
if (ivar->name && 0 == strcmp(name, ivar->name)) {
return ivar;
}
}
}
- return NULL;
+ return nil;
}
-static void reconcileInstanceVariables(class_t *cls, class_t *supercls) {
+
+static void reconcileInstanceVariables(Class cls, Class supercls)
+{
class_rw_t *rw = cls->data();
const class_ro_t *ro = rw->ro;
+
+ assert(supercls);
+ assert(!cls->isMetaClass());
+
+ // Non-fragile ivars - reconcile this class with its superclass
+ layout_bitmap ivarBitmap;
+ layout_bitmap weakBitmap;
+ bool layoutsChanged = NO;
+ bool mergeLayouts = UseGC;
+ const class_ro_t *super_ro = supercls->data()->ro;
- if (supercls) {
- // Non-fragile ivars - reconcile this class with its superclass
- // Does this really need to happen for the isMETA case?
- layout_bitmap ivarBitmap;
- layout_bitmap weakBitmap;
- BOOL layoutsChanged = NO;
- BOOL mergeLayouts = UseGC;
- const class_ro_t *super_ro = supercls->data()->ro;
+ if (DebugNonFragileIvars) {
+ // Debugging: Force non-fragile ivars to slide.
+ // Intended to find compiler, runtime, and program bugs.
+ // If it fails with this and works without, you have a problem.
+
+ // Operation: Reset everything to 0 + misalignment.
+ // Then force the normal sliding logic to push everything back.
+
+ // Exceptions: root classes, metaclasses, *NSCF* classes,
+ // __CF* classes, NSConstantString, NSSimpleCString
- if (DebugNonFragileIvars) {
- // Debugging: Force non-fragile ivars to slide.
- // Intended to find compiler, runtime, and program bugs.
- // If it fails with this and works without, you have a problem.
+ // (already know it's not root because supercls != nil)
+ if (!strstr(cls->name(), "NSCF") &&
+ 0 != strncmp(cls->name(), "__CF", 4) &&
+ 0 != strcmp(cls->name(), "NSConstantString") &&
+ 0 != strcmp(cls->name(), "NSSimpleCString"))
+ {
+ uint32_t oldStart = ro->instanceStart;
+ uint32_t oldSize = ro->instanceSize;
+ class_ro_t *ro_w = make_ro_writeable(rw);
+ ro = rw->ro;
- // Operation: Reset everything to 0 + misalignment.
- // Then force the normal sliding logic to push everything back.
+ // Find max ivar alignment in class.
+ // default to word size to simplify ivar update
+ uint32_t alignment = 1<<WORD_SHIFT;
+ if (ro->ivars) {
+ uint32_t i;
+ for (i = 0; i < ro->ivars->count; i++) {
+ ivar_t *ivar = ivar_list_nth(ro->ivars, i);
+ if (ivar->alignment() > alignment) {
+ alignment = ivar->alignment();
+ }
+ }
+ }
+ uint32_t misalignment = ro->instanceStart % alignment;
+ uint32_t delta = ro->instanceStart - misalignment;
+ ro_w->instanceStart = misalignment;
+ ro_w->instanceSize -= delta;
- // Exceptions: root classes, metaclasses, *NSCF* classes,
- // __CF* classes, NSConstantString, NSSimpleCString
+ if (PrintIvars) {
+ _objc_inform("IVARS: DEBUG: forcing ivars for class '%s' "
+ "to slide (instanceStart %zu -> %zu)",
+ cls->name(), (size_t)oldStart,
+ (size_t)ro->instanceStart);
+ }
- // (already know it's not root because supercls != nil)
- if (!strstr(getName(cls), "NSCF") &&
- 0 != strncmp(getName(cls), "__CF", 4) &&
- 0 != strcmp(getName(cls), "NSConstantString") &&
- 0 != strcmp(getName(cls), "NSSimpleCString"))
- {
- uint32_t oldStart = ro->instanceStart;
- uint32_t oldSize = ro->instanceSize;
- class_ro_t *ro_w = make_ro_writeable(rw);
- ro = rw->ro;
-
- // Find max ivar alignment in class.
- // default to word size to simplify ivar update
- uint32_t alignment = 1<<WORD_SHIFT;
- if (ro->ivars) {
- uint32_t i;
- for (i = 0; i < ro->ivars->count; i++) {
- ivar_t *ivar = ivar_list_nth(ro->ivars, i);
- if (ivar_alignment(ivar) > alignment) {
- alignment = ivar_alignment(ivar);
- }
- }
+ if (ro->ivars) {
+ uint32_t i;
+ for (i = 0; i < ro->ivars->count; i++) {
+ ivar_t *ivar = ivar_list_nth(ro->ivars, i);
+ if (!ivar->offset) continue; // anonymous bitfield
+ *ivar->offset -= delta;
}
- uint32_t misalignment = ro->instanceStart % alignment;
- uint32_t delta = ro->instanceStart - misalignment;
- ro_w->instanceStart = misalignment;
- ro_w->instanceSize -= delta;
-
- if (PrintIvars) {
- _objc_inform("IVARS: DEBUG: forcing ivars for class '%s' "
- "to slide (instanceStart %zu -> %zu)",
- getName(cls), (size_t)oldStart,
- (size_t)ro->instanceStart);
+ }
+
+ if (mergeLayouts) {
+ layout_bitmap layout;
+ if (ro->ivarLayout) {
+ layout = layout_bitmap_create(ro->ivarLayout,
+ oldSize, oldSize, NO);
+ layout_bitmap_slide_anywhere(&layout,
+ delta >> WORD_SHIFT, 0);
+ ro_w->ivarLayout = layout_string_create(layout);
+ layout_bitmap_free(layout);
}
-
- if (ro->ivars) {
- uint32_t i;
- for (i = 0; i < ro->ivars->count; i++) {
- ivar_t *ivar = ivar_list_nth(ro->ivars, i);
- if (!ivar->offset) continue; // anonymous bitfield
- *ivar->offset -= delta;
- }
- }
-
- if (mergeLayouts) {
- layout_bitmap layout;
- if (ro->ivarLayout) {
- layout = layout_bitmap_create(ro->ivarLayout,
- oldSize, oldSize, NO);
- layout_bitmap_slide_anywhere(&layout,
- delta >> WORD_SHIFT, 0);
- ro_w->ivarLayout = layout_string_create(layout);
- layout_bitmap_free(layout);
- }
- if (ro->weakIvarLayout) {
- layout = layout_bitmap_create(ro->weakIvarLayout,
- oldSize, oldSize, YES);
- layout_bitmap_slide_anywhere(&layout,
- delta >> WORD_SHIFT, 0);
- ro_w->weakIvarLayout = layout_string_create(layout);
- layout_bitmap_free(layout);
- }
+ if (ro->weakIvarLayout) {
+ layout = layout_bitmap_create(ro->weakIvarLayout,
+ oldSize, oldSize, YES);
+ layout_bitmap_slide_anywhere(&layout,
+ delta >> WORD_SHIFT, 0);
+ ro_w->weakIvarLayout = layout_string_create(layout);
+ layout_bitmap_free(layout);
}
}
}
-
- // fixme can optimize for "class has no new ivars", etc
+ }
+
+ if (ro->instanceStart >= super_ro->instanceSize && !mergeLayouts) {
+ // Superclass has not overgrown its space, and we don't
+ // need to rebuild GC layouts. We're done here.
+ return;
+ }
+ // fixme can optimize for "class has no new ivars", etc
+
+ if (mergeLayouts) {
// WARNING: gcc c++ sets instanceStart/Size=0 for classes with
// no local ivars, but does provide a layout bitmap.
// Handle that case specially so layout_bitmap_create doesn't die
ivarBitmap = layout_bitmap_create_empty(super_ro->instanceSize, NO);
weakBitmap = layout_bitmap_create_empty(super_ro->instanceSize, YES);
layoutsChanged = YES;
- } else {
+ }
+ else {
ivarBitmap =
- layout_bitmap_create(ro->ivarLayout,
- ro->instanceSize,
- ro->instanceSize, NO);
+ layout_bitmap_create(ro->ivarLayout,
+ ro->instanceSize,
+ ro->instanceSize, NO);
weakBitmap =
- layout_bitmap_create(ro->weakIvarLayout,
- ro->instanceSize,
- ro->instanceSize, YES);
+ layout_bitmap_create(ro->weakIvarLayout,
+ ro->instanceSize,
+ ro->instanceSize, YES);
+ }
+ }
+
+ if (ro->instanceStart < super_ro->instanceSize) {
+ // Superclass has changed size. This class's ivars must move.
+ // Also slide layout bits in parallel.
+ // This code is incapable of compacting the subclass to
+ // compensate for a superclass that shrunk, so don't do that.
+ if (PrintIvars) {
+ _objc_inform("IVARS: sliding ivars for class %s "
+ "(superclass was %u bytes, now %u)",
+ ro->name, ro->instanceStart,
+ super_ro->instanceSize);
}
+ class_ro_t *ro_w = make_ro_writeable(rw);
+ ro = rw->ro;
+ moveIvars(ro_w, super_ro->instanceSize,
+ mergeLayouts ? &ivarBitmap : nil,
+ mergeLayouts ? &weakBitmap : nil);
+ gdb_objc_class_changed(cls, OBJC_CLASS_IVARS_CHANGED, ro->name);
+ layoutsChanged = YES;
+ }
+
+ if (mergeLayouts) {
+ // Check superclass's layout against this class's layout.
+ // This needs to be done even if the superclass is not bigger.
+ layout_bitmap superBitmap;
- if (ro->instanceStart < super_ro->instanceSize) {
- // Superclass has changed size. This class's ivars must move.
- // Also slide layout bits in parallel.
- // This code is incapable of compacting the subclass to
- // compensate for a superclass that shrunk, so don't do that.
- if (PrintIvars) {
- _objc_inform("IVARS: sliding ivars for class %s "
- "(superclass was %u bytes, now %u)",
- ro->name, ro->instanceStart,
- super_ro->instanceSize);
- }
- class_ro_t *ro_w = make_ro_writeable(rw);
- ro = rw->ro;
- moveIvars(ro_w, super_ro->instanceSize,
- mergeLayouts ? &ivarBitmap : NULL, mergeLayouts ? &weakBitmap : NULL);
- gdb_objc_class_changed((Class)cls, OBJC_CLASS_IVARS_CHANGED, ro->name);
- layoutsChanged = mergeLayouts;
- }
+ superBitmap = layout_bitmap_create(super_ro->ivarLayout,
+ super_ro->instanceSize,
+ super_ro->instanceSize, NO);
+ layoutsChanged |= layout_bitmap_splat(ivarBitmap, superBitmap,
+ ro->instanceStart);
+ layout_bitmap_free(superBitmap);
- if (mergeLayouts) {
- // Check superclass's layout against this class's layout.
- // This needs to be done even if the superclass is not bigger.
- layout_bitmap superBitmap = layout_bitmap_create(super_ro->ivarLayout,
- super_ro->instanceSize,
- super_ro->instanceSize, NO);
- layoutsChanged |= layout_bitmap_splat(ivarBitmap, superBitmap,
- ro->instanceStart);
- layout_bitmap_free(superBitmap);
-
- // check the superclass' weak layout.
- superBitmap = layout_bitmap_create(super_ro->weakIvarLayout,
- super_ro->instanceSize,
- super_ro->instanceSize, YES);
- layoutsChanged |= layout_bitmap_splat(weakBitmap, superBitmap,
- ro->instanceStart);
- layout_bitmap_free(superBitmap);
- }
+ // check the superclass' weak layout.
+ superBitmap = layout_bitmap_create(super_ro->weakIvarLayout,
+ super_ro->instanceSize,
+ super_ro->instanceSize, YES);
+ layoutsChanged |= layout_bitmap_splat(weakBitmap, superBitmap,
+ ro->instanceStart);
+ layout_bitmap_free(superBitmap);
+ // Rebuild layout strings if necessary.
if (layoutsChanged) {
- // Rebuild layout strings.
if (PrintIvars) {
- _objc_inform("IVARS: gc layout changed for class %s",
- ro->name);
+ _objc_inform("IVARS: gc layout changed for class %s",ro->name);
}
class_ro_t *ro_w = make_ro_writeable(rw);
ro = rw->ro;
* Returns the real class structure for the class.
* Locking: runtimeLock must be write-locked by the caller
**********************************************************************/
-static class_t *realizeClass(class_t *cls)
+static Class realizeClass(Class cls)
{
rwlock_assert_writing(&runtimeLock);
const class_ro_t *ro;
class_rw_t *rw;
- class_t *supercls;
- class_t *metacls;
+ Class supercls;
+ Class metacls;
BOOL isMeta;
- if (!cls) return NULL;
- if (isRealized(cls)) return cls;
+ if (!cls) return nil;
+ if (cls->isRealized()) return cls;
assert(cls == remapClass(cls));
ro = (const class_ro_t *)cls->data();
// This was a future class. rw data is already allocated.
rw = cls->data();
ro = cls->data()->ro;
- changeInfo(cls, RW_REALIZED, RW_FUTURE);
+ cls->changeInfo(RW_REALIZED, RW_FUTURE);
} else {
// Normal class. Allocate writeable class data.
rw = (class_rw_t *)_calloc_internal(sizeof(class_rw_t), 1);
if (PrintConnecting) {
_objc_inform("CLASS: realizing class '%s' %s %p %p",
- ro->name, isMeta ? "(meta)" : "", cls, ro);
+ ro->name, isMeta ? "(meta)" : "", (void*)cls, ro);
}
// Realize superclass and metaclass, if they aren't already.
// This needs to be done after RW_REALIZED is set above, for root classes.
supercls = realizeClass(remapClass(cls->superclass));
- metacls = realizeClass(remapClass(cls->isa));
+ metacls = realizeClass(remapClass(cls->ISA()));
// Check for remapped superclass and metaclass
if (supercls != cls->superclass) {
cls->superclass = supercls;
}
- if (metacls != cls->isa) {
- cls->isa = metacls;
+ if (metacls != cls->ISA()) {
+ cls->changeIsa(metacls);
}
/* debug: print them all
_objc_inform("IVARS: %s.%s (offset %u, size %u, align %u)",
ro->name, ivar->name,
- *ivar->offset, ivar->size, ivar_alignment(ivar));
+ *ivar->offset, ivar->size, ivar->alignment());
}
}
*/
// Reconcile instance variable offsets / layout.
- if (!isMeta) reconcileInstanceVariables(cls, supercls);
+ if (supercls && !isMeta) reconcileInstanceVariables(cls, supercls);
// Copy some flags from ro to rw
- if (ro->flags & RO_HAS_CXX_STRUCTORS) rw->flags |= RW_HAS_CXX_STRUCTORS;
+ if (ro->flags & RO_HAS_CXX_STRUCTORS) {
+ rw->flags |= RW_HAS_CXX_DTOR;
+ if (! (ro->flags & RO_HAS_CXX_DTOR_ONLY)) {
+ rw->flags |= RW_HAS_CXX_CTOR;
+ }
+ }
// Connect this class to its superclass's subclass lists
if (supercls) {
* Returns YES if some superclass of cls was weak-linked and is missing.
**********************************************************************/
static BOOL
-missingWeakSuperclass(class_t *cls)
+missingWeakSuperclass(Class cls)
{
- assert(!isRealized(cls));
+ assert(!cls->isRealized());
if (!cls->superclass) {
- // superclass NULL. This is normal for root classes only.
+ // superclass nil. This is normal for root classes only.
return (!(cls->data()->flags & RO_ROOT));
} else {
- // superclass not NULL. Check if a higher superclass is missing.
- class_t *supercls = remapClass(cls->superclass);
+ // superclass not nil. Check if a higher superclass is missing.
+ Class supercls = remapClass(cls->superclass);
assert(cls != cls->superclass);
assert(cls != supercls);
if (!supercls) return YES;
- if (isRealized(supercls)) return NO;
+ if (supercls->isRealized()) return NO;
return missingWeakSuperclass(supercls);
}
}
{
rwlock_write(&runtimeLock);
- class_t *cls;
+ Class cls;
NXMapTable *future_named_class_map = futureNamedClasses();
- if ((cls = (class_t *)NXMapGet(future_named_class_map, name))) {
+ if ((cls = (Class)NXMapGet(future_named_class_map, name))) {
// Already have a future class for this name.
rwlock_unlock_write(&runtimeLock);
- return (Class)cls;
+ return cls;
}
- cls = (class_t *)_calloc_class(sizeof(*cls));
+ cls = _calloc_class(sizeof(objc_class));
addFutureNamedClass(name, cls);
rwlock_unlock_write(&runtimeLock);
- return (Class)cls;
+ return cls;
}
}
+BOOL _class_isFutureClass(Class cls)
+{
+ return cls && cls->isFuture();
+}
+
+
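// Hypothetical illustration of the future-class mechanism the addition above
// tests for, assuming objc_getFutureClass() is the public entry to the
// allocator shown in the preceding hunk: a host (CoreFoundation-style
// bridging code) reserves a class structure by name before any image
// defining it loads; _read_images() later copies the real class into the
// reserved structure and remaps the stale pointer. The class name is made up.
static void future_class_example(void)
{
    Class placeholder = objc_getFutureClass("ExampleBridgedClass");
    assert(_class_isFutureClass(placeholder));
}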
/***********************************************************************
-* flushVtables
-* Rebuilds vtables for cls and its realized subclasses.
-* If cls is Nil, all realized classes and metaclasses are touched.
-* Locking: runtimeLock must be held by the caller.
+* _objc_flush_caches
+* Flushes all caches.
+* (Historical behavior: flush caches for cls, its metaclass,
+* and subclasses thereof. Nil flushes all classes.)
+* Locking: acquires runtimeLock
**********************************************************************/
-static void flushVtables(class_t *cls)
+static void flushCaches(Class cls)
{
rwlock_assert_writing(&runtimeLock);
- if (PrintVtables && !cls) {
- _objc_inform("VTABLES: ### EXPENSIVE ### global vtable flush!");
+ mutex_lock(&cacheUpdateLock);
+
+ if (cls) {
+ FOREACH_REALIZED_CLASS_AND_SUBCLASS(c, cls, {
+ cache_erase_nolock(&c->cache);
+ });
+
+ if (!cls->superclass) {
+ // root; metaclasses are subclasses and were flushed above
+ } else {
+ FOREACH_REALIZED_CLASS_AND_SUBCLASS(c, cls->ISA(), {
+ cache_erase_nolock(&c->cache);
+ });
+ }
+ }
+ else {
+ Class c;
+ NXHashTable *classes = realizedClasses();
+ NXHashState state = NXInitHashState(classes);
+ while (NXNextHashState(classes, &state, (void **)&c)) {
+ cache_erase_nolock(&c->cache);
+ }
+ classes = realizedMetaclasses();
+ state = NXInitHashState(classes);
+ while (NXNextHashState(classes, &state, (void **)&c)) {
+ cache_erase_nolock(&c->cache);
+ }
}
- FOREACH_REALIZED_CLASS_AND_SUBCLASS(c, cls, {
- updateVtable(c, NO);
- });
+ mutex_unlock(&cacheUpdateLock);
}
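// Editorial sketch of the traversal that FOREACH_REALIZED_CLASS_AND_SUBCLASS
// performs in flushCaches()/flushImps() above. Realized classes are linked
// through data()->firstSubclass and data()->nextSiblingClass (see
// addSubclass/removeSubclass), so "cls and its subclasses" is a depth-first
// walk of that tree. The recursive helper and block parameter are
// illustrative, not the macro's actual expansion.
static void foreach_realized_class_and_subclass(Class top, void (^code)(Class))
{
    if (!top) return;
    code(top);
    for (Class sub = top->data()->firstSubclass;
         sub;
         sub = sub->data()->nextSiblingClass)
    {
        foreach_realized_class_and_subclass(sub, code);
    }
}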
-/***********************************************************************
-* flushCaches
-* Flushes caches for cls and its realized subclasses.
-* Does not update vtables.
-* If cls is Nil, all realized classes and metaclasses are touched.
-* Locking: runtimeLock must be held by the caller.
-**********************************************************************/
-static void flushCaches(class_t *cls)
+static void flushImps(Class cls, SEL sel1, IMP imp1, SEL sel2, IMP imp2)
{
rwlock_assert_writing(&runtimeLock);
- FOREACH_REALIZED_CLASS_AND_SUBCLASS(c, cls, {
- flush_cache((Class)c);
- });
+ mutex_lock(&cacheUpdateLock);
+
+ if (cls) {
+ FOREACH_REALIZED_CLASS_AND_SUBCLASS(c, cls, {
+ cache_eraseImp_nolock(c, sel1, imp1);
+ if (sel2) cache_eraseImp_nolock(c, sel2, imp2);
+ });
+
+ if (!cls->superclass) {
+ // root; metaclasses are subclasses and were flushed above
+ } else {
+ FOREACH_REALIZED_CLASS_AND_SUBCLASS(c, cls->ISA(), {
+ cache_eraseImp_nolock(c, sel1, imp1);
+ if (sel2) cache_eraseImp_nolock(c, sel2, imp2);
+ });
+ }
+ }
+ else {
+ Class c;
+ NXHashTable *classes = realizedClasses();
+ NXHashState state = NXInitHashState(classes);
+ while (NXNextHashState(classes, &state, (void **)&c)) {
+ cache_eraseImp_nolock(c, sel1, imp1);
+ if (sel2) cache_eraseImp_nolock(c, sel2, imp2);
+ }
+ classes = realizedMetaclasses();
+ state = NXInitHashState(classes);
+ while (NXNextHashState(classes, &state, (void **)&c)) {
+ cache_eraseImp_nolock(c, sel1, imp1);
+ if (sel2) cache_eraseImp_nolock(c, sel2, imp2);
+ }
+ }
+
+ mutex_unlock(&cacheUpdateLock);
}
-/***********************************************************************
-* flush_caches
-* Flushes caches and rebuilds vtables for cls, its subclasses,
-* and optionally its metaclass.
-* Locking: acquires runtimeLock
-**********************************************************************/
-void flush_caches(Class cls_gen, BOOL flush_meta)
+void _objc_flush_caches(Class cls)
{
- class_t *cls = newcls(cls_gen);
rwlock_write(&runtimeLock);
- // fixme optimize vtable flushing? (only needed for vtable'd selectors)
flushCaches(cls);
- flushVtables(cls);
- // don't flush root class's metaclass twice (it's a subclass of the root)
- if (flush_meta && getSuperclass(cls)) {
- flushCaches(cls->isa);
- flushVtables(cls->isa);
- }
rwlock_unlock_write(&runtimeLock);
+
+ if (!cls) {
+ // collectALot if cls==nil
+ mutex_lock(&cacheUpdateLock);
+ cache_collect(true);
+ mutex_unlock(&cacheUpdateLock);
+ }
}
recursive_mutex_unlock(&loadMethodLock);
- return NULL;
+ return nil;
}
uint32_t hIndex;
size_t count;
size_t i;
- class_t **resolvedFutureClasses = NULL;
+ Class *resolvedFutureClasses = nil;
size_t resolvedFutureClassCount = 0;
static unsigned int totalMethodLists;
static unsigned int preoptimizedMethodLists;
#define EACH_HEADER \
hIndex = 0; \
- crashlog_header_name(NULL) && hIndex < hCount && (hi = hList[hIndex]) && crashlog_header_name(hi); \
+ crashlog_header_name(nil) && hIndex < hCount && (hi = hList[hIndex]) && crashlog_header_name(hi); \
hIndex++
if (!doneOnce) {
doneOnce = YES;
- initVtables();
+
+ if (DisableTaggedPointers) {
+ disableTaggedPointers();
+ }
        // Count classes. Size various tables based on the total.
- size_t total = 0;
- size_t unoptimizedTotal = 0;
+ int total = 0;
+ int unoptimizedTotal = 0;
for (EACH_HEADER) {
if (_getObjc2ClassList(hi, &count)) {
- total += count;
+ total += (int)count;
if (!hi->inSharedCache) unoptimizedTotal += count;
}
}
if (PrintConnecting) {
- _objc_inform("CLASS: found %zu classes during launch", total);
+ _objc_inform("CLASS: found %d classes during launch", total);
}
// namedClasses (NOT realizedClasses)
// Preoptimized classes don't go in this table.
// 4/3 is NXMapTable's load factor
- size_t namedClassesSize =
+ int namedClassesSize =
(isPreoptimized() ? unoptimizedTotal : total) * 4 / 3;
gdb_objc_realized_classes =
NXCreateMapTableFromZone(NXStrValueMapPrototype, namedClassesSize,
// realizedClasses and realizedMetaclasses - less than the full total
realized_class_hash =
- NXCreateHashTableFromZone(NXPtrPrototype, total / 8, NULL,
+ NXCreateHashTableFromZone(NXPtrPrototype, total / 8, nil,
_objc_internal_zone());
realized_metaclass_hash =
- NXCreateHashTableFromZone(NXPtrPrototype, total / 8, NULL,
+ NXCreateHashTableFromZone(NXPtrPrototype, total / 8, nil,
_objc_internal_zone());
}
classref_t *classlist = _getObjc2ClassList(hi, &count);
for (i = 0; i < count; i++) {
- class_t *cls = (class_t *)classlist[i];
- const char *name = getName(cls);
+ Class cls = (Class)classlist[i];
+ const char *name = cls->name();
if (missingWeakSuperclass(cls)) {
// No superclass (probably weak-linked).
_objc_inform("CLASS: IGNORING class '%s' with "
"missing weak-linked superclass", name);
}
- addRemappedClass(cls, NULL);
- cls->superclass = NULL;
+ addRemappedClass(cls, nil);
+ cls->superclass = nil;
continue;
}
- class_t *newCls = NULL;
+ // Note: Class __ARCLite__'s hack does not go through here.
+ // Class structure fixups that apply to it also need to be
+ // performed in non-lazy realization below.
+
+ Class newCls = nil;
if (NXCountMapTable(future_named_class_map) > 0) {
- newCls = (class_t *)NXMapGet(future_named_class_map, name);
+ newCls = (Class)NXMapGet(future_named_class_map, name);
removeFutureNamedClass(name);
}
if (newCls) {
- // Copy class_t to future class's struct.
+ // Copy objc_class to future class's struct.
// Preserve future's rw data block.
class_rw_t *rw = newCls->data();
- memcpy(newCls, cls, sizeof(class_t));
+ const class_ro_t *old_ro = rw->ro;
+ memcpy(newCls, cls, sizeof(objc_class));
rw->ro = (class_ro_t *)newCls->data();
newCls->setData(rw);
-
+ _free_internal((void *)old_ro->name);
+ _free_internal((void *)old_ro);
+
addRemappedClass(cls, newCls);
cls = newCls;
// Non-lazily realize the class below.
- resolvedFutureClasses = (class_t **)
+ resolvedFutureClasses = (Class *)
_realloc_internal(resolvedFutureClasses,
(resolvedFutureClassCount+1)
- * sizeof(class_t *));
+ * sizeof(Class));
resolvedFutureClasses[resolvedFutureClassCount++] = newCls;
}
// for future reference: shared cache never contains MH_BUNDLEs
if (headerIsBundle) {
cls->data()->flags |= RO_FROM_BUNDLE;
- cls->isa->data()->flags |= RO_FROM_BUNDLE;
+ cls->ISA()->data()->flags |= RO_FROM_BUNDLE;
}
if (PrintPreopt) {
totalMethodLists++;
if (isMethodListFixedUp(mlist)) preoptimizedMethodLists++;
}
- if ((mlist = ((class_ro_t *)cls->isa->data())->baseMethods)) {
+ if ((mlist = ((class_ro_t *)cls->ISA()->data())->baseMethods)) {
totalMethodLists++;
if (isMethodListFixedUp(mlist)) preoptimizedMethodLists++;
}
if (!noClassesRemapped()) {
for (EACH_HEADER) {
- class_t **classrefs = _getObjc2ClassRefs(hi, &count);
+ Class *classrefs = _getObjc2ClassRefs(hi, &count);
for (i = 0; i < count; i++) {
remapClassRef(&classrefs[i]);
}
}
sel_unlock();
+#if SUPPORT_FIXUP
+ // Fix up old objc_msgSend_fixup call sites
+ for (EACH_HEADER) {
+ message_ref_t *refs = _getObjc2MessageRefs(hi, &count);
+ if (count == 0) continue;
+
+ if (PrintVtables) {
+ _objc_inform("VTABLES: repairing %zu unsupported vtable dispatch "
+ "call sites in %s", count, hi->fname);
+ }
+ for (i = 0; i < count; i++) {
+ fixupMessageRef(refs+i);
+ }
+ }
+#endif
+
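// Editorial sketch of the per-call-site repair the loop above performs.
// A message_ref_t pairs a dispatch stub with an as-yet-unregistered SEL
// (the field order below is an assumption); the simplified body registers
// the selector and retargets dispatch at plain objc_msgSend now that vtable
// dispatch is unsupported. The real fixupMessageRef() also handles the
// stret/fpret stub variants and ignored selectors.
struct message_ref_t {
    IMP imp;
    SEL sel;
};

static void fixupMessageRef_sketch(message_ref_t *msg)
{
    msg->sel = sel_registerName((const char *)msg->sel);
    msg->imp = (IMP)&objc_msgSend;
}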
// Discover protocols. Fix up protocol refs.
NXMapTable *protocol_map = protocols();
for (EACH_HEADER) {
- extern class_t OBJC_CLASS_$_Protocol;
+ extern objc_class OBJC_CLASS_$_Protocol;
Class cls = (Class)&OBJC_CLASS_$_Protocol;
assert(cls);
protocol_t **protocols = _getObjc2ProtocolList(hi, &count);
// fixme duplicate protocol from bundle
for (i = 0; i < count; i++) {
if (!NXMapGet(protocol_map, protocols[i]->name)) {
- protocols[i]->isa = cls;
+ protocols[i]->initIsa(cls);
NXMapKeyCopyingInsert(protocol_map,
protocols[i]->name, protocols[i]);
if (PrintProtocols) {
classref_t *classlist =
_getObjc2NonlazyClassList(hi, &count);
for (i = 0; i < count; i++) {
- realizeClass(remapClass(classlist[i]));
+ Class cls = remapClass(classlist[i]);
+ if (!cls) continue;
+
+ realizeClass(cls);
}
}
_getObjc2CategoryList(hi, &count);
for (i = 0; i < count; i++) {
category_t *cat = catlist[i];
- class_t *cls = remapClass(cat->cls);
+ Class cls = remapClass(cat->cls);
if (!cls) {
// Category's target class is missing (probably weak-linked).
// Disavow any knowledge of this category.
- catlist[i] = NULL;
+ catlist[i] = nil;
if (PrintConnecting) {
_objc_inform("CLASS: IGNORING category \?\?\?(%s) %p with "
"missing weak-linked target class",
|| cat->instanceProperties)
{
addUnattachedCategoryForClass(cat, cls, hi);
- if (isRealized(cls)) {
+ if (cls->isRealized()) {
remethodizeClass(cls);
classExists = YES;
}
if (PrintConnecting) {
_objc_inform("CLASS: found category -%s(%s) %s",
- getName(cls), cat->name,
+ cls->name(), cat->name,
classExists ? "on existing class" : "");
}
}
if (cat->classMethods || cat->protocols
/* || cat->classProperties */)
{
- addUnattachedCategoryForClass(cat, cls->isa, hi);
- if (isRealized(cls->isa)) {
- remethodizeClass(cls->isa);
+ addUnattachedCategoryForClass(cat, cls->ISA(), hi);
+ if (cls->ISA()->isRealized()) {
+ remethodizeClass(cls->ISA());
}
if (PrintConnecting) {
_objc_inform("CLASS: found category +%s(%s)",
- getName(cls), cat->name);
+ cls->name(), cat->name);
}
}
}
**********************************************************************/
// Recursively schedule +load for cls and any un-+load-ed superclasses.
// cls must already be connected.
-static void schedule_class_load(class_t *cls)
+static void schedule_class_load(Class cls)
{
if (!cls) return;
- assert(isRealized(cls)); // _read_images should realize
+ assert(cls->isRealized()); // _read_images should realize
if (cls->data()->flags & RW_LOADED) return;
// Ensure superclass-first ordering
- schedule_class_load(getSuperclass(cls));
+ schedule_class_load(cls->superclass);
- add_class_to_loadable_list((Class)cls);
- changeInfo(cls, RW_LOADED, 0);
+ add_class_to_loadable_list(cls);
+ cls->setInfo(RW_LOADED);
}
void prepare_load_methods(header_info *hi)
category_t **categorylist = _getObjc2NonlazyCategoryList(hi, &count);
for (i = 0; i < count; i++) {
category_t *cat = categorylist[i];
- class_t *cls = remapClass(cat->cls);
+ Class cls = remapClass(cat->cls);
if (!cls) continue; // category for ignored weak-linked class
realizeClass(cls);
- assert(isRealized(cls->isa));
- add_category_to_loadable_list((Category)cat);
+ assert(cls->ISA()->isRealized());
+ add_category_to_loadable_list(cat);
}
}
for (i = 0; i < count; i++) {
category_t *cat = catlist[i];
if (!cat) continue; // category for ignored weak-linked class
- class_t *cls = remapClass(cat->cls);
+ Class cls = remapClass(cat->cls);
assert(cls); // shouldn't have live category for dead class
// fixme for MH_DYLIB cat's class may have been unloaded already
removeUnattachedCategoryForClass(cat, cls);
// +load queue
- remove_category_from_loadable_list((Category)cat);
+ remove_category_from_loadable_list(cat);
}
// Unload classes.
    // This avoids bugs where this loop unloads a subclass before its superclass
for (i = 0; i < count; i++) {
- class_t *cls = remapClass(classlist[i]);
+ Class cls = remapClass(classlist[i]);
if (cls) {
- remove_class_from_loadable_list((Class)cls);
- detach_class(cls->isa, YES);
+ remove_class_from_loadable_list(cls);
+ detach_class(cls->ISA(), YES);
detach_class(cls, NO);
}
}
for (i = 0; i < count; i++) {
- class_t *cls = remapClass(classlist[i]);
+ Class cls = remapClass(classlist[i]);
if (cls) {
- free_class(cls->isa);
+ free_class(cls->ISA());
free_class(cls);
}
}
struct objc_method_description *
method_getDescription(Method m)
{
- if (!m) return NULL;
- return (struct objc_method_description *)newmethod(m);
+ if (!m) return nil;
+ return (struct objc_method_description *)m;
}
static IMP
_method_getImplementation(method_t *m)
{
- if (!m) return NULL;
+ if (!m) return nil;
return m->imp;
}
IMP
method_getImplementation(Method m)
{
- return _method_getImplementation(newmethod(m));
+ return _method_getImplementation(m);
}
/***********************************************************************
* method_getName
* Returns this method's selector.
-* The method must not be NULL.
+* The method must not be nil.
* The method must already have been fixed-up.
* Locking: none
**********************************************************************/
SEL
-method_getName(Method m_gen)
+method_getName(Method m)
{
- method_t *m = newmethod(m_gen);
- if (!m) return NULL;
+ if (!m) return nil;
- assert((SEL)m->name == sel_registerName((char *)m->name));
- return (SEL)m->name;
+ assert(m->name == sel_registerName(sel_getName(m->name)));
+ return m->name;
}
/***********************************************************************
* method_getTypeEncoding
* Returns this method's old-style type encoding string.
-* The method must not be NULL.
+* The method must not be nil.
* Locking: none
**********************************************************************/
const char *
method_getTypeEncoding(Method m)
{
- if (!m) return NULL;
- return newmethod(m)->types;
+ if (!m) return nil;
+ return m->types;
}
* The previous implementation is returned.
**********************************************************************/
static IMP
-_method_setImplementation(class_t *cls, method_t *m, IMP imp)
+_method_setImplementation(Class cls, method_t *m, IMP imp)
{
rwlock_assert_writing(&runtimeLock);
- if (!m) return NULL;
- if (!imp) return NULL;
+ if (!m) return nil;
+ if (!imp) return nil;
if (ignoreSelector(m->name)) {
// Ignored methods stay ignored
IMP old = _method_getImplementation(m);
m->imp = imp;
- // No cache flushing needed - cache contains Methods not IMPs.
-
- // vtable and RR/AWZ updates are slow if cls is NULL (i.e. unknown)
+ // Class-side cache updates are slow if cls is nil (i.e. unknown)
+ // RR/AWZ updates are slow if cls is nil (i.e. unknown)
// fixme build list of classes whose Methods are known externally?
- if (vtable_containsSelector(m->name)) {
- flushVtables(cls);
- }
+ // Scrub the old IMP from the cache.
+ // Can't simply overwrite the new IMP because the cached value could be
+ // the same IMP from a different Method.
+ flushImps(cls, m->name, old, nil, nil);
// Catch changes to retain/release and allocWithZone implementations
updateCustomRR_AWZ(cls, m);
- // fixme update monomorphism if necessary
-
return old;
}
IMP
method_setImplementation(Method m, IMP imp)
{
- // Don't know the class - will be slow if vtables are affected
+ // Don't know the class - will be slow if RR/AWZ are affected
// fixme build list of classes whose Methods are known externally?
IMP result;
rwlock_write(&runtimeLock);
- result = _method_setImplementation(Nil, newmethod(m), imp);
+ result = _method_setImplementation(Nil, m, imp);
rwlock_unlock_write(&runtimeLock);
return result;
}
-void method_exchangeImplementations(Method m1_gen, Method m2_gen)
+void method_exchangeImplementations(Method m1, Method m2)
{
- method_t *m1 = newmethod(m1_gen);
- method_t *m2 = newmethod(m2_gen);
if (!m1 || !m2) return;
rwlock_write(&runtimeLock);
m1->imp = m2->imp;
m2->imp = m1_imp;
- // vtable and RR/AWZ updates are slow because class is unknown
+
+ // RR/AWZ updates are slow because class is unknown
+ // Class-side cache updates are slow because class is unknown
// fixme build list of classes whose Methods are known externally?
- if (vtable_containsSelector(m1->name) ||
- vtable_containsSelector(m2->name))
- {
- // Don't know the class - will be slow if vtables are affected
- // fixme build list of classes whose Methods are known externally?
- flushVtables(NULL);
- }
+ // Scrub the old IMPs from the caches.
+ // Can't simply overwrite the new IMP because the cached value could be
+ // the same IMP from a different Method.
+ flushImps(nil, m1->name,m2->imp, m2->name,m1->imp);
updateCustomRR_AWZ(nil, m1);
updateCustomRR_AWZ(nil, m2);
- // fixme update monomorphism if necessary
-
rwlock_unlock_write(&runtimeLock);
}
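// Hypothetical caller-side example of the public API above (the class and
// selector names are illustrative only): swap two implementations on one
// class; per the hunk above, the runtime then scrubs both IMPs from every
// method cache via flushImps() instead of rebuilding vtables.
static void swizzle_description_example(void)
{
    Class cls = objc_getClass("MyView");
    Method orig = class_getInstanceMethod(cls, sel_registerName("description"));
    Method repl = class_getInstanceMethod(cls, sel_registerName("my_description"));
    if (orig && repl) method_exchangeImplementations(orig, repl);
}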
ivar_getOffset(Ivar ivar)
{
if (!ivar) return 0;
- return *newivar(ivar)->offset;
+ return *ivar->offset;
}
const char *
ivar_getName(Ivar ivar)
{
- if (!ivar) return NULL;
- return newivar(ivar)->name;
+ if (!ivar) return nil;
+ return ivar->name;
}
const char *
ivar_getTypeEncoding(Ivar ivar)
{
- if (!ivar) return NULL;
- return newivar(ivar)->type;
+ if (!ivar) return nil;
+ return ivar->type;
}
const char *property_getName(objc_property_t prop)
{
- return newproperty(prop)->name;
+ return prop->name;
}
const char *property_getAttributes(objc_property_t prop)
{
- return newproperty(prop)->attributes;
+ return prop->attributes;
}
objc_property_attribute_t *property_copyAttributeList(objc_property_t prop,
{
if (!prop) {
if (outCount) *outCount = 0;
- return NULL;
+ return nil;
}
objc_property_attribute_t *result;
rwlock_read(&runtimeLock);
- result = copyPropertyAttributeList(newproperty(prop)->attributes,outCount);
+ result = copyPropertyAttributeList(prop->attributes,outCount);
rwlock_unlock_read(&runtimeLock);
return result;
}
char * property_copyAttributeValue(objc_property_t prop, const char *name)
{
- if (!prop || !name || *name == '\0') return NULL;
+ if (!prop || !name || *name == '\0') return nil;
char *result;
rwlock_read(&runtimeLock);
- result = copyPropertyAttributeValue(newproperty(prop)->attributes, name);
+ result = copyPropertyAttributeValue(prop->attributes, name);
rwlock_unlock_read(&runtimeLock);
return result;
}
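
A short sketch of reading property metadata through the accessors above; Widget and its title property are hypothetical, and "T" is the standard type-encoding attribute key:

#import <objc/runtime.h>
#include <stdio.h>
#include <stdlib.h>

static void dumpTitleProperty(void)
{
    objc_property_t prop = class_getProperty(objc_getClass("Widget"), "title");
    if (!prop) return;

    // Raw attribute string, e.g. "T@\"NSString\",C,N,V_title"
    printf("%s = %s\n", property_getName(prop), property_getAttributes(prop));

    // Single attribute: "T" is the type-encoding key. Caller frees the copy.
    char *type = property_copyAttributeValue(prop, "T");
    if (type) { printf("type: %s\n", type); free(type); }

    // Full list; the caller frees the returned block.
    unsigned int n = 0;
    objc_property_attribute_t *attrs = property_copyAttributeList(prop, &n);
    for (unsigned int i = 0; i < n; i++) {
        printf("  %s -> %s\n", attrs[i].name, attrs[i].value);
    }
    free(attrs);
}
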
* b is the index of m in m's method list
* a+b is the index of m's extended types in the extended types array
**********************************************************************/
-static void getExtendedTypesIndexesForMethod(protocol_t *proto, const method_t *m, BOOL isRequiredMethod, BOOL isInstanceMethod, uint32_t& a, uint32_t &b)
+static void getExtendedTypesIndexesForMethod(protocol_t *proto, const method_t *m, bool isRequiredMethod, bool isInstanceMethod, uint32_t& a, uint32_t &b)
{
a = 0;
b = method_list_index(proto->optionalClassMethods, m);
return;
}
- a += method_list_count(proto->optionalClassMethods);
-}
+ a += method_list_count(proto->optionalClassMethods);
+}
+
+
+/***********************************************************************
+* getExtendedTypesIndexForMethod
+* Returns the index of m's extended types in proto's extended types array.
+**********************************************************************/
+static uint32_t getExtendedTypesIndexForMethod(protocol_t *proto, const method_t *m, bool isRequiredMethod, bool isInstanceMethod)
+{
+ uint32_t a;
+ uint32_t b;
+ getExtendedTypesIndexesForMethod(proto, m, isRequiredMethod,
+ isInstanceMethod, a, b);
+ return a + b;
+}
+
+
+/***********************************************************************
+* fixupProtocolMethodList
+* Fixes up a single method list in a protocol.
+**********************************************************************/
+static void
+fixupProtocolMethodList(protocol_t *proto, method_list_t **mlistp,
+ bool required, bool instance)
+{
+ rwlock_assert_writing(&runtimeLock);
+
+ if (!*mlistp) return;
+ if (isMethodListFixedUp(*mlistp)) return;
+
+ bool hasExtendedMethodTypes = proto->hasExtendedMethodTypes();
+ *mlistp = fixupMethodList(*mlistp, true/*always copy for simplicity*/,
+ !hasExtendedMethodTypes/*sort if no ext*/);
+
+ method_list_t *mlist = *mlistp;
+
+ if (hasExtendedMethodTypes) {
+ // Sort method list and extended method types together.
+ // fixupMethodList() can't do this.
+ // fixme COW stomp
+ uint32_t count = method_list_count(mlist);
+ uint32_t prefix;
+ uint32_t junk;
+ getExtendedTypesIndexesForMethod(proto, method_list_nth(mlist, 0),
+ required, instance, prefix, junk);
+ const char **types = proto->extendedMethodTypes;
+ for (uint32_t i = 0; i < count; i++) {
+ for (uint32_t j = i+1; j < count; j++) {
+ method_t *mi = method_list_nth(mlist, i);
+ method_t *mj = method_list_nth(mlist, j);
+ if (mi->name > mj->name) {
+ method_list_swap(mlist, i, j);
+ std::swap(types[prefix+i], types[prefix+j]);
+ }
+ }
+ }
+ }
+}
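
The loop above must keep the extended-types array in lockstep with the method list; a generic sketch of the same co-sort, written independently of the runtime's types:

#include <utility>

// Order `keys` ascending and apply the identical permutation to the parallel
// `values` array; quadratic, like the compare-and-swap loop above.
template <typename K, typename V>
static void cosort(K *keys, V *values, unsigned count)
{
    for (unsigned i = 0; i < count; i++) {
        for (unsigned j = i + 1; j < count; j++) {
            if (keys[j] < keys[i]) {
                std::swap(keys[i], keys[j]);
                std::swap(values[i], values[j]);
            }
        }
    }
}
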
+
+
+/***********************************************************************
+* fixupProtocol
+* Fixes up all of a protocol's method lists.
+**********************************************************************/
+static void
+fixupProtocol(protocol_t *proto)
+{
+ rwlock_assert_writing(&runtimeLock);
+
+ if (proto->protocols) {
+ for (uintptr_t i = 0; i < proto->protocols->count; i++) {
+ protocol_t *sub = remapProtocol(proto->protocols->list[i]);
+ if (!sub->isFixedUp()) fixupProtocol(sub);
+ }
+ }
+ fixupProtocolMethodList(proto, &proto->instanceMethods, YES, YES);
+ fixupProtocolMethodList(proto, &proto->classMethods, YES, NO);
+ fixupProtocolMethodList(proto, &proto->optionalInstanceMethods, NO, YES);
+ fixupProtocolMethodList(proto, &proto->optionalClassMethods, NO, NO);
-/***********************************************************************
-* getExtendedTypesIndexForMethod
-* Returns the index of m's extended types in proto's extended types array.
-**********************************************************************/
-static uint32_t getExtendedTypesIndexForMethod(protocol_t *proto, const method_t *m, BOOL isRequiredMethod, BOOL isInstanceMethod)
-{
- uint32_t a;
- uint32_t b;
- getExtendedTypesIndexesForMethod(proto, m, isRequiredMethod,
- isInstanceMethod, a, b);
- return a + b;
+ // fixme memory barrier so we can check this with no lock
+ proto->flags |= PROTOCOL_FIXED_UP;
}
/***********************************************************************
-* _protocol_getMethod_nolock
-* Locking: runtimeLock must be write-locked by the caller
+* fixupProtocolIfNeeded
+* Fixes up all of a protocol's method lists if they aren't fixed up already.
+* Locking: write-locks runtimeLock.
**********************************************************************/
-static method_t *
-_protocol_getMethod_nolock(protocol_t *proto, SEL sel,
- BOOL isRequiredMethod, BOOL isInstanceMethod,
- BOOL recursive)
+static void
+fixupProtocolIfNeeded(protocol_t *proto)
{
- rwlock_assert_writing(&runtimeLock);
+ rwlock_assert_unlocked(&runtimeLock);
+ assert(proto);
- if (!proto || !sel) return NULL;
+ if (!proto->isFixedUp()) {
+ rwlock_write(&runtimeLock);
+ fixupProtocol(proto);
+ rwlock_unlock_write(&runtimeLock);
+ }
+}
- method_list_t **mlistp = NULL;
- if (isRequiredMethod) {
- if (isInstanceMethod) {
+static method_list_t *
+getProtocolMethodList(protocol_t *proto, bool required, bool instance)
+{
+ method_list_t **mlistp = nil;
+ if (required) {
+ if (instance) {
mlistp = &proto->instanceMethods;
} else {
mlistp = &proto->classMethods;
}
} else {
- if (isInstanceMethod) {
+ if (instance) {
mlistp = &proto->optionalInstanceMethods;
} else {
mlistp = &proto->optionalClassMethods;
}
}
- if (*mlistp) {
- method_list_t *mlist = *mlistp;
- if (!isMethodListFixedUp(mlist)) {
- bool hasExtendedMethodTypes = proto->hasExtendedMethodTypes();
- mlist = fixupMethodList(mlist, true/*always copy for simplicity*/,
- !hasExtendedMethodTypes/*sort if no ext*/);
- *mlistp = mlist;
-
- if (hasExtendedMethodTypes) {
- // Sort method list and extended method types together.
- // fixupMethodList() can't do this.
- // fixme COW stomp
- uint32_t count = method_list_count(mlist);
- uint32_t prefix;
- uint32_t unused;
- getExtendedTypesIndexesForMethod(proto, method_list_nth(mlist, 0), isRequiredMethod, isInstanceMethod, prefix, unused);
- const char **types = proto->extendedMethodTypes;
- for (uint32_t i = 0; i < count; i++) {
- for (uint32_t j = i+1; j < count; j++) {
- method_t *mi = method_list_nth(mlist, i);
- method_t *mj = method_list_nth(mlist, j);
- if (mi->name > mj->name) {
- method_list_swap(mlist, i, j);
- std::swap(types[prefix+i], types[prefix+j]);
- }
- }
- }
- }
- }
+ return *mlistp;
+}
+
+
+/***********************************************************************
+* protocol_getMethod_nolock
+* Locking: runtimeLock must be held by the caller
+**********************************************************************/
+static method_t *
+protocol_getMethod_nolock(protocol_t *proto, SEL sel,
+ bool isRequiredMethod, bool isInstanceMethod,
+ bool recursive)
+{
+ rwlock_assert_locked(&runtimeLock);
+
+ if (!proto || !sel) return nil;
+ assert(proto->isFixedUp());
+
+ method_list_t *mlist =
+ getProtocolMethodList(proto, isRequiredMethod, isInstanceMethod);
+ if (mlist) {
method_t *m = search_method_list(mlist, sel);
if (m) return m;
}
method_t *m;
for (uint32_t i = 0; i < proto->protocols->count; i++) {
protocol_t *realProto = remapProtocol(proto->protocols->list[i]);
- m = _protocol_getMethod_nolock(realProto, sel,
- isRequiredMethod, isInstanceMethod,
- true);
+ m = protocol_getMethod_nolock(realProto, sel,
+ isRequiredMethod, isInstanceMethod,
+ true);
if (m) return m;
}
}
- return NULL;
+ return nil;
}
/***********************************************************************
-* _protocol_getMethod
+* protocol_getMethod
* fixme
-* Locking: write-locks runtimeLock
+* Locking: acquires runtimeLock
**********************************************************************/
Method
-_protocol_getMethod(Protocol *p, SEL sel, BOOL isRequiredMethod, BOOL isInstanceMethod, BOOL recursive)
+protocol_getMethod(protocol_t *proto, SEL sel, bool isRequiredMethod, bool isInstanceMethod, bool recursive)
{
- rwlock_write(&runtimeLock);
- method_t *result = _protocol_getMethod_nolock(newprotocol(p), sel,
- isRequiredMethod,
- isInstanceMethod,
- recursive);
- rwlock_unlock_write(&runtimeLock);
- return (Method)result;
+ if (!proto) return nil;
+ fixupProtocolIfNeeded(proto);
+
+ rwlock_read(&runtimeLock);
+ method_t *result = protocol_getMethod_nolock(proto, sel,
+ isRequiredMethod,
+ isInstanceMethod,
+ recursive);
+ rwlock_unlock_read(&runtimeLock);
+ return result;
}
/***********************************************************************
-* _protocol_getMethodTypeEncoding_nolock
+* protocol_getMethodTypeEncoding_nolock
* Return the @encode string for the requested protocol method.
-* Returns NULL if the compiler did not emit any extended @encode data.
+* Returns nil if the compiler did not emit any extended @encode data.
-* Locking: runtimeLock must be held for writing by the caller
+* Locking: runtimeLock must be held by the caller
**********************************************************************/
const char *
-_protocol_getMethodTypeEncoding_nolock(protocol_t *proto, SEL sel,
- BOOL isRequiredMethod,
- BOOL isInstanceMethod)
+protocol_getMethodTypeEncoding_nolock(protocol_t *proto, SEL sel,
+ bool isRequiredMethod,
+ bool isInstanceMethod)
{
- rwlock_assert_writing(&runtimeLock);
+ rwlock_assert_locked(&runtimeLock);
+
+ if (!proto) return nil;
+ if (!proto->hasExtendedMethodTypes()) return nil;
- if (!proto) return NULL;
- if (!proto->hasExtendedMethodTypes()) return NULL;
+ assert(proto->isFixedUp());
method_t *m =
- _protocol_getMethod_nolock(proto, sel,
- isRequiredMethod, isInstanceMethod, false);
+ protocol_getMethod_nolock(proto, sel,
+ isRequiredMethod, isInstanceMethod, false);
if (m) {
uint32_t i = getExtendedTypesIndexForMethod(proto, m,
isRequiredMethod,
if (proto->protocols) {
for (uintptr_t i = 0; i < proto->protocols->count; i++) {
const char *enc =
- _protocol_getMethodTypeEncoding_nolock(remapProtocol(proto->protocols->list[i]), sel, isRequiredMethod, isInstanceMethod);
+ protocol_getMethodTypeEncoding_nolock(remapProtocol(proto->protocols->list[i]), sel, isRequiredMethod, isInstanceMethod);
if (enc) return enc;
}
}
- return NULL;
+ return nil;
}
/***********************************************************************
* _protocol_getMethodTypeEncoding
* Return the @encode string for the requested protocol method.
-* Returns NULL if the compiler did not emit any extended @encode data.
-* Locking: runtimeLock must not be held by the caller
+* Returns nil if the compiler did not emit any extended @encode data.
+* Locking: acquires runtimeLock
**********************************************************************/
const char *
_protocol_getMethodTypeEncoding(Protocol *proto_gen, SEL sel,
BOOL isRequiredMethod, BOOL isInstanceMethod)
{
+ protocol_t *proto = newprotocol(proto_gen);
+
+ if (!proto) return nil;
+ fixupProtocolIfNeeded(proto);
+
const char *enc;
- rwlock_write(&runtimeLock);
- enc = _protocol_getMethodTypeEncoding_nolock(newprotocol(proto_gen), sel,
- isRequiredMethod,
- isInstanceMethod);
- rwlock_unlock_write(&runtimeLock);
+ rwlock_read(&runtimeLock);
+ enc = protocol_getMethodTypeEncoding_nolock(proto, sel,
+ isRequiredMethod,
+ isInstanceMethod);
+ rwlock_unlock_read(&runtimeLock);
return enc;
}
BOOL isRequiredMethod, BOOL isInstanceMethod)
{
Method m =
- _protocol_getMethod(p, aSel, isRequiredMethod, isInstanceMethod, true);
+ protocol_getMethod(newprotocol(p), aSel,
+ isRequiredMethod, isInstanceMethod, true);
if (m) return *method_getDescription(m);
- else return (struct objc_method_description){NULL, NULL};
+ else return (struct objc_method_description){nil, nil};
}
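
A usage sketch of the public wrapper above, assuming the NSObject protocol is registered in the process and declares -hash as a required instance method:

#import <objc/runtime.h>
#include <stdio.h>

static void printHashDescription(void)
{
    Protocol *p = objc_getProtocol("NSObject");
    struct objc_method_description d =
        protocol_getMethodDescription(p, sel_registerName("hash"),
                                      YES/*required*/, YES/*instance*/);
    if (d.name) printf("%s types: %s\n", sel_getName(d.name), d.types);
}
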
/***********************************************************************
-* _protocol_conformsToProtocol_nolock
+* protocol_conformsToProtocol_nolock
* Returns YES if self conforms to other.
* Locking: runtimeLock must be held by the caller.
**********************************************************************/
-static BOOL _protocol_conformsToProtocol_nolock(protocol_t *self, protocol_t *other)
+static bool
+protocol_conformsToProtocol_nolock(protocol_t *self, protocol_t *other)
{
+ rwlock_assert_locked(&runtimeLock);
+
if (!self || !other) {
return NO;
}
+ // protocols need not be fixed up
+
if (0 == strcmp(self->name, other->name)) {
return YES;
}
if (0 == strcmp(other->name, proto->name)) {
return YES;
}
- if (_protocol_conformsToProtocol_nolock(proto, other)) {
+ if (protocol_conformsToProtocol_nolock(proto, other)) {
return YES;
}
}
{
BOOL result;
rwlock_read(&runtimeLock);
- result = _protocol_conformsToProtocol_nolock(newprotocol(self),
- newprotocol(other));
+ result = protocol_conformsToProtocol_nolock(newprotocol(self),
+ newprotocol(other));
rwlock_unlock_read(&runtimeLock);
return result;
}
unsigned int *outCount)
{
protocol_t *proto = newprotocol(p);
- struct objc_method_description *result = NULL;
+ struct objc_method_description *result = nil;
unsigned int count = 0;
if (!proto) {
if (outCount) *outCount = 0;
- return NULL;
+ return nil;
}
- rwlock_read(&runtimeLock);
+ fixupProtocolIfNeeded(proto);
- method_list_t *mlist = NULL;
+ rwlock_read(&runtimeLock);
- if (isRequiredMethod) {
- if (isInstanceMethod) {
- mlist = proto->instanceMethods;
- } else {
- mlist = proto->classMethods;
- }
- } else {
- if (isInstanceMethod) {
- mlist = proto->optionalInstanceMethods;
- } else {
- mlist = proto->optionalClassMethods;
- }
- }
+ method_list_t *mlist =
+ getProtocolMethodList(proto, isRequiredMethod, isInstanceMethod);
if (mlist) {
unsigned int i;
calloc(count + 1, sizeof(struct objc_method_description));
for (i = 0; i < count; i++) {
method_t *m = method_list_nth(mlist, i);
- result[i].name = sel_registerName((const char *)m->name);
+ result[i].name = m->name;
result[i].types = (char *)m->types;
}
}
/***********************************************************************
* protocol_getProperty
* fixme
-* Locking: acquires runtimeLock
+* Locking: runtimeLock must be held by the caller
**********************************************************************/
static property_t *
-_protocol_getProperty_nolock(protocol_t *proto, const char *name,
- BOOL isRequiredProperty, BOOL isInstanceProperty)
+protocol_getProperty_nolock(protocol_t *proto, const char *name,
+ bool isRequiredProperty, bool isInstanceProperty)
{
+ rwlock_assert_locked(&runtimeLock);
+
if (!isRequiredProperty || !isInstanceProperty) {
// Only required instance properties are currently supported
- return NULL;
+ return nil;
}
property_list_t *plist;
for (i = 0; i < proto->protocols->count; i++) {
protocol_t *p = remapProtocol(proto->protocols->list[i]);
property_t *prop =
- _protocol_getProperty_nolock(p, name,
- isRequiredProperty,
- isInstanceProperty);
+ protocol_getProperty_nolock(p, name,
+ isRequiredProperty,
+ isInstanceProperty);
if (prop) return prop;
}
}
- return NULL;
+ return nil;
}
objc_property_t protocol_getProperty(Protocol *p, const char *name,
{
property_t *result;
- if (!p || !name) return NULL;
+ if (!p || !name) return nil;
rwlock_read(&runtimeLock);
- result = _protocol_getProperty_nolock(newprotocol(p), name,
- isRequiredProperty,
- isInstanceProperty);
+ result = protocol_getProperty_nolock(newprotocol(p), name,
+ isRequiredProperty,
+ isInstanceProperty);
rwlock_unlock_read(&runtimeLock);
return (objc_property_t)result;
static property_t **
copyPropertyList(property_list_t *plist, unsigned int *outCount)
{
- property_t **result = NULL;
+ property_t **result = nil;
unsigned int count = 0;
if (plist) {
for (i = 0; i < count; i++) {
result[i] = property_list_nth(plist, i);
}
- result[i] = NULL;
+ result[i] = nil;
}
if (outCount) *outCount = count;
objc_property_t *protocol_copyPropertyList(Protocol *proto, unsigned int *outCount)
{
- property_t **result = NULL;
+ property_t **result = nil;
if (!proto) {
if (outCount) *outCount = 0;
- return NULL;
+ return nil;
}
rwlock_read(&runtimeLock);
protocol_copyProtocolList(Protocol *p, unsigned int *outCount)
{
unsigned int count = 0;
- Protocol **result = NULL;
+ Protocol **result = nil;
protocol_t *proto = newprotocol(p);
if (!proto) {
if (outCount) *outCount = 0;
- return NULL;
+ return nil;
}
rwlock_read(&runtimeLock);
for (i = 0; i < count; i++) {
result[i] = (Protocol *)remapProtocol(proto->protocols->list[i]);
}
- result[i] = NULL;
+ result[i] = nil;
}
rwlock_unlock_read(&runtimeLock);
* objc_allocateProtocol
* Creates a new protocol. The protocol may not be used until
* objc_registerProtocol() is called.
-* Returns NULL if a protocol with the same name already exists.
+* Returns nil if a protocol with the same name already exists.
* Locking: acquires runtimeLock
**********************************************************************/
Protocol *
if (NXMapGet(protocols(), name)) {
rwlock_unlock_write(&runtimeLock);
- return NULL;
+ return nil;
}
protocol_t *result = (protocol_t *)_calloc_internal(sizeof(protocol_t), 1);
- extern class_t OBJC_CLASS_$___IncompleteProtocol;
+ extern objc_class OBJC_CLASS_$___IncompleteProtocol;
Class cls = (Class)&OBJC_CLASS_$___IncompleteProtocol;
- result->isa = cls;
+ result->initIsa(cls);
result->name = _strdup_internal(name);
// fixme reserve name without installing
rwlock_write(&runtimeLock);
- extern class_t OBJC_CLASS_$___IncompleteProtocol;
+ extern objc_class OBJC_CLASS_$___IncompleteProtocol;
Class oldcls = (Class)&OBJC_CLASS_$___IncompleteProtocol;
- extern class_t OBJC_CLASS_$_Protocol;
+ extern objc_class OBJC_CLASS_$_Protocol;
Class cls = (Class)&OBJC_CLASS_$_Protocol;
- if (proto->isa == cls) {
+ if (proto->ISA() == cls) {
_objc_inform("objc_registerProtocol: protocol '%s' was already "
"registered!", proto->name);
rwlock_unlock_write(&runtimeLock);
return;
}
- if (proto->isa != oldcls) {
+ if (proto->ISA() != oldcls) {
_objc_inform("objc_registerProtocol: protocol '%s' was not allocated "
"with objc_allocateProtocol!", proto->name);
rwlock_unlock_write(&runtimeLock);
return;
}
- proto->isa = cls;
+ proto->initIsa(cls);
NXMapKeyCopyingInsert(protocols(), proto->name, proto);
protocol_t *proto = newprotocol(proto_gen);
protocol_t *addition = newprotocol(addition_gen);
- extern class_t OBJC_CLASS_$___IncompleteProtocol;
+ extern objc_class OBJC_CLASS_$___IncompleteProtocol;
Class cls = (Class)&OBJC_CLASS_$___IncompleteProtocol;
if (!proto_gen) return;
rwlock_write(&runtimeLock);
- if (proto->isa != cls) {
+ if (proto->ISA() != cls) {
_objc_inform("protocol_addProtocol: modified protocol '%s' is not "
"under construction!", proto->name);
rwlock_unlock_write(&runtimeLock);
return;
}
- if (addition->isa == cls) {
+ if (addition->ISA() == cls) {
_objc_inform("protocol_addProtocol: added protocol '%s' is still "
"under construction!", addition->name);
rwlock_unlock_write(&runtimeLock);
* Locking: acquires runtimeLock
**********************************************************************/
static void
-_protocol_addMethod(method_list_t **list, SEL name, const char *types)
+protocol_addMethod_nolock(method_list_t **list, SEL name, const char *types)
{
if (!*list) {
*list = (method_list_t *)
method_t *meth = method_list_nth(*list, (*list)->count++);
meth->name = name;
meth->types = _strdup_internal(types ? types : "");
- meth->imp = NULL;
+ meth->imp = nil;
}
void
{
protocol_t *proto = newprotocol(proto_gen);
- extern class_t OBJC_CLASS_$___IncompleteProtocol;
+ extern objc_class OBJC_CLASS_$___IncompleteProtocol;
Class cls = (Class)&OBJC_CLASS_$___IncompleteProtocol;
if (!proto_gen) return;
rwlock_write(&runtimeLock);
- if (proto->isa != cls) {
+ if (proto->ISA() != cls) {
_objc_inform("protocol_addMethodDescription: protocol '%s' is not "
"under construction!", proto->name);
rwlock_unlock_write(&runtimeLock);
}
if (isRequiredMethod && isInstanceMethod) {
- _protocol_addMethod(&proto->instanceMethods, name, types);
+ protocol_addMethod_nolock(&proto->instanceMethods, name, types);
} else if (isRequiredMethod && !isInstanceMethod) {
- _protocol_addMethod(&proto->classMethods, name, types);
+ protocol_addMethod_nolock(&proto->classMethods, name, types);
} else if (!isRequiredMethod && isInstanceMethod) {
- _protocol_addMethod(&proto->optionalInstanceMethods, name, types);
+        protocol_addMethod_nolock(&proto->optionalInstanceMethods, name, types);
} else /* !isRequiredMethod && !isInstanceMethod) */ {
- _protocol_addMethod(&proto->optionalClassMethods, name, types);
+ protocol_addMethod_nolock(&proto->optionalClassMethods, name, types);
}
rwlock_unlock_write(&runtimeLock);
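
A sketch of the construction sequence these checks enforce: allocate, add descriptions while the protocol is still under construction, then register exactly once. The protocol name and selector are hypothetical:

#import <objc/runtime.h>

static void buildDemoProtocol(void)
{
    Protocol *proto = objc_allocateProtocol("DemoDrawing");
    if (!proto) return;  // nil if a protocol with this name already exists

    // Only legal while the protocol is under construction.
    protocol_addMethodDescription(proto, sel_registerName("draw"), "v@:",
                                  YES/*required*/, YES/*instance*/);

    // After this the protocol is immutable and visible to objc_getProtocol().
    objc_registerProtocol(proto);
}
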
* Locking: acquires runtimeLock
**********************************************************************/
static void
-_protocol_addProperty(property_list_t **plist, const char *name,
- const objc_property_attribute_t *attrs,
- unsigned int count)
+protocol_addProperty_nolock(property_list_t **plist, const char *name,
+ const objc_property_attribute_t *attrs,
+ unsigned int count)
{
if (!*plist) {
*plist = (property_list_t *)
{
protocol_t *proto = newprotocol(proto_gen);
- extern class_t OBJC_CLASS_$___IncompleteProtocol;
+ extern objc_class OBJC_CLASS_$___IncompleteProtocol;
Class cls = (Class)&OBJC_CLASS_$___IncompleteProtocol;
if (!proto) return;
rwlock_write(&runtimeLock);
- if (proto->isa != cls) {
+ if (proto->ISA() != cls) {
_objc_inform("protocol_addProperty: protocol '%s' is not "
"under construction!", proto->name);
rwlock_unlock_write(&runtimeLock);
}
if (isRequiredProperty && isInstanceProperty) {
- _protocol_addProperty(&proto->instanceProperties, name, attrs, count);
+ protocol_addProperty_nolock(&proto->instanceProperties, name, attrs, count);
}
//else if (isRequiredProperty && !isInstanceProperty) {
- // _protocol_addProperty(&proto->classProperties, name, attrs, count);
+ // protocol_addProperty_nolock(&proto->classProperties, name, attrs, count);
//} else if (!isRequiredProperty && isInstanceProperty) {
- // _protocol_addProperty(&proto->optionalInstanceProperties, name, attrs, count);
+ // protocol_addProperty_nolock(&proto->optionalInstanceProperties, name, attrs, count);
//} else /* !isRequiredProperty && !isInstanceProperty) */ {
- // _protocol_addProperty(&proto->optionalClassProperties, name, attrs, count);
+ // protocol_addProperty_nolock(&proto->optionalClassProperties, name, attrs, count);
//}
rwlock_unlock_write(&runtimeLock);
realizeAllClasses();
int count;
- class_t *cls;
+ Class cls;
NXHashState state;
NXHashTable *classes = realizedClasses();
int allCount = NXCountHashTable(classes);
while (count < bufferLen &&
NXNextHashState(classes, &state, (void **)&cls))
{
- buffer[count++] = (Class)cls;
+ buffer[count++] = cls;
}
rwlock_unlock_write(&runtimeLock);
* Returns pointers to all classes.
* This requires all classes be realized, which is regretfully non-lazy.
*
-* outCount may be NULL. *outCount is the number of classes returned.
-* If the returned array is not NULL, it is NULL-terminated and must be
+* outCount may be nil. *outCount is the number of classes returned.
+* If the returned array is not nil, it is nil-terminated and must be
* freed with free().
* Locking: write-locks runtimeLock
**********************************************************************/
realizeAllClasses();
- Class *result = NULL;
+ Class *result = nil;
NXHashTable *classes = realizedClasses();
unsigned int count = NXCountHashTable(classes);
if (count > 0) {
- class_t *cls;
+ Class cls;
NXHashState state = NXInitHashState(classes);
result = (Class *)malloc((1+count) * sizeof(Class));
count = 0;
while (NXNextHashState(classes, &state, (void **)&cls)) {
- result[count++] = (Class)cls;
+ result[count++] = cls;
}
- result[count] = NULL;
+ result[count] = nil;
}
rwlock_unlock_write(&runtimeLock);
if (count == 0) {
rwlock_unlock_read(&runtimeLock);
if (outCount) *outCount = 0;
- return NULL;
+ return nil;
}
result = (Protocol **)calloc(1 + count, sizeof(Protocol *));
result[i++] = proto;
}
- result[i++] = NULL;
+ result[i++] = nil;
assert(i == count+1);
rwlock_unlock_read(&runtimeLock);
/***********************************************************************
* objc_getProtocol
-* Get a protocol by name, or return NULL
+* Get a protocol by name, or return nil
* Locking: read-locks runtimeLock
**********************************************************************/
Protocol *objc_getProtocol(const char *name)
* Locking: read-locks runtimeLock
**********************************************************************/
Method *
-class_copyMethodList(Class cls_gen, unsigned int *outCount)
+class_copyMethodList(Class cls, unsigned int *outCount)
{
- class_t *cls = newcls(cls_gen);
unsigned int count = 0;
- Method *result = NULL;
+ Method *result = nil;
if (!cls) {
if (outCount) *outCount = 0;
- return NULL;
+ return nil;
}
rwlock_read(&runtimeLock);
- assert(isRealized(cls));
+ assert(cls->isRealized());
FOREACH_METHOD_LIST(mlist, cls, {
count += mlist->count;
FOREACH_METHOD_LIST(mlist, cls, {
unsigned int i;
for (i = 0; i < mlist->count; i++) {
- Method aMethod = (Method)method_list_nth(mlist, i);
+ method_t *aMethod = method_list_nth(mlist, i);
if (ignoreSelector(method_getName(aMethod))) {
count--;
continue;
result[m++] = aMethod;
}
});
- result[m] = NULL;
+ result[m] = nil;
}
rwlock_unlock_read(&runtimeLock);
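
A usage sketch of class_copyMethodList as rewritten above; Widget is hypothetical, and the caller frees the returned block:

#import <objc/runtime.h>
#include <stdio.h>
#include <stdlib.h>

static void listWidgetMethods(void)
{
    unsigned int count = 0;
    Method *methods = class_copyMethodList(objc_getClass("Widget"), &count);
    for (unsigned int i = 0; i < count; i++) {
        printf("%s  %s\n", sel_getName(method_getName(methods[i])),
               method_getTypeEncoding(methods[i]));
    }
    free(methods);  // caller owns the returned block
}
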
* Locking: read-locks runtimeLock
**********************************************************************/
Ivar *
-class_copyIvarList(Class cls_gen, unsigned int *outCount)
+class_copyIvarList(Class cls, unsigned int *outCount)
{
- class_t *cls = newcls(cls_gen);
const ivar_list_t *ivars;
- Ivar *result = NULL;
+ Ivar *result = nil;
unsigned int count = 0;
unsigned int i;
if (!cls) {
if (outCount) *outCount = 0;
- return NULL;
+ return nil;
}
rwlock_read(&runtimeLock);
- assert(isRealized(cls));
+ assert(cls->isRealized());
if ((ivars = cls->data()->ro->ivars) && ivars->count) {
result = (Ivar *)malloc((ivars->count+1) * sizeof(Ivar));
for (i = 0; i < ivars->count; i++) {
ivar_t *ivar = ivar_list_nth(ivars, i);
if (!ivar->offset) continue; // anonymous bitfield
- result[count++] = (Ivar)ivar;
+ result[count++] = ivar;
}
- result[count] = NULL;
+ result[count] = nil;
}
rwlock_unlock_read(&runtimeLock);
/***********************************************************************
* class_copyPropertyList. Returns a heap block containing the
-* properties declared in the class, or NULL if the class
+* properties declared in the class, or nil if the class
* declares no properties. Caller must free the block.
* Does not copy any superclass's properties.
* Locking: read-locks runtimeLock
**********************************************************************/
objc_property_t *
-class_copyPropertyList(Class cls_gen, unsigned int *outCount)
+class_copyPropertyList(Class cls, unsigned int *outCount)
{
- class_t *cls = newcls(cls_gen);
chained_property_list *plist;
unsigned int count = 0;
- property_t **result = NULL;
+ property_t **result = nil;
if (!cls) {
if (outCount) *outCount = 0;
- return NULL;
+ return nil;
}
rwlock_read(&runtimeLock);
- assert(isRealized(cls));
+ assert(cls->isRealized());
for (plist = cls->data()->properties; plist; plist = plist->next) {
count += plist->count;
result[p++] = &plist->list[i];
}
}
- result[p] = NULL;
+ result[p] = nil;
}
rwlock_unlock_read(&runtimeLock);
/***********************************************************************
-* _class_getLoadMethod
+* objc_class::getLoadMethod
* fixme
* Called only from add_class_to_loadable_list.
* Locking: runtimeLock must be read- or write-locked by the caller.
**********************************************************************/
IMP
-_class_getLoadMethod(Class cls_gen)
+objc_class::getLoadMethod()
{
rwlock_assert_locked(&runtimeLock);
- class_t *cls = newcls(cls_gen);
const method_list_t *mlist;
uint32_t i;
- assert(isRealized(cls));
- assert(isRealized(cls->isa));
- assert(!isMetaClass(cls));
- assert(isMetaClass(cls->isa));
+ assert(isRealized());
+ assert(ISA()->isRealized());
+ assert(!isMetaClass());
+ assert(ISA()->isMetaClass());
- mlist = cls->isa->data()->ro->baseMethods;
+ mlist = ISA()->data()->ro->baseMethods;
if (mlist) for (i = 0; i < mlist->count; i++) {
method_t *m = method_list_nth(mlist, i);
if (0 == strcmp((const char *)m->name, "load")) {
}
}
- return NULL;
+ return nil;
}
const char *
_category_getName(Category cat)
{
- return newcategory(cat)->name;
+ return cat->name;
}
_category_getClassName(Category cat)
{
rwlock_assert_locked(&runtimeLock);
- return getName(remapClass(newcategory(cat)->cls));
+ return remapClass(cat->cls)->name();
}
_category_getClass(Category cat)
{
rwlock_read(&runtimeLock);
- class_t *result = remapClass(newcategory(cat)->cls);
- assert(isRealized(result)); // ok for call_category_loads' usage
+ Class result = remapClass(cat->cls);
+ assert(result->isRealized()); // ok for call_category_loads' usage
rwlock_unlock_read(&runtimeLock);
- return (Class)result;
+ return result;
}
const method_list_t *mlist;
uint32_t i;
- mlist = newcategory(cat)->classMethods;
+ mlist = cat->classMethods;
if (mlist) for (i = 0; i < mlist->count; i++) {
method_t *m = method_list_nth(mlist, i);
if (0 == strcmp((const char *)m->name, "load")) {
}
}
- return NULL;
+ return nil;
}
* Locking: read-locks runtimeLock
**********************************************************************/
Protocol * __unsafe_unretained *
-class_copyProtocolList(Class cls_gen, unsigned int *outCount)
+class_copyProtocolList(Class cls, unsigned int *outCount)
{
- class_t *cls = newcls(cls_gen);
Protocol **r;
const protocol_list_t **p;
unsigned int count = 0;
unsigned int i;
- Protocol **result = NULL;
+ Protocol **result = nil;
if (!cls) {
if (outCount) *outCount = 0;
- return NULL;
+ return nil;
}
rwlock_read(&runtimeLock);
- assert(isRealized(cls));
+ assert(cls->isRealized());
for (p = cls->data()->protocols; p && *p; p++) {
count += (uint32_t)(*p)->count;
*r++ = (Protocol *)remapProtocol((*p)->list[i]);
}
}
- *r++ = NULL;
+ *r++ = nil;
}
rwlock_unlock_read(&runtimeLock);
shift = 0;
for (i = 0; i < count; i++) {
- class_t *cls = remapClass(classlist[i]);
+ Class cls = remapClass(classlist[i]);
if (cls) {
- names[i-shift] = getName(cls);
+ names[i-shift] = cls->name();
} else {
shift++; // ignored weak-linked class
}
}
count -= shift;
- names[count] = NULL;
+ names[count] = nil;
rwlock_unlock_read(&runtimeLock);
}
-/***********************************************************************
-* _class_getCache
-* fixme
-* Locking: none
-**********************************************************************/
-Cache
-_class_getCache(Class cls)
-{
- return newcls(cls)->cache;
-}
-
-
-/***********************************************************************
-* _class_getInstanceSize
-* Uses alignedInstanceSize() to ensure that
-* obj + class_getInstanceSize(obj->isa) == object_getIndexedIvars(obj)
-* Locking: none
-**********************************************************************/
-size_t
-_class_getInstanceSize(Class cls)
-{
- if (!cls) return 0;
- return alignedInstanceSize(newcls(cls));
-}
-
-static uint32_t
-unalignedInstanceSize(class_t *cls)
-{
- assert(cls);
- assert(isRealized(cls));
- return (uint32_t)cls->data()->ro->instanceSize;
-}
-
-static uint32_t
-alignedInstanceSize(class_t *cls)
-{
- assert(cls);
- assert(isRealized(cls));
- // fixme rdar://5278267
- return (uint32_t)((unalignedInstanceSize(cls) + WORD_MASK) & ~WORD_MASK);
-}
-
/***********************************************************************
* _class_getInstanceStart
* Uses alignedInstanceStart() to ensure that ARR layout strings are
**********************************************************************/
static uint32_t
-alignedInstanceStart(class_t *cls)
+alignedInstanceStart(Class cls)
{
assert(cls);
- assert(isRealized(cls));
+ assert(cls->isRealized());
return (uint32_t)((cls->data()->ro->instanceStart + WORD_MASK) & ~WORD_MASK);
}
-uint32_t _class_getInstanceStart(Class cls_gen) {
- class_t *cls = newcls(cls_gen);
+uint32_t _class_getInstanceStart(Class cls) {
return alignedInstanceStart(cls);
}
class_getVersion(Class cls)
{
if (!cls) return 0;
- assert(isRealized(newcls(cls)));
- return newcls(cls)->data()->version;
-}
-
-
-/***********************************************************************
-* _class_setCache
-* fixme
-* Locking: none
-**********************************************************************/
-void
-_class_setCache(Class cls, Cache cache)
-{
- newcls(cls)->cache = cache;
+ assert(cls->isRealized());
+ return cls->data()->version;
}
class_setVersion(Class cls, int version)
{
if (!cls) return;
- assert(isRealized(newcls(cls)));
- newcls(cls)->data()->version = version;
-}
-
-
-/***********************************************************************
-* _class_getName
-* fixme
-* Locking: acquires runtimeLock
-**********************************************************************/
-const char *_class_getName(Class cls)
-{
- if (!cls) return "nil";
- // fixme hack rwlock_write(&runtimeLock);
- const char *name = getName(newcls(cls));
- // rwlock_unlock_write(&runtimeLock);
- return name;
+ assert(cls->isRealized());
+ cls->data()->version = version;
}
-/***********************************************************************
-* getName
-* fixme
-* Locking: runtimeLock must be held by the caller
-**********************************************************************/
-static const char *
-getName(class_t *cls)
-{
- // fixme hack rwlock_assert_writing(&runtimeLock);
- assert(cls);
-
- if (isRealized(cls)) {
- return cls->data()->ro->name;
- } else {
- return ((const class_ro_t *)cls->data())->name;
- }
-}
-
static method_t *findMethodInSortedMethodList(SEL key, const method_list_t *list)
{
const method_t * const first = &list->first;
}
}
- return NULL;
+ return nil;
}
/***********************************************************************
}
#endif
- return NULL;
+ return nil;
}
static method_t *
-getMethodNoSuper_nolock(class_t *cls, SEL sel)
+getMethodNoSuper_nolock(Class cls, SEL sel)
{
rwlock_assert_locked(&runtimeLock);
- assert(isRealized(cls));
+ assert(cls->isRealized());
// fixme nil cls?
- // fixme NULL sel?
+ // fixme nil sel?
FOREACH_METHOD_LIST(mlist, cls, {
method_t *m = search_method_list(mlist, sel);
if (m) return m;
});
- return NULL;
-}
-
-
-/***********************************************************************
-* _class_getMethodNoSuper
-* fixme
-* Locking: read-locks runtimeLock
-**********************************************************************/
-Method
-_class_getMethodNoSuper(Class cls, SEL sel)
-{
- rwlock_read(&runtimeLock);
- Method result = (Method)getMethodNoSuper_nolock(newcls(cls), sel);
- rwlock_unlock_read(&runtimeLock);
- return result;
-}
-
-/***********************************************************************
-* _class_getMethodNoSuper
-* For use inside lockForMethodLookup() only.
-* Locking: read-locks runtimeLock
-**********************************************************************/
-Method
-_class_getMethodNoSuper_nolock(Class cls, SEL sel)
-{
- return (Method)getMethodNoSuper_nolock(newcls(cls), sel);
+ return nil;
}
* Locking: runtimeLock must be read- or write-locked by the caller
**********************************************************************/
static method_t *
-getMethod_nolock(class_t *cls, SEL sel)
+getMethod_nolock(Class cls, SEL sel)
{
- method_t *m = NULL;
+ method_t *m = nil;
rwlock_assert_locked(&runtimeLock);
// fixme nil cls?
- // fixme NULL sel?
+ // fixme nil sel?
- assert(isRealized(cls));
+ assert(cls->isRealized());
- while (cls && ((m = getMethodNoSuper_nolock(cls, sel))) == NULL) {
- cls = getSuperclass(cls);
+ while (cls && ((m = getMethodNoSuper_nolock(cls, sel))) == nil) {
+ cls = cls->superclass;
}
return m;
* fixme
* Locking: read-locks runtimeLock
**********************************************************************/
-Method _class_getMethod(Class cls, SEL sel)
-{
- Method m;
- rwlock_read(&runtimeLock);
- m = (Method)getMethod_nolock(newcls(cls), sel);
- rwlock_unlock_read(&runtimeLock);
- return m;
-}
-
-
-/***********************************************************************
-* ABI-specific lookUpMethod helpers.
-* Locking: read- and write-locks runtimeLock.
-**********************************************************************/
-void lockForMethodLookup(void)
-{
- rwlock_read(&runtimeLock);
-}
-void unlockForMethodLookup(void)
-{
- rwlock_unlock_read(&runtimeLock);
-}
-
-IMP prepareForMethodLookup(Class cls, SEL sel, BOOL init, id obj)
-{
- rwlock_assert_unlocked(&runtimeLock);
-
- if (!isRealized(newcls(cls))) {
- rwlock_write(&runtimeLock);
- realizeClass(newcls(cls));
- rwlock_unlock_write(&runtimeLock);
- }
-
- if (init && !_class_isInitialized(cls)) {
- _class_initialize (_class_getNonMetaClass(cls, obj));
- // If sel == initialize, _class_initialize will send +initialize and
- // then the messenger will send +initialize again after this
- // procedure finishes. Of course, if this is not being called
- // from the messenger then it won't happen. 2778172
- }
-
- return NULL;
-}
-
-
-/***********************************************************************
-* class_getProperty
-* fixme
-* Locking: read-locks runtimeLock
-**********************************************************************/
-objc_property_t class_getProperty(Class cls_gen, const char *name)
-{
- property_t *result = NULL;
- chained_property_list *plist;
- class_t *cls = newcls(cls_gen);
-
- if (!cls || !name) return NULL;
-
- rwlock_read(&runtimeLock);
-
- assert(isRealized(cls));
-
- for ( ; cls; cls = getSuperclass(cls)) {
- for (plist = cls->data()->properties; plist; plist = plist->next) {
- uint32_t i;
- for (i = 0; i < plist->count; i++) {
- if (0 == strcmp(name, plist->list[i].name)) {
- result = &plist->list[i];
- goto done;
- }
- }
- }
- }
-
- done:
- rwlock_unlock_read(&runtimeLock);
-
- return (objc_property_t)result;
-}
-
-
-/***********************************************************************
-* Locking: fixme
-**********************************************************************/
-BOOL _class_isMetaClass(Class cls)
-{
- if (!cls) return NO;
- return isMetaClass(newcls(cls));
-}
-
-static BOOL
-isMetaClass(class_t *cls)
-{
- assert(cls);
- assert(isRealized(cls));
- return (cls->data()->ro->flags & RO_META) ? YES : NO;
-}
-
-class_t *getMeta(class_t *cls)
-{
- if (isMetaClass(cls)) return cls;
- else return cls->isa;
-}
-
-Class _class_getMeta(Class cls)
-{
- return (Class)getMeta(newcls(cls));
-}
-
-Class gdb_class_getClass(Class cls)
-{
- const char *className = getName(newcls(cls));
- if(!className || !strlen(className)) return Nil;
- Class rCls = look_up_class(className, NO, NO);
- return rCls;
-}
-
-Class gdb_object_getClass(id obj)
+static Method _class_getMethod(Class cls, SEL sel)
{
- Class cls = _object_getClass(obj);
- return gdb_class_getClass(cls);
-}
-
-BOOL gdb_objc_isRuntimeLocked()
-{
- if (rwlock_try_write(&runtimeLock)) {
- rwlock_unlock_write(&runtimeLock);
- } else
- return YES;
-
- if (mutex_try_lock(&cacheUpdateLock)) {
- mutex_unlock(&cacheUpdateLock);
- } else
- return YES;
-
- return NO;
+ method_t *m;
+ rwlock_read(&runtimeLock);
+ m = getMethod_nolock(cls, sel);
+ rwlock_unlock_read(&runtimeLock);
+ return m;
}
+
/***********************************************************************
-* Locking: fixme
+* class_getInstanceMethod. Return the instance method for the
+* specified class and selector.
**********************************************************************/
-BOOL
-_class_isInitializing(Class cls_gen)
+Method class_getInstanceMethod(Class cls, SEL sel)
{
- class_t *cls = newcls(_class_getMeta(cls_gen));
- return (cls->data()->flags & RW_INITIALIZING) ? YES : NO;
+ if (!cls || !sel) return nil;
+
+ // This deliberately avoids +initialize because it historically did so.
+
+ // This implementation is a bit weird because it's the only place that
+ // wants a Method instead of an IMP.
+
+#warning fixme build and search caches
+
+ // Search method lists, try method resolver, etc.
+ lookUpImpOrNil(cls, sel, nil,
+ NO/*initialize*/, NO/*cache*/, YES/*resolver*/);
+
+#warning fixme build and search caches
+
+ return _class_getMethod(cls, sel);
}
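
A brief sketch contrasting this entry point with its relatives: class_getInstanceMethod searches superclasses (via getMethod_nolock shown earlier), while class_getClassMethod performs the same lookup on the metaclass. Widget is hypothetical:

#import <objc/runtime.h>

static void lookupBoth(void)
{
    Class cls = objc_getClass("Widget");
    Method inst = class_getInstanceMethod(cls, sel_registerName("description"));
    Method meta = class_getClassMethod(cls, sel_registerName("description"));
    (void)inst; (void)meta;  // nil when the selector is not implemented anywhere
}
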
/***********************************************************************
-* Locking: fixme
+* log_and_fill_cache
+* Log this method call. If the logger permits it, fill the method cache.
+* cls is the class whose cache should be filled.
+* implementer is the class that owns the implementation in question.
**********************************************************************/
-BOOL
-_class_isInitialized(Class cls_gen)
+static void
+log_and_fill_cache(Class cls, Class implementer, IMP imp, SEL sel)
{
- class_t *cls = newcls(_class_getMeta(cls_gen));
- return (cls->data()->flags & RW_INITIALIZED) ? YES : NO;
+#if SUPPORT_MESSAGE_LOGGING
+ if (objcMsgLogEnabled) {
+ bool cacheIt = logMessageSend(implementer->isMetaClass(),
+ cls->getName(),
+ implementer->getName(),
+ sel);
+ if (!cacheIt) return;
+ }
+#endif
+ cache_fill (cls, sel, imp);
}
/***********************************************************************
-* Locking: fixme
+* _class_lookupMethodAndLoadCache.
+* Method lookup for dispatchers ONLY. OTHER CODE SHOULD USE lookUpImpOrForward().
+* This lookup avoids optimistic cache scan because the dispatcher
+* already tried that.
**********************************************************************/
-void
-_class_setInitializing(Class cls_gen)
-{
- assert(!_class_isMetaClass(cls_gen));
- class_t *cls = newcls(_class_getMeta(cls_gen));
- changeInfo(cls, RW_INITIALIZING, 0);
+IMP _class_lookupMethodAndLoadCache3(id obj, SEL sel, Class cls)
+{
+ return lookUpImpOrForward(cls, sel, obj,
+ YES/*initialize*/, NO/*cache*/, YES/*resolver*/);
}
/***********************************************************************
-* Locking: write-locks runtimeLock
+* lookUpImpOrForward.
+* The standard IMP lookup.
+* initialize==NO tries to avoid +initialize (but sometimes fails)
+* cache==NO skips optimistic unlocked lookup (but uses cache elsewhere)
+* Most callers should use initialize==YES and cache==YES.
+* inst is an instance of cls or a subclass thereof, or nil if none is known.
+* If cls is an un-initialized metaclass then a non-nil inst is faster.
+* May return _objc_msgForward_impcache. IMPs destined for external use
+* must be converted to _objc_msgForward or _objc_msgForward_stret.
+* If you don't want forwarding at all, use lookUpImpOrNil() instead.
**********************************************************************/
-void
-_class_setInitialized(Class cls_gen)
+IMP lookUpImpOrForward(Class cls, SEL sel, id inst,
+ bool initialize, bool cache, bool resolver)
{
- class_t *metacls;
- class_t *cls;
+ Class curClass;
+ IMP imp = nil;
+ Method meth;
+ bool triedResolver = NO;
- rwlock_write(&runtimeLock);
+ rwlock_assert_unlocked(&runtimeLock);
+
+ // Optimistic cache lookup
+ if (cache) {
+ imp = cache_getImp(cls, sel);
+ if (imp) return imp;
+ }
+
+ if (!cls->isRealized()) {
+ rwlock_write(&runtimeLock);
+ realizeClass(cls);
+ rwlock_unlock_write(&runtimeLock);
+ }
- assert(!_class_isMetaClass(cls_gen));
+ if (initialize && !cls->isInitialized()) {
+ _class_initialize (_class_getNonMetaClass(cls, inst));
+ // If sel == initialize, _class_initialize will send +initialize and
+ // then the messenger will send +initialize again after this
+ // procedure finishes. Of course, if this is not being called
+ // from the messenger then it won't happen. 2778172
+ }
- cls = newcls(cls_gen);
- metacls = getMeta(cls);
+ // The lock is held to make method-lookup + cache-fill atomic
+ // with respect to method addition. Otherwise, a category could
+ // be added but ignored indefinitely because the cache was re-filled
+ // with the old value after the cache flush on behalf of the category.
+ retry:
+ rwlock_read(&runtimeLock);
- // Update vtables (initially postponed pending +initialize completion)
- // Do cls first because root metacls is a subclass of root cls
- updateVtable(cls, YES);
- updateVtable(metacls, YES);
+ // Ignore GC selectors
+ if (ignoreSelector(sel)) {
+ imp = _objc_ignored_method;
+ cache_fill(cls, sel, imp);
+ goto done;
+ }
- rwlock_unlock_write(&runtimeLock);
+ // Try this class's cache.
+
+ imp = cache_getImp(cls, sel);
+ if (imp) goto done;
+
+ // Try this class's method lists.
+
+ meth = getMethodNoSuper_nolock(cls, sel);
+ if (meth) {
+ log_and_fill_cache(cls, cls, meth->imp, sel);
+ imp = meth->imp;
+ goto done;
+ }
+
+ // Try superclass caches and method lists.
+
+ curClass = cls;
+ while ((curClass = curClass->superclass)) {
+ // Superclass cache.
+ imp = cache_getImp(curClass, sel);
+ if (imp) {
+ if (imp != (IMP)_objc_msgForward_impcache) {
+ // Found the method in a superclass. Cache it in this class.
+ log_and_fill_cache(cls, curClass, imp, sel);
+ goto done;
+ }
+ else {
+ // Found a forward:: entry in a superclass.
+ // Stop searching, but don't cache yet; call method
+ // resolver for this class first.
+ break;
+ }
+ }
+
+ // Superclass method list.
+ meth = getMethodNoSuper_nolock(curClass, sel);
+ if (meth) {
+ log_and_fill_cache(cls, curClass, meth->imp, sel);
+ imp = meth->imp;
+ goto done;
+ }
+ }
+
+ // No implementation found. Try method resolver once.
+
+ if (resolver && !triedResolver) {
+ rwlock_unlock_read(&runtimeLock);
+ _class_resolveMethod(cls, sel, inst);
+ // Don't cache the result; we don't hold the lock so it may have
+ // changed already. Re-do the search from scratch instead.
+ triedResolver = YES;
+ goto retry;
+ }
+
+ // No implementation found, and method resolver didn't help.
+ // Use forwarding.
+
+ imp = (IMP)_objc_msgForward_impcache;
+ cache_fill(cls, sel, imp);
+
+ done:
+ rwlock_unlock_read(&runtimeLock);
+
+ // paranoia: look for ignored selectors with non-ignored implementations
+ assert(!(ignoreSelector(sel) && imp != (IMP)&_objc_ignored_method));
- changeInfo(metacls, RW_INITIALIZED, RW_INITIALIZING);
+ // paranoia: never let uncached leak out
+ assert(imp != _objc_msgSend_uncached_impcache);
+
+ return imp;
}
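
A sketch of the public-facing counterparts of this lookup: class_respondsToSelector answers yes or no, while class_getMethodImplementation returns a callable IMP that, for unimplemented selectors, may be a forwarding entry point (the externalized form of the _objc_msgForward behavior described above):

#import <objc/runtime.h>

static void probeSelector(Class cls, SEL sel)
{
    if (!cls || !sel) return;
    BOOL implemented = class_respondsToSelector(cls, sel);   // plain yes/no
    IMP imp = class_getMethodImplementation(cls, sel);       // possibly forwarding
    (void)implemented; (void)imp;
}
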
/***********************************************************************
-* Locking: fixme
+* lookUpImpOrNil.
+* Like lookUpImpOrForward, but returns nil instead of _objc_msgForward_impcache
**********************************************************************/
-BOOL
-_class_shouldGrowCache(Class cls)
+IMP lookUpImpOrNil(Class cls, SEL sel, id inst,
+ bool initialize, bool cache, bool resolver)
{
- return YES; // fixme good or bad for memory use?
+ IMP imp = lookUpImpOrForward(cls, sel, inst, initialize, cache, resolver);
+ if (imp == _objc_msgForward_impcache) return nil;
+ else return imp;
}
/***********************************************************************
-* Locking: fixme
+* lookupMethodInClassAndLoadCache.
+* Like _class_lookupMethodAndLoadCache, but does not search superclasses.
+* Caches and returns objc_msgForward if the method is not found in the class.
**********************************************************************/
-void
-_class_setGrowCache(Class cls, BOOL grow)
+IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel)
{
- // fixme good or bad for memory use?
+ Method meth;
+ IMP imp;
+
+ // fixme this is incomplete - no resolver, +initialize, GC -
+ // but it's only used for .cxx_construct/destruct so we don't care
+ assert(sel == SEL_cxx_construct || sel == SEL_cxx_destruct);
+
+ // Search cache first.
+ imp = cache_getImp(cls, sel);
+ if (imp) return imp;
+
+ // Cache miss. Search method list.
+
+ rwlock_read(&runtimeLock);
+
+ meth = getMethodNoSuper_nolock(cls, sel);
+
+ if (meth) {
+ // Hit in method list. Cache it.
+ cache_fill(cls, sel, meth->imp);
+ rwlock_unlock_read(&runtimeLock);
+ return meth->imp;
+ } else {
+ // Miss in method list. Cache objc_msgForward.
+ cache_fill(cls, sel, _objc_msgForward_impcache);
+ rwlock_unlock_read(&runtimeLock);
+ return _objc_msgForward_impcache;
+ }
}
/***********************************************************************
-* _class_isLoadable
+* class_getProperty
* fixme
-* Locking: none
+* Locking: read-locks runtimeLock
**********************************************************************/
-BOOL
-_class_isLoadable(Class cls)
+objc_property_t class_getProperty(Class cls, const char *name)
{
- assert(isRealized(newcls(cls)));
- return YES; // any class registered for +load is definitely loadable
-}
+ property_t *result = nil;
+ chained_property_list *plist;
+ if (!cls || !name) return nil;
-/***********************************************************************
-* Locking: fixme
-**********************************************************************/
-static BOOL
-hasCxxStructors(class_t *cls)
-{
- // this DOES check superclasses too, because addSubclass()
- // propagates the flag from the superclass.
- assert(isRealized(cls));
- return (cls->data()->flags & RW_HAS_CXX_STRUCTORS) ? YES : NO;
-}
+ rwlock_read(&runtimeLock);
-BOOL
-_class_hasCxxStructors(Class cls)
-{
- return hasCxxStructors(newcls(cls));
-}
+ assert(cls->isRealized());
+ for ( ; cls; cls = cls->superclass) {
+ for (plist = cls->data()->properties; plist; plist = plist->next) {
+ uint32_t i;
+ for (i = 0; i < plist->count; i++) {
+ if (0 == strcmp(name, plist->list[i].name)) {
+ result = &plist->list[i];
+ goto done;
+ }
+ }
+ }
+ }
-/***********************************************************************
-* Locking: fixme
-**********************************************************************/
-BOOL
-_class_shouldFinalizeOnMainThread(Class cls)
-{
- assert(isRealized(newcls(cls)));
- return (newcls(cls)->data()->flags & RW_FINALIZE_ON_MAIN_THREAD) ? YES : NO;
+ done:
+ rwlock_unlock_read(&runtimeLock);
+
+ return (objc_property_t)result;
}
/***********************************************************************
* Locking: fixme
**********************************************************************/
-void
-_class_setFinalizeOnMainThread(Class cls)
+
+Class gdb_class_getClass(Class cls)
{
- assert(isRealized(newcls(cls)));
- changeInfo(newcls(cls), RW_FINALIZE_ON_MAIN_THREAD, 0);
+ const char *className = cls->name();
+ if(!className || !strlen(className)) return Nil;
+ Class rCls = look_up_class(className, NO, NO);
+ return rCls;
}
-
-/***********************************************************************
-* _class_instancesHaveAssociatedObjects
-* May manipulate unrealized future classes in the CF-bridged case.
-**********************************************************************/
-BOOL
-_class_instancesHaveAssociatedObjects(Class cls_gen)
+Class gdb_object_getClass(id obj)
{
- class_t *cls = newcls(cls_gen);
- assert(isFuture(cls) || isRealized(cls));
- return (cls->data()->flags & RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS) ? YES : NO;
+ if (!obj) return nil;
+ return gdb_class_getClass(obj->getIsa());
}
/***********************************************************************
-* _class_setInstancesHaveAssociatedObjects
-* May manipulate unrealized future classes in the CF-bridged case.
+* Locking: write-locks runtimeLock
**********************************************************************/
-void
-_class_setInstancesHaveAssociatedObjects(Class cls_gen)
+void
+objc_class::setInitialized()
{
- class_t *cls = newcls(cls_gen);
- assert(isFuture(cls) || isRealized(cls));
- changeInfo(cls, RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS, 0);
+ Class metacls;
+
+ assert(!isMetaClass());
+
+ metacls = this->ISA();
+
+ metacls->changeInfo(RW_INITIALIZED, RW_INITIALIZING);
}
* _class_usesAutomaticRetainRelease
* Returns YES if class was compiled with -fobjc-arc
**********************************************************************/
-BOOL _class_usesAutomaticRetainRelease(Class cls_gen)
+BOOL _class_usesAutomaticRetainRelease(Class cls)
{
- class_t *cls = newcls(cls_gen);
return (cls->data()->ro->flags & RO_IS_ARR) ? YES : NO;
}
/***********************************************************************
-* Return YES if sel is used by allocWithZone implementors
+* Return YES if sel is used by alloc or allocWithZone implementors
**********************************************************************/
static bool isAWZSelector(SEL sel)
{
- return (sel == SEL_allocWithZone);
+ return (sel == SEL_allocWithZone || sel == SEL_alloc);
}
* Mark this class and all of its subclasses as implementors or
* inheritors of custom RR (retain/release/autorelease/retainCount)
**********************************************************************/
-void class_t::setHasCustomRR(bool inherited)
+void objc_class::setHasCustomRR(bool inherited)
{
rwlock_assert_writing(&runtimeLock);
if (hasCustomRR()) return;
- FOREACH_REALIZED_CLASS_AND_SUBCLASS(c, this, {
+ FOREACH_REALIZED_CLASS_AND_SUBCLASS(c, (Class)this, {
if (PrintCustomRR && !c->hasCustomRR()) {
- _objc_inform("CUSTOM RR: %s%s%s", getName(c),
- isMetaClass(c) ? " (meta)" : "",
- (inherited || c != this) ? " (inherited)" : "");
+ _objc_inform("CUSTOM RR: %s%s%s", c->name(),
+ c->isMetaClass() ? " (meta)" : "",
+ (inherited || c != (Class)this) ? " (inherited)" : "");
}
#if CLASS_FAST_FLAGS_VIA_RW_DATA
- c->data_NEVER_USE |= (uintptr_t)1;
+ c->data_NEVER_USE |= CLASS_FAST_FLAG_HAS_CUSTOM_RR;
#else
c->data()->flags |= RW_HAS_CUSTOM_RR;
#endif
/***********************************************************************
* Mark this class and all of its subclasses as implementors or
-* inheritors of custom allocWithZone:
+* inheritors of custom alloc/allocWithZone:
**********************************************************************/
-void class_t::setHasCustomAWZ(bool inherited )
+void objc_class::setHasCustomAWZ(bool inherited )
{
rwlock_assert_writing(&runtimeLock);
if (hasCustomAWZ()) return;
- FOREACH_REALIZED_CLASS_AND_SUBCLASS(c, this, {
+ FOREACH_REALIZED_CLASS_AND_SUBCLASS(c, (Class)this, {
if (PrintCustomAWZ && !c->hasCustomAWZ()) {
- _objc_inform("CUSTOM AWZ: %s%s%s", getName(c),
- isMetaClass(c) ? " (meta)" : "",
- (inherited || c != this) ? " (inherited)" : "");
+ _objc_inform("CUSTOM AWZ: %s%s%s", c->name(),
+ c->isMetaClass() ? " (meta)" : "",
+ (inherited || c != (Class)this) ? " (inherited)" : "");
}
-#if CLASS_FAST_FLAGS_VIA_RW_DATA
- c->data_NEVER_USE |= (uintptr_t)2;
-#else
c->data()->flags |= RW_HAS_CUSTOM_AWZ;
-#endif
});
}
* Update custom RR and AWZ when a method changes its IMP
**********************************************************************/
static void
-updateCustomRR_AWZ(class_t *cls, method_t *meth)
+updateCustomRR_AWZ(Class cls, method_t *meth)
{
// In almost all cases, IMP swizzling does not affect custom RR/AWZ bits.
// The class is already marked for custom RR/AWZ, so changing the IMP
} else {
// Don't know the class.
// The only special case is metaclass NSObject.
- FOREACH_METHOD_LIST(mlist, classNSObject()->isa, {
+ FOREACH_METHOD_LIST(mlist, classNSObject()->ISA(), {
for (uint32_t i = 0; i < mlist->count; i++) {
if (meth == method_list_nth(mlist, i)) {
// Yep, they're swizzling metaclass NSObject.
- classNSObject()->isa->setHasCustomRR();
+ classNSObject()->ISA()->setHasCustomAWZ();
return;
}
}
}
}
-/***********************************************************************
-* Locking: none
-* fixme assert realized to get superclass remapping?
-**********************************************************************/
-Class
-_class_getSuperclass(Class cls)
-{
- return (Class)getSuperclass(newcls(cls));
-}
-
-static class_t *
-getSuperclass(class_t *cls)
-{
- if (!cls) return NULL;
- return cls->superclass;
-}
-
/***********************************************************************
* class_getIvarLayout
* Called by the garbage collector.
-* The class must be NULL or already realized.
+* The class must be nil or already realized.
* Locking: none
**********************************************************************/
const uint8_t *
-class_getIvarLayout(Class cls_gen)
+class_getIvarLayout(Class cls)
{
- class_t *cls = newcls(cls_gen);
if (cls) return cls->data()->ro->ivarLayout;
- else return NULL;
+ else return nil;
}
/***********************************************************************
* class_getWeakIvarLayout
* Called by the garbage collector.
-* The class must be NULL or already realized.
+* The class must be nil or already realized.
* Locking: none
**********************************************************************/
const uint8_t *
-class_getWeakIvarLayout(Class cls_gen)
+class_getWeakIvarLayout(Class cls)
{
- class_t *cls = newcls(cls_gen);
if (cls) return cls->data()->ro->weakIvarLayout;
- else return NULL;
+ else return nil;
}
/***********************************************************************
* class_setIvarLayout
* Changes the class's GC scan layout.
-* NULL layout means no unscanned ivars
+* nil layout means no unscanned ivars
* The class must be under construction.
* fixme: sanity-check layout vs instance size?
* fixme: sanity-check layout vs superclass?
* Locking: acquires runtimeLock
**********************************************************************/
void
-class_setIvarLayout(Class cls_gen, const uint8_t *layout)
+class_setIvarLayout(Class cls, const uint8_t *layout)
{
- class_t *cls = newcls(cls_gen);
if (!cls) return;
rwlock_write(&runtimeLock);
// allowed, there would be a race below (us vs. concurrent GC scan)
if (!(cls->data()->flags & RW_CONSTRUCTING)) {
_objc_inform("*** Can't set ivar layout for already-registered "
- "class '%s'", getName(cls));
+ "class '%s'", cls->name());
rwlock_unlock_write(&runtimeLock);
return;
}
// SPI: Instance-specific object layout.
void
-_class_setIvarLayoutAccessor(Class cls_gen, const uint8_t* (*accessor) (id object)) {
- class_t *cls = newcls(cls_gen);
+_class_setIvarLayoutAccessor(Class cls, const uint8_t* (*accessor) (id object)) {
if (!cls) return;
rwlock_write(&runtimeLock);
// FIXME: this really isn't safe to free if there are instances of this class already.
if (!(cls->data()->flags & RW_HAS_INSTANCE_SPECIFIC_LAYOUT)) try_free(ro_w->ivarLayout);
ro_w->ivarLayout = (uint8_t *)accessor;
- changeInfo(cls, RW_HAS_INSTANCE_SPECIFIC_LAYOUT, 0);
+ cls->setInfo(RW_HAS_INSTANCE_SPECIFIC_LAYOUT);
rwlock_unlock_write(&runtimeLock);
}
const uint8_t *
-_object_getIvarLayout(Class cls_gen, id object) {
- class_t *cls = newcls(cls_gen);
+_object_getIvarLayout(Class cls, id object)
+{
if (cls) {
const uint8_t* layout = cls->data()->ro->ivarLayout;
if (cls->data()->flags & RW_HAS_INSTANCE_SPECIFIC_LAYOUT) {
}
return layout;
}
- return NULL;
+ return nil;
}
/***********************************************************************
* class_setWeakIvarLayout
* Changes the class's GC weak layout.
-* NULL layout means no weak ivars
+* nil layout means no weak ivars
* The class must be under construction.
* fixme: sanity-check layout vs instance size?
* fixme: sanity-check layout vs superclass?
* Locking: acquires runtimeLock
**********************************************************************/
void
-class_setWeakIvarLayout(Class cls_gen, const uint8_t *layout)
+class_setWeakIvarLayout(Class cls, const uint8_t *layout)
{
- class_t *cls = newcls(cls_gen);
if (!cls) return;
rwlock_write(&runtimeLock);
// allowed, there would be a race below (us vs. concurrent GC scan)
if (!(cls->data()->flags & RW_CONSTRUCTING)) {
_objc_inform("*** Can't set weak ivar layout for already-registered "
- "class '%s'", getName(cls));
+ "class '%s'", cls->name());
rwlock_unlock_write(&runtimeLock);
return;
}
{
rwlock_read(&runtimeLock);
- for ( ; cls != Nil; cls = class_getSuperclass(cls)) {
- ivar_t *ivar = getIvar(newcls(cls), name);
+ for ( ; cls; cls = cls->superclass) {
+ ivar_t *ivar = getIvar(cls, name);
if (ivar) {
rwlock_unlock_read(&runtimeLock);
if (memberOf) *memberOf = cls;
- return (Ivar)ivar;
+ return ivar;
}
}
rwlock_unlock_read(&runtimeLock);
- return NULL;
+ return nil;
}
* fixme
* Locking: read-locks runtimeLock
**********************************************************************/
-BOOL class_conformsToProtocol(Class cls_gen, Protocol *proto_gen)
+BOOL class_conformsToProtocol(Class cls, Protocol *proto_gen)
{
- class_t *cls = newcls(cls_gen);
protocol_t *proto = newprotocol(proto_gen);
const protocol_list_t **plist;
unsigned int i;
BOOL result = NO;
- if (!cls_gen) return NO;
+ if (!cls) return NO;
if (!proto_gen) return NO;
rwlock_read(&runtimeLock);
- assert(isRealized(cls));
+ assert(cls->isRealized());
for (plist = cls->data()->protocols; plist && *plist; plist++) {
for (i = 0; i < (*plist)->count; i++) {
protocol_t *p = remapProtocol((*plist)->list[i]);
- if (p == proto || _protocol_conformsToProtocol_nolock(p, proto)) {
+ if (p == proto || protocol_conformsToProtocol_nolock(p, proto)) {
result = YES;
goto done;
}
}
-/***********************************************************************
+/**********************************************************************
* addMethod
* fixme
* Locking: runtimeLock must be held by the caller
**********************************************************************/
static IMP
-addMethod(class_t *cls, SEL name, IMP imp, const char *types, BOOL replace)
+addMethod(Class cls, SEL name, IMP imp, const char *types, BOOL replace)
{
- IMP result = NULL;
+ IMP result = nil;
rwlock_assert_writing(&runtimeLock);
assert(types);
- assert(isRealized(cls));
+ assert(cls->isRealized());
method_t *m;
if ((m = getMethodNoSuper_nolock(cls, name))) {
newlist->first.imp = (IMP)&_objc_ignored_method;
}
- BOOL vtablesAffected = NO;
- attachMethodLists(cls, &newlist, 1, NO, NO, &vtablesAffected);
- flushCaches(cls);
- if (vtablesAffected) flushVtables(cls);
+ attachMethodLists(cls, &newlist, 1, NO, NO, YES);
- result = NULL;
+ result = nil;
}
return result;
if (!cls) return NO;
rwlock_write(&runtimeLock);
- IMP old = addMethod(newcls(cls), name, imp, types ?: "", NO);
+ IMP old = addMethod(cls, name, imp, types ?: "", NO);
rwlock_unlock_write(&runtimeLock);
return old ? NO : YES;
}
IMP
class_replaceMethod(Class cls, SEL name, IMP imp, const char *types)
{
- if (!cls) return NULL;
+ if (!cls) return nil;
rwlock_write(&runtimeLock);
- IMP old = addMethod(newcls(cls), name, imp, types ?: "", YES);
+ IMP old = addMethod(cls, name, imp, types ?: "", YES);
rwlock_unlock_write(&runtimeLock);
return old;
}
* Locking: acquires runtimeLock
**********************************************************************/
BOOL
-class_addIvar(Class cls_gen, const char *name, size_t size,
+class_addIvar(Class cls, const char *name, size_t size,
uint8_t alignment, const char *type)
{
- class_t *cls = newcls(cls_gen);
-
if (!cls) return NO;
if (!type) type = "";
- if (name && 0 == strcmp(name, "")) name = NULL;
+ if (name && 0 == strcmp(name, "")) name = nil;
rwlock_write(&runtimeLock);
- assert(isRealized(cls));
+ assert(cls->isRealized());
// No class variables
- if (isMetaClass(cls)) {
+ if (cls->isMetaClass()) {
rwlock_unlock_write(&runtimeLock);
return NO;
}
newlist->entsize = (uint32_t)sizeof(ivar_t);
}
- uint32_t offset = unalignedInstanceSize(cls);
+ uint32_t offset = cls->unalignedInstanceSize();
uint32_t alignMask = (1<<alignment)-1;
offset = (offset + alignMask) & ~alignMask;
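// Worked example: alignment == 3 gives alignMask == 7, so an unaligned
// offset of 13 becomes (13 + 7) & ~7 == 16, the next multiple of
// 2^alignment (here 8) at or above the previous instance size.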
ivar_t *ivar = ivar_list_nth(newlist, newlist->count++);
- ivar->offset = (uintptr_t *)_malloc_internal(sizeof(*ivar->offset));
+#if __x86_64__
+ // Deliberately over-allocate the ivar offset variable.
+ // Use calloc() to clear all 64 bits. See the note in struct ivar_t.
+ ivar->offset = (int32_t *)(int64_t *)_calloc_internal(sizeof(int64_t), 1);
+#else
+ ivar->offset = (int32_t *)_malloc_internal(sizeof(int32_t));
+#endif
*ivar->offset = offset;
- ivar->name = name ? _strdup_internal(name) : NULL;
+ ivar->name = name ? _strdup_internal(name) : nil;
ivar->type = _strdup_internal(type);
- ivar->alignment = alignment;
+ ivar->alignment_raw = alignment;
ivar->size = (uint32_t)size;
ro_w->ivars = newlist;
* Adds a protocol to a class.
* Locking: acquires runtimeLock
**********************************************************************/
-BOOL class_addProtocol(Class cls_gen, Protocol *protocol_gen)
+BOOL class_addProtocol(Class cls, Protocol *protocol_gen)
{
- class_t *cls = newcls(cls_gen);
protocol_t *protocol = newprotocol(protocol_gen);
protocol_list_t *plist;
const protocol_list_t **plistp;
if (!cls) return NO;
- if (class_conformsToProtocol(cls_gen, protocol_gen)) return NO;
+ if (class_conformsToProtocol(cls, protocol_gen)) return NO;
rwlock_write(&runtimeLock);
- assert(isRealized(cls));
+ assert(cls->isRealized());
// fixme optimize
plist = (protocol_list_t *)
_realloc_internal(cls->data()->protocols,
(count+2) * sizeof(protocol_list_t *));
cls->data()->protocols[count] = plist;
- cls->data()->protocols[count+1] = NULL;
+ cls->data()->protocols[count+1] = nil;
// fixme metaclass?
* Locking: acquires runtimeLock
**********************************************************************/
static BOOL
-_class_addProperty(Class cls_gen, const char *name,
+_class_addProperty(Class cls, const char *name,
const objc_property_attribute_t *attrs, unsigned int count,
BOOL replace)
{
- class_t *cls = newcls(cls_gen);
chained_property_list *plist;
if (!cls) return NO;
if (!name) return NO;
- property_t *prop = class_getProperty(cls_gen, name);
+ property_t *prop = class_getProperty(cls, name);
if (prop && !replace) {
// already exists, refuse to replace
return NO;
else {
rwlock_write(&runtimeLock);
- assert(isRealized(cls));
+ assert(cls->isRealized());
plist = (chained_property_list *)
_malloc_internal(sizeof(*plist) + sizeof(plist->list[0]));
}
BOOL
-class_addProperty(Class cls_gen, const char *name,
+class_addProperty(Class cls, const char *name,
const objc_property_attribute_t *attrs, unsigned int n)
{
- return _class_addProperty(cls_gen, name, attrs, n, NO);
+ return _class_addProperty(cls, name, attrs, n, NO);
}
void
-class_replaceProperty(Class cls_gen, const char *name,
+class_replaceProperty(Class cls, const char *name,
const objc_property_attribute_t *attrs, unsigned int n)
{
- _class_addProperty(cls_gen, name, attrs, n, YES);
+ _class_addProperty(cls, name, attrs, n, YES);
}
* Look up a class by name, and realize it.
* Locking: acquires runtimeLock
**********************************************************************/
-id
+Class
look_up_class(const char *name,
BOOL includeUnconnected __attribute__((unused)),
BOOL includeClassHandler __attribute__((unused)))
if (!name) return nil;
rwlock_read(&runtimeLock);
- class_t *result = getClass(name);
- BOOL unrealized = result && !isRealized(result);
+ Class result = getClass(name);
+ BOOL unrealized = result && !result->isRealized();
rwlock_unlock_read(&runtimeLock);
if (unrealized) {
rwlock_write(&runtimeLock);
realizeClass(result);
rwlock_unlock_write(&runtimeLock);
}
- return (id)result;
+ return result;
}
* Locking: acquires runtimeLock
**********************************************************************/
Class
-objc_duplicateClass(Class original_gen, const char *name,
+objc_duplicateClass(Class original, const char *name,
size_t extraBytes)
{
- class_t *original = newcls(original_gen);
- class_t *duplicate;
+ Class duplicate;
rwlock_write(&runtimeLock);
- assert(isRealized(original));
- assert(!isMetaClass(original));
+ assert(original->isRealized());
+ assert(!original->isMetaClass());
- duplicate = (class_t *)
- _calloc_class(alignedInstanceSize(original->isa) + extraBytes);
- if (unalignedInstanceSize(original->isa) < sizeof(class_t)) {
+ duplicate = _calloc_class(original->ISA()->alignedInstanceSize()+extraBytes);
+ if (original->ISA()->unalignedInstanceSize() < sizeof(objc_class)) {
_objc_inform("busted! %s\n", original->data()->ro->name);
}
- duplicate->isa = original->isa;
+ duplicate->initIsa(original->ISA());
duplicate->superclass = original->superclass;
- duplicate->cache = (Cache)&_objc_empty_cache;
- duplicate->vtable = &_objc_empty_vtable;
+
+ duplicate->cache.buckets = (bucket_t *)&_objc_empty_cache;
+ // cache.shiftmask and cache.occupied are already zero
duplicate->setData((class_rw_t *)_calloc_internal(sizeof(*original->data()), 1));
- duplicate->data()->flags = (original->data()->flags | RW_COPIED_RO) & ~RW_SPECIALIZED_VTABLE;
+ duplicate->data()->flags = (original->data()->flags | RW_COPIED_RO);
duplicate->data()->version = original->data()->version;
- duplicate->data()->firstSubclass = NULL;
- duplicate->data()->nextSiblingClass = NULL;
+ duplicate->data()->firstSubclass = nil;
+ duplicate->data()->nextSiblingClass = nil;
duplicate->data()->ro = (class_ro_t *)
_memdup_internal(original->data()->ro, sizeof(*original->data()->ro));
addNamedClass(duplicate, duplicate->data()->ro->name);
addRealizedClass(duplicate);
- // no: duplicate->isa == original->isa
- // addRealizedMetaclass(duplicate->isa);
+ // no: duplicate->ISA == original->ISA
+ // addRealizedMetaclass(duplicate->ISA);
if (PrintConnecting) {
_objc_inform("CLASS: realizing class '%s' (duplicate of %s) %p %p",
name, original->data()->ro->name,
- duplicate, duplicate->data()->ro);
+ (void*)duplicate, duplicate->data()->ro);
}
rwlock_unlock_write(&runtimeLock);
- return (Class)duplicate;
+ return duplicate;
}
/***********************************************************************
// &UnsetLayout is the default ivar layout during class construction
static const uint8_t UnsetLayout = 0;
-static void objc_initializeClassPair_internal(Class superclass_gen, const char *name, Class cls_gen, Class meta_gen)
+static void objc_initializeClassPair_internal(Class superclass, const char *name, Class cls, Class meta)
{
rwlock_assert_writing(&runtimeLock);
- class_t *superclass = newcls(superclass_gen);
- class_t *cls = newcls(cls_gen);
- class_t *meta = newcls(meta_gen);
class_ro_t *cls_ro_w, *meta_ro_w;
+
+ cls->cache.buckets = (bucket_t *)&_objc_empty_cache;
+ meta->cache.buckets = (bucket_t *)&_objc_empty_cache;
+ // cache.shiftmask and cache.occupied are already zero
cls->setData((class_rw_t *)_calloc_internal(sizeof(class_rw_t), 1));
meta->setData((class_rw_t *)_calloc_internal(sizeof(class_rw_t), 1));
meta->data()->ro = meta_ro_w;
// Set basic info
- cls->cache = (Cache)&_objc_empty_cache;
- meta->cache = (Cache)&_objc_empty_cache;
- cls->vtable = &_objc_empty_vtable;
- meta->vtable = &_objc_empty_vtable;
cls->data()->flags = RW_CONSTRUCTING | RW_COPIED_RO | RW_REALIZED;
meta->data()->flags = RW_CONSTRUCTING | RW_COPIED_RO | RW_REALIZED;
meta_ro_w->flags |= RO_ROOT;
}
if (superclass) {
- cls_ro_w->instanceStart = unalignedInstanceSize(superclass);
- meta_ro_w->instanceStart = unalignedInstanceSize(superclass->isa);
+ cls_ro_w->instanceStart = superclass->unalignedInstanceSize();
+ meta_ro_w->instanceStart = superclass->ISA()->unalignedInstanceSize();
cls_ro_w->instanceSize = cls_ro_w->instanceStart;
meta_ro_w->instanceSize = meta_ro_w->instanceStart;
} else {
cls_ro_w->instanceStart = 0;
- meta_ro_w->instanceStart = (uint32_t)sizeof(class_t);
+ meta_ro_w->instanceStart = (uint32_t)sizeof(objc_class);
cls_ro_w->instanceSize = (uint32_t)sizeof(id); // just an isa
meta_ro_w->instanceSize = meta_ro_w->instanceStart;
}
cls_ro_w->weakIvarLayout = &UnsetLayout;
// Connect to superclasses and metaclasses
- cls->isa = meta;
+ cls->initIsa(meta);
if (superclass) {
- meta->isa = superclass->isa->isa;
+ meta->initIsa(superclass->ISA()->ISA());
cls->superclass = superclass;
- meta->superclass = superclass->isa;
+ meta->superclass = superclass->ISA();
addSubclass(superclass, cls);
- addSubclass(superclass->isa, meta);
+ addSubclass(superclass->ISA(), meta);
} else {
- meta->isa = meta;
+ meta->initIsa(meta);
cls->superclass = Nil;
meta->superclass = cls;
addSubclass(cls, meta);
/***********************************************************************
* objc_initializeClassPair
**********************************************************************/
-Class objc_initializeClassPair(Class superclass_gen, const char *name, Class cls_gen, Class meta_gen)
+Class objc_initializeClassPair(Class superclass, const char *name, Class cls, Class meta)
{
- class_t *superclass = newcls(superclass_gen);
-
rwlock_write(&runtimeLock);
//
}
// fixme reserve class against simultaneous allocation
- if (superclass) assert(isRealized(superclass));
+ if (superclass) assert(superclass->isRealized());
if (superclass && superclass->data()->flags & RW_CONSTRUCTING) {
// Can't make subclass of an in-construction class
// just initialize what was supplied
- objc_initializeClassPair_internal(superclass_gen, name, cls_gen, meta_gen);
+ objc_initializeClassPair_internal(superclass, name, cls, meta);
rwlock_unlock_write(&runtimeLock);
- return cls_gen;
+ return cls;
}
/***********************************************************************
* fixme
* Locking: acquires runtimeLock
**********************************************************************/
-Class objc_allocateClassPair(Class superclass_gen, const char *name,
+Class objc_allocateClassPair(Class superclass, const char *name,
size_t extraBytes)
{
- class_t *superclass = newcls(superclass_gen);
Class cls, meta;
rwlock_write(&runtimeLock);
}
// fixme reserve class against simultaneous allocation
- if (superclass) assert(isRealized(superclass));
+ if (superclass) assert(superclass->isRealized());
if (superclass && superclass->data()->flags & RW_CONSTRUCTING) {
// Can't make subclass of an in-construction class
// Allocate new classes.
- size_t size = sizeof(class_t);
- size_t metasize = sizeof(class_t);
+ size_t size = sizeof(objc_class);
+ size_t metasize = sizeof(objc_class);
if (superclass) {
- size = alignedInstanceSize(superclass->isa);
- metasize = alignedInstanceSize(superclass->isa->isa);
+ size = superclass->ISA()->alignedInstanceSize();
+ metasize = superclass->ISA()->ISA()->alignedInstanceSize();
}
cls = _calloc_class(size + extraBytes);
meta = _calloc_class(metasize + extraBytes);
- objc_initializeClassPair_internal(superclass_gen, name, cls, meta);
+ objc_initializeClassPair_internal(superclass, name, cls, meta);
rwlock_unlock_write(&runtimeLock);
- return (Class)cls;
+ return cls;
}
* fixme
* Locking: acquires runtimeLock
**********************************************************************/
-void objc_registerClassPair(Class cls_gen)
+void objc_registerClassPair(Class cls)
{
- class_t *cls = newcls(cls_gen);
-
rwlock_write(&runtimeLock);
if ((cls->data()->flags & RW_CONSTRUCTED) ||
- (cls->isa->data()->flags & RW_CONSTRUCTED))
+ (cls->ISA()->data()->flags & RW_CONSTRUCTED))
{
_objc_inform("objc_registerClassPair: class '%s' was already "
"registered!", cls->data()->ro->name);
}
if (!(cls->data()->flags & RW_CONSTRUCTING) ||
- !(cls->isa->data()->flags & RW_CONSTRUCTING))
+ !(cls->ISA()->data()->flags & RW_CONSTRUCTING))
{
_objc_inform("objc_registerClassPair: class '%s' was not "
"allocated with objc_allocateClassPair!",
// Build ivar layouts
if (UseGC) {
- class_t *supercls = getSuperclass(cls);
+ Class supercls = cls->superclass;
class_ro_t *ro_w = (class_ro_t *)cls->data()->ro;
if (ro_w->ivarLayout != &UnsetLayout) {
}
else if (!supercls) {
// Root class. Scan conservatively (should be isa ivar only).
- ro_w->ivarLayout = NULL;
+ ro_w->ivarLayout = nil;
}
- else if (ro_w->ivars == NULL) {
+ else if (ro_w->ivars == nil) {
// No local ivars. Use superclass's layouts.
ro_w->ivarLayout =
_ustrdup_internal(supercls->data()->ro->ivarLayout);
// Has local ivars. Build layouts based on superclass.
layout_bitmap bitmap =
layout_bitmap_create(supercls->data()->ro->ivarLayout,
- unalignedInstanceSize(supercls),
- unalignedInstanceSize(cls), NO);
+ supercls->unalignedInstanceSize(),
+ cls->unalignedInstanceSize(), NO);
uint32_t i;
for (i = 0; i < ro_w->ivars->count; i++) {
ivar_t *ivar = ivar_list_nth(ro_w->ivars, i);
}
else if (!supercls) {
// Root class. No weak ivars (should be isa ivar only).
- ro_w->weakIvarLayout = NULL;
+ ro_w->weakIvarLayout = nil;
}
- else if (ro_w->ivars == NULL) {
+ else if (ro_w->ivars == nil) {
// No local ivars. Use superclass's layout.
ro_w->weakIvarLayout =
_ustrdup_internal(supercls->data()->ro->weakIvarLayout);
// Clear "under construction" bit, set "done constructing" bit
cls->data()->flags &= ~RW_CONSTRUCTING;
- cls->isa->data()->flags &= ~RW_CONSTRUCTING;
+ cls->ISA()->data()->flags &= ~RW_CONSTRUCTING;
cls->data()->flags |= RW_CONSTRUCTED;
- cls->isa->data()->flags |= RW_CONSTRUCTED;
+ cls->ISA()->data()->flags |= RW_CONSTRUCTED;
// Add to named and realized classes
addNamedClass(cls, cls->data()->ro->name);
addRealizedClass(cls);
- addRealizedMetaclass(cls->isa);
+ addRealizedMetaclass(cls->ISA());
addNonMetaClass(cls);
rwlock_unlock_write(&runtimeLock);
* Call this before free_class.
* Locking: runtimeLock must be held by the caller.
**********************************************************************/
-static void detach_class(class_t *cls, BOOL isMeta)
+static void detach_class(Class cls, BOOL isMeta)
{
rwlock_assert_writing(&runtimeLock);
if (cats) free(cats);
// superclass's subclass list
- if (isRealized(cls)) {
- class_t *supercls = getSuperclass(cls);
+ if (cls->isRealized()) {
+ Class supercls = cls->superclass;
if (supercls) {
removeSubclass(supercls, cls);
}
// class tables and +load queue
if (!isMeta) {
- removeNamedClass(cls, getName(cls));
+ removeNamedClass(cls, cls->name());
removeRealizedClass(cls);
removeNonMetaClass(cls);
} else {
* Call this after detach_class.
* Locking: runtimeLock must be held by the caller
**********************************************************************/
-static void free_class(class_t *cls)
+static void free_class(Class cls)
{
rwlock_assert_writing(&runtimeLock);
- if (! isRealized(cls)) return;
+ if (! cls->isRealized()) return;
uint32_t i;
- // Dereferences the cache contents; do this before freeing methods
- if (cls->cache != (Cache)&_objc_empty_cache) _cache_free(cls->cache);
+ if (cls->cache.buckets != (bucket_t *)&_objc_empty_cache) {
+ free(cls->cache.buckets);
+ }
FOREACH_METHOD_LIST(mlist, cls, {
for (i = 0; i < mlist->count; i++) {
}
}
- if (cls->vtable != &_objc_empty_vtable &&
- cls->data()->flags & RW_SPECIALIZED_VTABLE) try_free(cls->vtable);
try_free(cls->data()->ro->ivarLayout);
try_free(cls->data()->ro->weakIvarLayout);
try_free(cls->data()->ro->name);
}
-void objc_disposeClassPair(Class cls_gen)
+void objc_disposeClassPair(Class cls)
{
- class_t *cls = newcls(cls_gen);
-
rwlock_write(&runtimeLock);
if (!(cls->data()->flags & (RW_CONSTRUCTED|RW_CONSTRUCTING)) ||
- !(cls->isa->data()->flags & (RW_CONSTRUCTED|RW_CONSTRUCTING)))
+ !(cls->ISA()->data()->flags & (RW_CONSTRUCTED|RW_CONSTRUCTING)))
{
// class not allocated with objc_allocateClassPair
// disposing still-unregistered class is OK!
return;
}
- if (isMetaClass(cls)) {
+ if (cls->isMetaClass()) {
_objc_inform("objc_disposeClassPair: class '%s' is a metaclass, "
"not a class!", cls->data()->ro->name);
rwlock_unlock_write(&runtimeLock);
if (cls->data()->firstSubclass) {
_objc_inform("objc_disposeClassPair: class '%s' still has subclasses, "
"including '%s'!", cls->data()->ro->name,
- getName(cls->data()->firstSubclass));
+ cls->data()->firstSubclass->name());
}
- if (cls->isa->data()->firstSubclass) {
+ if (cls->ISA()->data()->firstSubclass) {
_objc_inform("objc_disposeClassPair: class '%s' still has subclasses, "
"including '%s'!", cls->data()->ro->name,
- getName(cls->isa->data()->firstSubclass));
+ cls->ISA()->data()->firstSubclass->name());
}
// don't remove_class_from_loadable_list()
// - it's not there and we don't have the lock
- detach_class(cls->isa, YES);
+ detach_class(cls->ISA(), YES);
detach_class(cls, NO);
- free_class(cls->isa);
+ free_class(cls->ISA());
free_class(cls);
rwlock_unlock_write(&runtimeLock);
{
if (!cls) return nil;
- assert(isRealized(newcls(cls)));
+ assert(cls->isRealized());
- size_t size = alignedInstanceSize(newcls(cls)) + extraBytes;
+ size_t size = cls->alignedInstanceSize() + extraBytes;
// CF requires all object be at least 16 bytes.
if (size < 16) size = 16;
}
if (!obj) return nil;
- obj->isa = cls; // need not be object_setClass
+ obj->initIsa(cls);
- if (_class_hasCxxStructors(cls)) {
+ if (cls->hasCxxCtor()) {
obj = _objc_constructOrFree(cls, obj);
}
id
class_createInstance(Class cls, size_t extraBytes)
{
- return _class_createInstanceFromZone(cls, extraBytes, NULL);
+ return _class_createInstanceFromZone(cls, extraBytes, nil);
}
/***********************************************************************
class_createInstances(Class cls, size_t extraBytes,
id *results, unsigned num_requested)
{
- return _class_createInstancesFromZone(cls, extraBytes, NULL,
+ return _class_createInstancesFromZone(cls, extraBytes, nil,
results, num_requested);
}
static BOOL classOrSuperClassesUseARR(Class cls) {
while (cls) {
if (_class_usesAutomaticRetainRelease(cls)) return true;
- cls = class_getSuperclass(cls);
+ cls = cls->superclass;
}
return false;
}
static void arr_fixup_copied_references(id newObject, id oldObject)
{
// use ARR layouts to correctly copy the references from old object to new, both strong and weak.
- Class cls = oldObject->isa;
- while (cls) {
+ Class cls = oldObject->ISA();
+ for ( ; cls; cls = cls->superclass) {
if (_class_usesAutomaticRetainRelease(cls)) {
// FIXME: align the instance start to nearest id boundary. This currently handles the case where
// the compiler folds a leading BOOL (char, short, etc.) into the alignment slop of a superclass.
}
}
}
- cls = class_getSuperclass(cls);
}
}
size_t size;
if (!oldObj) return nil;
- if (OBJC_IS_TAGGED_PTR(oldObj)) return oldObj;
+ if (oldObj->isTaggedPointer()) return oldObj;
- size = _class_getInstanceSize(oldObj->isa) + extraBytes;
+ size = oldObj->ISA()->alignedInstanceSize() + extraBytes;
#if SUPPORT_GC
if (UseGC) {
obj = (id) auto_zone_allocate_object(gc_zone, size,
#if SUPPORT_GC
if (UseGC)
gc_fixup_weakreferences(obj, oldObj);
- else if (classOrSuperClassesUseARR(obj->isa))
+ else if (classOrSuperClassesUseARR(obj->ISA()))
arr_fixup_copied_references(obj, oldObj);
#else
- if (classOrSuperClassesUseARR(obj->isa))
+ if (classOrSuperClassesUseARR(obj->ISA()))
arr_fixup_copied_references(obj, oldObj);
#endif
void *objc_destructInstance(id obj)
{
if (obj) {
- Class isa_gen = _object_getClass(obj);
- class_t *isa = newcls(isa_gen);
+ Class cls = obj->getIsa();
// Read all of the flags at once for performance.
- bool cxx = hasCxxStructors(isa);
- bool assoc = !UseGC && _class_instancesHaveAssociatedObjects(isa_gen);
+ bool cxx = cls->hasCxxDtor();
+ bool assoc = !UseGC && cls->instancesHaveAssociatedObjects();
// This order is important.
if (cxx) object_cxxDestruct(obj);
return nil;
}
-#if SUPPORT_FIXUP
-OBJC_EXTERN id objc_msgSend_fixedup(id, SEL, ...);
-OBJC_EXTERN id objc_msgSendSuper2_fixedup(id, SEL, ...);
-OBJC_EXTERN id objc_msgSend_stret_fixedup(id, SEL, ...);
-OBJC_EXTERN id objc_msgSendSuper2_stret_fixedup(id, SEL, ...);
-#if defined(__i386__) || defined(__x86_64__)
-OBJC_EXTERN id objc_msgSend_fpret_fixedup(id, SEL, ...);
-#endif
-#if defined(__x86_64__)
-OBJC_EXTERN id objc_msgSend_fp2ret_fixedup(id, SEL, ...);
-#endif
/***********************************************************************
-* _objc_fixupMessageRef
-* Fixes up message ref *msg.
-* obj is the receiver. supr is NULL for non-super messages
-* Locking: acquires runtimeLock
+* Tagged pointer objects.
+*
+* Tagged pointer objects store the class and the object value in the
+* object pointer; the "pointer" does not actually point to anything.
+*
+* Tagged pointer objects currently use this representation:
+* (LSB)
+* 1 bit set if tagged, clear if ordinary object pointer
+* 3 bits tag index
+* 60 bits payload
+* (MSB)
+* The tag index defines the object's class.
+* The payload format is defined by the object's class.
+*
+* This representation is subject to change. Representation-agnostic SPI is:
+* objc-internal.h for class implementers.
+* objc-gdb.h for debuggers.
**********************************************************************/
-OBJC_EXTERN IMP
-_objc_fixupMessageRef(id obj, struct objc_super2 *supr, message_ref_t *msg)
+#if !SUPPORT_TAGGED_POINTERS
+
+// These variables are always provided for debuggers.
+uintptr_t objc_debug_taggedpointer_mask = 0;
+unsigned objc_debug_taggedpointer_slot_shift = 0;
+uintptr_t objc_debug_taggedpointer_slot_mask = 0;
+unsigned objc_debug_taggedpointer_payload_lshift = 0;
+unsigned objc_debug_taggedpointer_payload_rshift = 0;
+Class objc_debug_taggedpointer_classes[1] = { nil };
+
+static void
+disableTaggedPointers() { }
+
+#else
+
+// The "slot" used in the class table and given to the debugger
+// includes the is-tagged bit. This makes objc_msgSend faster.
+
+uintptr_t objc_debug_taggedpointer_mask = TAG_MASK;
+unsigned objc_debug_taggedpointer_slot_shift = TAG_SLOT_SHIFT;
+uintptr_t objc_debug_taggedpointer_slot_mask = TAG_SLOT_MASK;
+unsigned objc_debug_taggedpointer_payload_lshift = TAG_PAYLOAD_LSHIFT;
+unsigned objc_debug_taggedpointer_payload_rshift = TAG_PAYLOAD_RSHIFT;
+// objc_debug_taggedpointer_classes is defined in objc-msg-*.s
+
+static void
+disableTaggedPointers()
{
- IMP imp;
- class_t *isa;
-#if SUPPORT_VTABLE
- int vtableIndex;
+ objc_debug_taggedpointer_mask = 0;
+ objc_debug_taggedpointer_slot_shift = 0;
+ objc_debug_taggedpointer_slot_mask = 0;
+ objc_debug_taggedpointer_payload_lshift = 0;
+ objc_debug_taggedpointer_payload_rshift = 0;
+}
+
+static int
+tagSlotForTagIndex(objc_tag_index_t tag)
+{
+#if TAG_MASK == 1
+ return (tag << 1) | 1;
+#else
+# error unimplemented
#endif
+}
- rwlock_assert_unlocked(&runtimeLock);
- if (!supr) {
- // normal message - search obj->isa for the method implementation
- isa = (class_t *) _object_getClass(obj);
-
- if (!isRealized(isa)) {
- // obj is a class object, isa is its metaclass
- class_t *cls;
- rwlock_write(&runtimeLock);
- cls = realizeClass((class_t *)obj);
- rwlock_unlock_write(&runtimeLock);
-
- // shouldn't have instances of unrealized classes!
- assert(isMetaClass(isa));
- // shouldn't be relocating classes here!
- assert(cls == (class_t *)obj);
- }
+/***********************************************************************
+* _objc_registerTaggedPointerClass
+* Set the class to use for the given tagged pointer index.
+* Aborts if the tag is out of range, or if the tag is already
+* used by some other class.
+**********************************************************************/
+void
+_objc_registerTaggedPointerClass(objc_tag_index_t tag, Class cls)
+{
+ if (objc_debug_taggedpointer_mask == 0) {
+ _objc_fatal("tagged pointers are disabled");
}
- else {
- // this is objc_msgSend_super, and supr->current_class->superclass
- // is the class to search for the method implementation
- assert(isRealized((class_t *)supr->current_class));
- isa = getSuperclass((class_t *)supr->current_class);
+
+ if ((unsigned int)tag >= TAG_COUNT) {
+ _objc_fatal("tag index %u is too large.", tag);
+ }
+
+ int slot = tagSlotForTagIndex(tag);
+ Class oldCls = objc_tag_classes[slot];
+
+ if (cls && oldCls && cls != oldCls) {
+ _objc_fatal("tag index %u used for two different classes "
+ "(was %p %s, now %p %s)", tag,
+ oldCls, class_getName(oldCls), cls, class_getName(cls));
}
+ objc_tag_classes[slot] = cls;
+}
+
+
+// Deprecated name.
+void _objc_insert_tagged_isa(unsigned char slotNumber, Class isa)
+{
+ return _objc_registerTaggedPointerClass((objc_tag_index_t)slotNumber, isa);
+}
+
+
+/***********************************************************************
+* _objc_getClassForTag
+* Returns the class that is using the given tagged pointer tag.
+* Returns nil if no class is using that tag or the tag is out of range.
+**********************************************************************/
+Class
+_objc_getClassForTag(objc_tag_index_t tag)
+{
+ if ((unsigned int)tag >= TAG_COUNT) return nil;
+ return objc_tag_classes[tagSlotForTagIndex(tag)];
+}
+
+#endif
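/* Illustrative sketch (not part of this patch): decoding the layout described
 * in the comment above by hand (bit 0 = tagged flag, bits 1-3 = tag index,
 * remaining 60 bits = payload). The real runtime goes through TAG_MASK,
 * TAG_SLOT_* and objc_tag_classes, and the representation is explicitly
 * subject to change, so treat the demo_* helpers as documentation only. */
#include <cstdint>
#include <cstdio>

static bool      demo_isTaggedPointer(uintptr_t ptr) { return (ptr & 1) != 0; }
static unsigned  demo_tagIndex(uintptr_t ptr) { return (unsigned)((ptr >> 1) & 0x7); }
static uintptr_t demo_payload(uintptr_t ptr)  { return ptr >> 4; }

int main()
{
    // Hypothetical tagged value: tag index 5, payload 42.
    uintptr_t ptr = (uintptr_t(42) << 4) | (uintptr_t(5) << 1) | 1;
    if (demo_isTaggedPointer(ptr)) {
        std::printf("tag index %u, payload %lu\n",
                    demo_tagIndex(ptr), (unsigned long)demo_payload(ptr));
    }
    return 0;
}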
+
+
+#if SUPPORT_FIXUP
+
+OBJC_EXTERN void objc_msgSend_fixedup(void);
+OBJC_EXTERN void objc_msgSendSuper2_fixedup(void);
+OBJC_EXTERN void objc_msgSend_stret_fixedup(void);
+OBJC_EXTERN void objc_msgSendSuper2_stret_fixedup(void);
+#if defined(__i386__) || defined(__x86_64__)
+OBJC_EXTERN void objc_msgSend_fpret_fixedup(void);
+#endif
+#if defined(__x86_64__)
+OBJC_EXTERN void objc_msgSend_fp2ret_fixedup(void);
+#endif
+
+/***********************************************************************
+* fixupMessageRef
+* Repairs an old vtable dispatch call site.
+* vtable dispatch itself is not supported.
+**********************************************************************/
+static void
+fixupMessageRef(message_ref_t *msg)
+{
msg->sel = sel_registerName((const char *)msg->sel);
if (ignoreSelector(msg->sel)) {
// ignored selector - bypass dispatcher
- msg->imp = (IMP)&vtable_ignored;
- imp = (IMP)&_objc_ignored_method;
- }
-#if SUPPORT_VTABLE
- else if (msg->imp == (IMP)&objc_msgSend_fixup &&
- (vtableIndex = vtable_getIndex(msg->sel)) >= 0)
- {
- // vtable dispatch
- msg->imp = vtableTrampolines[vtableIndex];
- imp = isa->vtable[vtableIndex];
+ msg->imp = (IMP)&_objc_ignored_method;
}
-#endif
- else {
- // ordinary dispatch
- imp = lookUpMethod((Class)isa, msg->sel, YES/*initialize*/, YES/*cache*/, obj);
-
- if (msg->imp == (IMP)&objc_msgSend_fixup) {
- msg->imp = (IMP)&objc_msgSend_fixedup;
- }
- else if (msg->imp == (IMP)&objc_msgSendSuper2_fixup) {
- msg->imp = (IMP)&objc_msgSendSuper2_fixedup;
- }
- else if (msg->imp == (IMP)&objc_msgSend_stret_fixup) {
- msg->imp = (IMP)&objc_msgSend_stret_fixedup;
- }
- else if (msg->imp == (IMP)&objc_msgSendSuper2_stret_fixup) {
- msg->imp = (IMP)&objc_msgSendSuper2_stret_fixedup;
- }
+ else if (msg->imp == &objc_msgSend_fixup) {
+ msg->imp = &objc_msgSend_fixedup;
+ }
+ else if (msg->imp == &objc_msgSendSuper2_fixup) {
+ msg->imp = &objc_msgSendSuper2_fixedup;
+ }
+ else if (msg->imp == &objc_msgSend_stret_fixup) {
+ msg->imp = &objc_msgSend_stret_fixedup;
+ }
+ else if (msg->imp == &objc_msgSendSuper2_stret_fixup) {
+ msg->imp = &objc_msgSendSuper2_stret_fixedup;
+ }
#if defined(__i386__) || defined(__x86_64__)
- else if (msg->imp == (IMP)&objc_msgSend_fpret_fixup) {
- msg->imp = (IMP)&objc_msgSend_fpret_fixedup;
- }
+ else if (msg->imp == &objc_msgSend_fpret_fixup) {
+ msg->imp = &objc_msgSend_fpret_fixedup;
+ }
#endif
#if defined(__x86_64__)
- else if (msg->imp == (IMP)&objc_msgSend_fp2ret_fixup) {
- msg->imp = (IMP)&objc_msgSend_fp2ret_fixedup;
- }
+ else if (msg->imp == &objc_msgSend_fp2ret_fixup) {
+ msg->imp = &objc_msgSend_fp2ret_fixedup;
+ }
#endif
- else {
- // The ref may already have been fixed up, either by another thread
- // or by +initialize via lookUpMethod above.
- }
- }
-
- return imp;
}
// SUPPORT_FIXUP
// ProKit SPI
-static class_t *setSuperclass(class_t *cls, class_t *newSuper)
+static Class setSuperclass(Class cls, Class newSuper)
{
- class_t *oldSuper;
+ Class oldSuper;
rwlock_assert_writing(&runtimeLock);
- assert(isRealized(cls));
- assert(isRealized(newSuper));
+ assert(cls->isRealized());
+ assert(newSuper->isRealized());
oldSuper = cls->superclass;
removeSubclass(oldSuper, cls);
- removeSubclass(oldSuper->isa, cls->isa);
+ removeSubclass(oldSuper->ISA(), cls->ISA());
cls->superclass = newSuper;
- cls->isa->superclass = newSuper->isa;
+ cls->ISA()->superclass = newSuper->ISA();
addSubclass(newSuper, cls);
- addSubclass(newSuper->isa, cls->isa);
+ addSubclass(newSuper->ISA(), cls->ISA());
- flushCaches(cls->isa);
- flushVtables(cls->isa);
- flushCaches(cls);
- flushVtables(cls);
+ // Flush subclass's method caches.
+ // If subclass is not yet +initialized then its cache will be empty.
+ // Otherwise this is very slow for sel-side caches.
+ if (cls->isInitialized() || cls->ISA()->isInitialized()) {
+ flushCaches(cls);
+ }
return oldSuper;
}
-Class class_setSuperclass(Class cls_gen, Class newSuper_gen)
+Class class_setSuperclass(Class cls, Class newSuper)
{
- class_t *cls = newcls(cls_gen);
- class_t *newSuper = newcls(newSuper_gen);
- class_t *oldSuper;
+ Class oldSuper;
rwlock_write(&runtimeLock);
oldSuper = setSuperclass(cls, newSuper);
rwlock_unlock_write(&runtimeLock);
- return (Class)oldSuper;
+ return oldSuper;
}
#endif
#define _OBJC_RUNTIME_OLD_H
#include "objc-private.h"
-#include "objc-file-old.h"
+#define CLS_CLASS 0x1
+#define CLS_META 0x2
+#define CLS_INITIALIZED 0x4
+#define CLS_POSING 0x8
+#define CLS_MAPPED 0x10
+#define CLS_FLUSH_CACHE 0x20
+#define CLS_GROW_CACHE 0x40
+#define CLS_NEED_BIND 0x80
+#define CLS_METHOD_ARRAY 0x100
+// the JavaBridge constructs classes with these markers
+#define CLS_JAVA_HYBRID 0x200
+#define CLS_JAVA_CLASS 0x400
+// thread-safe +initialize
+#define CLS_INITIALIZING 0x800
+// bundle unloading
+#define CLS_FROM_BUNDLE 0x1000
+// C++ ivar support
+#define CLS_HAS_CXX_STRUCTORS 0x2000
+// Lazy method list arrays
+#define CLS_NO_METHOD_ARRAY 0x4000
+// +load implementation
+#define CLS_HAS_LOAD_METHOD 0x8000
+// objc_allocateClassPair API
+#define CLS_CONSTRUCTING 0x10000
+// visibility=hidden
+#define CLS_HIDDEN 0x20000
+// GC: class has unsafe finalize method
+#define CLS_FINALIZE_ON_MAIN_THREAD 0x40000
+// Lazy property list arrays
+#define CLS_NO_PROPERTY_ARRAY 0x80000
+// +load implementation
+#define CLS_CONNECTED 0x100000
+#define CLS_LOADED 0x200000
+// objc_allocateClassPair API
+#define CLS_CONSTRUCTED 0x400000
+// class is leaf for cache flushing
+#define CLS_LEAF 0x800000
+// class instances may have associative references
+#define CLS_INSTANCES_HAVE_ASSOCIATED_OBJECTS 0x1000000
+// class has instance-specific GC layout
+#define CLS_HAS_INSTANCE_SPECIFIC_LAYOUT 0x2000000
+
+
+// Terminator for array of method lists
+#define END_OF_METHODS_LIST ((struct old_method_list*)-1)
-struct old_class {
- struct old_class *isa;
- struct old_class *super_class;
+#define ISCLASS(cls) (((cls)->info & CLS_CLASS) != 0)
+#define ISMETA(cls) (((cls)->info & CLS_META) != 0)
+#define GETMETA(cls) (ISMETA(cls) ? (cls) : (cls)->ISA())
+
+
+struct objc_class : objc_object {
+ Class superclass;
const char *name;
- long version;
- long info;
- long instance_size;
+ uint32_t version;
+ uint32_t info;
+ uint32_t instance_size;
struct old_ivar_list *ivars;
struct old_method_list **methodLists;
Cache cache;
// CLS_EXT only
const uint8_t *ivar_layout;
struct old_class_ext *ext;
+
+ void setInfo(uint32_t set) {
+ OSAtomicOr32Barrier(set, (volatile uint32_t *)&info);
+ }
+
+ void clearInfo(uint32_t clear) {
+ OSAtomicXor32Barrier(clear, (volatile uint32_t *)&info);
+ }
+
+
+ // set and clear must not overlap
+ void changeInfo(uint32_t set, uint32_t clear) {
+ assert((set & clear) == 0);
+
+ uint32_t oldf, newf;
+ do {
+ oldf = this->info;
+ newf = (oldf | set) & ~clear;
+ } while (!OSAtomicCompareAndSwap32Barrier(oldf, newf, (volatile int32_t *)&info));
+ }
+
+ bool hasCxxCtor() {
+ // set_superclass propagates the flag from the superclass.
+ return info & CLS_HAS_CXX_STRUCTORS;
+ }
+
+ bool hasCxxDtor() {
+ return hasCxxCtor(); // one bit for both ctor and dtor
+ }
+
+ bool instancesHaveAssociatedObjects() {
+ return info & CLS_INSTANCES_HAVE_ASSOCIATED_OBJECTS;
+ }
+
+ void setInstancesHaveAssociatedObjects() {
+ setInfo(CLS_INSTANCES_HAVE_ASSOCIATED_OBJECTS);
+ }
+
+ bool shouldGrowCache() {
+ return info & CLS_GROW_CACHE;
+ }
+
+ void setShouldGrowCache(bool grow) {
+ if (grow) setInfo(CLS_GROW_CACHE);
+ else clearInfo(CLS_GROW_CACHE);
+ }
+
+ bool shouldFinalizeOnMainThread() {
+ return info & CLS_FINALIZE_ON_MAIN_THREAD;
+ }
+
+ void setShouldFinalizeOnMainThread() {
+ setInfo(CLS_FINALIZE_ON_MAIN_THREAD);
+ }
+
+ // +initialize bits are stored on the metaclass only
+ bool isInitializing() {
+ return getMeta()->info & CLS_INITIALIZING;
+ }
+
+ // +initialize bits are stored on the metaclass only
+ void setInitializing() {
+ getMeta()->setInfo(CLS_INITIALIZING);
+ }
+
+ // +initialize bits are stored on the metaclass only
+ bool isInitialized() {
+ return getMeta()->info & CLS_INITIALIZED;
+ }
+
+ // +initialize bits are stored on the metaclass only
+ void setInitialized() {
+ getMeta()->changeInfo(CLS_INITIALIZED, CLS_INITIALIZING);
+ }
+
+ bool isLoadable() {
+ // A class registered for +load is ready for +load to be called
+ // if it is connected.
+ return isConnected();
+ }
+
+ IMP getLoadMethod();
+
+ bool isConnected();
+
+ const char *getName() { return name; }
+
+ bool isMetaClass() {
+ return info & CLS_META;
+ }
+
+ // NOT identical to this->ISA() when this is a metaclass
+ Class getMeta() {
+ if (isMetaClass()) return (Class)this;
+ else return this->ISA();
+ }
+
+ // May be unaligned depending on class's ivars.
+ uint32_t unalignedInstanceSize() {
+ return instance_size;
+ }
+
+ // Class's ivar size rounded up to a pointer-size boundary.
+ uint32_t alignedInstanceSize() {
+ return (unalignedInstanceSize() + WORD_MASK) & ~WORD_MASK;
+ }
+
};
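/* Illustrative sketch (not part of this patch): changeInfo() above is a
 * standard compare-and-swap loop over a flags word. The same idea is shown
 * here with std::atomic instead of the OSAtomic*Barrier calls the runtime
 * actually uses; DemoFlags is a hypothetical stand-in. */
#include <atomic>
#include <cassert>
#include <cstdint>

struct DemoFlags {
    std::atomic<uint32_t> info{0};

    // Atomically set `set` bits and clear `clear` bits; they must not overlap.
    void changeInfo(uint32_t set, uint32_t clear) {
        assert((set & clear) == 0);
        uint32_t oldf = info.load(std::memory_order_relaxed);
        uint32_t newf;
        do {
            newf = (oldf | set) & ~clear;
        } while (!info.compare_exchange_weak(oldf, newf,
                                             std::memory_order_acq_rel));
    }
};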
struct old_class_ext {
};
struct old_method_list {
- struct old_method_list *obsolete;
+ void *obsolete;
int method_count;
#ifdef __LP64__
};
-#define CLS_CLASS 0x1
-#define CLS_META 0x2
-#define CLS_INITIALIZED 0x4
-#define CLS_POSING 0x8
-#define CLS_MAPPED 0x10
-#define CLS_FLUSH_CACHE 0x20
-#define CLS_GROW_CACHE 0x40
-#define CLS_NEED_BIND 0x80
-#define CLS_METHOD_ARRAY 0x100
-// the JavaBridge constructs classes with these markers
-#define CLS_JAVA_HYBRID 0x200
-#define CLS_JAVA_CLASS 0x400
-// thread-safe +initialize
-#define CLS_INITIALIZING 0x800
-// bundle unloading
-#define CLS_FROM_BUNDLE 0x1000
-// C++ ivar support
-#define CLS_HAS_CXX_STRUCTORS 0x2000
-// Lazy method list arrays
-#define CLS_NO_METHOD_ARRAY 0x4000
-// +load implementation
-#define CLS_HAS_LOAD_METHOD 0x8000
-// objc_allocateClassPair API
-#define CLS_CONSTRUCTING 0x10000
-// visibility=hidden
-#define CLS_HIDDEN 0x20000
-// GC: class has unsafe finalize method
-#define CLS_FINALIZE_ON_MAIN_THREAD 0x40000
-// Lazy property list arrays
-#define CLS_NO_PROPERTY_ARRAY 0x80000
-// +load implementation
-#define CLS_CONNECTED 0x100000
-#define CLS_LOADED 0x200000
-// objc_allocateClassPair API
-#define CLS_CONSTRUCTED 0x400000
-// class is leaf for cache flushing
-#define CLS_LEAF 0x800000
-// class instances may have associative references
-#define CLS_INSTANCES_HAVE_ASSOCIATED_OBJECTS 0x1000000
-// class has instance-specific GC layout
-#define CLS_HAS_INSTANCE_SPECIFIC_LAYOUT 0x2000000
-
-
-// Terminator for array of method lists
-#define END_OF_METHODS_LIST ((struct old_method_list*)-1)
-
-#define ISCLASS(cls) (((cls)->info & CLS_CLASS) != 0)
-#define ISMETA(cls) (((cls)->info & CLS_META) != 0)
-#define GETMETA(cls) (ISMETA(cls) ? (cls) : (cls)->isa)
-
+#include "hashtable2.h"
__BEGIN_DECLS
-#define oldcls(cls) ((struct old_class *)cls)
#define oldprotocol(proto) ((struct old_protocol *)proto)
#define oldmethod(meth) ((struct old_method *)meth)
#define oldcategory(cat) ((struct old_category *)cat)
#define oldivar(ivar) ((struct old_ivar *)ivar)
#define oldproperty(prop) ((struct old_property *)prop)
-extern void unload_class(struct old_class *cls);
+extern NXHashTable *class_hash;
+
+extern void unload_class(Class cls);
-extern Class objc_getOrigClass (const char *name);
extern IMP lookupNamedMethodInMethodList(struct old_method_list *mlist, const char *meth_name);
-extern void _objc_insertMethods(struct old_class *cls, struct old_method_list *mlist, struct old_category *cat);
-extern void _objc_removeMethods(struct old_class *cls, struct old_method_list *mlist);
+extern void _objc_insertMethods(Class cls, struct old_method_list *mlist, struct old_category *cat);
+extern void _objc_removeMethods(Class cls, struct old_method_list *mlist);
extern void _objc_flush_caches (Class cls);
-extern BOOL _class_addProperties(struct old_class *cls, struct old_property_list *additions);
-extern void change_class_references(struct old_class *imposter, struct old_class *original, struct old_class *copy, BOOL changeSuperRefs);
+extern BOOL _class_addProperties(Class cls, struct old_property_list *additions);
+extern BOOL _class_hasLoadMethod(Class cls);
+extern void change_class_references(Class imposter, Class original, Class copy, BOOL changeSuperRefs);
extern void flush_marked_caches(void);
-extern void set_superclass(struct old_class *cls, struct old_class *supercls, BOOL cls_is_new);
+extern void set_superclass(Class cls, Class supercls, BOOL cls_is_new);
extern void try_free(const void *p);
extern struct old_property *property_list_nth(const struct old_property_list *plist, uint32_t i);
extern struct old_property **copyPropertyList(struct old_property_list *plist, unsigned int *outCount);
-extern void _class_setInfo(Class cls, long set);
-extern void _class_clearInfo(Class cls, long clear);
-extern void _class_changeInfo(Class cls, long set, long clear);
-
+extern struct objc_method_description * lookup_protocol_method(struct old_protocol *proto, SEL aSel, BOOL isRequiredMethod, BOOL isInstanceMethod, BOOL recursive);
// used by flush_caches outside objc-cache.m
extern void _cache_flush(Class cls);
+++ /dev/null
-/*
- * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-
-/***********************************************************************
-* objc-runtime-old.m
-* Support for old-ABI classes and images.
-**********************************************************************/
-
-/***********************************************************************
- * Class loading and connecting (GrP 2004-2-11)
- *
- * When images are loaded (during program startup or otherwise), the
- * runtime needs to load classes and categories from the images, connect
- * classes to superclasses and categories to parent classes, and call
- * +load methods.
- *
- * The Objective-C runtime can cope with classes arriving in any order.
- * That is, a class may be discovered by the runtime before some
- * superclass is known. To handle out-of-order class loads, the
- * runtime uses a "pending class" system.
- *
- * (Historical note)
- * Panther and earlier: many classes arrived out-of-order because of
- * the poorly-ordered callback from dyld. However, the runtime's
- * pending mechanism only handled "missing superclass" and not
- * "present superclass but missing higher class". See Radar #3225652.
- * Tiger: The runtime's pending mechanism was augmented to handle
- * arbitrary missing classes. In addition, dyld was rewritten and
- * now sends the callbacks in strictly bottom-up link order.
- * The pending mechanism may now be needed only for rare and
- * hard to construct programs.
- * (End historical note)
- *
- * A class when first seen in an image is considered "unconnected".
- * It is stored in `unconnected_class_hash`. If all of the class's
- * superclasses exist and are already "connected", then the new class
- * can be connected to its superclasses and moved to `class_hash` for
- * normal use. Otherwise, the class waits in `unconnected_class_hash`
- * until the superclasses finish connecting.
- *
- * A "connected" class is
- * (1) in `class_hash`,
- * (2) connected to its superclasses,
- * (3) has no unconnected superclasses,
- * (4) is otherwise initialized and ready for use, and
- * (5) is eligible for +load if +load has not already been called.
- *
- * An "unconnected" class is
- * (1) in `unconnected_class_hash`,
- * (2) not connected to its superclasses,
- * (3) has an immediate superclass which is either missing or unconnected,
- * (4) is not ready for use, and
- * (5) is not yet eligible for +load.
- *
- * Image mapping is NOT CURRENTLY THREAD-SAFE with respect to just about
- * anything. Image mapping IS RE-ENTRANT in several places: superclass
- * lookup may cause ZeroLink to load another image, and +load calls may
- * cause dyld to load another image.
- *
- * Image mapping sequence:
- *
- * Read all classes in all new images.
- * Add them all to unconnected_class_hash.
- * Note any +load implementations before categories are attached.
- * Attach any pending categories.
- * Read all categories in all new images.
- * Attach categories whose parent class exists (connected or not),
- * and pend the rest.
- * Mark them all eligible for +load (if implemented), even if the
- * parent class is missing.
- * Try to connect all classes in all new images.
- * If the superclass is missing, pend the class
- * If the superclass is unconnected, try to recursively connect it
- * If the superclass is connected:
- * connect the class
- * mark the class eligible for +load, if implemented
- * fix up any pended classrefs referring to the class
- * connect any pended subclasses of the class
- * Resolve selector refs and class refs in all new images.
- * Class refs whose classes still do not exist are pended.
- * Fix up protocol objects in all new images.
- * Call +load for classes and categories.
- * May include classes or categories that are not in these images,
- * but are newly eligible because of these images.
- * Class +loads will be called superclass-first because of the
- * superclass-first nature of the connecting process.
- * Category +load needs to be deferred until the parent class is
- * connected and has had its +load called.
- *
- * Performance: all classes are read before any categories are read.
- * Fewer categories need be pended for lack of a parent class.
- *
- * Performance: all categories are attempted to be attached before
- * any classes are connected. Fewer class caches need be flushed.
- * (Unconnected classes and their respective subclasses are guaranteed
- * to be un-messageable, so their caches will be empty.)
- *
- * Performance: all classes are read before any classes are connected.
- * Fewer classes need be pended for lack of a superclass.
- *
- * Correctness: all selector and class refs are fixed before any
- * protocol fixups or +load methods. libobjc itself contains selector
- * and class refs which are used in protocol fixup and +load.
- *
- * Correctness: +load methods are scheduled in bottom-up link order.
- * This constraint is in addition to superclass order. Some +load
- * implementations expect to use another class in a linked-to library,
- * even if the two classes don't share a direct superclass relationship.
- *
- * Correctness: all classes are scanned for +load before any categories
- * are attached. Otherwise, if a category implements +load and its class
- * has no class methods, the class's +load scan would find the category's
- * +load method, which would then be called twice.
- *
- * Correctness: pended class refs are not fixed up until the class is
- * connected. Classes with missing weak superclasses remain unconnected.
- * Class refs to classes with missing weak superclasses must be NULL.
- * Therefore class refs to unconnected classes must remain un-fixed.
- *
- **********************************************************************/
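/* Illustrative sketch (not part of this patch): the "pending class" scheme the
 * comment above describes, reduced to its core. A class whose superclass is
 * not yet connected is parked on a pending list keyed by the superclass name;
 * when that superclass connects, its waiting subclasses connect recursively.
 * All names and containers here are hypothetical stand-ins for the
 * NXMapTable/NXHashTable bookkeeping the old runtime used. */
#include <map>
#include <string>
#include <vector>

struct DemoClass {
    std::string name;
    std::string superName;   // empty for a root class
    bool connected = false;
};

static std::map<std::string, DemoClass*> connectedByName;
static std::map<std::string, std::vector<DemoClass*>> pendingBySuper;

static void connectClass(DemoClass *cls)
{
    cls->connected = true;
    connectedByName[cls->name] = cls;
    // Anything that was waiting on this class may now connect as well.
    auto it = pendingBySuper.find(cls->name);
    if (it != pendingBySuper.end()) {
        std::vector<DemoClass*> waiters = std::move(it->second);
        pendingBySuper.erase(it);
        for (DemoClass *sub : waiters) connectClass(sub);
    }
}

// Called as each class is discovered in an image, in any order.
static void noteClass(DemoClass *cls)
{
    if (cls->superName.empty() || connectedByName.count(cls->superName)) {
        connectClass(cls);
    } else {
        pendingBySuper[cls->superName].push_back(cls);
    }
}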
-
-#if !__OBJC2__
-
-#include "objc-private.h"
-#include "objc-runtime-old.h"
-#include "objc-loadmethod.h"
-
-
-typedef struct _objc_unresolved_category
-{
- struct _objc_unresolved_category *next;
- struct old_category *cat; // may be NULL
- long version;
-} _objc_unresolved_category;
-
-typedef struct _PendingSubclass
-{
- struct old_class *subclass; // subclass to finish connecting; may be NULL
- struct _PendingSubclass *next;
-} PendingSubclass;
-
-typedef struct _PendingClassRef
-{
- struct old_class **ref; // class reference to fix up; may be NULL
- // (ref & 1) is a metaclass reference
- struct _PendingClassRef *next;
-} PendingClassRef;
-
-
-static uintptr_t classHash(void *info, Class data);
-static int classIsEqual(void *info, Class name, Class cls);
-static int _objc_defaultClassHandler(const char *clsName);
-static BOOL class_is_connected(struct old_class *cls);
-static inline NXMapTable *pendingClassRefsMapTable(void);
-static inline NXMapTable *pendingSubclassesMapTable(void);
-static void pendClassInstallation(struct old_class *cls, const char *superName);
-static void pendClassReference(struct old_class **ref, const char *className, BOOL isMeta);
-static void resolve_references_to_class(struct old_class *cls);
-static void resolve_subclasses_of_class(struct old_class *cls);
-static void really_connect_class(struct old_class *cls, struct old_class *supercls);
-static BOOL connect_class(struct old_class *cls);
-static void map_method_descs (struct objc_method_description_list * methods, BOOL copy);
-static void _objcTweakMethodListPointerForClass(struct old_class *cls);
-static inline void _objc_add_category(struct old_class *cls, struct old_category *category, int version);
-static BOOL _objc_add_category_flush_caches(struct old_class *cls, struct old_category *category, int version);
-static _objc_unresolved_category *reverse_cat(_objc_unresolved_category *cat);
-static void resolve_categories_for_class(struct old_class *cls);
-static BOOL _objc_register_category(struct old_category *cat, int version);
-
-
-// Function called when a class is loaded from an image
-void (*callbackFunction)(Class, Category) = 0;
-
-// Hash table of classes
-NXHashTable * class_hash = 0;
-static NXHashTablePrototype classHashPrototype =
-{
- (uintptr_t (*) (const void *, const void *)) classHash,
- (int (*)(const void *, const void *, const void *)) classIsEqual,
- NXNoEffectFree, 0
-};
-
-// Hash table of unconnected classes
-static NXHashTable *unconnected_class_hash = NULL;
-
-// Exported copy of class_hash variable (hook for debugging tools)
-NXHashTable *_objc_debug_class_hash = NULL;
-
-// Category and class registries
-// Keys are COPIES of strings, to prevent stale pointers with unloaded bundles
-// Use NXMapKeyCopyingInsert and NXMapKeyFreeingRemove
-static NXMapTable * category_hash = NULL;
-
-// Keys are COPIES of strings, to prevent stale pointers with unloaded bundles
-// Use NXMapKeyCopyingInsert and NXMapKeyFreeingRemove
-static NXMapTable * pendingClassRefsMap = NULL;
-static NXMapTable * pendingSubclassesMap = NULL;
-
-// Protocols
-static NXMapTable *protocol_map = NULL; // name -> protocol
-static NXMapTable *protocol_ext_map = NULL; // protocol -> protocol ext
-
-// Function pointer objc_getClass calls through when class is not found
-static int (*objc_classHandler) (const char *) = _objc_defaultClassHandler;
-
-// Function pointer called by objc_getClass and objc_lookupClass when
-// class is not found. _objc_classLoader is called before objc_classHandler.
-static BOOL (*_objc_classLoader)(const char *) = NULL;
-
-
-/***********************************************************************
-* objc_dump_class_hash. Log names of all known classes.
-**********************************************************************/
-void objc_dump_class_hash(void)
-{
- NXHashTable *table;
- unsigned count;
- Class data;
- NXHashState state;
-
- table = class_hash;
- count = 0;
- state = NXInitHashState (table);
- while (NXNextHashState (table, &state, (void **) &data))
- printf ("class %d: %s\n", ++count, _class_getName(data));
-}
-
-
-/***********************************************************************
-* _objc_init_class_hash. Return the class lookup table, create it if
-* necessary.
-**********************************************************************/
-void _objc_init_class_hash(void)
-{
- // Do nothing if class hash table already exists
- if (class_hash)
- return;
-
- // class_hash starts small, with only enough capacity for libobjc itself.
- // If a second library is found by map_images(), class_hash is immediately
- // resized to capacity 1024 to cut down on rehashes.
- // Old numbers: A smallish Foundation+AppKit program will have
- // about 520 classes. Larger apps (like IB or WOB) have more like
- // 800 classes. Some customers have massive quantities of classes.
- // Foundation-only programs aren't likely to notice the ~6K loss.
- class_hash = NXCreateHashTableFromZone (classHashPrototype,
- 16,
- nil,
- _objc_internal_zone ());
- _objc_debug_class_hash = class_hash;
-}
-
-
-/***********************************************************************
-* objc_getClassList. Return the known classes.
-**********************************************************************/
-int objc_getClassList(Class *buffer, int bufferLen)
-{
- NXHashState state;
- Class cls;
- int cnt, num;
-
- mutex_lock(&classLock);
- if (!class_hash) {
- mutex_unlock(&classLock);
- return 0;
- }
- num = NXCountHashTable(class_hash);
- if (NULL == buffer) {
- mutex_unlock(&classLock);
- return num;
- }
- cnt = 0;
- state = NXInitHashState(class_hash);
- while (cnt < bufferLen &&
- NXNextHashState(class_hash, &state, (void **)&cls))
- {
- buffer[cnt++] = cls;
- }
- mutex_unlock(&classLock);
- return num;
-}
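As the NULL-buffer branch above implies, objc_getClassList is typically called twice: once to size a buffer and once to fill it. A minimal usage sketch under that assumption (the function name is hypothetical), guarding against classes registered between the two calls:

#include <objc/runtime.h>
#include <stdlib.h>

void sketch_enumerate_classes(void)
{
    int total = objc_getClassList(NULL, 0);        // pass 1: count only
    if (total <= 0) return;

    Class *buffer = (Class *)malloc(total * sizeof(Class));
    int now = objc_getClassList(buffer, total);    // pass 2: fill the buffer
    int filled = now < total ? now : total;        // list may have grown or shrunk

    for (int i = 0; i < filled; i++) {
        // inspect buffer[i] here
    }
    free(buffer);
}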
-
-
-/***********************************************************************
-* objc_copyClassList
-* Returns pointers to all classes.
-* This requires all classes be realized, which is regrettably non-lazy.
-*
-* outCount may be NULL. *outCount is the number of classes returned.
-* If the returned array is not NULL, it is NULL-terminated and must be
-* freed with free().
-* Locking: acquires classLock
-**********************************************************************/
-Class *
-objc_copyClassList(unsigned int *outCount)
-{
- Class *result;
- unsigned int count;
-
- mutex_lock(&classLock);
- result = NULL;
- count = class_hash ? NXCountHashTable(class_hash) : 0;
-
- if (count > 0) {
- Class cls;
- NXHashState state = NXInitHashState(class_hash);
- result = malloc((1+count) * sizeof(Class));
- count = 0;
- while (NXNextHashState(class_hash, &state, (void **)&cls)) {
- result[count++] = cls;
- }
- result[count] = NULL;
- }
- mutex_unlock(&classLock);
-
- if (outCount) *outCount = count;
- return result;
-}
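A companion sketch for the copying variant, using the same headers as the previous sketch: because the result is NULL-terminated and caller-owned, it can be walked by terminator alone and must be released with free(). The function name below is hypothetical:

void sketch_walk_class_list(void)
{
    unsigned int count = 0;
    Class *classes = objc_copyClassList(&count);
    if (!classes) return;

    for (Class *cursor = classes; *cursor; cursor++) {
        // *cursor is a registered class; the loop stops at the NULL terminator
    }
    free(classes);
}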
-
-
-/***********************************************************************
-* objc_copyProtocolList
-* Returns pointers to all protocols.
-* Locking: acquires classLock
-**********************************************************************/
-Protocol * __unsafe_unretained *
-objc_copyProtocolList(unsigned int *outCount)
-{
- int count, i;
- Protocol *proto;
- const char *name;
- NXMapState state;
- Protocol **result;
-
- mutex_lock(&classLock);
-
- count = NXCountMapTable(protocol_map);
- if (count == 0) {
- mutex_unlock(&classLock);
- if (outCount) *outCount = 0;
- return NULL;
- }
-
- result = calloc(1 + count, sizeof(Protocol *));
-
- i = 0;
- state = NXInitMapState(protocol_map);
- while (NXNextMapState(protocol_map, &state,
- (const void **)&name, (const void **)&proto))
- {
- result[i++] = proto;
- }
-
- result[i++] = NULL;
- assert(i == count+1);
-
- mutex_unlock(&classLock);
-
- if (outCount) *outCount = count;
- return result;
-}
-
-
-/***********************************************************************
-* objc_getClasses. Return class lookup table.
-*
-* NOTE: This function is very dangerous, since you cannot safely use
-* the hashtable without locking it, and the lock is private!
-**********************************************************************/
-void *objc_getClasses(void)
-{
- OBJC_WARN_DEPRECATED;
-
- // Return the class lookup hash table
- return class_hash;
-}
-
-
-/***********************************************************************
-* classHash.
-**********************************************************************/
-static uintptr_t classHash(void *info, Class data)
-{
- // Nil classes hash to zero
- if (!data)
- return 0;
-
- // Call through to real hash function
- return _objc_strhash (_class_getName(data));
-}
-
-/***********************************************************************
-* classIsEqual. Returns whether the class names match. If we ever
-* check more than the name, routines like objc_lookUpClass have to
-* change as well.
-**********************************************************************/
-static int classIsEqual(void *info, Class name, Class cls)
-{
- // Standard string comparison
- return strcmp(_class_getName(name), _class_getName(cls)) == 0;
-}
-
-
-// Unresolved future classes
-static NXHashTable *future_class_hash = NULL;
-
-// Resolved future<->original classes
-static NXMapTable *future_class_to_original_class_map = NULL;
-static NXMapTable *original_class_to_future_class_map = NULL;
-
-// CF requests about 20 future classes; HIToolbox requests one.
-#define FUTURE_COUNT 32
-
-
-/***********************************************************************
-* setOriginalClassForFutureClass
-* Record resolution of a future class.
-**********************************************************************/
-static void setOriginalClassForFutureClass(struct old_class *futureClass,
- struct old_class *originalClass)
-{
- if (!future_class_to_original_class_map) {
- future_class_to_original_class_map =
- NXCreateMapTableFromZone (NXPtrValueMapPrototype, FUTURE_COUNT,
- _objc_internal_zone ());
- original_class_to_future_class_map =
- NXCreateMapTableFromZone (NXPtrValueMapPrototype, FUTURE_COUNT,
- _objc_internal_zone ());
- }
-
- NXMapInsert (future_class_to_original_class_map,
- futureClass, originalClass);
- NXMapInsert (original_class_to_future_class_map,
- originalClass, futureClass);
-
- if (PrintFuture) {
- _objc_inform("FUTURE: using %p instead of %p for %s", futureClass, originalClass, originalClass->name);
- }
-}
-
-/***********************************************************************
-* getOriginalClassForFutureClass
-* getFutureClassForOriginalClass
-* Switch between a future class and its corresponding original class.
-* The future class is the one actually in use.
-* The original class is the one from disk.
-**********************************************************************/
-/*
-static struct old_class *
-getOriginalClassForFutureClass(struct old_class *futureClass)
-{
- if (!future_class_to_original_class_map) return Nil;
- return NXMapGet (future_class_to_original_class_map, futureClass);
-}
-*/
-static struct old_class *
-getFutureClassForOriginalClass(struct old_class *originalClass)
-{
- if (!original_class_to_future_class_map) return Nil;
- return NXMapGet (original_class_to_future_class_map, originalClass);
-}
-
-
-/***********************************************************************
-* makeFutureClass
-* Initialize the memory in *cls with an unresolved future class with the
-* given name. The memory is recorded in future_class_hash.
-**********************************************************************/
-static void makeFutureClass(struct old_class *cls, const char *name)
-{
- // CF requests about 20 future classes, plus HIToolbox has one.
- if (!future_class_hash) {
- future_class_hash =
- NXCreateHashTableFromZone(classHashPrototype, FUTURE_COUNT,
- NULL, _objc_internal_zone());
- }
-
- cls->name = _strdup_internal(name);
- NXHashInsert(future_class_hash, cls);
-
- if (PrintFuture) {
- _objc_inform("FUTURE: reserving %p for %s", cls, name);
- }
-}
-
-
-/***********************************************************************
-* _objc_allocateFutureClass
-* Allocate an unresolved future class for the given class name.
-* Returns any existing allocation if one was already made.
-* Assumes the named class doesn't exist yet.
-* Not thread safe.
-**********************************************************************/
-Class _objc_allocateFutureClass(const char *name)
-{
- struct old_class *cls;
-
- if (future_class_hash) {
- struct old_class query;
- query.name = name;
- if ((cls = NXHashGet(future_class_hash, &query))) {
- // Already have a future class for this name.
- return (Class)cls;
- }
- }
-
- cls = (struct old_class *)_calloc_class(sizeof(*cls));
- makeFutureClass(cls, name);
- return (Class)cls;
-}
-
-
-/***********************************************************************
-* objc_setFutureClass.
-* Like objc_getFutureClass, but uses the provided memory block.
-* If the class already exists, a posing-like substitution is performed.
-* Not thread safe.
-**********************************************************************/
-void objc_setFutureClass(Class cls, const char *name)
-{
- struct old_class *oldcls;
- struct old_class *newcls = (struct old_class *)cls; // Not a real class!
-
- if ((oldcls = oldcls((Class)look_up_class(name, NO/*unconnected*/, NO/*classhandler*/)))) {
- setOriginalClassForFutureClass(newcls, oldcls);
- // fixme hack
- memcpy(newcls, oldcls, sizeof(struct objc_class));
- newcls->info &= ~CLS_EXT;
-
- mutex_lock(&classLock);
- NXHashRemove(class_hash, oldcls);
- objc_removeRegisteredClass((Class)oldcls);
- change_class_references(newcls, oldcls, nil, YES);
- NXHashInsert(class_hash, newcls);
- objc_addRegisteredClass((Class)newcls);
- mutex_unlock(&classLock);
- } else {
- makeFutureClass(newcls, name);
- }
-}
-
-
-/***********************************************************************
-* _objc_defaultClassHandler. Default objc_classHandler. Does nothing.
-**********************************************************************/
-static int _objc_defaultClassHandler(const char *clsName)
-{
- // Return zero so objc_getClass doesn't bother re-searching
- return 0;
-}
-
-/***********************************************************************
-* objc_setClassHandler. Set objc_classHandler to the specified value.
-*
-* NOTE: This should probably deal with userSuppliedHandler being NULL,
-* because the objc_classHandler caller does not check... it would bus
-* error. It would make sense to handle NULL by restoring the default
-* handler. Is anyone hacking with this, though?
-**********************************************************************/
-void objc_setClassHandler(int (*userSuppliedHandler)(const char *))
-{
- OBJC_WARN_DEPRECATED;
-
- objc_classHandler = userSuppliedHandler;
-}
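To make the handler contract above concrete: the handler returns nonzero only if it did something that might have registered the class (prompting one retry with the handler disabled), and zero to let the lookup fail. A hedged sketch with a hypothetical handler name:

// Hypothetical handler. Returning 0 tells objc_getClass not to re-search;
// returning nonzero triggers exactly one retry without the handler.
static int sketch_class_handler(const char *clsName)
{
    // e.g. try to load a bundle known to define clsName, then report success;
    return 0;   // nothing was loaded, so give up
}

// Installed once at startup (deprecated API, kept for older callers):
//     objc_setClassHandler(sketch_class_handler);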
-
-
-/***********************************************************************
-* _objc_setClassLoader
-* Similar to objc_setClassHandler, but objc_classLoader is used for
-* both objc_getClass() and objc_lookupClass(), and objc_classLoader
-* pre-empts objc_classHandler.
-**********************************************************************/
-void _objc_setClassLoader(BOOL (*newClassLoader)(const char *))
-{
- _objc_classLoader = newClassLoader;
-}
-
-
-/***********************************************************************
-* objc_getProtocol
-* Get a protocol by name, or NULL.
-**********************************************************************/
-Protocol *objc_getProtocol(const char *name)
-{
- Protocol *result;
- if (!protocol_map) return NULL;
- mutex_lock(&classLock);
- result = (Protocol *)NXMapGet(protocol_map, name);
- mutex_unlock(&classLock);
- return result;
-}
-
-
-/***********************************************************************
-* look_up_class
-* Map a class name to a class using various methods.
-* This is the common implementation of objc_lookUpClass and objc_getClass,
-* and is also used internally to get additional search options.
-* Sequence:
-* 1. class_hash
-* 2. unconnected_class_hash (optional)
-* 3. classLoader callback
-* 4. classHandler callback (optional)
-**********************************************************************/
-id look_up_class(const char *aClassName, BOOL includeUnconnected, BOOL includeClassHandler)
-{
- BOOL includeClassLoader = YES; // class loader cannot be skipped
- id result = nil;
- struct old_class query;
-
- query.name = aClassName;
-
- retry:
-
- if (!result && class_hash) {
- // Check ordinary classes
- mutex_lock (&classLock);
- result = (id)NXHashGet(class_hash, &query);
- mutex_unlock (&classLock);
- }
-
- if (!result && includeUnconnected && unconnected_class_hash) {
- // Check not-yet-connected classes
- mutex_lock(&classLock);
- result = (id)NXHashGet(unconnected_class_hash, &query);
- mutex_unlock(&classLock);
- }
-
- if (!result && includeClassLoader && _objc_classLoader) {
- // Try class loader callback
- if ((*_objc_classLoader)(aClassName)) {
- // Re-try lookup without class loader
- includeClassLoader = NO;
- goto retry;
- }
- }
-
- if (!result && includeClassHandler && objc_classHandler) {
- // Try class handler callback
- if ((*objc_classHandler)(aClassName)) {
- // Re-try lookup without class handler or class loader
- includeClassLoader = NO;
- includeClassHandler = NO;
- goto retry;
- }
- }
-
- return result;
-}
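The public entry points differ only in which fallbacks they enable. A sketch of how objc_getClass-style wrappers plausibly forward to look_up_class; the exact flag values used by the real wrappers are an assumption based on the comments above, and the sketch_ names are made up:

// Sketch only: unconnected classes excluded, class handler allowed.
Class sketch_objc_getClass(const char *name)
{
    return (Class)look_up_class(name, NO /*unconnected*/, YES /*classhandler*/);
}

// Sketch only: neither unconnected classes nor the class handler consulted.
Class sketch_objc_lookUpClass(const char *name)
{
    return (Class)look_up_class(name, NO /*unconnected*/, NO /*classhandler*/);
}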
-
-
-/***********************************************************************
-* class_is_connected.
-* Returns TRUE if class cls is connected.
-* A connected class has either a connected superclass or a NULL superclass,
-* and is present in class_hash.
-**********************************************************************/
-static BOOL class_is_connected(struct old_class *cls)
-{
- BOOL result;
- mutex_lock(&classLock);
- result = NXHashMember(class_hash, cls);
- mutex_unlock(&classLock);
- return result;
-}
-
-
-/***********************************************************************
-* _class_isLoadable.
-* Returns TRUE if class cls is ready for its +load method to be called.
-* A class is ready for +load if it is connected.
-**********************************************************************/
-BOOL _class_isLoadable(Class cls)
-{
- return class_is_connected(oldcls(cls));
-}
-
-
-/***********************************************************************
-* pendingClassRefsMapTable. Return a pointer to the lookup table for
-* pending class refs.
-**********************************************************************/
-static inline NXMapTable *pendingClassRefsMapTable(void)
-{
- // Allocate table if needed
- if (!pendingClassRefsMap) {
- pendingClassRefsMap =
- NXCreateMapTableFromZone(NXStrValueMapPrototype,
- 10, _objc_internal_zone ());
- }
-
- // Return table pointer
- return pendingClassRefsMap;
-}
-
-
-/***********************************************************************
-* pendingSubclassesMapTable. Return a pointer to the lookup table for
-* pending subclasses.
-**********************************************************************/
-static inline NXMapTable *pendingSubclassesMapTable(void)
-{
- // Allocate table if needed
- if (!pendingSubclassesMap) {
- pendingSubclassesMap =
- NXCreateMapTableFromZone(NXStrValueMapPrototype,
- 10, _objc_internal_zone ());
- }
-
- // Return table pointer
- return pendingSubclassesMap;
-}
-
-
-/***********************************************************************
-* pendClassInstallation
-* Finish connecting class cls when its superclass becomes connected.
-* Check for multiple pends of the same class because connect_class does not.
-**********************************************************************/
-static void pendClassInstallation(struct old_class *cls, const char *superName)
-{
- NXMapTable *table;
- PendingSubclass *pending;
- PendingSubclass *oldList;
- PendingSubclass *l;
-
- // Create and/or locate pending class lookup table
- table = pendingSubclassesMapTable ();
-
- // Make sure this class isn't already in the pending list.
- oldList = NXMapGet (table, superName);
- for (l = oldList; l != NULL; l = l->next) {
- if (l->subclass == cls) return; // already here, nothing to do
- }
-
- // Create entry referring to this class
- pending = _malloc_internal(sizeof(PendingSubclass));
- pending->subclass = cls;
-
- // Link new entry into head of list of entries for this class
- pending->next = oldList;
-
- // (Re)place entry list in the table
- NXMapKeyCopyingInsert (table, superName, pending);
-}
-
-
-/***********************************************************************
-* pendClassReference
-* Fix up a class ref when the class with the given name becomes connected.
-**********************************************************************/
-static void pendClassReference(struct old_class **ref, const char *className, BOOL isMeta)
-{
- NXMapTable *table;
- PendingClassRef *pending;
-
- // Create and/or locate pending class lookup table
- table = pendingClassRefsMapTable ();
-
- // Create entry containing the class reference
- pending = _malloc_internal(sizeof(PendingClassRef));
- pending->ref = ref;
- if (isMeta) {
- pending->ref = (struct old_class **)((uintptr_t)pending->ref | 1);
- }
-
- // Link new entry into head of list of entries for this class
- pending->next = NXMapGet (table, className);
-
- // (Re)place entry list in the table
- NXMapKeyCopyingInsert (table, className, pending);
-
- if (PrintConnecting) {
- _objc_inform("CONNECT: pended reference to class '%s%s' at %p",
- className, isMeta ? " (meta)" : "", (void *)ref);
- }
-}
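The isMeta flag rides in the low bit of the stored ref, which resolve_references_to_class strips again below. A minimal sketch of that encode/decode round trip; tag_ref and untag_ref are hypothetical helpers, not names from this file:

static struct old_class **tag_ref(struct old_class **ref, BOOL isMeta)
{
    // Low bit set means "store the metaclass, not the class".
    return isMeta ? (struct old_class **)((uintptr_t)ref | 1) : ref;
}

static struct old_class **untag_ref(struct old_class **tagged, BOOL *outIsMeta)
{
    *outIsMeta = ((uintptr_t)tagged & 1) ? YES : NO;
    return (struct old_class **)((uintptr_t)tagged & ~(uintptr_t)1);
}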
-
-
-/***********************************************************************
-* resolve_references_to_class
-* Fix up any pending class refs to this class.
-**********************************************************************/
-static void resolve_references_to_class(struct old_class *cls)
-{
- PendingClassRef *pending;
-
- if (!pendingClassRefsMap) return; // no unresolved refs for any class
-
- pending = NXMapGet(pendingClassRefsMap, cls->name);
- if (!pending) return; // no unresolved refs for this class
-
- NXMapKeyFreeingRemove(pendingClassRefsMap, cls->name);
-
- if (PrintConnecting) {
- _objc_inform("CONNECT: resolving references to class '%s'", cls->name);
- }
-
- while (pending) {
- PendingClassRef *next = pending->next;
- if (pending->ref) {
- BOOL isMeta = ((uintptr_t)pending->ref & 1) ? YES : NO;
- struct old_class **ref =
- (struct old_class **)((uintptr_t)pending->ref & ~(uintptr_t)1);
- *ref = isMeta ? cls->isa : cls;
- }
- _free_internal(pending);
- pending = next;
- }
-
- if (NXCountMapTable(pendingClassRefsMap) == 0) {
- NXFreeMapTable(pendingClassRefsMap);
- pendingClassRefsMap = NULL;
- }
-}
-
-
-/***********************************************************************
-* resolve_subclasses_of_class
-* Fix up any pending subclasses of this class.
-**********************************************************************/
-static void resolve_subclasses_of_class(struct old_class *cls)
-{
- PendingSubclass *pending;
-
- if (!pendingSubclassesMap) return; // no unresolved subclasses
-
- pending = NXMapGet(pendingSubclassesMap, cls->name);
- if (!pending) return; // no unresolved subclasses for this class
-
- NXMapKeyFreeingRemove(pendingSubclassesMap, cls->name);
-
- // Destroy the pending table if it's now empty, to save memory.
- if (NXCountMapTable(pendingSubclassesMap) == 0) {
- NXFreeMapTable(pendingSubclassesMap);
- pendingSubclassesMap = NULL;
- }
-
- if (PrintConnecting) {
- _objc_inform("CONNECT: resolving subclasses of class '%s'", cls->name);
- }
-
- while (pending) {
- PendingSubclass *next = pending->next;
- if (pending->subclass) connect_class(pending->subclass);
- _free_internal(pending);
- pending = next;
- }
-}
-
-
-/***********************************************************************
-* really_connect_class
-* Connect cls to superclass supercls unconditionally.
-* Also adjust the class hash tables and handle pended subclasses.
-*
-* This should be called from connect_class() ONLY.
-**********************************************************************/
-static void really_connect_class(struct old_class *cls,
- struct old_class *supercls)
-{
- struct old_class *oldCls;
-
- // Connect superclass pointers.
- set_superclass(cls, supercls, YES);
-
- // Update GC layouts
- // For paranoia, this is a conservative update:
- // only non-strong -> strong and weak -> strong are corrected.
- if (UseGC && supercls &&
- (cls->info & CLS_EXT) && (supercls->info & CLS_EXT))
- {
- BOOL layoutChanged;
- layout_bitmap ivarBitmap =
- layout_bitmap_create(cls->ivar_layout,
- cls->instance_size,
- cls->instance_size, NO);
-
- layout_bitmap superBitmap =
- layout_bitmap_create(supercls->ivar_layout,
- supercls->instance_size,
- supercls->instance_size, NO);
-
- // non-strong -> strong: bits set in super should be set in sub
- layoutChanged = layout_bitmap_or(ivarBitmap, superBitmap, cls->name);
- layout_bitmap_free(superBitmap);
-
- if (layoutChanged) {
- layout_bitmap weakBitmap = {};
- BOOL weakLayoutChanged = NO;
-
- if (cls->ext && cls->ext->weak_ivar_layout) {
- // weak -> strong: strong bits should be cleared in weak layout
- // This is a subset of non-strong -> strong
- weakBitmap =
- layout_bitmap_create(cls->ext->weak_ivar_layout,
- cls->instance_size,
- cls->instance_size, YES);
-
- weakLayoutChanged =
- layout_bitmap_clear(weakBitmap, ivarBitmap, cls->name);
- } else {
- // no existing weak ivars, so no weak -> strong changes
- }
-
- // Rebuild layout strings.
- if (PrintIvars) {
- _objc_inform("IVARS: gc layout changed "
- "for class %s (super %s)",
- cls->name, supercls->name);
- if (weakLayoutChanged) {
- _objc_inform("IVARS: gc weak layout changed "
- "for class %s (super %s)",
- cls->name, supercls->name);
- }
- }
- cls->ivar_layout = layout_string_create(ivarBitmap);
- if (weakLayoutChanged) {
- cls->ext->weak_ivar_layout = layout_string_create(weakBitmap);
- }
-
- layout_bitmap_free(weakBitmap);
- }
-
- layout_bitmap_free(ivarBitmap);
- }
-
- // Done!
- cls->info |= CLS_CONNECTED;
-
- mutex_lock(&classLock);
-
- // Update hash tables.
- NXHashRemove(unconnected_class_hash, cls);
- oldCls = NXHashInsert(class_hash, cls);
- objc_addRegisteredClass((Class)cls);
-
- // Delete unconnected_class_hash if it is now empty.
- if (NXCountHashTable(unconnected_class_hash) == 0) {
- NXFreeHashTable(unconnected_class_hash);
- unconnected_class_hash = NULL;
- }
-
- // No duplicate classes allowed.
- // Duplicates should have been rejected by _objc_read_classes_from_image.
- assert(!oldCls);
-
- mutex_unlock(&classLock);
-
- // Fix up pended class refs to this class, if any
- resolve_references_to_class(cls);
-
- // Connect newly-connectable subclasses
- resolve_subclasses_of_class(cls);
-
- // GC debugging: make sure all classes with -dealloc also have -finalize
- if (DebugFinalizers) {
- extern IMP findIMPInClass(struct old_class *cls, SEL sel);
- if (findIMPInClass(cls, sel_getUid("dealloc")) &&
- ! findIMPInClass(cls, sel_getUid("finalize")))
- {
- _objc_inform("GC: class '%s' implements -dealloc but not -finalize", cls->name);
- }
- }
-
- // Debugging: if this class has ivars, make sure this class's ivars don't
- // overlap with its super's. This catches some broken fragile base classes.
- // Do not use super->instance_size vs. self->ivar[0] to check this.
- // Ivars may be packed across instance_size boundaries.
- if (DebugFragileSuperclasses && cls->ivars && cls->ivars->ivar_count) {
- struct old_class *ivar_cls = supercls;
-
- // Find closest superclass that has some ivars, if one exists.
- while (ivar_cls &&
- (!ivar_cls->ivars || ivar_cls->ivars->ivar_count == 0))
- {
- ivar_cls = ivar_cls->super_class;
- }
-
- if (ivar_cls) {
- // Compare superclass's last ivar to this class's first ivar
- struct old_ivar *super_ivar =
- &ivar_cls->ivars->ivar_list[ivar_cls->ivars->ivar_count - 1];
- struct old_ivar *self_ivar =
- &cls->ivars->ivar_list[0];
-
- // fixme could be smarter about super's ivar size
- if (self_ivar->ivar_offset <= super_ivar->ivar_offset) {
- _objc_inform("WARNING: ivars of superclass '%s' and "
- "subclass '%s' overlap; superclass may have "
- "changed since subclass was compiled",
- ivar_cls->name, cls->name);
- }
- }
- }
-}
-
-
-/***********************************************************************
-* connect_class
-* Connect class cls to its superclasses, if possible.
-* If cls becomes connected, move it from unconnected_class_hash
-* to class_hash.
-* Returns TRUE if cls is connected.
-* Returns FALSE if cls could not be connected for some reason
-* (missing superclass or still-unconnected superclass)
-**********************************************************************/
-static BOOL connect_class(struct old_class *cls)
-{
- if (class_is_connected(cls)) {
- // This class is already connected to its superclass.
- // Do nothing.
- return TRUE;
- }
- else if (cls->super_class == NULL) {
- // This class is a root class.
- // Connect it to itself.
-
- if (PrintConnecting) {
- _objc_inform("CONNECT: class '%s' now connected (root class)",
- cls->name);
- }
-
- really_connect_class(cls, NULL);
- return TRUE;
- }
- else {
- // This class is not a root class and is not yet connected.
- // Connect it if its superclass and root class are already connected.
- // Otherwise, add this class to the to-be-connected list,
- // pending the completion of its superclass and root class.
-
- // At this point, cls->super_class and cls->isa->isa are still STRINGS
- char *supercls_name = (char *)cls->super_class;
- struct old_class *supercls;
-
- // YES unconnected, YES class handler
- if (NULL == (supercls = oldcls((Class)look_up_class(supercls_name, YES, YES)))) {
- // Superclass does not exist yet.
- // pendClassInstallation will handle duplicate pends of this class
- pendClassInstallation(cls, supercls_name);
-
- if (PrintConnecting) {
- _objc_inform("CONNECT: class '%s' NOT connected (missing super)", cls->name);
- }
- return FALSE;
- }
-
- if (! connect_class(supercls)) {
- // Superclass exists but is not yet connected.
- // pendClassInstallation will handle duplicate pends of this class
- pendClassInstallation(cls, supercls_name);
-
- if (PrintConnecting) {
- _objc_inform("CONNECT: class '%s' NOT connected (unconnected super)", cls->name);
- }
- return FALSE;
- }
-
- // Superclass exists and is connected.
- // Connect this class to the superclass.
-
- if (PrintConnecting) {
- _objc_inform("CONNECT: class '%s' now connected", cls->name);
- }
-
- really_connect_class(cls, supercls);
- return TRUE;
- }
-}
-
-
-/***********************************************************************
-* _objc_read_categories_from_image.
-* Read all categories from the given image.
-* Install them on their parent classes, or register them for later
-* installation.
-* Returns YES if some method caches now need to be flushed.
-**********************************************************************/
-static BOOL _objc_read_categories_from_image (header_info * hi)
-{
- Module mods;
- size_t midx;
- BOOL needFlush = NO;
-
- if (_objcHeaderIsReplacement(hi)) {
- // Ignore any categories in this image
- return NO;
- }
-
-
- // Major loop - process all modules in the header
- mods = hi->mod_ptr;
-
- // NOTE: The module and category lists are traversed backwards
- // to preserve the pre-10.4 processing order. Changing the order
- // would have a small chance of introducing binary compatibility bugs.
- midx = hi->mod_count;
- while (midx-- > 0) {
- unsigned int index;
- unsigned int total;
-
- // Nothing to do for a module without a symbol table
- if (mods[midx].symtab == NULL)
- continue;
-
- // Total entries in symbol table (class entries followed
- // by category entries)
- total = mods[midx].symtab->cls_def_cnt +
- mods[midx].symtab->cat_def_cnt;
-
- // Minor loop - register all categories from given module
- index = total;
- while (index-- > mods[midx].symtab->cls_def_cnt) {
- struct old_category *cat = mods[midx].symtab->defs[index];
- needFlush |= _objc_register_category(cat, (int)mods[midx].version);
- }
- }
-
- return needFlush;
-}
-
-
-/***********************************************************************
-* _objc_read_classes_from_image.
-* Read classes from the given image, perform assorted minor fixups,
-* scan for +load implementation.
-* Does not connect classes to superclasses.
-* Does attach pended categories to the classes.
-* Adds all classes to unconnected_class_hash. class_hash is unchanged.
-**********************************************************************/
-static void _objc_read_classes_from_image(header_info *hi)
-{
- unsigned int index;
- unsigned int midx;
- Module mods;
- int isBundle = headerIsBundle(hi);
-
- if (_objcHeaderIsReplacement(hi)) {
- // Ignore any classes in this image
- return;
- }
-
- // class_hash starts small, enough only for libobjc itself.
- // If other Objective-C libraries are found, immediately resize
- // class_hash, assuming that Foundation and AppKit are about
- // to add lots of classes.
- mutex_lock(&classLock);
- if (hi->mhdr != libobjc_header && _NXHashCapacity(class_hash) < 1024) {
- _NXHashRehashToCapacity(class_hash, 1024);
- }
- mutex_unlock(&classLock);
-
- // Major loop - process all modules in the image
- mods = hi->mod_ptr;
- for (midx = 0; midx < hi->mod_count; midx += 1)
- {
- // Skip module containing no classes
- if (mods[midx].symtab == NULL)
- continue;
-
- // Minor loop - process all the classes in given module
- for (index = 0; index < mods[midx].symtab->cls_def_cnt; index += 1)
- {
- struct old_class *newCls, *oldCls;
- BOOL rejected;
-
- // Locate the class description pointer
- newCls = mods[midx].symtab->defs[index];
-
- // Classes loaded from Mach-O bundles can be unloaded later.
- // Nothing uses this class yet, so _class_setInfo is not needed.
- if (isBundle) newCls->info |= CLS_FROM_BUNDLE;
- if (isBundle) newCls->isa->info |= CLS_FROM_BUNDLE;
-
- // Use common static empty cache instead of NULL
- if (newCls->cache == NULL)
- newCls->cache = (Cache) &_objc_empty_cache;
- if (newCls->isa->cache == NULL)
- newCls->isa->cache = (Cache) &_objc_empty_cache;
-
- // Set metaclass version
- newCls->isa->version = mods[midx].version;
-
- // methodLists is NULL or a single list, not an array
- newCls->info |= CLS_NO_METHOD_ARRAY|CLS_NO_PROPERTY_ARRAY;
- newCls->isa->info |= CLS_NO_METHOD_ARRAY|CLS_NO_PROPERTY_ARRAY;
-
- // class has no subclasses for cache flushing
- newCls->info |= CLS_LEAF;
- newCls->isa->info |= CLS_LEAF;
-
- if (mods[midx].version >= 6) {
- // class structure has ivar_layout and ext fields
- newCls->info |= CLS_EXT;
- newCls->isa->info |= CLS_EXT;
- }
-
- // Check for +load implementation before categories are attached
- if (_class_hasLoadMethod((Class)newCls)) {
- newCls->isa->info |= CLS_HAS_LOAD_METHOD;
- }
-
- // Install into unconnected_class_hash.
- mutex_lock(&classLock);
-
- if (future_class_hash) {
- struct old_class *futureCls =
- NXHashRemove(future_class_hash, newCls);
- if (futureCls) {
- // Another class structure for this class was already
- // prepared by objc_getFutureClass(). Use it instead.
- _free_internal((char *)futureCls->name);
- memcpy(futureCls, newCls, sizeof(*newCls));
- setOriginalClassForFutureClass(futureCls, newCls);
- newCls = futureCls;
-
- if (NXCountHashTable(future_class_hash) == 0) {
- NXFreeHashTable(future_class_hash);
- future_class_hash = NULL;
- }
- }
- }
-
- if (!unconnected_class_hash) {
- unconnected_class_hash =
- NXCreateHashTableFromZone(classHashPrototype, 128,
- NULL, _objc_internal_zone());
- }
-
- if ((oldCls = NXHashGet(class_hash, newCls)) ||
- (oldCls = NXHashGet(unconnected_class_hash, newCls)))
- {
- // Another class with this name exists. Complain and reject.
- inform_duplicate(newCls->name, (Class)oldCls, (Class)newCls);
- rejected = YES;
- }
- else {
- NXHashInsert(unconnected_class_hash, newCls);
- rejected = NO;
- }
-
- mutex_unlock(&classLock);
-
- if (!rejected) {
- // Attach pended categories for this class, if any
- resolve_categories_for_class(newCls);
- }
- }
- }
-}
-
-
-/***********************************************************************
-* _objc_connect_classes_from_image.
-* Connect the classes in the given image to their superclasses,
-* or register them for later connection if any superclasses are missing.
-**********************************************************************/
-static void _objc_connect_classes_from_image(header_info *hi)
-{
- unsigned int index;
- unsigned int midx;
- Module mods;
- BOOL replacement = _objcHeaderIsReplacement(hi);
-
- // Major loop - process all modules in the image
- mods = hi->mod_ptr;
- for (midx = 0; midx < hi->mod_count; midx += 1)
- {
- // Skip module containing no classes
- if (mods[midx].symtab == NULL)
- continue;
-
- // Minor loop - process all the classes in given module
- for (index = 0; index < mods[midx].symtab->cls_def_cnt; index += 1)
- {
- struct old_class *cls = mods[midx].symtab->defs[index];
- if (! replacement) {
- BOOL connected;
- struct old_class *futureCls = getFutureClassForOriginalClass(cls);
- if (futureCls) {
- // objc_getFutureClass() requested a different class
- // struct. Fix up the original struct's super_class
- // field for [super ...] use, but otherwise perform
- // fixups on the new class struct only.
- const char *super_name = (const char *) cls->super_class;
- if (super_name) cls->super_class = oldcls((Class)objc_getClass(super_name));
- cls = futureCls;
- }
- connected = connect_class(cls);
- if (connected && callbackFunction) {
- (*callbackFunction)((Class)cls, 0);
- }
- } else {
- // Replacement image - fix up super_class only (#3704817)
- // And metaclass's super_class (#5351107)
- const char *super_name = (const char *) cls->super_class;
- if (super_name) {
- cls->super_class = oldcls((Class)objc_getClass(super_name));
- // metaclass's superclass is superclass's metaclass
- cls->isa->super_class = cls->super_class->isa;
- } else {
- // Replacement for a root class
- // cls->super_class already NULL
- // root metaclass's superclass is root class
- cls->isa->super_class = cls;
- }
- }
- }
- }
-}
-
-
-/***********************************************************************
-* _objc_map_class_refs_for_image. Convert the class ref entries from
-* a class name string pointer to a class pointer. If the class does
-* not yet exist, the reference is added to a list of pending references
-* to be fixed up at a later date.
-**********************************************************************/
-static void fix_class_ref(struct old_class **ref, const char *name, BOOL isMeta)
-{
- struct old_class *cls;
-
- // Get pointer to class of this name
- // NO unconnected, YES class loader
- // (real class with weak-missing superclass is unconnected now)
- cls = oldcls((Class)look_up_class(name, NO, YES));
- if (cls) {
- // Referenced class exists. Fix up the reference.
- *ref = isMeta ? cls->isa : cls;
- } else {
- // Referenced class does not exist yet. Insert NULL for now
- // (weak-linking) and fix up the reference if the class arrives later.
- pendClassReference (ref, name, isMeta);
- *ref = NULL;
- }
-}
-
-static void _objc_map_class_refs_for_image (header_info * hi)
-{
- struct old_class **cls_refs;
- size_t count;
- unsigned int index;
-
- // Locate class refs in image
- cls_refs = _getObjcClassRefs (hi, &count);
- if (cls_refs) {
- // Process each class ref
- for (index = 0; index < count; index += 1) {
- // Ref is initially class name char*
- const char *name = (const char *) cls_refs[index];
- if (!name) continue;
- fix_class_ref(&cls_refs[index], name, NO /*never meta*/);
- }
- }
-}
-
-
-/***********************************************************************
-* _objc_remove_pending_class_refs_in_image
-* Delete any pending class ref fixups for class refs in the given image,
-* because the image is about to be unloaded.
-**********************************************************************/
-static void removePendingReferences(struct old_class **refs, size_t count)
-{
- struct old_class **end = refs + count;
-
- if (!refs) return;
- if (!pendingClassRefsMap) return;
-
- // Search the pending class ref table for class refs in this range.
- // The class refs may have already been stomped with NULL,
- // so there's no way to recover the original class name.
-
- {
- const char *key;
- PendingClassRef *pending;
- NXMapState state = NXInitMapState(pendingClassRefsMap);
- while(NXNextMapState(pendingClassRefsMap, &state,
- (const void **)&key, (const void **)&pending))
- {
- for ( ; pending != NULL; pending = pending->next) {
- if (pending->ref >= refs && pending->ref < end) {
- pending->ref = NULL;
- }
- }
- }
- }
-}
-
-static void _objc_remove_pending_class_refs_in_image(header_info *hi)
-{
- struct old_class **cls_refs;
- size_t count;
-
- // Locate class refs in this image
- cls_refs = _getObjcClassRefs(hi, &count);
- removePendingReferences(cls_refs, count);
-}
-
-
-/***********************************************************************
-* map_selrefs. For each selector in the specified array,
-* replace the name pointer with a uniqued selector.
-* If copy is TRUE, all selector data is always copied. This is used
-* for registering selectors from unloadable bundles, so the selector
-* can still be used after the bundle's data segment is unmapped.
-* Selector entries are rewritten in place only when the uniqued value differs.
-**********************************************************************/
-static inline void map_selrefs(SEL *sels, size_t count, BOOL copy)
-{
- size_t index;
-
- if (!sels) return;
-
- sel_lock();
-
- // Process each selector
- for (index = 0; index < count; index += 1)
- {
- SEL sel;
-
- // Lookup pointer to uniqued string
- sel = sel_registerNameNoLock((const char *) sels[index], copy);
-
- // Replace this selector with uniqued one (avoid
- // modifying the VM page if this would be a NOP)
- if (sels[index] != sel) {
- sels[index] = sel;
- }
- }
-
- sel_unlock();
-}
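The point of the uniquing above is that every reference to a given selector name ends up as the same SEL. A small standalone sketch of that invariant using the public registration call; the selector name is made up:

#include <objc/runtime.h>
#include <assert.h>
#include <string.h>

void sketch_selector_uniquing(void)
{
    SEL a = sel_registerName("frobnicate:");
    SEL b = sel_registerName("frobnicate:");
    assert(a == b);                                     // pointer-equal: same uniqued SEL
    assert(strcmp(sel_getName(a), "frobnicate:") == 0); // name round-trips
}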
-
-
-/***********************************************************************
-* map_method_descs. For each method in the specified method list,
-* replace the name pointer with a uniqued selector.
-* If copy is TRUE, all selector data is always copied. This is used
-* for registering selectors from unloadable bundles, so the selector
-* can still be used after the bundle's data segment is unmapped.
-**********************************************************************/
-static void map_method_descs (struct objc_method_description_list * methods, BOOL copy)
-{
- int index;
-
- if (!methods) return;
-
- sel_lock();
-
- // Process each method
- for (index = 0; index < methods->count; index += 1)
- {
- struct objc_method_description * method;
- SEL sel;
-
- // Get method entry to fix up
- method = &methods->list[index];
-
- // Lookup pointer to uniqued string
- sel = sel_registerNameNoLock((const char *) method->name, copy);
-
- // Replace this selector with uniqued one (avoid
- // modifying the VM page if this would be a NOP)
- if (method->name != sel)
- method->name = sel;
- }
-
- sel_unlock();
-}
-
-
-/***********************************************************************
-* ext_for_protocol
-* Returns the protocol extension for the given protocol.
-* Returns NULL if the protocol has no extension.
-**********************************************************************/
-static struct old_protocol_ext *ext_for_protocol(struct old_protocol *proto)
-{
- if (!proto) return NULL;
- if (!protocol_ext_map) return NULL;
- else return (struct old_protocol_ext *)NXMapGet(protocol_ext_map, proto);
-}
-
-
-/***********************************************************************
-* lookup_method
-* Search a protocol method list for a selector.
-**********************************************************************/
-static struct objc_method_description *
-lookup_method(struct objc_method_description_list *mlist, SEL aSel)
-{
- if (mlist) {
- int i;
- for (i = 0; i < mlist->count; i++) {
- if (mlist->list[i].name == aSel) {
- return mlist->list+i;
- }
- }
- }
- return NULL;
-}
-
-
-/***********************************************************************
-* lookup_protocol_method
-* Search for a selector in a protocol
-* (and optionally recursively all incorporated protocols)
-**********************************************************************/
-struct objc_method_description *
-lookup_protocol_method(struct old_protocol *proto, SEL aSel,
- BOOL isRequiredMethod, BOOL isInstanceMethod,
- BOOL recursive)
-{
- struct objc_method_description *m = NULL;
- struct old_protocol_ext *ext;
-
- if (isRequiredMethod) {
- if (isInstanceMethod) {
- m = lookup_method(proto->instance_methods, aSel);
- } else {
- m = lookup_method(proto->class_methods, aSel);
- }
- } else if ((ext = ext_for_protocol(proto))) {
- if (isInstanceMethod) {
- m = lookup_method(ext->optional_instance_methods, aSel);
- } else {
- m = lookup_method(ext->optional_class_methods, aSel);
- }
- }
-
- if (!m && recursive && proto->protocol_list) {
- int i;
- for (i = 0; !m && i < proto->protocol_list->count; i++) {
- m = lookup_protocol_method(proto->protocol_list->list[i], aSel,
- isRequiredMethod,isInstanceMethod,true);
- }
- }
-
- return m;
-}
-
-
-/***********************************************************************
-* protocol_getName
-* Returns the name of the given protocol.
-**********************************************************************/
-const char *protocol_getName(Protocol *p)
-{
- struct old_protocol *proto = oldprotocol(p);
- if (!proto) return "nil";
- return proto->protocol_name;
-}
-
-
-/***********************************************************************
-* protocol_getMethodDescription
-* Returns the description of a named method.
-* Searches either required or optional methods.
-* Searches either instance or class methods.
-**********************************************************************/
-struct objc_method_description
-protocol_getMethodDescription(Protocol *p, SEL aSel,
- BOOL isRequiredMethod, BOOL isInstanceMethod)
-{
- struct objc_method_description empty = {NULL, NULL};
- struct old_protocol *proto = oldprotocol(p);
- struct objc_method_description *desc;
- if (!proto) return empty;
-
- desc = lookup_protocol_method(proto, aSel,
- isRequiredMethod, isInstanceMethod, true);
- if (desc) return *desc;
- else return empty;
-}
-
-
-/***********************************************************************
-* protocol_copyMethodDescriptionList
-* Returns an array of method descriptions from a protocol.
-* Copies either required or optional methods.
-* Copies either instance or class methods.
-**********************************************************************/
-struct objc_method_description *
-protocol_copyMethodDescriptionList(Protocol *p,
- BOOL isRequiredMethod,
- BOOL isInstanceMethod,
- unsigned int *outCount)
-{
- struct objc_method_description_list *mlist = NULL;
- struct old_protocol *proto = oldprotocol(p);
- struct old_protocol_ext *ext;
- unsigned int i, count;
- struct objc_method_description *result;
-
- if (!proto) {
- if (outCount) *outCount = 0;
- return NULL;
- }
-
- if (isRequiredMethod) {
- if (isInstanceMethod) {
- mlist = proto->instance_methods;
- } else {
- mlist = proto->class_methods;
- }
- } else if ((ext = ext_for_protocol(proto))) {
- if (isInstanceMethod) {
- mlist = ext->optional_instance_methods;
- } else {
- mlist = ext->optional_class_methods;
- }
- }
-
- if (!mlist) {
- if (outCount) *outCount = 0;
- return NULL;
- }
-
- count = mlist->count;
- result =
- calloc(count + 1, sizeof(struct objc_method_description));
- for (i = 0; i < count; i++) {
- result[i] = mlist->list[i];
- }
-
- if (outCount) *outCount = count;
- return result;
-}
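A hedged usage sketch for the copier above: the array is calloc'd with a trailing zeroed entry and is caller-owned. It assumes the NSObject protocol has already been registered by a loaded image; the function name is hypothetical:

void sketch_dump_protocol_methods(void)
{
    Protocol *p = objc_getProtocol("NSObject");
    if (!p) return;                                    // protocol not registered yet

    unsigned int count = 0;
    struct objc_method_description *descs =
        protocol_copyMethodDescriptionList(p, YES /*required*/,
                                           YES /*instance*/, &count);
    for (unsigned int i = 0; i < count; i++) {
        // descs[i].name is the SEL, descs[i].types the @encode string
    }
    free(descs);                                       // free(NULL) is harmless
}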
-
-
-objc_property_t protocol_getProperty(Protocol *p, const char *name,
- BOOL isRequiredProperty, BOOL isInstanceProperty)
-{
- struct old_protocol *proto = oldprotocol(p);
- struct old_protocol_ext *ext;
- struct old_protocol_list *proto_list;
-
- if (!proto || !name) return NULL;
-
- if (!isRequiredProperty || !isInstanceProperty) {
- // Only required instance properties are currently supported
- return NULL;
- }
-
- if ((ext = ext_for_protocol(proto))) {
- struct old_property_list *plist;
- if ((plist = ext->instance_properties)) {
- uint32_t i;
- for (i = 0; i < plist->count; i++) {
- struct old_property *prop = property_list_nth(plist, i);
- if (0 == strcmp(name, prop->name)) {
- return (objc_property_t)prop;
- }
- }
- }
- }
-
- if ((proto_list = proto->protocol_list)) {
- int i;
- for (i = 0; i < proto_list->count; i++) {
- objc_property_t prop =
- protocol_getProperty((Protocol *)proto_list->list[i], name,
- isRequiredProperty, isInstanceProperty);
- if (prop) return prop;
- }
- }
-
- return NULL;
-}
-
-
-objc_property_t *protocol_copyPropertyList(Protocol *p, unsigned int *outCount)
-{
- struct old_property **result = NULL;
- struct old_protocol_ext *ext;
- struct old_property_list *plist;
-
- struct old_protocol *proto = oldprotocol(p);
- if (! (ext = ext_for_protocol(proto))) {
- if (outCount) *outCount = 0;
- return NULL;
- }
-
- plist = ext->instance_properties;
- result = copyPropertyList(plist, outCount);
-
- return (objc_property_t *)result;
-}
-
-
-/***********************************************************************
-* protocol_copyProtocolList
-* Copies this protocol's incorporated protocols.
-* Does not copy those protocols' incorporated protocols in turn.
-**********************************************************************/
-Protocol * __unsafe_unretained *
-protocol_copyProtocolList(Protocol *p, unsigned int *outCount)
-{
- unsigned int count = 0;
- Protocol **result = NULL;
- struct old_protocol *proto = oldprotocol(p);
-
- if (!proto) {
- if (outCount) *outCount = 0;
- return NULL;
- }
-
- if (proto->protocol_list) {
- count = (unsigned int)proto->protocol_list->count;
- }
- if (count > 0) {
- unsigned int i;
- result = malloc((count+1) * sizeof(Protocol *));
-
- for (i = 0; i < count; i++) {
- result[i] = (Protocol *)proto->protocol_list->list[i];
- }
- result[i] = NULL;
- }
-
- if (outCount) *outCount = count;
- return result;
-}
-
-
-BOOL protocol_conformsToProtocol(Protocol *self_gen, Protocol *other_gen)
-{
- struct old_protocol *self = oldprotocol(self_gen);
- struct old_protocol *other = oldprotocol(other_gen);
-
- if (!self || !other) {
- return NO;
- }
-
- if (0 == strcmp(self->protocol_name, other->protocol_name)) {
- return YES;
- }
-
- if (self->protocol_list) {
- int i;
- for (i = 0; i < self->protocol_list->count; i++) {
- struct old_protocol *proto = self->protocol_list->list[i];
- if (0 == strcmp(other->protocol_name, proto->protocol_name)) {
- return YES;
- }
- if (protocol_conformsToProtocol((Protocol *)proto, other_gen)) {
- return YES;
- }
- }
- }
-
- return NO;
-}
-
-
-BOOL protocol_isEqual(Protocol *self, Protocol *other)
-{
- if (self == other) return YES;
- if (!self || !other) return NO;
-
- if (!protocol_conformsToProtocol(self, other)) return NO;
- if (!protocol_conformsToProtocol(other, self)) return NO;
-
- return YES;
-}
-
-
-/***********************************************************************
-* _protocol_getMethodTypeEncoding
-* Return the @encode string for the requested protocol method.
-* Returns NULL if the compiler did not emit any extended @encode data.
-* Locking: runtimeLock must not be held by the caller
-**********************************************************************/
-const char *
-_protocol_getMethodTypeEncoding(Protocol *proto_gen, SEL sel,
- BOOL isRequiredMethod, BOOL isInstanceMethod)
-{
- struct old_protocol *proto = oldprotocol(proto_gen);
- if (!proto) return NULL;
- struct old_protocol_ext *ext = ext_for_protocol(proto);
- if (!ext) return NULL;
- if (ext->size < offsetof(struct old_protocol_ext, extendedMethodTypes) + sizeof(ext->extendedMethodTypes)) return NULL;
- if (! ext->extendedMethodTypes) return NULL;
-
- struct objc_method_description *m =
- lookup_protocol_method(proto, sel,
- isRequiredMethod, isInstanceMethod, false);
- if (!m) {
- // No method with that name. Search incorporated protocols.
- if (proto->protocol_list) {
- for (int i = 0; i < proto->protocol_list->count; i++) {
- const char *enc =
- _protocol_getMethodTypeEncoding((Protocol *)proto->protocol_list->list[i], sel, isRequiredMethod, isInstanceMethod);
- if (enc) return enc;
- }
- }
- return NULL;
- }
-
- int i = 0;
- if (isRequiredMethod && isInstanceMethod) {
- i += ((uintptr_t)m - (uintptr_t)proto->instance_methods) / sizeof(proto->instance_methods->list[0]);
- goto done;
- } else if (proto->instance_methods) {
- i += proto->instance_methods->count;
- }
-
- if (isRequiredMethod && !isInstanceMethod) {
- i += ((uintptr_t)m - (uintptr_t)proto->class_methods) / sizeof(proto->class_methods->list[0]);
- goto done;
- } else if (proto->class_methods) {
- i += proto->class_methods->count;
- }
-
- if (!isRequiredMethod && isInstanceMethod) {
- i += ((uintptr_t)m - (uintptr_t)ext->optional_instance_methods) / sizeof(ext->optional_instance_methods->list[0]);
- goto done;
- } else if (ext->optional_instance_methods) {
- i += ext->optional_instance_methods->count;
- }
-
- if (!isRequiredMethod && !isInstanceMethod) {
- i += ((uintptr_t)m - (uintptr_t)ext->optional_class_methods) / sizeof(ext->optional_class_methods->list[0]);
- goto done;
- } else if (ext->optional_class_methods) {
- i += ext->optional_class_methods->count;
- }
-
- done:
- return ext->extendedMethodTypes[i];
-}
-
-
-/***********************************************************************
-* objc_allocateProtocol
-* Creates a new protocol. The protocol may not be used until
-* objc_registerProtocol() is called.
-* Returns NULL if a protocol with the same name already exists.
-* Locking: acquires classLock
-**********************************************************************/
-Protocol *
-objc_allocateProtocol(const char *name)
-{
- Class cls = (Class)objc_getClass("__IncompleteProtocol");
-
- mutex_lock(&classLock);
-
- if (NXMapGet(protocol_map, name)) {
- mutex_unlock(&classLock);
- return NULL;
- }
-
- struct old_protocol *result = (struct old_protocol *)
- _calloc_internal(1, sizeof(struct old_protocol)
- + sizeof(struct old_protocol_ext));
- struct old_protocol_ext *ext = (struct old_protocol_ext *)(result+1);
-
- result->isa = cls;
- result->protocol_name = _strdup_internal(name);
- ext->size = sizeof(*ext);
-
- // fixme reserve name without installing
-
- NXMapInsert(protocol_ext_map, result, result+1);
-
- mutex_unlock(&classLock);
-
- return (Protocol *)result;
-}
-
-
-/***********************************************************************
-* objc_registerProtocol
-* Registers a newly-constructed protocol. The protocol is now
-* ready for use and immutable.
-* Locking: acquires classLock
-**********************************************************************/
-void objc_registerProtocol(Protocol *proto_gen)
-{
- struct old_protocol *proto = oldprotocol(proto_gen);
-
- Class oldcls = (Class)objc_getClass("__IncompleteProtocol");
- Class cls = (Class)objc_getClass("Protocol");
-
- mutex_lock(&classLock);
-
- if (proto->isa == cls) {
- _objc_inform("objc_registerProtocol: protocol '%s' was already "
- "registered!", proto->protocol_name);
- mutex_unlock(&classLock);
- return;
- }
- if (proto->isa != oldcls) {
- _objc_inform("objc_registerProtocol: protocol '%s' was not allocated "
- "with objc_allocateProtocol!", proto->protocol_name);
- mutex_unlock(&classLock);
- return;
- }
-
- proto->isa = cls;
-
- NXMapKeyCopyingInsert(protocol_map, proto->protocol_name, proto);
-
- mutex_unlock(&classLock);
-}
-
-
-/***********************************************************************
-* protocol_addProtocol
-* Adds an incorporated protocol to another protocol.
-* No method enforcement is performed.
-* `proto` must be under construction. `addition` must not.
-* Locking: acquires classLock
-**********************************************************************/
-void
-protocol_addProtocol(Protocol *proto_gen, Protocol *addition_gen)
-{
- struct old_protocol *proto = oldprotocol(proto_gen);
- struct old_protocol *addition = oldprotocol(addition_gen);
-
- Class cls = (Class)objc_getClass("__IncompleteProtocol");
-
- if (!proto_gen) return;
- if (!addition_gen) return;
-
- mutex_lock(&classLock);
-
- if (proto->isa != cls) {
- _objc_inform("protocol_addProtocol: modified protocol '%s' is not "
- "under construction!", proto->protocol_name);
- mutex_unlock(&classLock);
- return;
- }
- if (addition->isa == cls) {
- _objc_inform("protocol_addProtocol: added protocol '%s' is still "
- "under construction!", addition->protocol_name);
- mutex_unlock(&classLock);
- return;
- }
-
- struct old_protocol_list *protolist = proto->protocol_list;
- if (protolist) {
- size_t size = sizeof(*protolist)
- + protolist->count * sizeof(protolist->list[0]);
- protolist = (struct old_protocol_list *)
- _realloc_internal(protolist, size);
- } else {
- protolist = (struct old_protocol_list *)
- _calloc_internal(1, sizeof(struct old_protocol_list));
- }
-
- protolist->list[protolist->count++] = addition;
- proto->protocol_list = protolist;
-
- mutex_unlock(&classLock);
-}
-
-
-/***********************************************************************
-* protocol_addMethodDescription
-* Adds a method to a protocol. The protocol must be under construction.
-* Locking: acquires classLock
-**********************************************************************/
-static void
-_protocol_addMethod(struct objc_method_description_list **list, SEL name, const char *types)
-{
- if (!*list) {
- *list = (struct objc_method_description_list *)
- _calloc_internal(sizeof(struct objc_method_description_list), 1);
- } else {
- size_t size = sizeof(struct objc_method_description_list)
- + (*list)->count * sizeof(struct objc_method_description);
- *list = (struct objc_method_description_list *)
- _realloc_internal(*list, size);
- }
-
- struct objc_method_description *desc = &(*list)->list[(*list)->count++];
- desc->name = name;
- desc->types = _strdup_internal(types ?: "");
-}
-
-void
-protocol_addMethodDescription(Protocol *proto_gen, SEL name, const char *types,
- BOOL isRequiredMethod, BOOL isInstanceMethod)
-{
- struct old_protocol *proto = oldprotocol(proto_gen);
-
- Class cls = (Class)objc_getClass("__IncompleteProtocol");
-
- if (!proto_gen) return;
-
- mutex_lock(&classLock);
-
- if (proto->isa != cls) {
- _objc_inform("protocol_addMethodDescription: protocol '%s' is not "
- "under construction!", proto->protocol_name);
- mutex_unlock(&classLock);
- return;
- }
-
- if (isRequiredMethod && isInstanceMethod) {
- _protocol_addMethod(&proto->instance_methods, name, types);
- } else if (isRequiredMethod && !isInstanceMethod) {
- _protocol_addMethod(&proto->class_methods, name, types);
- } else if (!isRequiredMethod && isInstanceMethod) {
- struct old_protocol_ext *ext = (struct old_protocol_ext *)(proto+1);
- _protocol_addMethod(&ext->optional_instance_methods, name, types);
- } else /* !isRequiredMethod && !isInstanceMethod) */ {
- struct old_protocol_ext *ext = (struct old_protocol_ext *)(proto+1);
- _protocol_addMethod(&ext->optional_class_methods, name, types);
- }
-
- mutex_unlock(&classLock);
-}
-
-
-/***********************************************************************
-* protocol_addProperty
-* Adds a property to a protocol. The protocol must be under construction.
-* Locking: acquires classLock
-**********************************************************************/
-static void
-_protocol_addProperty(struct old_property_list **plist, const char *name,
- const objc_property_attribute_t *attrs,
- unsigned int count)
-{
- if (!*plist) {
- *plist = (struct old_property_list *)
- _calloc_internal(sizeof(struct old_property_list), 1);
- (*plist)->entsize = sizeof(struct old_property);
- } else {
- *plist = (struct old_property_list *)
- _realloc_internal(*plist, sizeof(struct old_property_list)
- + (*plist)->count * (*plist)->entsize);
- }
-
- struct old_property *prop = property_list_nth(*plist, (*plist)->count++);
- prop->name = _strdup_internal(name);
- prop->attributes = copyPropertyAttributeString(attrs, count);
-}
-
-void
-protocol_addProperty(Protocol *proto_gen, const char *name,
- const objc_property_attribute_t *attrs,
- unsigned int count,
- BOOL isRequiredProperty, BOOL isInstanceProperty)
-{
- struct old_protocol *proto = oldprotocol(proto_gen);
-
- Class cls = (Class)objc_getClass("__IncompleteProtocol");
-
- if (!proto) return;
- if (!name) return;
-
- mutex_lock(&classLock);
-
- if (proto->isa != cls) {
- _objc_inform("protocol_addProperty: protocol '%s' is not "
- "under construction!", proto->protocol_name);
- mutex_unlock(&classLock);
- return;
- }
-
- struct old_protocol_ext *ext = ext_for_protocol(proto);
-
- if (isRequiredProperty && isInstanceProperty) {
- _protocol_addProperty(&ext->instance_properties, name, attrs, count);
- }
- //else if (isRequiredProperty && !isInstanceProperty) {
- // _protocol_addProperty(&ext->class_properties, name, attrs, count);
- //} else if (!isRequiredProperty && isInstanceProperty) {
- // _protocol_addProperty(&ext->optional_instance_properties, name, attrs, count);
- //} else /* !isRequiredProperty && !isInstanceProperty) */ {
- // _protocol_addProperty(&ext->optional_class_properties, name, attrs, count);
- //}
-
- mutex_unlock(&classLock);
-}
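Taken together, the construction calls above allow a protocol to be built at runtime. A hedged end-to-end sketch; the protocol name, selector, and type encoding are made up, and the NSObject protocol is assumed to be registered already:

void sketch_build_protocol(void)
{
    Protocol *proto = objc_allocateProtocol("SketchRuntimeProtocol");
    if (!proto) return;                       // a protocol with this name exists

    // required instance method, e.g. -(void)doWork ("v@:" = void return, self, _cmd)
    protocol_addMethodDescription(proto, sel_registerName("doWork"), "v@:",
                                  YES /*required*/, YES /*instance*/);

    Protocol *nsobject = objc_getProtocol("NSObject");
    if (nsobject) protocol_addProtocol(proto, nsobject);

    objc_registerProtocol(proto);             // now immutable and visible to lookups
}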
-
-
-/***********************************************************************
-* _objc_fixup_protocol_objects_for_image. For each protocol in the
-* specified image, selectorize the method names and add to the protocol hash.
-**********************************************************************/
-
-static BOOL versionIsExt(uintptr_t version, const char *names, size_t size)
-{
- // CodeWarrior used isa field for string "Protocol"
- // from section __OBJC,__class_names. rdar://4951638
- // gcc (10.4 and earlier) used isa field for version number;
- // the only version number used on Mac OS X was 2.
- // gcc (10.5 and later) uses isa field for ext pointer
-
- if (version < 4096) {
- return NO;
- }
-
- if (version >= (uintptr_t)names && version < (uintptr_t)(names + size)) {
- return NO;
- }
-
- return YES;
-}
-
-static void fix_protocol(struct old_protocol *proto, Class protocolClass,
- BOOL isBundle, const char *names, size_t names_size)
-{
- uintptr_t version;
- if (!proto) return;
-
- version = (uintptr_t)proto->isa;
-
- // Set the protocol's isa
- proto->isa = protocolClass;
-
- // Fix up method lists
- // fixme share across duplicates
- map_method_descs (proto->instance_methods, isBundle);
- map_method_descs (proto->class_methods, isBundle);
-
- // Fix up ext, if any
- if (versionIsExt(version, names, names_size)) {
- struct old_protocol_ext *ext = (struct old_protocol_ext *)version;
- NXMapInsert(protocol_ext_map, proto, ext);
- map_method_descs (ext->optional_instance_methods, isBundle);
- map_method_descs (ext->optional_class_methods, isBundle);
- }
-
- // Record the protocol if we don't have one with this name yet
- // fixme bundles - copy protocol
- // fixme unloading
- if (!NXMapGet(protocol_map, proto->protocol_name)) {
- NXMapKeyCopyingInsert(protocol_map, proto->protocol_name, proto);
- if (PrintProtocols) {
- _objc_inform("PROTOCOLS: protocol at %p is %s",
- proto, proto->protocol_name);
- }
- } else {
- // duplicate - do nothing
- if (PrintProtocols) {
- _objc_inform("PROTOCOLS: protocol at %p is %s (duplicate)",
- proto, proto->protocol_name);
- }
- }
-}
-
-static void _objc_fixup_protocol_objects_for_image (header_info * hi)
-{
- Class protocolClass = (Class)objc_getClass("Protocol");
- size_t count, i;
- struct old_protocol **protos;
- int isBundle = headerIsBundle(hi);
- const char *names;
- size_t names_size;
-
- mutex_lock(&classLock);
-
- // Allocate the protocol registry if necessary.
- if (!protocol_map) {
- protocol_map =
- NXCreateMapTableFromZone(NXStrValueMapPrototype, 32,
- _objc_internal_zone());
- }
- if (!protocol_ext_map) {
- protocol_ext_map =
- NXCreateMapTableFromZone(NXPtrValueMapPrototype, 32,
- _objc_internal_zone());
- }
-
- protos = _getObjcProtocols(hi, &count);
- names = _getObjcClassNames(hi, &names_size);
- for (i = 0; i < count; i++) {
- fix_protocol(protos[i], protocolClass, isBundle, names, names_size);
- }
-
- mutex_unlock(&classLock);
-}
-
-
-/***********************************************************************
-* _objc_fixup_selector_refs. Register all of the selectors in each
-* image, and fix them all up.
-**********************************************************************/
-static void _objc_fixup_selector_refs (const header_info *hi)
-{
- size_t count;
- SEL *sels;
-
- if (PrintPreopt) {
- if (sel_preoptimizationValid(hi)) {
- _objc_inform("PREOPTIMIZATION: honoring preoptimized selectors in %s",
- hi->fname);
- }
- else if (_objcHeaderOptimizedByDyld(hi)) {
- _objc_inform("PREOPTIMIZATION: IGNORING preoptimized selectors in %s",
- hi->fname);
- }
- }
-
- if (sel_preoptimizationValid(hi)) return;
-
- sels = _getObjcSelectorRefs (hi, &count);
-
- map_selrefs(sels, count, headerIsBundle(hi));
-}
-
-static inline BOOL _is_threaded() {
-#if TARGET_OS_WIN32
- return YES;
-#else
- return pthread_is_threaded_np() != 0;
-#endif
-}
-
-#if !TARGET_OS_WIN32
-/***********************************************************************
-* unmap_image
-* Process the given image which is about to be unmapped by dyld.
-* mh is mach_header instead of headerType because that's what
-* dyld_priv.h says even for 64-bit.
-**********************************************************************/
-void
-unmap_image(const struct mach_header *mh, intptr_t vmaddr_slide)
-{
- recursive_mutex_lock(&loadMethodLock);
- unmap_image_nolock(mh);
- recursive_mutex_unlock(&loadMethodLock);
-}
-
-
-/***********************************************************************
-* map_images
-* Process the given images which are being mapped in by dyld.
-* Calls ABI-agnostic code after taking ABI-specific locks.
-**********************************************************************/
-const char *
-map_images(enum dyld_image_states state, uint32_t infoCount,
- const struct dyld_image_info infoList[])
-{
- const char *err;
-
- recursive_mutex_lock(&loadMethodLock);
- err = map_images_nolock(state, infoCount, infoList);
- recursive_mutex_unlock(&loadMethodLock);
-
- return err;
-}
-
-
-/***********************************************************************
-* load_images
-* Process +load in the given images which are being mapped in by dyld.
-* Calls ABI-agnostic code after taking ABI-specific locks.
-*
-* Locking: acquires classLock and loadMethodLock
-**********************************************************************/
-const char *
-load_images(enum dyld_image_states state, uint32_t infoCount,
- const struct dyld_image_info infoList[])
-{
- BOOL found;
-
- recursive_mutex_lock(&loadMethodLock);
-
- // Discover +load methods
- found = load_images_nolock(state, infoCount, infoList);
-
- // Call +load methods (without classLock - re-entrant)
- if (found) {
- call_load_methods();
- }
-
- recursive_mutex_unlock(&loadMethodLock);
-
- return NULL;
-}
-#endif
-
-
-/***********************************************************************
-* _read_images
-* Perform metadata processing for hCount images starting with firstNewHeader
-**********************************************************************/
-void _read_images(header_info **hList, uint32_t hCount)
-{
- uint32_t i;
- BOOL categoriesLoaded = NO;
-
- if (!class_hash) _objc_init_class_hash();
-
- // Parts of this order are important for correctness or performance.
-
- // Read classes from all images.
- for (i = 0; i < hCount; i++) {
- _objc_read_classes_from_image(hList[i]);
- }
-
- // Read categories from all images.
- // But not if any other threads are running - they might
- // call a category method before the fixups below are complete.
- if (!_is_threaded()) {
- BOOL needFlush = NO;
- for (i = 0; i < hCount; i++) {
- needFlush |= _objc_read_categories_from_image(hList[i]);
- }
- if (needFlush) flush_marked_caches();
- categoriesLoaded = YES;
- }
-
- // Connect classes from all images.
- for (i = 0; i < hCount; i++) {
- _objc_connect_classes_from_image(hList[i]);
- }
-
- // Fix up class refs, selector refs, and protocol objects from all images.
- for (i = 0; i < hCount; i++) {
- _objc_map_class_refs_for_image(hList[i]);
- _objc_fixup_selector_refs(hList[i]);
- _objc_fixup_protocol_objects_for_image(hList[i]);
- }
-
- // Read categories from all images.
- // But not if this is the only thread - it's more
- // efficient to attach categories earlier if safe.
- if (!categoriesLoaded) {
- BOOL needFlush = NO;
- for (i = 0; i < hCount; i++) {
- needFlush |= _objc_read_categories_from_image(hList[i]);
- }
- if (needFlush) flush_marked_caches();
- }
-
- // Multi-threaded category load MUST BE LAST to avoid a race.
-}
-
-
-/***********************************************************************
-* prepare_load_methods
-* Schedule +load for classes in this image, any un-+load-ed
-* superclasses in other images, and any categories in this image.
-**********************************************************************/
-// Recursively schedule +load for cls and any un-+load-ed superclasses.
-// cls must already be connected.
-static void schedule_class_load(struct old_class *cls)
-{
- if (cls->info & CLS_LOADED) return;
- if (cls->super_class) schedule_class_load(cls->super_class);
- add_class_to_loadable_list((Class)cls);
- cls->info |= CLS_LOADED;
-}
-
-void prepare_load_methods(header_info *hi)
-{
- Module mods;
- unsigned int midx;
-
-
- if (_objcHeaderIsReplacement(hi)) {
- // Ignore any classes in this image
- return;
- }
-
- // Major loop - process all modules in the image
- mods = hi->mod_ptr;
- for (midx = 0; midx < hi->mod_count; midx += 1)
- {
- unsigned int index;
-
- // Skip module containing no classes
- if (mods[midx].symtab == NULL)
- continue;
-
- // Minor loop - process all the classes in given module
- for (index = 0; index < mods[midx].symtab->cls_def_cnt; index += 1)
- {
- // Locate the class description pointer
- struct old_class *cls = mods[midx].symtab->defs[index];
- if (cls->info & CLS_CONNECTED) {
- schedule_class_load(cls);
- }
- }
- }
-
-
- // Major loop - process all modules in the header
- mods = hi->mod_ptr;
-
- // NOTE: The module and category lists are traversed backwards
- // to preserve the pre-10.4 processing order. Changing the order
- // would have a small chance of introducing binary compatibility bugs.
- midx = (unsigned int)hi->mod_count;
- while (midx-- > 0) {
- unsigned int index;
- unsigned int total;
- Symtab symtab = mods[midx].symtab;
-
- // Nothing to do for a module without a symbol table
- if (mods[midx].symtab == NULL)
- continue;
- // Total entries in symbol table (class entries followed
- // by category entries)
- total = mods[midx].symtab->cls_def_cnt +
- mods[midx].symtab->cat_def_cnt;
-
- // Minor loop - register all categories from given module
- index = total;
- while (index-- > mods[midx].symtab->cls_def_cnt) {
- struct old_category *cat = symtab->defs[index];
- add_category_to_loadable_list((Category)cat);
- }
- }
-}
-
-
-#if TARGET_OS_WIN32
-
-void unload_class(struct old_class *cls)
-{
-}
-
-#else
-
-/***********************************************************************
-* _objc_remove_classes_in_image
-* Remove all classes in the given image from the runtime, because
-* the image is about to be unloaded.
-* Things to clean up:
-* class_hash
-* unconnected_class_hash
-* pending subclasses list (only if class is still unconnected)
-* loadable class list
-* class's method caches
-* class refs in all other images
-**********************************************************************/
-// Re-pend any class references in refs that point into [start..end)
-static void rependClassReferences(struct old_class **refs, size_t count,
- uintptr_t start, uintptr_t end)
-{
- size_t i;
-
- if (!refs) return;
-
- // Process each class ref
- for (i = 0; i < count; i++) {
- if ((uintptr_t)(refs[i]) >= start && (uintptr_t)(refs[i]) < end) {
- pendClassReference(&refs[i], refs[i]->name,
- (refs[i]->info & CLS_META) ? YES : NO);
- refs[i] = NULL;
- }
- }
-}
-
-
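-// Free p, but only if it is actually a malloc'd heap block.
-// Pointers into an image's read-only data (e.g. compiler-emitted
-// type strings) are left alone, because malloc_size() is zero for them.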
-void try_free(const void *p)
-{
- if (p && malloc_size(p)) free((void *)p);
-}
-
-// Deallocate all memory in a method list
-static void unload_mlist(struct old_method_list *mlist)
-{
- int i;
- for (i = 0; i < mlist->method_count; i++) {
- try_free(mlist->method_list[i].method_types);
- }
- try_free(mlist);
-}
-
-static void unload_property_list(struct old_property_list *proplist)
-{
- uint32_t i;
-
- if (!proplist) return;
-
- for (i = 0; i < proplist->count; i++) {
- struct old_property *prop = property_list_nth(proplist, i);
- try_free(prop->name);
- try_free(prop->attributes);
- }
- try_free(proplist);
-}
-
-
-// Deallocate all memory in a class.
-void unload_class(struct old_class *cls)
-{
- // Free method cache
- // This dereferences the cache contents; do this before freeing methods
- if (cls->cache && cls->cache != &_objc_empty_cache) {
- _cache_free(cls->cache);
- }
-
- // Free ivar lists
- if (cls->ivars) {
- int i;
- for (i = 0; i < cls->ivars->ivar_count; i++) {
- try_free(cls->ivars->ivar_list[i].ivar_name);
- try_free(cls->ivars->ivar_list[i].ivar_type);
- }
- try_free(cls->ivars);
- }
-
- // Free fixed-up method lists and method list array
- if (cls->methodLists) {
- // more than zero method lists
- if (cls->info & CLS_NO_METHOD_ARRAY) {
- // one method list
- unload_mlist((struct old_method_list *)cls->methodLists);
- }
- else {
- // more than one method list
- struct old_method_list **mlistp;
- for (mlistp = cls->methodLists;
- *mlistp != NULL && *mlistp != END_OF_METHODS_LIST;
- mlistp++)
- {
- unload_mlist(*mlistp);
- }
- free(cls->methodLists);
- }
- }
-
- // Free protocol list
- struct old_protocol_list *protos = cls->protocols;
- while (protos) {
- struct old_protocol_list *dead = protos;
- protos = protos->next;
- try_free(dead);
- }
-
- if ((cls->info & CLS_EXT)) {
- if (cls->ext) {
- // Free property lists and property list array
- if (cls->ext->propertyLists) {
- // more than zero property lists
- if (cls->info & CLS_NO_PROPERTY_ARRAY) {
- // one property list
- struct old_property_list *proplist =
- (struct old_property_list *)cls->ext->propertyLists;
- unload_property_list(proplist);
- } else {
- // more than one property list
- struct old_property_list **plistp;
- for (plistp = cls->ext->propertyLists;
- *plistp != NULL;
- plistp++)
- {
- unload_property_list(*plistp);
- }
- try_free(cls->ext->propertyLists);
- }
- }
-
- // Free weak ivar layout
- try_free(cls->ext->weak_ivar_layout);
-
- // Free ext
- try_free(cls->ext);
- }
-
- // Free non-weak ivar layout
- try_free(cls->ivar_layout);
- }
-
- // Free class name
- try_free(cls->name);
-
- // Free cls
- try_free(cls);
-}
-
-
-static void _objc_remove_classes_in_image(header_info *hi)
-{
- unsigned int index;
- unsigned int midx;
- Module mods;
-
- mutex_lock(&classLock);
-
- // Major loop - process all modules in the image
- mods = hi->mod_ptr;
- for (midx = 0; midx < hi->mod_count; midx += 1)
- {
- // Skip module containing no classes
- if (mods[midx].symtab == NULL)
- continue;
-
- // Minor loop - process all the classes in given module
- for (index = 0; index < mods[midx].symtab->cls_def_cnt; index += 1)
- {
- struct old_class *cls;
-
- // Locate the class description pointer
- cls = mods[midx].symtab->defs[index];
-
- // Remove from loadable class list, if present
- remove_class_from_loadable_list((Class)cls);
-
- // Remove from unconnected_class_hash and pending subclasses
- if (unconnected_class_hash && NXHashMember(unconnected_class_hash, cls)) {
- NXHashRemove(unconnected_class_hash, cls);
- if (pendingSubclassesMap) {
- // Find this class in its superclass's pending list
- char *supercls_name = (char *)cls->super_class;
- PendingSubclass *pending =
- NXMapGet(pendingSubclassesMap, supercls_name);
- for ( ; pending != NULL; pending = pending->next) {
- if (pending->subclass == cls) {
- pending->subclass = Nil;
- break;
- }
- }
- }
- }
-
- // Remove from class_hash
- NXHashRemove(class_hash, cls);
- objc_removeRegisteredClass((Class)cls);
-
- // Free heap memory pointed to by the class
- unload_class(cls->isa);
- unload_class(cls);
- }
- }
-
-
- // Search all other images for class refs that point back to this range.
- // Un-fix and re-pend any such class refs.
-
- // Get the location of the dying image's __OBJC segment
- uintptr_t seg;
- unsigned long seg_size;
- seg = (uintptr_t)getsegmentdata(hi->mhdr, "__OBJC", &seg_size);
-
- header_info *other_hi;
- for (other_hi = FirstHeader; other_hi != NULL; other_hi = other_hi->next) {
- struct old_class **other_refs;
- size_t count;
- if (other_hi == hi) continue; // skip the image being unloaded
-
- // Fix class refs in the other image
- other_refs = _getObjcClassRefs(other_hi, &count);
- rependClassReferences(other_refs, count, seg, seg+seg_size);
- }
-
- mutex_unlock(&classLock);
-}
-
-
-/***********************************************************************
-* _objc_remove_categories_in_image
-* Remove all categories in the given image from the runtime, because
-* the image is about to be unloaded.
-* Things to clean up:
-* unresolved category list
-* loadable category list
-**********************************************************************/
-static void _objc_remove_categories_in_image(header_info *hi)
-{
- Module mods;
- unsigned int midx;
-
- // Major loop - process all modules in the header
- mods = hi->mod_ptr;
-
- for (midx = 0; midx < hi->mod_count; midx++) {
- unsigned int index;
- unsigned int total;
- Symtab symtab = mods[midx].symtab;
-
- // Nothing to do for a module without a symbol table
- if (symtab == NULL) continue;
-
- // Total entries in symbol table (class entries followed
- // by category entries)
- total = symtab->cls_def_cnt + symtab->cat_def_cnt;
-
- // Minor loop - check all categories from given module
- for (index = symtab->cls_def_cnt; index < total; index++) {
- struct old_category *cat = symtab->defs[index];
-
- // Clean up loadable category list
- remove_category_from_loadable_list((Category)cat);
-
- // Clean up category_hash
- if (category_hash) {
- _objc_unresolved_category *cat_entry =
- NXMapGet(category_hash, cat->class_name);
- for ( ; cat_entry != NULL; cat_entry = cat_entry->next) {
- if (cat_entry->cat == cat) {
- cat_entry->cat = NULL;
- break;
- }
- }
- }
- }
- }
-}
-
-
-/***********************************************************************
-* unload_paranoia
-* Various paranoid debugging checks that look for poorly-behaving
-* unloadable bundles.
-* Called by _objc_unmap_image when OBJC_UNLOAD_DEBUG is set.
-**********************************************************************/
-static void unload_paranoia(header_info *hi)
-{
- // Get the location of the dying image's __OBJC segment
- uintptr_t seg;
- unsigned long seg_size;
- seg = (uintptr_t)getsegmentdata(hi->mhdr, "__OBJC", &seg_size);
-
- _objc_inform("UNLOAD DEBUG: unloading image '%s' [%p..%p]",
- hi->fname, (void *)seg, (void*)(seg+seg_size));
-
- mutex_lock(&classLock);
-
- // Make sure the image contains no categories on surviving classes.
- {
- Module mods;
- unsigned int midx;
-
- // Major loop - process all modules in the header
- mods = hi->mod_ptr;
-
- for (midx = 0; midx < hi->mod_count; midx++) {
- unsigned int index;
- unsigned int total;
- Symtab symtab = mods[midx].symtab;
-
- // Nothing to do for a module without a symbol table
- if (symtab == NULL) continue;
-
- // Total entries in symbol table (class entries followed
- // by category entries)
- total = symtab->cls_def_cnt + symtab->cat_def_cnt;
-
- // Minor loop - check all categories from given module
- for (index = symtab->cls_def_cnt; index < total; index++) {
- struct old_category *cat = symtab->defs[index];
- struct old_class query;
-
- query.name = cat->class_name;
- if (NXHashMember(class_hash, &query)) {
- _objc_inform("UNLOAD DEBUG: dying image contains category '%s(%s)' on surviving class '%s'!", cat->class_name, cat->category_name, cat->class_name);
- }
- }
- }
- }
-
- // Make sure no surviving class is in the dying image.
- // Make sure no surviving class has a superclass in the dying image.
- // fixme check method implementations too
- {
- struct old_class *cls;
- NXHashState state;
-
- state = NXInitHashState(class_hash);
- while (NXNextHashState(class_hash, &state, (void **)&cls)) {
- if ((vm_address_t)cls >= seg &&
- (vm_address_t)cls < seg+seg_size)
- {
- _objc_inform("UNLOAD DEBUG: dying image contains surviving class '%s'!", cls->name);
- }
-
- if ((vm_address_t)cls->super_class >= seg &&
- (vm_address_t)cls->super_class < seg+seg_size)
- {
- _objc_inform("UNLOAD DEBUG: dying image contains superclass '%s' of surviving class '%s'!", cls->super_class->name, cls->name);
- }
- }
- }
-
- mutex_unlock(&classLock);
-}
-
-
-/***********************************************************************
-* _unload_image
-* Only handles MH_BUNDLE for now.
-* Locking: loadMethodLock acquired by unmap_image
-**********************************************************************/
-void _unload_image(header_info *hi)
-{
- recursive_mutex_assert_locked(&loadMethodLock);
-
- // Cleanup:
- // Remove image's classes from the class list and free auxiliary data.
- // Remove image's unresolved or loadable categories and free auxiliary data
- // Remove image's unresolved class refs.
- _objc_remove_classes_in_image(hi);
- _objc_remove_categories_in_image(hi);
- _objc_remove_pending_class_refs_in_image(hi);
-
- // Perform various debugging checks if requested.
- if (DebugUnload) unload_paranoia(hi);
-}
-
-#endif
-
-
-/***********************************************************************
-* objc_addClass. Add the specified class to the table of known classes,
-* after doing a little verification and fixup.
-**********************************************************************/
-void objc_addClass (Class cls_gen)
-{
- struct old_class *cls = oldcls(cls_gen);
-
- OBJC_WARN_DEPRECATED;
-
- // Synchronize access to hash table
- mutex_lock (&classLock);
-
- // Make sure both the class and the metaclass have caches!
- // Clear all bits of the info fields except CLS_CLASS and CLS_META.
- // Normally these bits are already clear but if someone tries to cons
- // up their own class on the fly they might need to be cleared.
- if (cls->cache == NULL) {
- cls->cache = (Cache) &_objc_empty_cache;
- cls->info = CLS_CLASS;
- }
-
- if (cls->isa->cache == NULL) {
- cls->isa->cache = (Cache) &_objc_empty_cache;
- cls->isa->info = CLS_META;
- }
-
- // methodLists should be:
- // 1. NULL (Tiger and later only)
- // 2. A -1 terminated method list array
- // In either case, CLS_NO_METHOD_ARRAY remains clear.
- // If the user manipulates the method list directly,
- // they must use the magic private format.
-
- // Add the class to the table
- (void) NXHashInsert (class_hash, cls);
- objc_addRegisteredClass((Class)cls);
-
- // Superclass is no longer a leaf for cache flushing
- if (cls->super_class && (cls->super_class->info & CLS_LEAF)) {
- _class_clearInfo((Class)cls->super_class, CLS_LEAF);
- _class_clearInfo((Class)cls->super_class->isa, CLS_LEAF);
- }
-
- // Desynchronize
- mutex_unlock (&classLock);
-}
-
-/***********************************************************************
-* _objcTweakMethodListPointerForClass.
-* Change the class's method list pointer to a method list array.
-* Does nothing if the method list pointer is already a method list array.
-* If the class is currently in use, methodListLock must be held by the caller.
-**********************************************************************/
-static void _objcTweakMethodListPointerForClass(struct old_class *cls)
-{
- struct old_method_list * originalList;
- const int initialEntries = 4;
- size_t mallocSize;
- struct old_method_list ** ptr;
-
- // Do nothing if methodLists is already an array.
- if (cls->methodLists && !(cls->info & CLS_NO_METHOD_ARRAY)) return;
-
- // Remember existing list
- originalList = (struct old_method_list *) cls->methodLists;
-
- // Allocate and zero a method list array
- mallocSize = sizeof(struct old_method_list *) * initialEntries;
- ptr = (struct old_method_list **) _calloc_internal(1, mallocSize);
-
- // Insert the existing list into the array
- ptr[initialEntries - 1] = END_OF_METHODS_LIST;
- ptr[0] = originalList;
-
- // Replace existing list with array
- cls->methodLists = ptr;
- _class_clearInfo((Class)cls, CLS_NO_METHOD_ARRAY);
-}
-
-
-/***********************************************************************
-* _objc_insertMethods.
-* Adds methods to a class.
-* Does not flush any method caches.
-* Does not take any locks.
-* If the class is already in use, use class_addMethods() instead.
-**********************************************************************/
-void _objc_insertMethods(struct old_class *cls,
- struct old_method_list *mlist,
- struct old_category *cat)
-{
- struct old_method_list ***list;
- struct old_method_list **ptr;
- ptrdiff_t endIndex;
- size_t oldSize;
- size_t newSize;
-
- if (!cls->methodLists) {
- // cls has no methods - simply use this method list
- cls->methodLists = (struct old_method_list **)mlist;
- _class_setInfo((Class)cls, CLS_NO_METHOD_ARRAY);
- return;
- }
-
- // Log any existing methods being replaced
- if (PrintReplacedMethods) {
- int i;
- for (i = 0; i < mlist->method_count; i++) {
- extern IMP findIMPInClass(struct old_class *cls, SEL sel);
- SEL sel = sel_registerName((char *)mlist->method_list[i].method_name);
- IMP newImp = mlist->method_list[i].method_imp;
- IMP oldImp;
-
- if ((oldImp = findIMPInClass(cls, sel))) {
- logReplacedMethod(cls->name, sel, ISMETA(cls),
- cat ? cat->category_name : NULL,
- oldImp, newImp);
- }
- }
- }
-
- // Create method list array if necessary
- _objcTweakMethodListPointerForClass(cls);
-
- list = &cls->methodLists;
-
- // Locate unused entry for insertion point
- ptr = *list;
- while ((*ptr != 0) && (*ptr != END_OF_METHODS_LIST))
- ptr += 1;
-
- // If array is full, add to it
- if (*ptr == END_OF_METHODS_LIST)
- {
- // Calculate old and new dimensions
- endIndex = ptr - *list;
- oldSize = (endIndex + 1) * sizeof(void *);
- newSize = oldSize + sizeof(struct old_method_list *); // only increase by 1
-
- // Grow the method list array by one.
- // This block may be from user code; don't use _realloc_internal
- *list = (struct old_method_list **)realloc(*list, newSize);
-
- // Zero out additional part of new array
- bzero (&((*list)[endIndex]), newSize - oldSize);
-
- // Place new end marker
- (*list)[(newSize/sizeof(void *)) - 1] = END_OF_METHODS_LIST;
-
- // Insertion point corresponds to old array end
- ptr = &((*list)[endIndex]);
- }
-
- // Right shift existing entries by one
- bcopy (*list, (*list) + 1, (uint8_t *)ptr - (uint8_t *)*list);
-
- // Insert new method list at beginning of array
- **list = mlist;
-}
-
-/***********************************************************************
-* _objc_removeMethods.
-* Remove methods from a class.
-* Does not take any locks.
-* Does not flush any method caches.
-* If the class is currently in use, use class_removeMethods() instead.
-**********************************************************************/
-void _objc_removeMethods(struct old_class *cls,
- struct old_method_list *mlist)
-{
- struct old_method_list ***list;
- struct old_method_list **ptr;
-
- if (cls->methodLists == NULL) {
- // cls has no methods
- return;
- }
- if (cls->methodLists == (struct old_method_list **)mlist) {
- // mlist is the class's only method list - erase it
- cls->methodLists = NULL;
- return;
- }
- if (cls->info & CLS_NO_METHOD_ARRAY) {
- // cls has only one method list, and this isn't it - do nothing
- return;
- }
-
- // cls has a method list array - search it
-
- list = &cls->methodLists;
-
- // Locate list in the array
- ptr = *list;
- while (*ptr != mlist) {
- // fix for radar # 2538790
- if ( *ptr == END_OF_METHODS_LIST ) return;
- ptr += 1;
- }
-
- // Remove this entry
- *ptr = 0;
-
- // Left shift the following entries
- while (*(++ptr) != END_OF_METHODS_LIST)
- *(ptr-1) = *ptr;
- *(ptr-1) = 0;
-}
-
-/***********************************************************************
-* _objc_add_category. Install the specified category's methods and
-* protocols into the class it augments.
-* The class is assumed not to be in use yet: no locks are taken and
-* no method caches are flushed.
-**********************************************************************/
-static inline void _objc_add_category(struct old_class *cls, struct old_category *category, int version)
-{
- if (PrintConnecting) {
- _objc_inform("CONNECT: attaching category '%s (%s)'", cls->name, category->category_name);
- }
-
- // Augment instance methods
- if (category->instance_methods)
- _objc_insertMethods (cls, category->instance_methods, category);
-
- // Augment class methods
- if (category->class_methods)
- _objc_insertMethods (cls->isa, category->class_methods, category);
-
- // Augment protocols
- if ((version >= 5) && category->protocols)
- {
- if (cls->isa->version >= 5)
- {
- category->protocols->next = cls->protocols;
- cls->protocols = category->protocols;
- cls->isa->protocols = category->protocols;
- }
- else
- {
- _objc_inform ("unable to add protocols from category %s...\n", category->category_name);
- _objc_inform ("class `%s' must be recompiled\n", category->class_name);
- }
- }
-
- // Augment properties
- if (version >= 7 && category->instance_properties) {
- if (cls->isa->version >= 6) {
- _class_addProperties(cls, category->instance_properties);
- } else {
- _objc_inform ("unable to add properties from category %s...\n", category->category_name);
- _objc_inform ("class `%s' must be recompiled\n", category->class_name);
- }
- }
-}
-
-/***********************************************************************
-* _objc_add_category_flush_caches. Install the specified category's
-* methods into the class it augments, and flush the class' method cache.
-* Return YES if some method caches now need to be flushed.
-**********************************************************************/
-static BOOL _objc_add_category_flush_caches(struct old_class *cls, struct old_category *category, int version)
-{
- BOOL needFlush = NO;
-
- // Install the category's methods into its intended class
- mutex_lock(&methodListLock);
- _objc_add_category (cls, category, version);
- mutex_unlock(&methodListLock);
-
- // Queue for cache flushing so category's methods can get called
- if (category->instance_methods) {
- _class_setInfo((Class)cls, CLS_FLUSH_CACHE);
- needFlush = YES;
- }
- if (category->class_methods) {
- _class_setInfo((Class)cls->isa, CLS_FLUSH_CACHE);
- needFlush = YES;
- }
-
- return needFlush;
-}
-
-
-/***********************************************************************
-* reverse_cat
-* Reverse the given linked list of pending categories.
-* The pending category list is built backwards, and needs to be
-* reversed before actually attaching the categories to a class.
-* Returns the head of the new linked list.
-**********************************************************************/
-static _objc_unresolved_category *reverse_cat(_objc_unresolved_category *cat)
-{
- _objc_unresolved_category *prev;
- _objc_unresolved_category *cur;
- _objc_unresolved_category *ahead;
-
- if (!cat) return NULL;
-
- prev = NULL;
- cur = cat;
- ahead = cat->next;
-
- while (cur) {
- ahead = cur->next;
- cur->next = prev;
- prev = cur;
- cur = ahead;
- }
-
- return prev;
-}
-
-
-/***********************************************************************
-* resolve_categories_for_class.
-* Install all existing categories intended for the specified class.
-* cls must be a true class and not a metaclass.
-**********************************************************************/
-static void resolve_categories_for_class(struct old_class *cls)
-{
- _objc_unresolved_category * pending;
- _objc_unresolved_category * next;
-
- // Nothing to do if there are no categories at all
- if (!category_hash) return;
-
- // Locate and remove first element in category list
- // associated with this class
- pending = NXMapKeyFreeingRemove (category_hash, cls->name);
-
- // Traverse the list of categories, if any, registered for this class
-
- // The pending list is built backwards. Reverse it and walk forwards.
- pending = reverse_cat(pending);
-
- while (pending) {
- if (pending->cat) {
- // Install the category
- // use the non-flush-cache version since we are only
- // called from the class initialization code
- _objc_add_category(cls, pending->cat, (int)pending->version);
- }
-
- // Delink and reclaim this registration
- next = pending->next;
- _free_internal(pending);
- pending = next;
- }
-}
-
-
-/***********************************************************************
-* _objc_resolve_categories_for_class.
-* Public version of resolve_categories_for_class. This was
-* exported pre-10.4 for Omni et al. to work around a problem
-* with too-lazy category attachment.
-* cls should be a class, but this function can also cope with metaclasses.
-**********************************************************************/
-void _objc_resolve_categories_for_class(Class cls_gen)
-{
- struct old_class *cls = oldcls(cls_gen);
-
- // If cls is a metaclass, get the class.
- // resolve_categories_for_class() requires a real class to work correctly.
- if (ISMETA(cls)) {
- if (strncmp(cls->name, "_%", 2) == 0) {
- // Posee's meta's name is smashed and isn't in the class_hash,
- // so objc_getClass doesn't work.
- const char *baseName = strchr(cls->name, '%'); // get posee's real name
- cls = oldcls((Class)objc_getClass(baseName));
- } else {
- cls = oldcls((Class)objc_getClass(cls->name));
- }
- }
-
- resolve_categories_for_class(cls);
-}
-
-
-/***********************************************************************
-* _objc_register_category.
-* Process a category read from an image.
-* If the category's class exists, attach the category immediately.
-* Classes that need cache flushing are marked but not flushed.
-* If the category's class does not exist yet, pend the category for
-* later attachment. Pending categories are attached in the order
-* they were discovered.
-* Returns YES if some method caches now need to be flushed.
-**********************************************************************/
-static BOOL _objc_register_category(struct old_category *cat, int version)
-{
- _objc_unresolved_category * new_cat;
- _objc_unresolved_category * old;
- struct old_class *theClass;
-
- // If the category's class exists, attach the category.
- if ((theClass = oldcls((Class)objc_lookUpClass(cat->class_name)))) {
- return _objc_add_category_flush_caches(theClass, cat, version);
- }
-
- // If the category's class exists but is unconnected,
- // then attach the category to the class but don't bother
- // flushing any method caches (because they must be empty).
- // YES unconnected, NO class_handler
- if ((theClass = oldcls((Class)look_up_class(cat->class_name, YES, NO)))) {
- _objc_add_category(theClass, cat, version);
- return NO;
- }
-
-
- // Category's class does not exist yet.
- // Save the category for later attachment.
-
- if (PrintConnecting) {
- _objc_inform("CONNECT: pending category '%s (%s)'", cat->class_name, cat->category_name);
- }
-
- // Create category lookup table if needed
- if (!category_hash)
- category_hash = NXCreateMapTableFromZone (NXStrValueMapPrototype,
- 128,
- _objc_internal_zone ());
-
- // Locate an existing list of categories, if any, for the class.
- old = NXMapGet (category_hash, cat->class_name);
-
- // Register the category to be fixed up later.
- // The category list is built backwards, and is reversed again
- // by resolve_categories_for_class().
- new_cat = _malloc_internal(sizeof(_objc_unresolved_category));
- new_cat->next = old;
- new_cat->cat = cat;
- new_cat->version = version;
- (void) NXMapKeyCopyingInsert (category_hash, cat->class_name, new_cat);
-
- return NO;
-}
-
-
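-/***********************************************************************
-* _objc_copyClassNamesForImage
-* Returns a malloc'd list of the names of all connected classes
-* defined in the given image. The list is NULL-terminated when
-* non-empty; NULL is returned if the image defines no connected classes.
-* If outCount is not NULL, *outCount is set to the number of names.
-**********************************************************************/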
-const char **
-_objc_copyClassNamesForImage(header_info *hi, unsigned int *outCount)
-{
- Module mods;
- unsigned int m;
- const char **list;
- int count;
- int allocated;
-
- list = NULL;
- count = 0;
- allocated = 0;
-
- mods = hi->mod_ptr;
- for (m = 0; m < hi->mod_count; m++) {
- int d;
-
- if (!mods[m].symtab) continue;
-
- for (d = 0; d < mods[m].symtab->cls_def_cnt; d++) {
- struct old_class *cls = mods[m].symtab->defs[d];
- // fixme what about future-ified classes?
- if (class_is_connected(cls)) {
- if (count == allocated) {
- allocated = allocated*2 + 16;
- list = (const char **)
- realloc((void *)list, allocated * sizeof(char *));
- }
- list[count++] = cls->name;
- }
- }
- }
-
- if (count > 0) {
- // NULL-terminate non-empty list
- if (count == allocated) {
- allocated = allocated+1;
- list = (const char **)
- realloc((void *)list, allocated * sizeof(char *));
- }
- list[count] = NULL;
- }
-
- if (outCount) *outCount = count;
- return list;
-}
-
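-/***********************************************************************
-* gdb_class_getClass, gdb_object_getClass, gdb_objc_isRuntimeLocked
-* Debugger support. gdb_objc_isRuntimeLocked returns YES if any of the
-* major runtime locks (methodListLock, classLock, cacheUpdateLock) is
-* currently held, in which case the debugger should not call into the
-* runtime.
-**********************************************************************/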
-Class gdb_class_getClass(Class cls)
-{
- const char *className = cls->name;
- if(!className || !strlen(className)) return Nil;
- Class rCls = look_up_class(className, NO, NO);
- return rCls;
-}
-
-Class gdb_object_getClass(id obj)
-{
- Class cls = _object_getClass(obj);
- return gdb_class_getClass(cls);
-}
-
-BOOL gdb_objc_isRuntimeLocked()
-{
- if (mutex_try_lock(&methodListLock)) {
- mutex_unlock(&methodListLock);
- } else
- return YES;
-
- if (mutex_try_lock(&classLock)) {
- mutex_unlock(&classLock);
- } else
- return YES;
-
- if (mutex_try_lock(&cacheUpdateLock)) {
- mutex_unlock(&cacheUpdateLock);
- } else
- return YES;
-
- return NO;
-}
-
-
-/***********************************************************************
-* Lock management
-* Every lock used anywhere must be managed here.
-* Locks not managed here may cause gdb deadlocks.
-**********************************************************************/
-rwlock_t selLock = {};
-mutex_t classLock = MUTEX_INITIALIZER;
-mutex_t methodListLock = MUTEX_INITIALIZER;
-mutex_t cacheUpdateLock = MUTEX_INITIALIZER;
-recursive_mutex_t loadMethodLock = RECURSIVE_MUTEX_INITIALIZER;
-static int debugger_selLock;
-static int debugger_loadMethodLock;
-#define RDONLY 1
-#define RDWR 2
-
-void lock_init(void)
-{
- rwlock_init(&selLock);
- recursive_mutex_init(&loadMethodLock);
-}
-
-
-#if SUPPORT_DEBUGGER_MODE
-
-/***********************************************************************
-* startDebuggerMode
-* Attempt to acquire some locks for debugger mode.
-* Returns 0 if debugger mode failed because too many locks are unavailable.
-*
-* Locks successfully acquired are held until endDebuggerMode().
-* Locks not acquired are off-limits until endDebuggerMode(); any
-* attempt to manipulate them will cause a trap.
-* Locks not handled here may cause deadlocks in gdb.
-**********************************************************************/
-int startDebuggerMode(void)
-{
- int result = DEBUGGER_FULL;
-
- // classLock is required
- // methodListLock is required
- // cacheUpdateLock is required
- // fixme might be able to allow all-or-none
- if (! mutex_try_lock(&classLock)) {
- return DEBUGGER_OFF;
- }
- if (! mutex_try_lock(&methodListLock)) {
- mutex_unlock(&classLock);
- return DEBUGGER_OFF;
- }
- if (! mutex_try_lock(&cacheUpdateLock)) {
- mutex_unlock(&methodListLock);
- mutex_unlock(&classLock);
- return DEBUGGER_OFF;
- }
-
- // side table locks are not optional because we're being conservative
- if (!noSideTableLocksHeld()) {
- mutex_unlock(&cacheUpdateLock);
- mutex_unlock(&methodListLock);
- mutex_unlock(&classLock);
- return DEBUGGER_OFF;
- }
-
- // selLock is optional
- if (rwlock_try_write(&selLock)) {
- debugger_selLock = RDWR;
- } else if (rwlock_try_read(&selLock)) {
- debugger_selLock = RDONLY;
- result = DEBUGGER_PARTIAL;
- } else {
- debugger_selLock = 0;
- result = DEBUGGER_PARTIAL;
- }
-
- // loadMethodLock is optional
- if (recursive_mutex_try_lock(&loadMethodLock)) {
- debugger_loadMethodLock = RDWR;
- } else {
- debugger_loadMethodLock = 0;
- result = DEBUGGER_PARTIAL;
- }
-
- return result;
-}
-
-/***********************************************************************
-* endDebuggerMode
-* Relinquish locks acquired in startDebuggerMode().
-**********************************************************************/
-void endDebuggerMode(void)
-{
- if (debugger_loadMethodLock) {
- recursive_mutex_unlock(&loadMethodLock);
- debugger_loadMethodLock = 0;
- }
- rwlock_unlock(&selLock, debugger_selLock);
- debugger_selLock = 0;
- mutex_unlock(&classLock);
- mutex_unlock(&methodListLock);
- mutex_unlock(&cacheUpdateLock);
-}
-
-/***********************************************************************
-* isManagedDuringDebugger
-* Returns YES if the given lock is handled specially during debugger
-* mode (i.e. debugger mode tries to acquire it).
-**********************************************************************/
-BOOL isManagedDuringDebugger(void *lock)
-{
- if (lock == &selLock) return YES;
- if (lock == &classLock) return YES;
- if (lock == &methodListLock) return YES;
- if (lock == &cacheUpdateLock) return YES;
- if (lock == &loadMethodLock) return YES;
- return NO;
-}
-
-/***********************************************************************
-* isLockedDuringDebugger
-* Returns YES if the given mutex was acquired by debugger mode.
-* Locking a managed mutex during debugger mode causes a trap unless
-* this returns YES.
-**********************************************************************/
-BOOL isLockedDuringDebugger(void *lock)
-{
- assert(DebuggerMode);
-
- if (lock == &classLock) return YES;
- if (lock == &methodListLock) return YES;
- if (lock == &cacheUpdateLock) return YES;
- if (lock == (mutex_t *)&loadMethodLock) return YES;
- return NO;
-}
-
-/***********************************************************************
-* isReadingDuringDebugger
-* Returns YES if the given rwlock was read-locked by debugger mode.
-* Read-locking a managed rwlock during debugger mode causes a trap unless
-* this returns YES.
-**********************************************************************/
-BOOL isReadingDuringDebugger(rwlock_t *lock)
-{
- assert(DebuggerMode);
-
- // read-lock is allowed even if debugger mode actually write-locked it
- if (debugger_selLock && lock == &selLock) return YES;
-
- return NO;
-}
-
-/***********************************************************************
-* isWritingDuringDebugger
-* Returns YES if the given rwlock was write-locked by debugger mode.
-* Write-locking a managed rwlock during debugger mode causes a trap unless
-* this returns YES.
-**********************************************************************/
-BOOL isWritingDuringDebugger(rwlock_t *lock)
-{
- assert(DebuggerMode);
-
- if (debugger_selLock == RDWR && lock == &selLock) return YES;
-
- return NO;
-}
-
-// SUPPORT_DEBUGGER_MODE
-#endif
-
-#endif
--- /dev/null
+/*
+ * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+/***********************************************************************
+* objc-runtime-old.mm
+* Support for old-ABI classes and images.
+**********************************************************************/
+
+/***********************************************************************
+ * Class loading and connecting (GrP 2004-2-11)
+ *
+ * When images are loaded (during program startup or otherwise), the
+ * runtime needs to load classes and categories from the images, connect
+ * classes to superclasses and categories to parent classes, and call
+ * +load methods.
+ *
+ * The Objective-C runtime can cope with classes arriving in any order.
+ * That is, a class may be discovered by the runtime before some
+ * superclass is known. To handle out-of-order class loads, the
+ * runtime uses a "pending class" system.
+ *
+ * (Historical note)
+ * Panther and earlier: many classes arrived out-of-order because of
+ * the poorly-ordered callback from dyld. However, the runtime's
+ * pending mechanism only handled "missing superclass" and not
+ * "present superclass but missing higher class". See Radar #3225652.
+ * Tiger: The runtime's pending mechanism was augmented to handle
+ * arbitrary missing classes. In addition, dyld was rewritten and
+ * now sends the callbacks in strictly bottom-up link order.
+ * The pending mechanism may now be needed only for rare and
+ * hard to construct programs.
+ * (End historical note)
+ *
+ * A class when first seen in an image is considered "unconnected".
+ * It is stored in `unconnected_class_hash`. If all of the class's
+ * superclasses exist and are already "connected", then the new class
+ * can be connected to its superclasses and moved to `class_hash` for
+ * normal use. Otherwise, the class waits in `unconnected_class_hash`
+ * until the superclasses finish connecting.
+ *
+ * A "connected" class is
+ * (1) in `class_hash`,
+ * (2) connected to its superclasses,
+ * (3) has no unconnected superclasses,
+ * (4) is otherwise initialized and ready for use, and
+ * (5) is eligible for +load if +load has not already been called.
+ *
+ * An "unconnected" class is
+ * (1) in `unconnected_class_hash`,
+ * (2) not connected to its superclasses,
+ * (3) has an immediate superclass which is either missing or unconnected,
+ * (4) is not ready for use, and
+ * (5) is not yet eligible for +load.
+ *
+ * Image mapping is NOT CURRENTLY THREAD-SAFE with respect to just about
+ * anything. Image mapping IS RE-ENTRANT in several places: superclass
+ * lookup may cause ZeroLink to load another image, and +load calls may
+ * cause dyld to load another image.
+ *
+ * Image mapping sequence:
+ *
+ * Read all classes in all new images.
+ * Add them all to unconnected_class_hash.
+ * Note any +load implementations before categories are attached.
+ * Attach any pending categories.
+ * Read all categories in all new images.
+ * Attach categories whose parent class exists (connected or not),
+ * and pend the rest.
+ * Mark them all eligible for +load (if implemented), even if the
+ * parent class is missing.
+ * Try to connect all classes in all new images.
+ * If the superclass is missing, pend the class
+ * If the superclass is unconnected, try to recursively connect it
+ * If the superclass is connected:
+ * connect the class
+ * mark the class eligible for +load, if implemented
+ * fix up any pended classrefs referring to the class
+ * connect any pended subclasses of the class
+ * Resolve selector refs and class refs in all new images.
+ * Class refs whose classes still do not exist are pended.
+ * Fix up protocol objects in all new images.
+ * Call +load for classes and categories.
+ * May include classes or categories that are not in these images,
+ * but are newly eligible because of these images.
+ * Class +loads will be called superclass-first because of the
+ * superclass-first nature of the connecting process.
+ * Category +load needs to be deferred until the parent class is
+ * connected and has had its +load called.
+ *
+ * Performance: all classes are read before any categories are read.
+ * Fewer categories need be pended for lack of a parent class.
+ *
+ * Performance: all categories are attempted to be attached before
+ * any classes are connected. Fewer class caches need be flushed.
+ * (Unconnected classes and their respective subclasses are guaranteed
+ * to be un-messageable, so their caches will be empty.)
+ *
+ * Performance: all classes are read before any classes are connected.
+ * Fewer classes need be pended for lack of a superclass.
+ *
+ * Correctness: all selector and class refs are fixed before any
+ * protocol fixups or +load methods. libobjc itself contains selector
+ * and class refs which are used in protocol fixup and +load.
+ *
+ * Correctness: +load methods are scheduled in bottom-up link order.
+ * This constraint is in addition to superclass order. Some +load
+ * implementations expect to use another class in a linked-to library,
+ * even if the two classes don't share a direct superclass relationship.
+ *
+ * Correctness: all classes are scanned for +load before any categories
+ * are attached. Otherwise, if a category implements +load and its class
+ * has no class methods, the class's +load scan would find the category's
+ * +load method, which would then be called twice.
+ *
+ * Correctness: pended class refs are not fixed up until the class is
+ * connected. Classes with missing weak superclasses remain unconnected.
+ * Class refs to classes with missing weak superclasses must be nil.
+ * Therefore class refs to unconnected classes must remain un-fixed.
+ *
+ **********************************************************************/
+
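+/***********************************************************************
+* Illustrative sketch (not runtime code) of the out-of-order case
+* described above, using hypothetical classes Sub : Super where Sub's
+* image is read before Super's:
+*
+*     read Sub      -> Sub goes into unconnected_class_hash and is
+*                      pended under the name "Super"
+*     read Super    -> Super has no missing superclasses, so it connects
+*     connect Super -> resolve_subclasses_of_class(Super) connects Sub
+*                   -> resolve_references_to_class(Sub) fixes any class
+*                      refs that were pended while Sub was missing
+**********************************************************************/
+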
+#if !__OBJC2__
+
+#include "objc-private.h"
+#include "objc-runtime-old.h"
+#include "objc-file-old.h"
+#include "objc-cache-old.h"
+#include "objc-loadmethod.h"
+
+
+typedef struct _objc_unresolved_category
+{
+ struct _objc_unresolved_category *next;
+ old_category *cat; // may be nil
+ long version;
+} _objc_unresolved_category;
+
+typedef struct _PendingSubclass
+{
+ Class subclass; // subclass to finish connecting; may be nil
+ struct _PendingSubclass *next;
+} PendingSubclass;
+
+typedef struct _PendingClassRef
+{
+ Class *ref; // class reference to fix up; may be nil
+ // (ref & 1) is a metaclass reference
+ struct _PendingClassRef *next;
+} PendingClassRef;
+
+
+static uintptr_t classHash(void *info, Class data);
+static int classIsEqual(void *info, Class name, Class cls);
+static int _objc_defaultClassHandler(const char *clsName);
+static inline NXMapTable *pendingClassRefsMapTable(void);
+static inline NXMapTable *pendingSubclassesMapTable(void);
+static void pendClassInstallation(Class cls, const char *superName);
+static void pendClassReference(Class *ref, const char *className, BOOL isMeta);
+static void resolve_references_to_class(Class cls);
+static void resolve_subclasses_of_class(Class cls);
+static void really_connect_class(Class cls, Class supercls);
+static BOOL connect_class(Class cls);
+static void map_method_descs (struct objc_method_description_list * methods, BOOL copy);
+static void _objcTweakMethodListPointerForClass(Class cls);
+static inline void _objc_add_category(Class cls, old_category *category, int version);
+static BOOL _objc_add_category_flush_caches(Class cls, old_category *category, int version);
+static _objc_unresolved_category *reverse_cat(_objc_unresolved_category *cat);
+static void resolve_categories_for_class(Class cls);
+static BOOL _objc_register_category(old_category *cat, int version);
+
+
+// Function called when a class is loaded from an image
+void (*callbackFunction)(Class, Category) = 0;
+
+// Hash table of classes
+NXHashTable * class_hash = 0;
+static NXHashTablePrototype classHashPrototype =
+{
+ (uintptr_t (*) (const void *, const void *)) classHash,
+ (int (*)(const void *, const void *, const void *)) classIsEqual,
+ NXNoEffectFree, 0
+};
+
+// Hash table of unconnected classes
+static NXHashTable *unconnected_class_hash = nil;
+
+// Exported copy of class_hash variable (hook for debugging tools)
+NXHashTable *_objc_debug_class_hash = nil;
+
+// Category and class registries
+// Keys are COPIES of strings, to prevent stale pointers with unloaded bundles
+// Use NXMapKeyCopyingInsert and NXMapKeyFreeingRemove
+static NXMapTable * category_hash = nil;
+
+// Keys are COPIES of strings, to prevent stale pointers with unloaded bundles
+// Use NXMapKeyCopyingInsert and NXMapKeyFreeingRemove
+static NXMapTable * pendingClassRefsMap = nil;
+static NXMapTable * pendingSubclassesMap = nil;
+
+// Protocols
+static NXMapTable *protocol_map = nil; // name -> protocol
+static NXMapTable *protocol_ext_map = nil; // protocol -> protocol ext
+
+// Function pointer objc_getClass calls through when class is not found
+static int (*objc_classHandler) (const char *) = _objc_defaultClassHandler;
+
+// Function pointer called by objc_getClass and objc_lookupClass when
+// class is not found. _objc_classLoader is called before objc_classHandler.
+static BOOL (*_objc_classLoader)(const char *) = nil;
+
+
+/***********************************************************************
+* objc_dump_class_hash. Log names of all known classes.
+**********************************************************************/
+void objc_dump_class_hash(void)
+{
+ NXHashTable *table;
+ unsigned count;
+ Class data;
+ NXHashState state;
+
+ table = class_hash;
+ count = 0;
+ state = NXInitHashState (table);
+ while (NXNextHashState (table, &state, (void **) &data))
+ printf ("class %d: %s\n", ++count, data->getName());
+}
+
+
+/***********************************************************************
+* _objc_init_class_hash. Return the class lookup table, create it if
+* necessary.
+**********************************************************************/
+void _objc_init_class_hash(void)
+{
+ // Do nothing if class hash table already exists
+ if (class_hash)
+ return;
+
+ // class_hash starts small, with only enough capacity for libobjc itself.
+ // If a second library is found by map_images(), class_hash is immediately
+ // resized to capacity 1024 to cut down on rehashes.
+ // Old numbers: A smallish Foundation+AppKit program will have
+ // about 520 classes. Larger apps (like IB or WOB) have more like
+ // 800 classes. Some customers have massive quantities of classes.
+ // Foundation-only programs aren't likely to notice the ~6K loss.
+ class_hash = NXCreateHashTableFromZone (classHashPrototype,
+ 16,
+ nil,
+ _objc_internal_zone ());
+ _objc_debug_class_hash = class_hash;
+}
+
+
+/***********************************************************************
+* objc_getClassList. Return the known classes.
+**********************************************************************/
+int objc_getClassList(Class *buffer, int bufferLen)
+{
+ NXHashState state;
+ Class cls;
+ int cnt, num;
+
+ mutex_lock(&classLock);
+ if (!class_hash) {
+ mutex_unlock(&classLock);
+ return 0;
+ }
+ num = NXCountHashTable(class_hash);
+ if (nil == buffer) {
+ mutex_unlock(&classLock);
+ return num;
+ }
+ cnt = 0;
+ state = NXInitHashState(class_hash);
+ while (cnt < bufferLen &&
+ NXNextHashState(class_hash, &state, (void **)&cls))
+ {
+ buffer[cnt++] = cls;
+ }
+ mutex_unlock(&classLock);
+ return num;
+}
+
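+/* Usage sketch (illustrative, not part of this file): the usual two-call
+ * pattern for objc_getClassList, assuming no classes are registered
+ * between the two calls:
+ *
+ *     int count = objc_getClassList(NULL, 0);       // just get the count
+ *     Class *buf = (Class *)malloc(count * sizeof(Class));
+ *     count = objc_getClassList(buf, count);        // fill the buffer
+ *     // ... use buf[0..count-1] ...
+ *     free(buf);
+ */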
+
+/***********************************************************************
+* objc_copyClassList
+* Returns pointers to all classes.
+* This requires all classes be realized, which is regrettably non-lazy.
+*
+* outCount may be nil. *outCount is the number of classes returned.
+* If the returned array is not nil, it is nil-terminated and must be
+* freed with free().
+* Locking: acquires classLock
+**********************************************************************/
+Class *
+objc_copyClassList(unsigned int *outCount)
+{
+ Class *result;
+ unsigned int count;
+
+ mutex_lock(&classLock);
+ result = nil;
+ count = class_hash ? NXCountHashTable(class_hash) : 0;
+
+ if (count > 0) {
+ Class cls;
+ NXHashState state = NXInitHashState(class_hash);
+ result = (Class *)malloc((1+count) * sizeof(Class));
+ count = 0;
+ while (NXNextHashState(class_hash, &state, (void **)&cls)) {
+ result[count++] = cls;
+ }
+ result[count] = nil;
+ }
+ mutex_unlock(&classLock);
+
+ if (outCount) *outCount = count;
+ return result;
+}
+
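+/* Usage sketch (illustrative): the returned array is owned by the caller
+ * and is nil-terminated when non-nil:
+ *
+ *     unsigned int count;
+ *     Class *classes = objc_copyClassList(&count);
+ *     for (unsigned int i = 0; i < count; i++) {
+ *         // ... use classes[i] ...
+ *     }
+ *     free(classes);   // free(nil) is harmless if no classes were found
+ */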
+
+/***********************************************************************
+* objc_copyProtocolList
+* Returns pointers to all protocols.
+* Locking: acquires classLock
+**********************************************************************/
+Protocol * __unsafe_unretained *
+objc_copyProtocolList(unsigned int *outCount)
+{
+ int count, i;
+ Protocol *proto;
+ const char *name;
+ NXMapState state;
+ Protocol **result;
+
+ mutex_lock(&classLock);
+
+ count = NXCountMapTable(protocol_map);
+ if (count == 0) {
+ mutex_unlock(&classLock);
+ if (outCount) *outCount = 0;
+ return nil;
+ }
+
+ result = (Protocol **)calloc(1 + count, sizeof(Protocol *));
+
+ i = 0;
+ state = NXInitMapState(protocol_map);
+ while (NXNextMapState(protocol_map, &state,
+ (const void **)&name, (const void **)&proto))
+ {
+ result[i++] = proto;
+ }
+
+ result[i++] = nil;
+ assert(i == count+1);
+
+ mutex_unlock(&classLock);
+
+ if (outCount) *outCount = count;
+ return result;
+}
+
+
+/***********************************************************************
+* objc_getClasses. Return class lookup table.
+*
+* NOTE: This function is very dangerous, since you cannot safely use
+* the hashtable without locking it, and the lock is private!
+**********************************************************************/
+void *objc_getClasses(void)
+{
+ OBJC_WARN_DEPRECATED;
+
+ // Return the class lookup hash table
+ return class_hash;
+}
+
+
+/***********************************************************************
+* classHash.
+**********************************************************************/
+static uintptr_t classHash(void *info, Class data)
+{
+ // Nil classes hash to zero
+ if (!data)
+ return 0;
+
+ // Call through to real hash function
+ return _objc_strhash (data->getName());
+}
+
+/***********************************************************************
+* classIsEqual. Returns whether the class names match. If we ever
+* check more than the name, routines like objc_lookUpClass have to
+* change as well.
+**********************************************************************/
+static int classIsEqual(void *info, Class name, Class cls)
+{
+ // Standard string comparison
+ return strcmp(name->getName(), cls->getName()) == 0;
+}
+
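+// Because equality is name-only, a stack-allocated "query" class with just
+// its name field set works as a hash lookup key, e.g.:
+//     objc_class query;
+//     query.name = name;
+//     NXHashGet(future_class_hash, &query);
+// (This idiom is used by _objc_allocateFutureClass below.)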
+
+// Unresolved future classes
+static NXHashTable *future_class_hash = nil;
+
+// Resolved future<->original classes
+static NXMapTable *future_class_to_original_class_map = nil;
+static NXMapTable *original_class_to_future_class_map = nil;
+
+// CF requests about 20 future classes; HIToolbox requests one.
+#define FUTURE_COUNT 32
+
+
+/***********************************************************************
+* setOriginalClassForFutureClass
+* Record resolution of a future class.
+**********************************************************************/
+static void setOriginalClassForFutureClass(Class futureClass,
+ Class originalClass)
+{
+ if (!future_class_to_original_class_map) {
+ future_class_to_original_class_map =
+ NXCreateMapTableFromZone (NXPtrValueMapPrototype, FUTURE_COUNT,
+ _objc_internal_zone ());
+ original_class_to_future_class_map =
+ NXCreateMapTableFromZone (NXPtrValueMapPrototype, FUTURE_COUNT,
+ _objc_internal_zone ());
+ }
+
+ NXMapInsert (future_class_to_original_class_map,
+ futureClass, originalClass);
+ NXMapInsert (original_class_to_future_class_map,
+ originalClass, futureClass);
+
+ if (PrintFuture) {
+ _objc_inform("FUTURE: using %p instead of %p for %s", (void*)futureClass, (void*)originalClass, originalClass->name);
+ }
+}
+
+/***********************************************************************
+* getOriginalClassForFutureClass
+* getFutureClassForOriginalClass
+* Switch between a future class and its corresponding original class.
+* The future class is the one actually in use.
+* The original class is the one from disk.
+**********************************************************************/
+/*
+static Class
+getOriginalClassForFutureClass(Class futureClass)
+{
+ if (!future_class_to_original_class_map) return Nil;
+ return NXMapGet (future_class_to_original_class_map, futureClass);
+}
+*/
+static Class
+getFutureClassForOriginalClass(Class originalClass)
+{
+ if (!original_class_to_future_class_map) return Nil;
+ return (Class)NXMapGet(original_class_to_future_class_map, originalClass);
+}
+
+
+/***********************************************************************
+* makeFutureClass
+* Initialize the memory in *cls with an unresolved future class with the
+* given name. The memory is recorded in future_class_hash.
+**********************************************************************/
+static void makeFutureClass(Class cls, const char *name)
+{
+ // CF requests about 20 future classes, plus HIToolbox has one.
+ if (!future_class_hash) {
+ future_class_hash =
+ NXCreateHashTableFromZone(classHashPrototype, FUTURE_COUNT,
+ nil, _objc_internal_zone());
+ }
+
+ cls->name = _strdup_internal(name);
+ NXHashInsert(future_class_hash, cls);
+
+ if (PrintFuture) {
+ _objc_inform("FUTURE: reserving %p for %s", (void*)cls, name);
+ }
+}
+
+
+/***********************************************************************
+* _objc_allocateFutureClass
+* Allocate an unresolved future class for the given class name.
+* Returns any existing allocation if one was already made.
+* Assumes the named class doesn't exist yet.
+* Not thread safe.
+**********************************************************************/
+Class _objc_allocateFutureClass(const char *name)
+{
+ Class cls;
+
+ if (future_class_hash) {
+ objc_class query;
+ query.name = name;
+ if ((cls = (Class)NXHashGet(future_class_hash, &query))) {
+ // Already have a future class for this name.
+ return cls;
+ }
+ }
+
+ cls = _calloc_class(sizeof(objc_class));
+ makeFutureClass(cls, name);
+ return cls;
+}
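+
+// Illustrative sketch of the future-class flow (the caller and class name are
+// hypothetical; the real clients are CF and HIToolbox). A framework reserves a
+// class structure by name before that class has been read from any image; when
+// the real class arrives, the runtime copies it into the reserved structure so
+// existing pointers to the reserved structure stay valid.
+#if 0
+static void example_reserveFutureClass(void)
+{
+    // Reserve storage for a class that will be loaded later.
+    Class shell = _objc_allocateFutureClass("ExampleBridgedClass");
+
+    // ... later, when an image defining ExampleBridgedClass is mapped,
+    // _objc_read_classes_from_image() finds `shell` in future_class_hash,
+    // memcpy()s the freshly-read class into it, and installs `shell`
+    // as the canonical class for that name.
+    (void)shell;
+}
+#endif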
+
+
+/***********************************************************************
+* objc_setFutureClass.
+* Like objc_getFutureClass, but uses the provided memory block.
+* If the class already exists, a posing-like substitution is performed.
+* Not thread safe.
+**********************************************************************/
+void objc_setFutureClass(Class cls, const char *name)
+{
+ Class oldcls;
+ Class newcls = cls; // Not a real class!
+
+ if ((oldcls = look_up_class(name, NO/*unconnected*/, NO/*classhandler*/))) {
+ setOriginalClassForFutureClass(newcls, oldcls);
+ // fixme hack
+ memcpy(newcls, oldcls, sizeof(struct objc_class));
+ newcls->info &= ~CLS_EXT;
+
+ mutex_lock(&classLock);
+ NXHashRemove(class_hash, oldcls);
+ objc_removeRegisteredClass(oldcls);
+ change_class_references(newcls, oldcls, nil, YES);
+ NXHashInsert(class_hash, newcls);
+ objc_addRegisteredClass(newcls);
+ mutex_unlock(&classLock);
+ } else {
+ makeFutureClass(newcls, name);
+ }
+}
+
+
+BOOL _class_isFutureClass(Class cls)
+{
+ return cls && future_class_hash && NXHashGet(future_class_hash, cls);
+}
+
+
+/***********************************************************************
+* _objc_defaultClassHandler. Default objc_classHandler. Does nothing.
+**********************************************************************/
+static int _objc_defaultClassHandler(const char *clsName)
+{
+ // Return zero so objc_getClass doesn't bother re-searching
+ return 0;
+}
+
+/***********************************************************************
+* objc_setClassHandler. Set objc_classHandler to the specified value.
+*
+* NOTE: This should probably handle a nil userSuppliedHandler, because
+* the caller of objc_classHandler does not check for nil and would
+* crash with a bus error. It would make sense to handle nil by
+* restoring the default handler. Is anyone hacking with this, though?
+**********************************************************************/
+void objc_setClassHandler(int (*userSuppliedHandler)(const char *))
+{
+ OBJC_WARN_DEPRECATED;
+
+ objc_classHandler = userSuppliedHandler;
+}
+
+
+/***********************************************************************
+* _objc_setClassLoader
+* Similar to objc_setClassHandler, but objc_classLoader is used for
+* both objc_getClass() and objc_lookupClass(), and objc_classLoader
+* pre-empts objc_classHandler.
+**********************************************************************/
+void _objc_setClassLoader(BOOL (*newClassLoader)(const char *))
+{
+ _objc_classLoader = newClassLoader;
+}
+
+
+/***********************************************************************
+* objc_getProtocol
+* Get a protocol by name, or nil.
+**********************************************************************/
+Protocol *objc_getProtocol(const char *name)
+{
+ Protocol *result;
+ if (!protocol_map) return nil;
+ mutex_lock(&classLock);
+ result = (Protocol *)NXMapGet(protocol_map, name);
+ mutex_unlock(&classLock);
+ return result;
+}
+
+
+/***********************************************************************
+* look_up_class
+* Map a class name to a class using various methods.
+* This is the common implementation of objc_lookUpClass and objc_getClass,
+* and is also used internally to get additional search options.
+* Sequence:
+* 1. class_hash
+* 2. unconnected_class_hash (optional)
+* 3. classLoader callback
+* 4. classHandler callback (optional)
+**********************************************************************/
+Class look_up_class(const char *aClassName, BOOL includeUnconnected, BOOL includeClassHandler)
+{
+ BOOL includeClassLoader = YES; // class loader cannot be skipped
+ Class result = nil;
+ struct objc_class query;
+
+ query.name = aClassName;
+
+ retry:
+
+ if (!result && class_hash) {
+ // Check ordinary classes
+ mutex_lock (&classLock);
+ result = (Class)NXHashGet(class_hash, &query);
+ mutex_unlock (&classLock);
+ }
+
+ if (!result && includeUnconnected && unconnected_class_hash) {
+ // Check not-yet-connected classes
+ mutex_lock(&classLock);
+ result = (Class)NXHashGet(unconnected_class_hash, &query);
+ mutex_unlock(&classLock);
+ }
+
+ if (!result && includeClassLoader && _objc_classLoader) {
+ // Try class loader callback
+ if ((*_objc_classLoader)(aClassName)) {
+ // Re-try lookup without class loader
+ includeClassLoader = NO;
+ goto retry;
+ }
+ }
+
+ if (!result && includeClassHandler && objc_classHandler) {
+ // Try class handler callback
+ if ((*objc_classHandler)(aClassName)) {
+ // Re-try lookup without class handler or class loader
+ includeClassLoader = NO;
+ includeClassHandler = NO;
+ goto retry;
+ }
+ }
+
+ return result;
+}
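+
+// Illustrative sketch of the classHandler retry path above (the handler and
+// class name are hypothetical; objc_setClassHandler is the deprecated
+// installer defined above). A handler that returns nonzero tells
+// look_up_class to retry the table lookups once more, this time with the
+// handler (and loader) disabled so it cannot loop forever.
+#if 0
+static int example_classHandler(const char *name)
+{
+    if (0 == strcmp(name, "ExampleLazyClass")) {
+        // ... dynamically construct and register ExampleLazyClass here ...
+        return 1;   // nonzero: look_up_class retries the table lookups
+    }
+    return 0;       // zero: give up; the lookup returns nil
+}
+
+static void example_installHandler(void)
+{
+    objc_setClassHandler(example_classHandler);
+}
+#endif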
+
+
+/***********************************************************************
+* objc_class::isConnected
+* Returns TRUE if class cls is connected.
+* A connected class has either a connected superclass or a nil superclass,
+* and is present in class_hash.
+**********************************************************************/
+bool objc_class::isConnected()
+{
+ bool result;
+ mutex_lock(&classLock);
+ result = NXHashMember(class_hash, this);
+ mutex_unlock(&classLock);
+ return result;
+}
+
+
+/***********************************************************************
+* pendingClassRefsMapTable. Return a pointer to the lookup table for
+* pending class refs.
+**********************************************************************/
+static inline NXMapTable *pendingClassRefsMapTable(void)
+{
+ // Allocate table if needed
+ if (!pendingClassRefsMap) {
+ pendingClassRefsMap =
+ NXCreateMapTableFromZone(NXStrValueMapPrototype,
+ 10, _objc_internal_zone ());
+ }
+
+ // Return table pointer
+ return pendingClassRefsMap;
+}
+
+
+/***********************************************************************
+* pendingSubclassesMapTable. Return a pointer to the lookup table for
+* pending subclasses.
+**********************************************************************/
+static inline NXMapTable *pendingSubclassesMapTable(void)
+{
+ // Allocate table if needed
+ if (!pendingSubclassesMap) {
+ pendingSubclassesMap =
+ NXCreateMapTableFromZone(NXStrValueMapPrototype,
+ 10, _objc_internal_zone ());
+ }
+
+ // Return table pointer
+ return pendingSubclassesMap;
+}
+
+
+/***********************************************************************
+* pendClassInstallation
+* Finish connecting class cls when its superclass becomes connected.
+* Check for multiple pends of the same class because connect_class does not.
+**********************************************************************/
+static void pendClassInstallation(Class cls, const char *superName)
+{
+ NXMapTable *table;
+ PendingSubclass *pending;
+ PendingSubclass *oldList;
+ PendingSubclass *l;
+
+ // Create and/or locate pending class lookup table
+ table = pendingSubclassesMapTable ();
+
+ // Make sure this class isn't already in the pending list.
+ oldList = (PendingSubclass *)NXMapGet(table, superName);
+ for (l = oldList; l != nil; l = l->next) {
+ if (l->subclass == cls) return; // already here, nothing to do
+ }
+
+ // Create entry referring to this class
+ pending = (PendingSubclass *)_malloc_internal(sizeof(PendingSubclass));
+ pending->subclass = cls;
+
+ // Link new entry into head of list of entries for this class
+ pending->next = oldList;
+
+ // (Re)place entry list in the table
+ NXMapKeyCopyingInsert (table, superName, pending);
+}
+
+
+/***********************************************************************
+* pendClassReference
+* Fix up a class ref when the class with the given name becomes connected.
+**********************************************************************/
+static void pendClassReference(Class *ref, const char *className, BOOL isMeta)
+{
+ NXMapTable *table;
+ PendingClassRef *pending;
+
+ // Create and/or locate pending class lookup table
+ table = pendingClassRefsMapTable ();
+
+ // Create entry containing the class reference
+ pending = (PendingClassRef *)_malloc_internal(sizeof(PendingClassRef));
+ pending->ref = ref;
+ if (isMeta) {
+ pending->ref = (Class *)((uintptr_t)pending->ref | 1);
+ }
+
+ // Link new entry into head of list of entries for this class
+ pending->next = (PendingClassRef *)NXMapGet(table, className);
+
+ // (Re)place entry list in the table
+ NXMapKeyCopyingInsert (table, className, pending);
+
+ if (PrintConnecting) {
+ _objc_inform("CONNECT: pended reference to class '%s%s' at %p",
+ className, isMeta ? " (meta)" : "", (void *)ref);
+ }
+}
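+
+// Illustrative sketch of the tag bit used above: the metaclass flag is
+// stashed in the low bit of the (word-aligned) ref pointer, and stripped
+// again in resolve_references_to_class() below.
+#if 0
+static void example_tagBit(Class *ref)
+{
+    // encode: remember "this ref wants the metaclass"
+    Class *tagged = (Class *)((uintptr_t)ref | 1);
+
+    // decode: recover the flag and the real pointer
+    BOOL isMeta = ((uintptr_t)tagged & 1) ? YES : NO;
+    Class *real = (Class *)((uintptr_t)tagged & ~(uintptr_t)1);
+    assert(isMeta && real == ref);
+}
+#endif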
+
+
+/***********************************************************************
+* resolve_references_to_class
+* Fix up any pending class refs to this class.
+**********************************************************************/
+static void resolve_references_to_class(Class cls)
+{
+ PendingClassRef *pending;
+
+ if (!pendingClassRefsMap) return; // no unresolved refs for any class
+
+ pending = (PendingClassRef *)NXMapGet(pendingClassRefsMap, cls->name);
+ if (!pending) return; // no unresolved refs for this class
+
+ NXMapKeyFreeingRemove(pendingClassRefsMap, cls->name);
+
+ if (PrintConnecting) {
+ _objc_inform("CONNECT: resolving references to class '%s'", cls->name);
+ }
+
+ while (pending) {
+ PendingClassRef *next = pending->next;
+ if (pending->ref) {
+ BOOL isMeta = ((uintptr_t)pending->ref & 1) ? YES : NO;
+ Class *ref =
+ (Class *)((uintptr_t)pending->ref & ~(uintptr_t)1);
+ *ref = isMeta ? cls->ISA() : cls;
+ }
+ _free_internal(pending);
+ pending = next;
+ }
+
+ if (NXCountMapTable(pendingClassRefsMap) == 0) {
+ NXFreeMapTable(pendingClassRefsMap);
+ pendingClassRefsMap = nil;
+ }
+}
+
+
+/***********************************************************************
+* resolve_subclasses_of_class
+* Fix up any pending subclasses of this class.
+**********************************************************************/
+static void resolve_subclasses_of_class(Class cls)
+{
+ PendingSubclass *pending;
+
+ if (!pendingSubclassesMap) return; // no unresolved subclasses
+
+ pending = (PendingSubclass *)NXMapGet(pendingSubclassesMap, cls->name);
+ if (!pending) return; // no unresolved subclasses for this class
+
+ NXMapKeyFreeingRemove(pendingSubclassesMap, cls->name);
+
+ // Destroy the pending table if it's now empty, to save memory.
+ if (NXCountMapTable(pendingSubclassesMap) == 0) {
+ NXFreeMapTable(pendingSubclassesMap);
+ pendingSubclassesMap = nil;
+ }
+
+ if (PrintConnecting) {
+ _objc_inform("CONNECT: resolving subclasses of class '%s'", cls->name);
+ }
+
+ while (pending) {
+ PendingSubclass *next = pending->next;
+ if (pending->subclass) connect_class(pending->subclass);
+ _free_internal(pending);
+ pending = next;
+ }
+}
+
+
+/***********************************************************************
+* really_connect_class
+* Connect cls to superclass supercls unconditionally.
+* Also adjust the class hash tables and handle pended subclasses.
+*
+* This should be called from connect_class() ONLY.
+**********************************************************************/
+static void really_connect_class(Class cls,
+ Class supercls)
+{
+ Class oldCls;
+
+ // Connect superclass pointers.
+ set_superclass(cls, supercls, YES);
+
+ // Update GC layouts
+ // For paranoia, this is a conservative update:
+ // only non-strong -> strong and weak -> strong are corrected.
+ if (UseGC && supercls &&
+ (cls->info & CLS_EXT) && (supercls->info & CLS_EXT))
+ {
+ BOOL layoutChanged;
+ layout_bitmap ivarBitmap =
+ layout_bitmap_create(cls->ivar_layout,
+ cls->instance_size,
+ cls->instance_size, NO);
+
+ layout_bitmap superBitmap =
+ layout_bitmap_create(supercls->ivar_layout,
+ supercls->instance_size,
+ supercls->instance_size, NO);
+
+ // non-strong -> strong: bits set in super should be set in sub
+ layoutChanged = layout_bitmap_or(ivarBitmap, superBitmap, cls->name);
+ layout_bitmap_free(superBitmap);
+
+ if (layoutChanged) {
+ layout_bitmap weakBitmap = {};
+ BOOL weakLayoutChanged = NO;
+
+ if (cls->ext && cls->ext->weak_ivar_layout) {
+ // weak -> strong: strong bits should be cleared in weak layout
+ // This is a subset of non-strong -> strong
+ weakBitmap =
+ layout_bitmap_create(cls->ext->weak_ivar_layout,
+ cls->instance_size,
+ cls->instance_size, YES);
+
+ weakLayoutChanged =
+ layout_bitmap_clear(weakBitmap, ivarBitmap, cls->name);
+ } else {
+ // no existing weak ivars, so no weak -> strong changes
+ }
+
+ // Rebuild layout strings.
+ if (PrintIvars) {
+ _objc_inform("IVARS: gc layout changed "
+ "for class %s (super %s)",
+ cls->name, supercls->name);
+ if (weakLayoutChanged) {
+ _objc_inform("IVARS: gc weak layout changed "
+ "for class %s (super %s)",
+ cls->name, supercls->name);
+ }
+ }
+ cls->ivar_layout = layout_string_create(ivarBitmap);
+ if (weakLayoutChanged) {
+ cls->ext->weak_ivar_layout = layout_string_create(weakBitmap);
+ }
+
+ layout_bitmap_free(weakBitmap);
+ }
+
+ layout_bitmap_free(ivarBitmap);
+ }
+
+ // Done!
+ cls->info |= CLS_CONNECTED;
+
+ mutex_lock(&classLock);
+
+ // Update hash tables.
+ NXHashRemove(unconnected_class_hash, cls);
+ oldCls = (Class)NXHashInsert(class_hash, cls);
+ objc_addRegisteredClass(cls);
+
+ // Delete unconnected_class_hash if it is now empty.
+ if (NXCountHashTable(unconnected_class_hash) == 0) {
+ NXFreeHashTable(unconnected_class_hash);
+ unconnected_class_hash = nil;
+ }
+
+ // No duplicate classes allowed.
+ // Duplicates should have been rejected by _objc_read_classes_from_image.
+ assert(!oldCls);
+
+ mutex_unlock(&classLock);
+
+ // Fix up pended class refs to this class, if any
+ resolve_references_to_class(cls);
+
+ // Connect newly-connectable subclasses
+ resolve_subclasses_of_class(cls);
+
+ // GC debugging: make sure all classes with -dealloc also have -finalize
+ if (DebugFinalizers) {
+ extern IMP findIMPInClass(Class cls, SEL sel);
+ if (findIMPInClass(cls, sel_getUid("dealloc")) &&
+ ! findIMPInClass(cls, sel_getUid("finalize")))
+ {
+ _objc_inform("GC: class '%s' implements -dealloc but not -finalize", cls->name);
+ }
+ }
+
+ // Debugging: if this class has ivars, make sure this class's ivars don't
+ // overlap with its super's. This catches some broken fragile base classes.
+ // Do not use super->instance_size vs. self->ivar[0] to check this.
+ // Ivars may be packed across instance_size boundaries.
+ if (DebugFragileSuperclasses && cls->ivars && cls->ivars->ivar_count) {
+ Class ivar_cls = supercls;
+
+ // Find closest superclass that has some ivars, if one exists.
+ while (ivar_cls &&
+ (!ivar_cls->ivars || ivar_cls->ivars->ivar_count == 0))
+ {
+ ivar_cls = ivar_cls->superclass;
+ }
+
+ if (ivar_cls) {
+ // Compare superclass's last ivar to this class's first ivar
+ old_ivar *super_ivar =
+ &ivar_cls->ivars->ivar_list[ivar_cls->ivars->ivar_count - 1];
+ old_ivar *self_ivar =
+ &cls->ivars->ivar_list[0];
+
+ // fixme could be smarter about super's ivar size
+ if (self_ivar->ivar_offset <= super_ivar->ivar_offset) {
+ _objc_inform("WARNING: ivars of superclass '%s' and "
+ "subclass '%s' overlap; superclass may have "
+ "changed since subclass was compiled",
+ ivar_cls->name, cls->name);
+ }
+ }
+ }
+}
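+
+// Simplified model of the GC layout reconciliation above (plain bitmasks
+// instead of the real layout_bitmap API, purely for illustration): every
+// strong bit in the superclass must also be strong in the subclass, and any
+// bit that becomes strong must stop being weak.
+#if 0
+static void example_layoutModel(void)
+{
+    uint32_t super_strong = 0x0000000f;   // super's strong ivar words
+    uint32_t sub_strong   = 0x00000003;   // stale subclass layout from disk
+    uint32_t sub_weak     = 0x0000000c;   // stale weak layout from disk
+
+    uint32_t fixed_strong = sub_strong | super_strong;   // non-strong -> strong
+    uint32_t fixed_weak   = sub_weak & ~fixed_strong;    // weak -> strong cleared
+
+    assert(fixed_strong == 0x0000000f && fixed_weak == 0);
+}
+#endif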
+
+
+/***********************************************************************
+* connect_class
+* Connect class cls to its superclasses, if possible.
+* If cls becomes connected, move it from unconnected_class_hash

+* to class_hash.
+* Returns TRUE if cls is connected.
+* Returns FALSE if cls could not be connected for some reason
+* (missing superclass or still-unconnected superclass)
+**********************************************************************/
+static BOOL connect_class(Class cls)
+{
+ if (cls->isConnected()) {
+ // This class is already connected to its superclass.
+ // Do nothing.
+ return TRUE;
+ }
+ else if (cls->superclass == nil) {
+ // This class is a root class.
+ // Connect it to itself.
+
+ if (PrintConnecting) {
+ _objc_inform("CONNECT: class '%s' now connected (root class)",
+ cls->name);
+ }
+
+ really_connect_class(cls, nil);
+ return TRUE;
+ }
+ else {
+ // This class is not a root class and is not yet connected.
+ // Connect it if its superclass and root class are already connected.
+ // Otherwise, add this class to the to-be-connected list,
+ // pending the completion of its superclass and root class.
+
+ // At this point, cls->superclass and cls->ISA()->ISA() are still STRINGS
+ char *supercls_name = (char *)cls->superclass;
+ Class supercls;
+
+ // YES unconnected, YES class handler
+ if (nil == (supercls = look_up_class(supercls_name, YES, YES))) {
+ // Superclass does not exist yet.
+ // pendClassInstallation will handle duplicate pends of this class
+ pendClassInstallation(cls, supercls_name);
+
+ if (PrintConnecting) {
+ _objc_inform("CONNECT: class '%s' NOT connected (missing super)", cls->name);
+ }
+ return FALSE;
+ }
+
+ if (! connect_class(supercls)) {
+ // Superclass exists but is not yet connected.
+ // pendClassInstallation will handle duplicate pends of this class
+ pendClassInstallation(cls, supercls_name);
+
+ if (PrintConnecting) {
+ _objc_inform("CONNECT: class '%s' NOT connected (unconnected super)", cls->name);
+ }
+ return FALSE;
+ }
+
+ // Superclass exists and is connected.
+ // Connect this class to the superclass.
+
+ if (PrintConnecting) {
+ _objc_inform("CONNECT: class '%s' now connected", cls->name);
+ }
+
+ really_connect_class(cls, supercls);
+ return TRUE;
+ }
+}
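+
+// Worked example of the pending machinery above, with hypothetical classes:
+// if an image defines ExampleSub before any image defines its superclass
+// ExampleBase, connect_class(ExampleSub) fails the look_up_class() call and
+// pends ExampleSub under the name "ExampleBase". When ExampleBase is later
+// read and connected, really_connect_class() calls
+// resolve_subclasses_of_class(ExampleBase), which re-runs
+// connect_class(ExampleSub) and completes the connection.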
+
+
+/***********************************************************************
+* _objc_read_categories_from_image.
+* Read all categories from the given image.
+* Install them on their parent classes, or register them for later
+* installation.
+* Returns YES if some method caches now need to be flushed.
+**********************************************************************/
+static BOOL _objc_read_categories_from_image (header_info * hi)
+{
+ Module mods;
+ size_t midx;
+ BOOL needFlush = NO;
+
+ if (_objcHeaderIsReplacement(hi)) {
+ // Ignore any categories in this image
+ return NO;
+ }
+
+
+ // Major loop - process all modules in the header
+ mods = hi->mod_ptr;
+
+ // NOTE: The module and category lists are traversed backwards
+ // to preserve the pre-10.4 processing order. Changing the order
+ // would have a small chance of introducing binary compatibility bugs.
+ midx = hi->mod_count;
+ while (midx-- > 0) {
+ unsigned int index;
+ unsigned int total;
+
+ // Nothing to do for a module without a symbol table
+ if (mods[midx].symtab == nil)
+ continue;
+
+ // Total entries in symbol table (class entries followed
+ // by category entries)
+ total = mods[midx].symtab->cls_def_cnt +
+ mods[midx].symtab->cat_def_cnt;
+
+ // Minor loop - register all categories from given module
+ index = total;
+ while (index-- > mods[midx].symtab->cls_def_cnt) {
+ old_category *cat = (old_category *)mods[midx].symtab->defs[index];
+ needFlush |= _objc_register_category(cat, (int)mods[midx].version);
+ }
+ }
+
+ return needFlush;
+}
+
+
+/***********************************************************************
+* _objc_read_classes_from_image.
+* Read classes from the given image, perform assorted minor fixups,
+* scan for +load implementation.
+* Does not connect classes to superclasses.
+* Does attach pended categories to the classes.
+* Adds all classes to unconnected_class_hash. class_hash is unchanged.
+**********************************************************************/
+static void _objc_read_classes_from_image(header_info *hi)
+{
+ unsigned int index;
+ unsigned int midx;
+ Module mods;
+ int isBundle = headerIsBundle(hi);
+
+ if (_objcHeaderIsReplacement(hi)) {
+ // Ignore any classes in this image
+ return;
+ }
+
+ // class_hash starts small, enough only for libobjc itself.
+ // If other Objective-C libraries are found, immediately resize
+ // class_hash, assuming that Foundation and AppKit are about
+ // to add lots of classes.
+ mutex_lock(&classLock);
+ if (hi->mhdr != libobjc_header && _NXHashCapacity(class_hash) < 1024) {
+ _NXHashRehashToCapacity(class_hash, 1024);
+ }
+ mutex_unlock(&classLock);
+
+ // Major loop - process all modules in the image
+ mods = hi->mod_ptr;
+ for (midx = 0; midx < hi->mod_count; midx += 1)
+ {
+ // Skip module containing no classes
+ if (mods[midx].symtab == nil)
+ continue;
+
+ // Minor loop - process all the classes in given module
+ for (index = 0; index < mods[midx].symtab->cls_def_cnt; index += 1)
+ {
+ Class newCls, oldCls;
+ BOOL rejected;
+
+ // Locate the class description pointer
+ newCls = (Class)mods[midx].symtab->defs[index];
+
+ // Classes loaded from Mach-O bundles can be unloaded later.
+ // Nothing uses this class yet, so cls->setInfo is not needed.
+ if (isBundle) newCls->info |= CLS_FROM_BUNDLE;
+ if (isBundle) newCls->ISA()->info |= CLS_FROM_BUNDLE;
+
+ // Use common static empty cache instead of nil
+ if (newCls->cache == nil)
+ newCls->cache = (Cache) &_objc_empty_cache;
+ if (newCls->ISA()->cache == nil)
+ newCls->ISA()->cache = (Cache) &_objc_empty_cache;
+
+ // Set metaclass version
+ newCls->ISA()->version = mods[midx].version;
+
+ // methodLists is nil or a single list, not an array
+ newCls->info |= CLS_NO_METHOD_ARRAY|CLS_NO_PROPERTY_ARRAY;
+ newCls->ISA()->info |= CLS_NO_METHOD_ARRAY|CLS_NO_PROPERTY_ARRAY;
+
+ // class has no subclasses for cache flushing
+ newCls->info |= CLS_LEAF;
+ newCls->ISA()->info |= CLS_LEAF;
+
+ if (mods[midx].version >= 6) {
+ // class structure has ivar_layout and ext fields
+ newCls->info |= CLS_EXT;
+ newCls->ISA()->info |= CLS_EXT;
+ }
+
+ // Check for +load implementation before categories are attached
+ if (_class_hasLoadMethod(newCls)) {
+ newCls->ISA()->info |= CLS_HAS_LOAD_METHOD;
+ }
+
+ // Install into unconnected_class_hash.
+ mutex_lock(&classLock);
+
+ if (future_class_hash) {
+ Class futureCls = (Class)
+ NXHashRemove(future_class_hash, newCls);
+ if (futureCls) {
+ // Another class structure for this class was already
+ // prepared by objc_getFutureClass(). Use it instead.
+ _free_internal((char *)futureCls->name);
+ memcpy(futureCls, newCls, sizeof(objc_class));
+ setOriginalClassForFutureClass(futureCls, newCls);
+ newCls = futureCls;
+
+ if (NXCountHashTable(future_class_hash) == 0) {
+ NXFreeHashTable(future_class_hash);
+ future_class_hash = nil;
+ }
+ }
+ }
+
+ if (!unconnected_class_hash) {
+ unconnected_class_hash =
+ NXCreateHashTableFromZone(classHashPrototype, 128,
+ nil, _objc_internal_zone());
+ }
+
+ if ((oldCls = (Class)NXHashGet(class_hash, newCls)) ||
+ (oldCls = (Class)NXHashGet(unconnected_class_hash, newCls)))
+ {
+ // Another class with this name exists. Complain and reject.
+ inform_duplicate(newCls->name, oldCls, newCls);
+ rejected = YES;
+ }
+ else {
+ NXHashInsert(unconnected_class_hash, newCls);
+ rejected = NO;
+ }
+
+ mutex_unlock(&classLock);
+
+ if (!rejected) {
+ // Attach pended categories for this class, if any
+ resolve_categories_for_class(newCls);
+ }
+ }
+ }
+}
+
+
+/***********************************************************************
+* _objc_connect_classes_from_image.
+* Connect the classes in the given image to their superclasses,
+* or register them for later connection if any superclasses are missing.
+**********************************************************************/
+static void _objc_connect_classes_from_image(header_info *hi)
+{
+ unsigned int index;
+ unsigned int midx;
+ Module mods;
+ BOOL replacement = _objcHeaderIsReplacement(hi);
+
+ // Major loop - process all modules in the image
+ mods = hi->mod_ptr;
+ for (midx = 0; midx < hi->mod_count; midx += 1)
+ {
+ // Skip module containing no classes
+ if (mods[midx].symtab == nil)
+ continue;
+
+ // Minor loop - process all the classes in given module
+ for (index = 0; index < mods[midx].symtab->cls_def_cnt; index += 1)
+ {
+ Class cls = (Class)mods[midx].symtab->defs[index];
+ if (! replacement) {
+ BOOL connected;
+ Class futureCls = getFutureClassForOriginalClass(cls);
+ if (futureCls) {
+ // objc_getFutureClass() requested a different class
+ // struct. Fix up the original struct's superclass
+ // field for [super ...] use, but otherwise perform
+ // fixups on the new class struct only.
+ const char *super_name = (const char *) cls->superclass;
+ if (super_name) cls->superclass = objc_getClass(super_name);
+ cls = futureCls;
+ }
+ connected = connect_class(cls);
+ if (connected && callbackFunction) {
+ (*callbackFunction)(cls, 0);
+ }
+ } else {
+ // Replacement image - fix up superclass only (#3704817)
+ // And metaclass's superclass (#5351107)
+ const char *super_name = (const char *) cls->superclass;
+ if (super_name) {
+ cls->superclass = objc_getClass(super_name);
+ // metaclass's superclass is superclass's metaclass
+ cls->ISA()->superclass = cls->superclass->ISA();
+ } else {
+ // Replacement for a root class
+ // cls->superclass already nil
+ // root metaclass's superclass is root class
+ cls->ISA()->superclass = cls;
+ }
+ }
+ }
+ }
+}
+
+
+/***********************************************************************
+* _objc_map_class_refs_for_image. Convert the class ref entries from
+* a class name string pointer to a class pointer. If the class does
+* not yet exist, the reference is added to a list of pending references
+* to be fixed up at a later date.
+**********************************************************************/
+static void fix_class_ref(Class *ref, const char *name, BOOL isMeta)
+{
+ Class cls;
+
+ // Get pointer to class of this name
+ // NO unconnected, YES class loader
+ // (real class with weak-missing superclass is unconnected now)
+ cls = look_up_class(name, NO, YES);
+ if (cls) {
+ // Referenced class exists. Fix up the reference.
+ *ref = isMeta ? cls->ISA() : cls;
+ } else {
+ // Referenced class does not exist yet. Insert nil for now
+ // (weak-linking) and fix up the reference if the class arrives later.
+ pendClassReference (ref, name, isMeta);
+ *ref = nil;
+ }
+}
+
+static void _objc_map_class_refs_for_image (header_info * hi)
+{
+ Class *cls_refs;
+ size_t count;
+ unsigned int index;
+
+ // Locate class refs in image
+ cls_refs = _getObjcClassRefs (hi, &count);
+ if (cls_refs) {
+ // Process each class ref
+ for (index = 0; index < count; index += 1) {
+ // Ref is initially class name char*
+ const char *name = (const char *) cls_refs[index];
+ if (!name) continue;
+ fix_class_ref(&cls_refs[index], name, NO /*never meta*/);
+ }
+ }
+}
+
+
+/***********************************************************************
+* _objc_remove_pending_class_refs_in_image
+* Delete any pending class ref fixups for class refs in the given image,
+* because the image is about to be unloaded.
+**********************************************************************/
+static void removePendingReferences(Class *refs, size_t count)
+{
+    Class *end;
+
+    if (!refs) return;
+    if (!pendingClassRefsMap) return;
+
+    end = refs + count;
+
+ // Search the pending class ref table for class refs in this range.
+ // The class refs may have already been stomped with nil,
+ // so there's no way to recover the original class name.
+
+ {
+ const char *key;
+ PendingClassRef *pending;
+ NXMapState state = NXInitMapState(pendingClassRefsMap);
+ while(NXNextMapState(pendingClassRefsMap, &state,
+ (const void **)&key, (const void **)&pending))
+ {
+ for ( ; pending != nil; pending = pending->next) {
+ if (pending->ref >= refs && pending->ref < end) {
+ pending->ref = nil;
+ }
+ }
+ }
+ }
+}
+
+static void _objc_remove_pending_class_refs_in_image(header_info *hi)
+{
+ Class *cls_refs;
+ size_t count;
+
+ // Locate class refs in this image
+ cls_refs = _getObjcClassRefs(hi, &count);
+ removePendingReferences(cls_refs, count);
+}
+
+
+/***********************************************************************
+* map_selrefs. For each selector reference in the specified array,
+* replace the name pointer with a uniqued selector.
+* If copy is TRUE, all selector data is always copied. This is used
+* for registering selectors from unloadable bundles, so the selector
+* can still be used after the bundle's data segment is unmapped.
+**********************************************************************/
+static inline void map_selrefs(SEL *sels, size_t count, BOOL copy)
+{
+ size_t index;
+
+ if (!sels) return;
+
+ sel_lock();
+
+ // Process each selector
+ for (index = 0; index < count; index += 1)
+ {
+ SEL sel;
+
+ // Lookup pointer to uniqued string
+ sel = sel_registerNameNoLock((const char *) sels[index], copy);
+
+ // Replace this selector with uniqued one (avoid
+ // modifying the VM page if this would be a NOP)
+ if (sels[index] != sel) {
+ sels[index] = sel;
+ }
+ }
+
+ sel_unlock();
+}
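+
+// Illustrative sketch of what selector uniquing guarantees (uses the public
+// sel_registerName/sel_getName API with a hypothetical selector name, not
+// the fixup path itself): equal names always map to the same SEL, so
+// selector identity can be compared with ==.
+#if 0
+static void example_selectorUniquing(void)
+{
+    SEL a = sel_registerName("exampleMethod:");
+    SEL b = sel_registerName("exampleMethod:");
+    assert(a == b);
+    assert(0 == strcmp(sel_getName(a), "exampleMethod:"));
+}
+#endif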
+
+
+/***********************************************************************
+* map_method_descs. For each method in the specified method list,
+* replace the name pointer with a uniqued selector.
+* If copy is TRUE, all selector data is always copied. This is used
+* for registering selectors from unloadable bundles, so the selector
+* can still be used after the bundle's data segment is unmapped.
+**********************************************************************/
+static void map_method_descs (struct objc_method_description_list * methods, BOOL copy)
+{
+ int index;
+
+ if (!methods) return;
+
+ sel_lock();
+
+ // Process each method
+ for (index = 0; index < methods->count; index += 1)
+ {
+ struct objc_method_description * method;
+ SEL sel;
+
+ // Get method entry to fix up
+ method = &methods->list[index];
+
+ // Lookup pointer to uniqued string
+ sel = sel_registerNameNoLock((const char *) method->name, copy);
+
+ // Replace this selector with uniqued one (avoid
+ // modifying the VM page if this would be a NOP)
+ if (method->name != sel)
+ method->name = sel;
+ }
+
+ sel_unlock();
+}
+
+
+/***********************************************************************
+* ext_for_protocol
+* Returns the protocol extension for the given protocol.
+* Returns nil if the protocol has no extension.
+**********************************************************************/
+static old_protocol_ext *ext_for_protocol(old_protocol *proto)
+{
+ if (!proto) return nil;
+ if (!protocol_ext_map) return nil;
+ else return (old_protocol_ext *)NXMapGet(protocol_ext_map, proto);
+}
+
+
+/***********************************************************************
+* lookup_method
+* Search a protocol method list for a selector.
+**********************************************************************/
+static struct objc_method_description *
+lookup_method(struct objc_method_description_list *mlist, SEL aSel)
+{
+ if (mlist) {
+ int i;
+ for (i = 0; i < mlist->count; i++) {
+ if (mlist->list[i].name == aSel) {
+ return mlist->list+i;
+ }
+ }
+ }
+ return nil;
+}
+
+
+/***********************************************************************
+* lookup_protocol_method
+* Search for a selector in a protocol
+* (and optionally recursively all incorporated protocols)
+**********************************************************************/
+struct objc_method_description *
+lookup_protocol_method(old_protocol *proto, SEL aSel,
+ BOOL isRequiredMethod, BOOL isInstanceMethod,
+ BOOL recursive)
+{
+ struct objc_method_description *m = nil;
+ old_protocol_ext *ext;
+
+ if (isRequiredMethod) {
+ if (isInstanceMethod) {
+ m = lookup_method(proto->instance_methods, aSel);
+ } else {
+ m = lookup_method(proto->class_methods, aSel);
+ }
+ } else if ((ext = ext_for_protocol(proto))) {
+ if (isInstanceMethod) {
+ m = lookup_method(ext->optional_instance_methods, aSel);
+ } else {
+ m = lookup_method(ext->optional_class_methods, aSel);
+ }
+ }
+
+ if (!m && recursive && proto->protocol_list) {
+ int i;
+ for (i = 0; !m && i < proto->protocol_list->count; i++) {
+ m = lookup_protocol_method(proto->protocol_list->list[i], aSel,
+ isRequiredMethod,isInstanceMethod,true);
+ }
+ }
+
+ return m;
+}
+
+
+/***********************************************************************
+* protocol_getName
+* Returns the name of the given protocol.
+**********************************************************************/
+const char *protocol_getName(Protocol *p)
+{
+ old_protocol *proto = oldprotocol(p);
+ if (!proto) return "nil";
+ return proto->protocol_name;
+}
+
+
+/***********************************************************************
+* protocol_getMethodDescription
+* Returns the description of a named method.
+* Searches either required or optional methods.
+* Searches either instance or class methods.
+**********************************************************************/
+struct objc_method_description
+protocol_getMethodDescription(Protocol *p, SEL aSel,
+ BOOL isRequiredMethod, BOOL isInstanceMethod)
+{
+ struct objc_method_description empty = {nil, nil};
+ old_protocol *proto = oldprotocol(p);
+ struct objc_method_description *desc;
+ if (!proto) return empty;
+
+ desc = lookup_protocol_method(proto, aSel,
+ isRequiredMethod, isInstanceMethod, true);
+ if (desc) return *desc;
+ else return empty;
+}
+
+
+/***********************************************************************
+* protocol_copyMethodDescriptionList
+* Returns an array of method descriptions from a protocol.
+* Copies either required or optional methods.
+* Copies either instance or class methods.
+**********************************************************************/
+struct objc_method_description *
+protocol_copyMethodDescriptionList(Protocol *p,
+ BOOL isRequiredMethod,
+ BOOL isInstanceMethod,
+ unsigned int *outCount)
+{
+ struct objc_method_description_list *mlist = nil;
+ old_protocol *proto = oldprotocol(p);
+ old_protocol_ext *ext;
+ unsigned int i, count;
+ struct objc_method_description *result;
+
+ if (!proto) {
+ if (outCount) *outCount = 0;
+ return nil;
+ }
+
+ if (isRequiredMethod) {
+ if (isInstanceMethod) {
+ mlist = proto->instance_methods;
+ } else {
+ mlist = proto->class_methods;
+ }
+ } else if ((ext = ext_for_protocol(proto))) {
+ if (isInstanceMethod) {
+ mlist = ext->optional_instance_methods;
+ } else {
+ mlist = ext->optional_class_methods;
+ }
+ }
+
+ if (!mlist) {
+ if (outCount) *outCount = 0;
+ return nil;
+ }
+
+ count = mlist->count;
+ result = (struct objc_method_description *)
+ calloc(count + 1, sizeof(struct objc_method_description));
+ for (i = 0; i < count; i++) {
+ result[i] = mlist->list[i];
+ }
+
+ if (outCount) *outCount = count;
+ return result;
+}
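+
+// Illustrative usage sketch (hypothetical caller): the returned array is
+// calloc'ed with a zero-filled terminator entry and must be free()d by the
+// caller; the objc_method_description structs are shallow copies, so the
+// type strings still point into the protocol's own data.
+#if 0
+static void example_dumpRequiredInstanceMethods(Protocol *proto)
+{
+    unsigned int count;
+    struct objc_method_description *descs =
+        protocol_copyMethodDescriptionList(proto, YES/*required*/,
+                                           YES/*instance*/, &count);
+    for (unsigned int i = 0; i < count; i++) {
+        printf("%s %s\n", sel_getName(descs[i].name), descs[i].types);
+    }
+    free(descs);
+}
+#endif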
+
+
+objc_property_t protocol_getProperty(Protocol *p, const char *name,
+ BOOL isRequiredProperty, BOOL isInstanceProperty)
+{
+ old_protocol *proto = oldprotocol(p);
+ old_protocol_ext *ext;
+ old_protocol_list *proto_list;
+
+ if (!proto || !name) return nil;
+
+ if (!isRequiredProperty || !isInstanceProperty) {
+ // Only required instance properties are currently supported
+ return nil;
+ }
+
+ if ((ext = ext_for_protocol(proto))) {
+ old_property_list *plist;
+ if ((plist = ext->instance_properties)) {
+ uint32_t i;
+ for (i = 0; i < plist->count; i++) {
+ old_property *prop = property_list_nth(plist, i);
+ if (0 == strcmp(name, prop->name)) {
+ return (objc_property_t)prop;
+ }
+ }
+ }
+ }
+
+ if ((proto_list = proto->protocol_list)) {
+ int i;
+ for (i = 0; i < proto_list->count; i++) {
+ objc_property_t prop =
+ protocol_getProperty((Protocol *)proto_list->list[i], name,
+ isRequiredProperty, isInstanceProperty);
+ if (prop) return prop;
+ }
+ }
+
+ return nil;
+}
+
+
+objc_property_t *protocol_copyPropertyList(Protocol *p, unsigned int *outCount)
+{
+ old_property **result = nil;
+ old_protocol_ext *ext;
+ old_property_list *plist;
+
+ old_protocol *proto = oldprotocol(p);
+ if (! (ext = ext_for_protocol(proto))) {
+ if (outCount) *outCount = 0;
+ return nil;
+ }
+
+ plist = ext->instance_properties;
+ result = copyPropertyList(plist, outCount);
+
+ return (objc_property_t *)result;
+}
+
+
+/***********************************************************************
+* protocol_copyProtocolList
+* Copies this protocol's incorporated protocols.
+* Does not copy those protocols' incorporated protocols in turn.
+**********************************************************************/
+Protocol * __unsafe_unretained *
+protocol_copyProtocolList(Protocol *p, unsigned int *outCount)
+{
+ unsigned int count = 0;
+ Protocol **result = nil;
+ old_protocol *proto = oldprotocol(p);
+
+ if (!proto) {
+ if (outCount) *outCount = 0;
+ return nil;
+ }
+
+ if (proto->protocol_list) {
+ count = (unsigned int)proto->protocol_list->count;
+ }
+ if (count > 0) {
+ unsigned int i;
+ result = (Protocol **)malloc((count+1) * sizeof(Protocol *));
+
+ for (i = 0; i < count; i++) {
+ result[i] = (Protocol *)proto->protocol_list->list[i];
+ }
+ result[i] = nil;
+ }
+
+ if (outCount) *outCount = count;
+ return result;
+}
+
+
+BOOL protocol_conformsToProtocol(Protocol *self_gen, Protocol *other_gen)
+{
+ old_protocol *self = oldprotocol(self_gen);
+ old_protocol *other = oldprotocol(other_gen);
+
+ if (!self || !other) {
+ return NO;
+ }
+
+ if (0 == strcmp(self->protocol_name, other->protocol_name)) {
+ return YES;
+ }
+
+ if (self->protocol_list) {
+ int i;
+ for (i = 0; i < self->protocol_list->count; i++) {
+ old_protocol *proto = self->protocol_list->list[i];
+ if (0 == strcmp(other->protocol_name, proto->protocol_name)) {
+ return YES;
+ }
+ if (protocol_conformsToProtocol((Protocol *)proto, other_gen)) {
+ return YES;
+ }
+ }
+ }
+
+ return NO;
+}
+
+
+BOOL protocol_isEqual(Protocol *self, Protocol *other)
+{
+ if (self == other) return YES;
+ if (!self || !other) return NO;
+
+ if (!protocol_conformsToProtocol(self, other)) return NO;
+ if (!protocol_conformsToProtocol(other, self)) return NO;
+
+ return YES;
+}
+
+
+/***********************************************************************
+* _protocol_getMethodTypeEncoding
+* Return the @encode string for the requested protocol method.
+* Returns nil if the compiler did not emit any extended @encode data.
+* Locking: runtimeLock must not be held by the caller
+**********************************************************************/
+const char *
+_protocol_getMethodTypeEncoding(Protocol *proto_gen, SEL sel,
+ BOOL isRequiredMethod, BOOL isInstanceMethod)
+{
+ old_protocol *proto = oldprotocol(proto_gen);
+ if (!proto) return nil;
+ old_protocol_ext *ext = ext_for_protocol(proto);
+ if (!ext) return nil;
+ if (ext->size < offsetof(old_protocol_ext, extendedMethodTypes) + sizeof(ext->extendedMethodTypes)) return nil;
+ if (! ext->extendedMethodTypes) return nil;
+
+ struct objc_method_description *m =
+ lookup_protocol_method(proto, sel,
+ isRequiredMethod, isInstanceMethod, false);
+ if (!m) {
+ // No method with that name. Search incorporated protocols.
+ if (proto->protocol_list) {
+ for (int i = 0; i < proto->protocol_list->count; i++) {
+ const char *enc =
+ _protocol_getMethodTypeEncoding((Protocol *)proto->protocol_list->list[i], sel, isRequiredMethod, isInstanceMethod);
+ if (enc) return enc;
+ }
+ }
+ return nil;
+ }
+
+ int i = 0;
+ if (isRequiredMethod && isInstanceMethod) {
+ i += ((uintptr_t)m - (uintptr_t)proto->instance_methods) / sizeof(proto->instance_methods->list[0]);
+ goto done;
+ } else if (proto->instance_methods) {
+ i += proto->instance_methods->count;
+ }
+
+ if (isRequiredMethod && !isInstanceMethod) {
+ i += ((uintptr_t)m - (uintptr_t)proto->class_methods) / sizeof(proto->class_methods->list[0]);
+ goto done;
+ } else if (proto->class_methods) {
+ i += proto->class_methods->count;
+ }
+
+ if (!isRequiredMethod && isInstanceMethod) {
+ i += ((uintptr_t)m - (uintptr_t)ext->optional_instance_methods) / sizeof(ext->optional_instance_methods->list[0]);
+ goto done;
+ } else if (ext->optional_instance_methods) {
+ i += ext->optional_instance_methods->count;
+ }
+
+ if (!isRequiredMethod && !isInstanceMethod) {
+ i += ((uintptr_t)m - (uintptr_t)ext->optional_class_methods) / sizeof(ext->optional_class_methods->list[0]);
+ goto done;
+ } else if (ext->optional_class_methods) {
+ i += ext->optional_class_methods->count;
+ }
+
+ done:
+ return ext->extendedMethodTypes[i];
+}
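+
+// Layout assumed by the index arithmetic above: extendedMethodTypes holds
+// one extended @encode string per declared method, in this fixed order:
+//   required instance methods, required class methods,
+//   optional instance methods, optional class methods.
+// The index of a method is therefore its position within its own list plus
+// the counts of every list that precedes it.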
+
+
+/***********************************************************************
+* objc_allocateProtocol
+* Creates a new protocol. The protocol may not be used until
+* objc_registerProtocol() is called.
+* Returns nil if a protocol with the same name already exists.
+* Locking: acquires classLock
+**********************************************************************/
+Protocol *
+objc_allocateProtocol(const char *name)
+{
+ Class cls = objc_getClass("__IncompleteProtocol");
+
+ mutex_lock(&classLock);
+
+ if (NXMapGet(protocol_map, name)) {
+ mutex_unlock(&classLock);
+ return nil;
+ }
+
+ old_protocol *result = (old_protocol *)
+ _calloc_internal(1, sizeof(old_protocol)
+ + sizeof(old_protocol_ext));
+ old_protocol_ext *ext = (old_protocol_ext *)(result+1);
+
+ result->isa = cls;
+ result->protocol_name = _strdup_internal(name);
+ ext->size = sizeof(old_protocol_ext);
+
+ // fixme reserve name without installing
+
+ NXMapInsert(protocol_ext_map, result, result+1);
+
+ mutex_unlock(&classLock);
+
+ return (Protocol *)result;
+}
+
+
+/***********************************************************************
+* objc_registerProtocol
+* Registers a newly-constructed protocol. The protocol is now
+* ready for use and immutable.
+* Locking: acquires classLock
+**********************************************************************/
+void objc_registerProtocol(Protocol *proto_gen)
+{
+ old_protocol *proto = oldprotocol(proto_gen);
+
+ Class oldcls = objc_getClass("__IncompleteProtocol");
+ Class cls = objc_getClass("Protocol");
+
+ mutex_lock(&classLock);
+
+ if (proto->isa == cls) {
+ _objc_inform("objc_registerProtocol: protocol '%s' was already "
+ "registered!", proto->protocol_name);
+ mutex_unlock(&classLock);
+ return;
+ }
+ if (proto->isa != oldcls) {
+ _objc_inform("objc_registerProtocol: protocol '%s' was not allocated "
+ "with objc_allocateProtocol!", proto->protocol_name);
+ mutex_unlock(&classLock);
+ return;
+ }
+
+ proto->isa = cls;
+
+ NXMapKeyCopyingInsert(protocol_map, proto->protocol_name, proto);
+
+ mutex_unlock(&classLock);
+}
+
+
+/***********************************************************************
+* protocol_addProtocol
+* Adds an incorporated protocol to another protocol.
+* No method enforcement is performed.
+* `proto` must be under construction. `addition` must not.
+* Locking: acquires classLock
+**********************************************************************/
+void
+protocol_addProtocol(Protocol *proto_gen, Protocol *addition_gen)
+{
+ old_protocol *proto = oldprotocol(proto_gen);
+ old_protocol *addition = oldprotocol(addition_gen);
+
+ Class cls = objc_getClass("__IncompleteProtocol");
+
+ if (!proto_gen) return;
+ if (!addition_gen) return;
+
+ mutex_lock(&classLock);
+
+ if (proto->isa != cls) {
+ _objc_inform("protocol_addProtocol: modified protocol '%s' is not "
+ "under construction!", proto->protocol_name);
+ mutex_unlock(&classLock);
+ return;
+ }
+ if (addition->isa == cls) {
+ _objc_inform("protocol_addProtocol: added protocol '%s' is still "
+ "under construction!", addition->protocol_name);
+ mutex_unlock(&classLock);
+ return;
+ }
+
+ old_protocol_list *protolist = proto->protocol_list;
+ if (protolist) {
+ size_t size = sizeof(old_protocol_list)
+ + protolist->count * sizeof(protolist->list[0]);
+ protolist = (old_protocol_list *)
+ _realloc_internal(protolist, size);
+ } else {
+ protolist = (old_protocol_list *)
+ _calloc_internal(1, sizeof(old_protocol_list));
+ }
+
+ protolist->list[protolist->count++] = addition;
+ proto->protocol_list = protolist;
+
+ mutex_unlock(&classLock);
+}
+
+
+/***********************************************************************
+* protocol_addMethodDescription
+* Adds a method to a protocol. The protocol must be under construction.
+* Locking: acquires classLock
+**********************************************************************/
+static void
+_protocol_addMethod(struct objc_method_description_list **list, SEL name, const char *types)
+{
+ if (!*list) {
+ *list = (struct objc_method_description_list *)
+ _calloc_internal(sizeof(struct objc_method_description_list), 1);
+ } else {
+ size_t size = sizeof(struct objc_method_description_list)
+ + (*list)->count * sizeof(struct objc_method_description);
+ *list = (struct objc_method_description_list *)
+ _realloc_internal(*list, size);
+ }
+
+ struct objc_method_description *desc = &(*list)->list[(*list)->count++];
+ desc->name = name;
+ desc->types = _strdup_internal(types ?: "");
+}
+
+void
+protocol_addMethodDescription(Protocol *proto_gen, SEL name, const char *types,
+ BOOL isRequiredMethod, BOOL isInstanceMethod)
+{
+ old_protocol *proto = oldprotocol(proto_gen);
+
+ Class cls = objc_getClass("__IncompleteProtocol");
+
+ if (!proto_gen) return;
+
+ mutex_lock(&classLock);
+
+ if (proto->isa != cls) {
+ _objc_inform("protocol_addMethodDescription: protocol '%s' is not "
+ "under construction!", proto->protocol_name);
+ mutex_unlock(&classLock);
+ return;
+ }
+
+ if (isRequiredMethod && isInstanceMethod) {
+ _protocol_addMethod(&proto->instance_methods, name, types);
+ } else if (isRequiredMethod && !isInstanceMethod) {
+ _protocol_addMethod(&proto->class_methods, name, types);
+ } else if (!isRequiredMethod && isInstanceMethod) {
+ old_protocol_ext *ext = (old_protocol_ext *)(proto+1);
+ _protocol_addMethod(&ext->optional_instance_methods, name, types);
+ } else /* !isRequiredMethod && !isInstanceMethod) */ {
+ old_protocol_ext *ext = (old_protocol_ext *)(proto+1);
+ _protocol_addMethod(&ext->optional_class_methods, name, types);
+ }
+
+ mutex_unlock(&classLock);
+}
+
+
+/***********************************************************************
+* protocol_addProperty
+* Adds a property to a protocol. The protocol must be under construction.
+* Locking: acquires classLock
+**********************************************************************/
+static void
+_protocol_addProperty(old_property_list **plist, const char *name,
+ const objc_property_attribute_t *attrs,
+ unsigned int count)
+{
+ if (!*plist) {
+ *plist = (old_property_list *)
+ _calloc_internal(sizeof(old_property_list), 1);
+ (*plist)->entsize = sizeof(old_property);
+ } else {
+ *plist = (old_property_list *)
+ _realloc_internal(*plist, sizeof(old_property_list)
+ + (*plist)->count * (*plist)->entsize);
+ }
+
+ old_property *prop = property_list_nth(*plist, (*plist)->count++);
+ prop->name = _strdup_internal(name);
+ prop->attributes = copyPropertyAttributeString(attrs, count);
+}
+
+void
+protocol_addProperty(Protocol *proto_gen, const char *name,
+ const objc_property_attribute_t *attrs,
+ unsigned int count,
+ BOOL isRequiredProperty, BOOL isInstanceProperty)
+{
+ old_protocol *proto = oldprotocol(proto_gen);
+
+ Class cls = objc_getClass("__IncompleteProtocol");
+
+ if (!proto) return;
+ if (!name) return;
+
+ mutex_lock(&classLock);
+
+ if (proto->isa != cls) {
+ _objc_inform("protocol_addProperty: protocol '%s' is not "
+ "under construction!", proto->protocol_name);
+ mutex_unlock(&classLock);
+ return;
+ }
+
+ old_protocol_ext *ext = ext_for_protocol(proto);
+
+ if (isRequiredProperty && isInstanceProperty) {
+ _protocol_addProperty(&ext->instance_properties, name, attrs, count);
+ }
+ //else if (isRequiredProperty && !isInstanceProperty) {
+ // _protocol_addProperty(&ext->class_properties, name, attrs, count);
+ //} else if (!isRequiredProperty && isInstanceProperty) {
+ // _protocol_addProperty(&ext->optional_instance_properties, name, attrs, count);
+ //} else /* !isRequiredProperty && !isInstanceProperty) */ {
+ // _protocol_addProperty(&ext->optional_class_properties, name, attrs, count);
+ //}
+
+ mutex_unlock(&classLock);
+}
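+
+// Illustrative end-to-end use of the construction API above (the protocol
+// name, selector name, and type-encoding string are all hypothetical):
+// allocate, populate while the protocol is still an __IncompleteProtocol,
+// then register to make it visible and immutable.
+#if 0
+static void example_buildProtocol(void)
+{
+    Protocol *proto = objc_allocateProtocol("ExampleDataSource");
+    if (!proto) return;   // a protocol with this name already exists
+
+    protocol_addMethodDescription(proto, sel_registerName("numberOfItems"),
+                                  "i16@0:8", YES/*required*/, YES/*instance*/);
+
+    objc_property_attribute_t attrs[] = { { "T", "i" }, { "N", "" } };
+    protocol_addProperty(proto, "count", attrs, 2,
+                         YES/*required*/, YES/*instance*/);
+
+    objc_registerProtocol(proto);
+}
+#endif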
+
+
+/***********************************************************************
+* _objc_fixup_protocol_objects_for_image. For each protocol in the
+* specified image, selectorize the method names and add to the protocol hash.
+**********************************************************************/
+
+static BOOL versionIsExt(uintptr_t version, const char *names, size_t size)
+{
+ // CodeWarrior used isa field for string "Protocol"
+ // from section __OBJC,__class_names. rdar://4951638
+ // gcc (10.4 and earlier) used isa field for version number;
+ // the only version number used on Mac OS X was 2.
+ // gcc (10.5 and later) uses isa field for ext pointer
+
+ if (version < PAGE_SIZE) {
+ return NO;
+ }
+
+ if (version >= (uintptr_t)names && version < (uintptr_t)(names + size)) {
+ return NO;
+ }
+
+ return YES;
+}
+
+static void fix_protocol(old_protocol *proto, Class protocolClass,
+ BOOL isBundle, const char *names, size_t names_size)
+{
+ uintptr_t version;
+ if (!proto) return;
+
+ version = (uintptr_t)proto->isa;
+
+ // Set the protocol's isa
+ proto->isa = protocolClass;
+
+ // Fix up method lists
+ // fixme share across duplicates
+ map_method_descs (proto->instance_methods, isBundle);
+ map_method_descs (proto->class_methods, isBundle);
+
+ // Fix up ext, if any
+ if (versionIsExt(version, names, names_size)) {
+ old_protocol_ext *ext = (old_protocol_ext *)version;
+ NXMapInsert(protocol_ext_map, proto, ext);
+ map_method_descs (ext->optional_instance_methods, isBundle);
+ map_method_descs (ext->optional_class_methods, isBundle);
+ }
+
+    // Record the protocol if we don't have one with this name yet
+ // fixme bundles - copy protocol
+ // fixme unloading
+ if (!NXMapGet(protocol_map, proto->protocol_name)) {
+ NXMapKeyCopyingInsert(protocol_map, proto->protocol_name, proto);
+ if (PrintProtocols) {
+ _objc_inform("PROTOCOLS: protocol at %p is %s",
+ proto, proto->protocol_name);
+ }
+ } else {
+ // duplicate - do nothing
+ if (PrintProtocols) {
+ _objc_inform("PROTOCOLS: protocol at %p is %s (duplicate)",
+ proto, proto->protocol_name);
+ }
+ }
+}
+
+static void _objc_fixup_protocol_objects_for_image (header_info * hi)
+{
+ Class protocolClass = objc_getClass("Protocol");
+ size_t count, i;
+ old_protocol **protos;
+ int isBundle = headerIsBundle(hi);
+ const char *names;
+ size_t names_size;
+
+ mutex_lock(&classLock);
+
+ // Allocate the protocol registry if necessary.
+ if (!protocol_map) {
+ protocol_map =
+ NXCreateMapTableFromZone(NXStrValueMapPrototype, 32,
+ _objc_internal_zone());
+ }
+ if (!protocol_ext_map) {
+ protocol_ext_map =
+ NXCreateMapTableFromZone(NXPtrValueMapPrototype, 32,
+ _objc_internal_zone());
+ }
+
+ protos = _getObjcProtocols(hi, &count);
+ names = _getObjcClassNames(hi, &names_size);
+ for (i = 0; i < count; i++) {
+ fix_protocol(protos[i], protocolClass, isBundle, names, names_size);
+ }
+
+ mutex_unlock(&classLock);
+}
+
+
+/***********************************************************************
+* _objc_fixup_selector_refs. Register all of the selectors in each
+* image, and fix them all up.
+**********************************************************************/
+static void _objc_fixup_selector_refs (const header_info *hi)
+{
+ size_t count;
+ SEL *sels;
+
+ if (PrintPreopt) {
+ if (sel_preoptimizationValid(hi)) {
+ _objc_inform("PREOPTIMIZATION: honoring preoptimized selectors in %s",
+ hi->fname);
+ }
+ else if (_objcHeaderOptimizedByDyld(hi)) {
+ _objc_inform("PREOPTIMIZATION: IGNORING preoptimized selectors in %s",
+ hi->fname);
+ }
+ }
+
+ if (sel_preoptimizationValid(hi)) return;
+
+ sels = _getObjcSelectorRefs (hi, &count);
+
+ map_selrefs(sels, count, headerIsBundle(hi));
+}
+
+static inline BOOL _is_threaded() {
+#if TARGET_OS_WIN32
+ return YES;
+#else
+ return pthread_is_threaded_np() != 0;
+#endif
+}
+
+#if !TARGET_OS_WIN32
+/***********************************************************************
+* unmap_image
+* Process the given image which is about to be unmapped by dyld.
+* mh is mach_header instead of headerType because that's what
+* dyld_priv.h says even for 64-bit.
+**********************************************************************/
+void
+unmap_image(const struct mach_header *mh, intptr_t vmaddr_slide)
+{
+ recursive_mutex_lock(&loadMethodLock);
+ unmap_image_nolock(mh);
+ recursive_mutex_unlock(&loadMethodLock);
+}
+
+
+/***********************************************************************
+* map_images
+* Process the given images which are being mapped in by dyld.
+* Calls ABI-agnostic code after taking ABI-specific locks.
+**********************************************************************/
+const char *
+map_images(enum dyld_image_states state, uint32_t infoCount,
+ const struct dyld_image_info infoList[])
+{
+ const char *err;
+
+ recursive_mutex_lock(&loadMethodLock);
+ err = map_images_nolock(state, infoCount, infoList);
+ recursive_mutex_unlock(&loadMethodLock);
+
+ return err;
+}
+
+
+/***********************************************************************
+* load_images
+* Process +load in the given images which are being mapped in by dyld.
+* Calls ABI-agnostic code after taking ABI-specific locks.
+*
+* Locking: acquires classLock and loadMethodLock
+**********************************************************************/
+const char *
+load_images(enum dyld_image_states state, uint32_t infoCount,
+ const struct dyld_image_info infoList[])
+{
+ BOOL found;
+
+ recursive_mutex_lock(&loadMethodLock);
+
+ // Discover +load methods
+ found = load_images_nolock(state, infoCount, infoList);
+
+ // Call +load methods (without classLock - re-entrant)
+ if (found) {
+ call_load_methods();
+ }
+
+ recursive_mutex_unlock(&loadMethodLock);
+
+ return nil;
+}
+#endif
+
+
+/***********************************************************************
+* _read_images
+* Perform metadata processing for hCount images starting with firstNewHeader
+**********************************************************************/
+void _read_images(header_info **hList, uint32_t hCount)
+{
+ uint32_t i;
+ BOOL categoriesLoaded = NO;
+
+ if (!class_hash) _objc_init_class_hash();
+
+ // Parts of this order are important for correctness or performance.
+
+ // Read classes from all images.
+ for (i = 0; i < hCount; i++) {
+ _objc_read_classes_from_image(hList[i]);
+ }
+
+ // Read categories from all images.
+ // But not if any other threads are running - they might
+ // call a category method before the fixups below are complete.
+ if (!_is_threaded()) {
+ BOOL needFlush = NO;
+ for (i = 0; i < hCount; i++) {
+ needFlush |= _objc_read_categories_from_image(hList[i]);
+ }
+ if (needFlush) flush_marked_caches();
+ categoriesLoaded = YES;
+ }
+
+ // Connect classes from all images.
+ for (i = 0; i < hCount; i++) {
+ _objc_connect_classes_from_image(hList[i]);
+ }
+
+ // Fix up class refs, selector refs, and protocol objects from all images.
+ for (i = 0; i < hCount; i++) {
+ _objc_map_class_refs_for_image(hList[i]);
+ _objc_fixup_selector_refs(hList[i]);
+ _objc_fixup_protocol_objects_for_image(hList[i]);
+ }
+
+    // Read categories from all images, unless they were already
+    // attached above (the single-threaded case, where attaching
+    // earlier is both safe and more efficient).
+ if (!categoriesLoaded) {
+ BOOL needFlush = NO;
+ for (i = 0; i < hCount; i++) {
+ needFlush |= _objc_read_categories_from_image(hList[i]);
+ }
+ if (needFlush) flush_marked_caches();
+ }
+
+ // Multi-threaded category load MUST BE LAST to avoid a race.
+}
+
+
+/***********************************************************************
+* prepare_load_methods
+* Schedule +load for classes in this image, any un-+load-ed
+* superclasses in other images, and any categories in this image.
+**********************************************************************/
+// Recursively schedule +load for cls and any un-+load-ed superclasses.
+// cls must already be connected.
+static void schedule_class_load(Class cls)
+{
+ if (cls->info & CLS_LOADED) return;
+ if (cls->superclass) schedule_class_load(cls->superclass);
+ add_class_to_loadable_list(cls);
+ cls->info |= CLS_LOADED;
+}
+
+void prepare_load_methods(header_info *hi)
+{
+ Module mods;
+ unsigned int midx;
+
+
+ if (_objcHeaderIsReplacement(hi)) {
+ // Ignore any classes in this image
+ return;
+ }
+
+ // Major loop - process all modules in the image
+ mods = hi->mod_ptr;
+ for (midx = 0; midx < hi->mod_count; midx += 1)
+ {
+ unsigned int index;
+
+ // Skip module containing no classes
+ if (mods[midx].symtab == nil)
+ continue;
+
+ // Minor loop - process all the classes in given module
+ for (index = 0; index < mods[midx].symtab->cls_def_cnt; index += 1)
+ {
+ // Locate the class description pointer
+ Class cls = (Class)mods[midx].symtab->defs[index];
+ if (cls->info & CLS_CONNECTED) {
+ schedule_class_load(cls);
+ }
+ }
+ }
+
+
+ // Major loop - process all modules in the header
+ mods = hi->mod_ptr;
+
+ // NOTE: The module and category lists are traversed backwards
+ // to preserve the pre-10.4 processing order. Changing the order
+ // would have a small chance of introducing binary compatibility bugs.
+ midx = (unsigned int)hi->mod_count;
+ while (midx-- > 0) {
+ unsigned int index;
+ unsigned int total;
+ Symtab symtab = mods[midx].symtab;
+
+ // Nothing to do for a module without a symbol table
+ if (mods[midx].symtab == nil)
+ continue;
+ // Total entries in symbol table (class entries followed
+ // by category entries)
+ total = mods[midx].symtab->cls_def_cnt +
+ mods[midx].symtab->cat_def_cnt;
+
+ // Minor loop - register all categories from given module
+ index = total;
+ while (index-- > mods[midx].symtab->cls_def_cnt) {
+ old_category *cat = (old_category *)symtab->defs[index];
+ add_category_to_loadable_list((Category)cat);
+ }
+ }
+}
+
+
+#if TARGET_OS_WIN32
+
+void unload_class(Class cls)
+{
+}
+
+#else
+
+/***********************************************************************
+* _objc_remove_classes_in_image
+* Remove all classes in the given image from the runtime, because
+* the image is about to be unloaded.
+* Things to clean up:
+* class_hash
+* unconnected_class_hash
+* pending subclasses list (only if class is still unconnected)
+* loadable class list
+* class's method caches
+* class refs in all other images
+**********************************************************************/
+// Re-pend any class references in refs that point into [start..end)
+static void rependClassReferences(Class *refs, size_t count,
+ uintptr_t start, uintptr_t end)
+{
+ size_t i;
+
+ if (!refs) return;
+
+ // Process each class ref
+ for (i = 0; i < count; i++) {
+ if ((uintptr_t)(refs[i]) >= start && (uintptr_t)(refs[i]) < end) {
+ pendClassReference(&refs[i], refs[i]->name,
+ (refs[i]->info & CLS_META) ? YES : NO);
+ refs[i] = nil;
+ }
+ }
+}
+
+
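+// Free p only if it points to a live heap block. malloc_size() returns 0 for
+// pointers that were not heap-allocated (e.g. strings baked into the binary),
+// so those are silently left alone.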
+void try_free(const void *p)
+{
+ if (p && malloc_size(p)) free((void *)p);
+}
+
+// Deallocate all memory in a method list
+static void unload_mlist(old_method_list *mlist)
+{
+ int i;
+ for (i = 0; i < mlist->method_count; i++) {
+ try_free(mlist->method_list[i].method_types);
+ }
+ try_free(mlist);
+}
+
+static void unload_property_list(old_property_list *proplist)
+{
+ uint32_t i;
+
+ if (!proplist) return;
+
+ for (i = 0; i < proplist->count; i++) {
+ old_property *prop = property_list_nth(proplist, i);
+ try_free(prop->name);
+ try_free(prop->attributes);
+ }
+ try_free(proplist);
+}
+
+
+// Deallocate all memory in a class.
+void unload_class(Class cls)
+{
+ // Free method cache
+ // This dereferences the cache contents; do this before freeing methods
+ if (cls->cache && cls->cache != &_objc_empty_cache) {
+ _cache_free(cls->cache);
+ }
+
+ // Free ivar lists
+ if (cls->ivars) {
+ int i;
+ for (i = 0; i < cls->ivars->ivar_count; i++) {
+ try_free(cls->ivars->ivar_list[i].ivar_name);
+ try_free(cls->ivars->ivar_list[i].ivar_type);
+ }
+ try_free(cls->ivars);
+ }
+
+ // Free fixed-up method lists and method list array
+ if (cls->methodLists) {
+ // more than zero method lists
+ if (cls->info & CLS_NO_METHOD_ARRAY) {
+ // one method list
+ unload_mlist((old_method_list *)cls->methodLists);
+ }
+ else {
+ // more than one method list
+ old_method_list **mlistp;
+ for (mlistp = cls->methodLists;
+ *mlistp != nil && *mlistp != END_OF_METHODS_LIST;
+ mlistp++)
+ {
+ unload_mlist(*mlistp);
+ }
+ free(cls->methodLists);
+ }
+ }
+
+ // Free protocol list
+ old_protocol_list *protos = cls->protocols;
+ while (protos) {
+ old_protocol_list *dead = protos;
+ protos = protos->next;
+ try_free(dead);
+ }
+
+ if ((cls->info & CLS_EXT)) {
+ if (cls->ext) {
+ // Free property lists and property list array
+ if (cls->ext->propertyLists) {
+ // more than zero property lists
+ if (cls->info & CLS_NO_PROPERTY_ARRAY) {
+ // one property list
+ old_property_list *proplist =
+ (old_property_list *)cls->ext->propertyLists;
+ unload_property_list(proplist);
+ } else {
+ // more than one property list
+ old_property_list **plistp;
+ for (plistp = cls->ext->propertyLists;
+ *plistp != nil;
+ plistp++)
+ {
+ unload_property_list(*plistp);
+ }
+ try_free(cls->ext->propertyLists);
+ }
+ }
+
+ // Free weak ivar layout
+ try_free(cls->ext->weak_ivar_layout);
+
+ // Free ext
+ try_free(cls->ext);
+ }
+
+ // Free non-weak ivar layout
+ try_free(cls->ivar_layout);
+ }
+
+ // Free class name
+ try_free(cls->name);
+
+ // Free cls
+ try_free(cls);
+}
+
+
+static void _objc_remove_classes_in_image(header_info *hi)
+{
+ unsigned int index;
+ unsigned int midx;
+ Module mods;
+
+ mutex_lock(&classLock);
+
+ // Major loop - process all modules in the image
+ mods = hi->mod_ptr;
+ for (midx = 0; midx < hi->mod_count; midx += 1)
+ {
+ // Skip module containing no classes
+ if (mods[midx].symtab == nil)
+ continue;
+
+ // Minor loop - process all the classes in given module
+ for (index = 0; index < mods[midx].symtab->cls_def_cnt; index += 1)
+ {
+ Class cls;
+
+ // Locate the class description pointer
+ cls = (Class)mods[midx].symtab->defs[index];
+
+ // Remove from loadable class list, if present
+ remove_class_from_loadable_list(cls);
+
+ // Remove from unconnected_class_hash and pending subclasses
+ if (unconnected_class_hash && NXHashMember(unconnected_class_hash, cls)) {
+ NXHashRemove(unconnected_class_hash, cls);
+ if (pendingSubclassesMap) {
+ // Find this class in its superclass's pending list
+ char *supercls_name = (char *)cls->superclass;
+ PendingSubclass *pending = (PendingSubclass *)
+ NXMapGet(pendingSubclassesMap, supercls_name);
+ for ( ; pending != nil; pending = pending->next) {
+ if (pending->subclass == cls) {
+ pending->subclass = Nil;
+ break;
+ }
+ }
+ }
+ }
+
+ // Remove from class_hash
+ NXHashRemove(class_hash, cls);
+ objc_removeRegisteredClass(cls);
+
+ // Free heap memory pointed to by the class
+ unload_class(cls->ISA());
+ unload_class(cls);
+ }
+ }
+
+
+ // Search all other images for class refs that point back to this range.
+ // Un-fix and re-pend any such class refs.
+
+ // Get the location of the dying image's __OBJC segment
+ uintptr_t seg;
+ unsigned long seg_size;
+ seg = (uintptr_t)getsegmentdata(hi->mhdr, "__OBJC", &seg_size);
+
+ header_info *other_hi;
+ for (other_hi = FirstHeader; other_hi != nil; other_hi = other_hi->next) {
+ Class *other_refs;
+ size_t count;
+ if (other_hi == hi) continue; // skip the image being unloaded
+
+ // Fix class refs in the other image
+ other_refs = _getObjcClassRefs(other_hi, &count);
+ rependClassReferences(other_refs, count, seg, seg+seg_size);
+ }
+
+ mutex_unlock(&classLock);
+}
+
+
+/***********************************************************************
+* _objc_remove_categories_in_image
+* Remove all categories in the given image from the runtime, because
+* the image is about to be unloaded.
+* Things to clean up:
+* unresolved category list
+* loadable category list
+**********************************************************************/
+static void _objc_remove_categories_in_image(header_info *hi)
+{
+ Module mods;
+ unsigned int midx;
+
+ // Major loop - process all modules in the header
+ mods = hi->mod_ptr;
+
+ for (midx = 0; midx < hi->mod_count; midx++) {
+ unsigned int index;
+ unsigned int total;
+ Symtab symtab = mods[midx].symtab;
+
+ // Nothing to do for a module without a symbol table
+ if (symtab == nil) continue;
+
+ // Total entries in symbol table (class entries followed
+ // by category entries)
+ total = symtab->cls_def_cnt + symtab->cat_def_cnt;
+
+ // Minor loop - check all categories from given module
+ for (index = symtab->cls_def_cnt; index < total; index++) {
+ old_category *cat = (old_category *)symtab->defs[index];
+
+ // Clean up loadable category list
+ remove_category_from_loadable_list((Category)cat);
+
+ // Clean up category_hash
+ if (category_hash) {
+ _objc_unresolved_category *cat_entry = (_objc_unresolved_category *)NXMapGet(category_hash, cat->class_name);
+ for ( ; cat_entry != nil; cat_entry = cat_entry->next) {
+ if (cat_entry->cat == cat) {
+ cat_entry->cat = nil;
+ break;
+ }
+ }
+ }
+ }
+ }
+}
+
+
+/***********************************************************************
+* unload_paranoia
+* Various paranoid debugging checks that look for poorly-behaving
+* unloadable bundles.
+* Called by _objc_unmap_image when OBJC_UNLOAD_DEBUG is set.
+**********************************************************************/
+static void unload_paranoia(header_info *hi)
+{
+ // Get the location of the dying image's __OBJC segment
+ uintptr_t seg;
+ unsigned long seg_size;
+ seg = (uintptr_t)getsegmentdata(hi->mhdr, "__OBJC", &seg_size);
+
+ _objc_inform("UNLOAD DEBUG: unloading image '%s' [%p..%p]",
+ hi->fname, (void *)seg, (void*)(seg+seg_size));
+
+ mutex_lock(&classLock);
+
+ // Make sure the image contains no categories on surviving classes.
+ {
+ Module mods;
+ unsigned int midx;
+
+ // Major loop - process all modules in the header
+ mods = hi->mod_ptr;
+
+ for (midx = 0; midx < hi->mod_count; midx++) {
+ unsigned int index;
+ unsigned int total;
+ Symtab symtab = mods[midx].symtab;
+
+ // Nothing to do for a module without a symbol table
+ if (symtab == nil) continue;
+
+ // Total entries in symbol table (class entries followed
+ // by category entries)
+ total = symtab->cls_def_cnt + symtab->cat_def_cnt;
+
+ // Minor loop - check all categories from given module
+ for (index = symtab->cls_def_cnt; index < total; index++) {
+ old_category *cat = (old_category *)symtab->defs[index];
+ struct objc_class query;
+
+ query.name = cat->class_name;
+ if (NXHashMember(class_hash, &query)) {
+ _objc_inform("UNLOAD DEBUG: dying image contains category '%s(%s)' on surviving class '%s'!", cat->class_name, cat->category_name, cat->class_name);
+ }
+ }
+ }
+ }
+
+ // Make sure no surviving class is in the dying image.
+ // Make sure no surviving class has a superclass in the dying image.
+ // fixme check method implementations too
+ {
+ Class cls;
+ NXHashState state;
+
+ state = NXInitHashState(class_hash);
+ while (NXNextHashState(class_hash, &state, (void **)&cls)) {
+ if ((vm_address_t)cls >= seg &&
+ (vm_address_t)cls < seg+seg_size)
+ {
+ _objc_inform("UNLOAD DEBUG: dying image contains surviving class '%s'!", cls->name);
+ }
+
+ if ((vm_address_t)cls->superclass >= seg &&
+ (vm_address_t)cls->superclass < seg+seg_size)
+ {
+ _objc_inform("UNLOAD DEBUG: dying image contains superclass '%s' of surviving class '%s'!", cls->superclass->name, cls->name);
+ }
+ }
+ }
+
+ mutex_unlock(&classLock);
+}
+
+
+/***********************************************************************
+* _unload_image
+* Only handles MH_BUNDLE for now.
+* Locking: loadMethodLock acquired by unmap_image
+**********************************************************************/
+void _unload_image(header_info *hi)
+{
+ recursive_mutex_assert_locked(&loadMethodLock);
+
+ // Cleanup:
+ // Remove image's classes from the class list and free auxiliary data.
+ // Remove image's unresolved or loadable categories and free auxiliary data
+ // Remove image's unresolved class refs.
+ _objc_remove_classes_in_image(hi);
+ _objc_remove_categories_in_image(hi);
+ _objc_remove_pending_class_refs_in_image(hi);
+
+ // Perform various debugging checks if requested.
+ if (DebugUnload) unload_paranoia(hi);
+}
+
+#endif
+
+
+/***********************************************************************
+* objc_addClass. Add the specified class to the table of known classes,
+* after doing a little verification and fixup.
+**********************************************************************/
+void objc_addClass (Class cls)
+{
+ OBJC_WARN_DEPRECATED;
+
+ // Synchronize access to hash table
+ mutex_lock (&classLock);
+
+ // Make sure both the class and the metaclass have caches!
+ // Clear all bits of the info fields except CLS_CLASS and CLS_META.
+ // Normally these bits are already clear but if someone tries to cons
+ // up their own class on the fly they might need to be cleared.
+ if (cls->cache == nil) {
+ cls->cache = (Cache) &_objc_empty_cache;
+ cls->info = CLS_CLASS;
+ }
+
+ if (cls->ISA()->cache == nil) {
+ cls->ISA()->cache = (Cache) &_objc_empty_cache;
+ cls->ISA()->info = CLS_META;
+ }
+
+ // methodLists should be:
+ // 1. nil (Tiger and later only)
+ // 2. A -1 terminated method list array
+ // In either case, CLS_NO_METHOD_ARRAY remains clear.
+ // If the user manipulates the method list directly,
+ // they must use the magic private format.
+
+ // Add the class to the table
+ (void) NXHashInsert (class_hash, cls);
+ objc_addRegisteredClass(cls);
+
+ // Superclass is no longer a leaf for cache flushing
+ if (cls->superclass && (cls->superclass->info & CLS_LEAF)) {
+ cls->superclass->clearInfo(CLS_LEAF);
+ cls->superclass->ISA()->clearInfo(CLS_LEAF);
+ }
+
+ // Desynchronize
+ mutex_unlock (&classLock);
+}
+
+/***********************************************************************
+* _objcTweakMethodListPointerForClass.
+* Change the class's method list pointer to a method list array.
+* Does nothing if the method list pointer is already a method list array.
+* If the class is currently in use, methodListLock must be held by the caller.
+**********************************************************************/
+static void _objcTweakMethodListPointerForClass(Class cls)
+{
+ old_method_list * originalList;
+ const int initialEntries = 4;
+ size_t mallocSize;
+ old_method_list ** ptr;
+
+ // Do nothing if methodLists is already an array.
+ if (cls->methodLists && !(cls->info & CLS_NO_METHOD_ARRAY)) return;
+
+ // Remember existing list
+ originalList = (old_method_list *) cls->methodLists;
+
+ // Allocate and zero a method list array
+ mallocSize = sizeof(old_method_list *) * initialEntries;
+ ptr = (old_method_list **) _calloc_internal(1, mallocSize);
+
+ // Insert the existing list into the array
+ ptr[initialEntries - 1] = END_OF_METHODS_LIST;
+ ptr[0] = originalList;
+
+ // Replace existing list with array
+ cls->methodLists = ptr;
+ cls->clearInfo(CLS_NO_METHOD_ARRAY);
+}
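+
+// Illustrative sketch (not in the original source): with initialEntries == 4,
+// a class whose single method list was L now has
+//     cls->methodLists == { L, nil, nil, END_OF_METHODS_LIST }
+// _objc_insertMethods() below fills the nil slots before growing the array.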
+
+
+/***********************************************************************
+* _objc_insertMethods.
+* Adds methods to a class.
+* Does not flush any method caches.
+* Does not take any locks.
+* If the class is already in use, use class_addMethods() instead.
+**********************************************************************/
+void _objc_insertMethods(Class cls, old_method_list *mlist, old_category *cat)
+{
+ old_method_list ***list;
+ old_method_list **ptr;
+ ptrdiff_t endIndex;
+ size_t oldSize;
+ size_t newSize;
+
+ if (!cls->methodLists) {
+ // cls has no methods - simply use this method list
+ cls->methodLists = (old_method_list **)mlist;
+ cls->setInfo(CLS_NO_METHOD_ARRAY);
+ return;
+ }
+
+ // Log any existing methods being replaced
+ if (PrintReplacedMethods) {
+ int i;
+ for (i = 0; i < mlist->method_count; i++) {
+ extern IMP findIMPInClass(Class cls, SEL sel);
+ SEL sel = sel_registerName((char *)mlist->method_list[i].method_name);
+ IMP newImp = mlist->method_list[i].method_imp;
+ IMP oldImp;
+
+ if ((oldImp = findIMPInClass(cls, sel))) {
+ logReplacedMethod(cls->name, sel, ISMETA(cls),
+ cat ? cat->category_name : nil,
+ oldImp, newImp);
+ }
+ }
+ }
+
+ // Create method list array if necessary
+ _objcTweakMethodListPointerForClass(cls);
+
+ list = &cls->methodLists;
+
+ // Locate unused entry for insertion point
+ ptr = *list;
+ while ((*ptr != 0) && (*ptr != END_OF_METHODS_LIST))
+ ptr += 1;
+
+ // If array is full, add to it
+ if (*ptr == END_OF_METHODS_LIST)
+ {
+ // Calculate old and new dimensions
+ endIndex = ptr - *list;
+ oldSize = (endIndex + 1) * sizeof(void *);
+ newSize = oldSize + sizeof(old_method_list *); // only increase by 1
+
+ // Grow the method list array by one.
+ // This block may be from user code; don't use _realloc_internal
+ *list = (old_method_list **)realloc(*list, newSize);
+
+        // Zero out the additional part of the new array
+ bzero (&((*list)[endIndex]), newSize - oldSize);
+
+ // Place new end marker
+ (*list)[(newSize/sizeof(void *)) - 1] = END_OF_METHODS_LIST;
+
+ // Insertion point corresponds to old array end
+ ptr = &((*list)[endIndex]);
+ }
+
+ // Right shift existing entries by one
+ bcopy (*list, (*list) + 1, (uint8_t *)ptr - (uint8_t *)*list);
+
+    // Insert new method list at beginning of array
+ **list = mlist;
+}
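+
+// Illustrative sketch (not in the original source): inserting M into a class
+// whose array holds { A, B, nil, END_OF_METHODS_LIST } yields
+// { M, A, B, END_OF_METHODS_LIST }, so the newest list is searched first
+// during method lookup.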
+
+/***********************************************************************
+* _objc_removeMethods.
+* Remove methods from a class.
+* Does not take any locks.
+* Does not flush any method caches.
+* If the class is currently in use, use class_removeMethods() instead.
+**********************************************************************/
+void _objc_removeMethods(Class cls, old_method_list *mlist)
+{
+ old_method_list ***list;
+ old_method_list **ptr;
+
+ if (cls->methodLists == nil) {
+ // cls has no methods
+ return;
+ }
+ if (cls->methodLists == (old_method_list **)mlist) {
+ // mlist is the class's only method list - erase it
+ cls->methodLists = nil;
+ return;
+ }
+ if (cls->info & CLS_NO_METHOD_ARRAY) {
+ // cls has only one method list, and this isn't it - do nothing
+ return;
+ }
+
+ // cls has a method list array - search it
+
+ list = &cls->methodLists;
+
+ // Locate list in the array
+ ptr = *list;
+ while (*ptr != mlist) {
+ // fix for radar # 2538790
+ if ( *ptr == END_OF_METHODS_LIST ) return;
+ ptr += 1;
+ }
+
+ // Remove this entry
+ *ptr = 0;
+
+ // Left shift the following entries
+ while (*(++ptr) != END_OF_METHODS_LIST)
+ *(ptr-1) = *ptr;
+ *(ptr-1) = 0;
+}
+
+/***********************************************************************
+* _objc_add_category. Install the specified category's methods and
+* protocols into the class it augments.
+* The class is assumed not to be in use yet: no locks are taken and
+* no method caches are flushed.
+**********************************************************************/
+static inline void _objc_add_category(Class cls, old_category *category, int version)
+{
+ if (PrintConnecting) {
+ _objc_inform("CONNECT: attaching category '%s (%s)'", cls->name, category->category_name);
+ }
+
+ // Augment instance methods
+ if (category->instance_methods)
+ _objc_insertMethods (cls, category->instance_methods, category);
+
+ // Augment class methods
+ if (category->class_methods)
+ _objc_insertMethods (cls->ISA(), category->class_methods, category);
+
+ // Augment protocols
+ if ((version >= 5) && category->protocols)
+ {
+ if (cls->ISA()->version >= 5)
+ {
+ category->protocols->next = cls->protocols;
+ cls->protocols = category->protocols;
+ cls->ISA()->protocols = category->protocols;
+ }
+ else
+ {
+ _objc_inform ("unable to add protocols from category %s...\n", category->category_name);
+ _objc_inform ("class `%s' must be recompiled\n", category->class_name);
+ }
+ }
+
+ // Augment properties
+ if (version >= 7 && category->instance_properties) {
+ if (cls->ISA()->version >= 6) {
+ _class_addProperties(cls, category->instance_properties);
+ } else {
+ _objc_inform ("unable to add properties from category %s...\n", category->category_name);
+ _objc_inform ("class `%s' must be recompiled\n", category->class_name);
+ }
+ }
+}
+
+/***********************************************************************
+* _objc_add_category_flush_caches. Install the specified category's
+* methods into the class it augments, and flush the class' method cache.
+* Return YES if some method caches now need to be flushed.
+**********************************************************************/
+static BOOL _objc_add_category_flush_caches(Class cls, old_category *category, int version)
+{
+ BOOL needFlush = NO;
+
+ // Install the category's methods into its intended class
+ mutex_lock(&methodListLock);
+ _objc_add_category (cls, category, version);
+ mutex_unlock(&methodListLock);
+
+ // Queue for cache flushing so category's methods can get called
+ if (category->instance_methods) {
+ cls->setInfo(CLS_FLUSH_CACHE);
+ needFlush = YES;
+ }
+ if (category->class_methods) {
+ cls->ISA()->setInfo(CLS_FLUSH_CACHE);
+ needFlush = YES;
+ }
+
+ return needFlush;
+}
+
+
+/***********************************************************************
+* reverse_cat
+* Reverse the given linked list of pending categories.
+* The pending category list is built backwards, and needs to be
+* reversed before actually attaching the categories to a class.
+* Returns the head of the new linked list.
+**********************************************************************/
+static _objc_unresolved_category *reverse_cat(_objc_unresolved_category *cat)
+{
+ _objc_unresolved_category *prev;
+ _objc_unresolved_category *cur;
+ _objc_unresolved_category *ahead;
+
+ if (!cat) return nil;
+
+ prev = nil;
+ cur = cat;
+ ahead = cat->next;
+
+ while (cur) {
+ ahead = cur->next;
+ cur->next = prev;
+ prev = cur;
+ cur = ahead;
+ }
+
+ return prev;
+}
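+
+// Sketch: a pending list built newest-first as C -> B -> A is returned as
+// A -> B -> C, so the caller attaches categories in registration order.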
+
+
+/***********************************************************************
+* resolve_categories_for_class.
+* Install all existing categories intended for the specified class.
+* cls must be a true class and not a metaclass.
+**********************************************************************/
+static void resolve_categories_for_class(Class cls)
+{
+ _objc_unresolved_category * pending;
+ _objc_unresolved_category * next;
+
+ // Nothing to do if there are no categories at all
+ if (!category_hash) return;
+
+ // Locate and remove first element in category list
+ // associated with this class
+ pending = (_objc_unresolved_category *)
+ NXMapKeyFreeingRemove (category_hash, cls->name);
+
+ // Traverse the list of categories, if any, registered for this class
+
+ // The pending list is built backwards. Reverse it and walk forwards.
+ pending = reverse_cat(pending);
+
+ while (pending) {
+ if (pending->cat) {
+ // Install the category
+ // use the non-flush-cache version since we are only
+            // called from the class initialization code
+ _objc_add_category(cls, pending->cat, (int)pending->version);
+ }
+
+ // Delink and reclaim this registration
+ next = pending->next;
+ _free_internal(pending);
+ pending = next;
+ }
+}
+
+
+/***********************************************************************
+* _objc_resolve_categories_for_class.
+* Public version of resolve_categories_for_class. This was
+* exported pre-10.4 for Omni et al. to work around a problem
+* with too-lazy category attachment.
+* cls should be a class, but this function can also cope with metaclasses.
+**********************************************************************/
+void _objc_resolve_categories_for_class(Class cls)
+{
+ // If cls is a metaclass, get the class.
+ // resolve_categories_for_class() requires a real class to work correctly.
+ if (ISMETA(cls)) {
+ if (strncmp(cls->name, "_%", 2) == 0) {
+ // Posee's meta's name is smashed and isn't in the class_hash,
+ // so objc_getClass doesn't work.
+ const char *baseName = strchr(cls->name, '%'); // get posee's real name
+ cls = objc_getClass(baseName);
+ } else {
+ cls = objc_getClass(cls->name);
+ }
+ }
+
+ resolve_categories_for_class(cls);
+}
+
+
+/***********************************************************************
+* _objc_register_category.
+* Process a category read from an image.
+* If the category's class exists, attach the category immediately.
+* Classes that need cache flushing are marked but not flushed.
+* If the category's class does not exist yet, pend the category for
+* later attachment. Pending categories are attached in the order
+* they were discovered.
+* Returns YES if some method caches now need to be flushed.
+**********************************************************************/
+static BOOL _objc_register_category(old_category *cat, int version)
+{
+ _objc_unresolved_category * new_cat;
+ _objc_unresolved_category * old;
+ Class theClass;
+
+ // If the category's class exists, attach the category.
+ if ((theClass = objc_lookUpClass(cat->class_name))) {
+ return _objc_add_category_flush_caches(theClass, cat, version);
+ }
+
+ // If the category's class exists but is unconnected,
+ // then attach the category to the class but don't bother
+ // flushing any method caches (because they must be empty).
+ // YES unconnected, NO class_handler
+ if ((theClass = look_up_class(cat->class_name, YES, NO))) {
+ _objc_add_category(theClass, cat, version);
+ return NO;
+ }
+
+
+ // Category's class does not exist yet.
+ // Save the category for later attachment.
+
+ if (PrintConnecting) {
+ _objc_inform("CONNECT: pending category '%s (%s)'", cat->class_name, cat->category_name);
+ }
+
+ // Create category lookup table if needed
+ if (!category_hash)
+ category_hash = NXCreateMapTableFromZone (NXStrValueMapPrototype,
+ 128,
+ _objc_internal_zone ());
+
+ // Locate an existing list of categories, if any, for the class.
+ old = (_objc_unresolved_category *)
+ NXMapGet (category_hash, cat->class_name);
+
+ // Register the category to be fixed up later.
+ // The category list is built backwards, and is reversed again
+ // by resolve_categories_for_class().
+ new_cat = (_objc_unresolved_category *)
+ _malloc_internal(sizeof(_objc_unresolved_category));
+ new_cat->next = old;
+ new_cat->cat = cat;
+ new_cat->version = version;
+ (void) NXMapKeyCopyingInsert (category_hash, cat->class_name, new_cat);
+
+ return NO;
+}
+
+
+const char **
+_objc_copyClassNamesForImage(header_info *hi, unsigned int *outCount)
+{
+ Module mods;
+ unsigned int m;
+ const char **list;
+ int count;
+ int allocated;
+
+ list = nil;
+ count = 0;
+ allocated = 0;
+
+ mods = hi->mod_ptr;
+ for (m = 0; m < hi->mod_count; m++) {
+ int d;
+
+ if (!mods[m].symtab) continue;
+
+ for (d = 0; d < mods[m].symtab->cls_def_cnt; d++) {
+ Class cls = (Class)mods[m].symtab->defs[d];
+ // fixme what about future-ified classes?
+ if (cls->isConnected()) {
+ if (count == allocated) {
+ allocated = allocated*2 + 16;
+ list = (const char **)
+ realloc((void *)list, allocated * sizeof(char *));
+ }
+ list[count++] = cls->name;
+ }
+ }
+ }
+
+ if (count > 0) {
+ // nil-terminate non-empty list
+ if (count == allocated) {
+ allocated = allocated+1;
+ list = (const char **)
+ realloc((void *)list, allocated * sizeof(char *));
+ }
+ list[count] = nil;
+ }
+
+ if (outCount) *outCount = count;
+ return list;
+}
+
+Class gdb_class_getClass(Class cls)
+{
+ const char *className = cls->name;
+    if (!className || !strlen(className)) return Nil;
+    Class rCls = look_up_class(className, NO, NO);
+    return rCls;
+}
+
+Class gdb_object_getClass(id obj)
+{
+ if (!obj) return nil;
+ return gdb_class_getClass(obj->getIsa());
+}
+
+
+/***********************************************************************
+* Lock management
+**********************************************************************/
+rwlock_t selLock = {};
+mutex_t classLock = MUTEX_INITIALIZER;
+mutex_t methodListLock = MUTEX_INITIALIZER;
+mutex_t cacheUpdateLock = MUTEX_INITIALIZER;
+recursive_mutex_t loadMethodLock = RECURSIVE_MUTEX_INITIALIZER;
+
+void lock_init(void)
+{
+ rwlock_init(&selLock);
+ recursive_mutex_init(&loadMethodLock);
+}
+
+
+#endif
**********************************************************************/
// Settings from environment variables
-#if SUPPORT_ENVIRON
-int PrintImages = -1; // env OBJC_PRINT_IMAGES
-int PrintLoading = -1; // env OBJC_PRINT_LOAD_METHODS
-int PrintInitializing = -1; // env OBJC_PRINT_INITIALIZE_METHODS
-int PrintResolving = -1; // env OBJC_PRINT_RESOLVED_METHODS
-int PrintConnecting = -1; // env OBJC_PRINT_CLASS_SETUP
-int PrintProtocols = -1; // env OBJC_PRINT_PROTOCOL_SETUP
-int PrintIvars = -1; // env OBJC_PRINT_IVAR_SETUP
-int PrintVtables = -1; // env OBJC_PRINT_VTABLE_SETUP
-int PrintVtableImages = -1;//env OBJC_PRINT_VTABLE_IMAGES
-int PrintFuture = -1; // env OBJC_PRINT_FUTURE_CLASSES
-int PrintGC = -1; // env OBJC_PRINT_GC
-int PrintPreopt = -1; // env OBJC_PRINT_PREOPTIMIZATION
-int PrintCxxCtors = -1; // env OBJC_PRINT_CXX_CTORS
-int PrintExceptions = -1; // env OBJC_PRINT_EXCEPTIONS
-int PrintExceptionThrow = -1; // env OBJC_PRINT_EXCEPTION_THROW
-int PrintAltHandlers = -1; // env OBJC_PRINT_ALT_HANDLERS
-int PrintDeprecation = -1;// env OBJC_PRINT_DEPRECATION_WARNINGS
-int PrintReplacedMethods = -1; // env OBJC_PRINT_REPLACED_METHODS
-int PrintCaches = -1; // env OBJC_PRINT_CACHE_SETUP
-int PrintPoolHiwat = -1; // env OBJC_PRINT_POOL_HIGHWATER
-int PrintCustomRR = -1; // env OBJC_PRINT_CUSTOM_RR
-int PrintCustomAWZ = -1; // env OBJC_PRINT_CUSTOM_AWZ
-
-int UseInternalZone = -1; // env OBJC_USE_INTERNAL_ZONE
-
-int DebugUnload = -1; // env OBJC_DEBUG_UNLOAD
-int DebugFragileSuperclasses = -1; // env OBJC_DEBUG_FRAGILE_SUPERCLASSES
-int DebugNilSync = -1; // env OBJC_DEBUG_NIL_SYNC
-int DebugNonFragileIvars = -1; // env OBJC_DEBUG_NONFRAGILE_IVARS
-int DebugAltHandlers = -1;// env OBJC_DEBUG_ALT_HANDLERS
-
-int DisableGC = -1; // env OBJC_DISABLE_GC
-int DisableVtables = -1; // env OBJC_DISABLE_VTABLES
-int DisablePreopt = -1; // env OBJC_DISABLE_PREOPTIMIZATION
-int DebugFinalizers = -1; // env OBJC_DEBUG_FINALIZERS
-#endif
+#define OPTION(var, env, help) bool var = false;
+#include "objc-env.h"
+#undef OPTION
+
+struct option_t {
+ bool* var;
+ const char *env;
+ const char *help;
+ size_t envlen;
+};
+
+const option_t Settings[] = {
+#define OPTION(var, env, help) option_t{&var, #env, help, strlen(#env)},
+#include "objc-env.h"
+#undef OPTION
+};
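+
+// Illustrative sketch (objc-env.h itself is not shown here): each line of
+// objc-env.h is expected to be an OPTION(var, ENV_NAME, help) entry such as
+//     OPTION(PrintImages, OBJC_PRINT_IMAGES,
+//            "log image and library names as they are loaded")
+// which the two expansions above turn into `bool PrintImages = false;`
+// and a Settings[] row {&PrintImages, "OBJC_PRINT_IMAGES", help, 17}.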
// objc's key for pthread_getspecific
* which may create a new class.
* Warning: doesn't work if aClassName is the name of a posed-for class's isa!
**********************************************************************/
-id objc_getClass(const char *aClassName)
+Class objc_getClass(const char *aClassName)
{
if (!aClassName) return Nil;
* This is used by ZeroLink, where failing to find a class would be a
* compile-time link error without ZeroLink.
**********************************************************************/
-id objc_getRequiredClass(const char *aClassName)
+Class objc_getRequiredClass(const char *aClassName)
{
- id cls = objc_getClass(aClassName);
+ Class cls = objc_getClass(aClassName);
if (!cls) _objc_fatal("link error: class '%s' not found.", aClassName);
return cls;
}
*
* Formerly objc_getClassWithoutWarning ()
**********************************************************************/
-id objc_lookUpClass(const char *aClassName)
+Class objc_lookUpClass(const char *aClassName)
{
if (!aClassName) return Nil;
// YES unconnected, NO class handler
// (unconnected is OK because it will someday be the real class)
- cls = (Class)look_up_class(name, YES, NO);
+ cls = look_up_class(name, YES, NO);
if (cls) {
if (PrintFuture) {
- _objc_inform("FUTURE: found %p already in use for %s", cls, name);
+ _objc_inform("FUTURE: found %p already in use for %s",
+ (void*)cls, name);
}
return cls;
}
* objc_getMetaClass. Return the metaclass of the named class.
* Warning: doesn't work if aClassName is the name of a posed-for class's isa!
**********************************************************************/
-id objc_getMetaClass(const char *aClassName)
+Class objc_getMetaClass(const char *aClassName)
{
Class cls;
if (!aClassName) return Nil;
- cls = (Class)objc_getClass (aClassName);
+ cls = objc_getClass (aClassName);
if (!cls)
{
_objc_inform ("class `%s' not linked into application", aClassName);
return Nil;
}
- return (id)cls->isa;
+ return cls->ISA();
}
**********************************************************************/
void environ_init(void)
{
-#if SUPPORT_ENVIRON
- int PrintHelp = (getenv("OBJC_HELP") != NULL);
- int PrintOptions = (getenv("OBJC_PRINT_OPTIONS") != NULL);
- int secure = issetugid();
-
- if (secure) {
- // All environment variables are ignored when setuid or setgid.
+ if (issetugid()) {
+        // All environment variables are silently ignored when setuid or setgid.
// This includes OBJC_HELP and OBJC_PRINT_OPTIONS themselves.
+ return;
}
- else {
+
+ bool PrintHelp = false;
+ bool PrintOptions = false;
+
+ // Scan environ[] directly instead of calling getenv() a lot.
+ // This optimizes the case where none are set.
+ for (char **p = *_NSGetEnviron(); *p != nil; p++) {
+ if (0 != strncmp(*p, "OBJC_", 5)) continue;
+
+ if (0 == strncmp(*p, "OBJC_HELP=", 10)) {
+ PrintHelp = true;
+ continue;
+ }
+ if (0 == strncmp(*p, "OBJC_PRINT_OPTIONS=", 19)) {
+ PrintOptions = true;
+ continue;
+ }
+
+        const char *value = strchr(*p, '=');
+        if (!value) continue;  // skip any malformed entry without '='
+ value++;
+
+ for (size_t i = 0; i < sizeof(Settings)/sizeof(Settings[0]); i++) {
+ const option_t *opt = &Settings[i];
+ if ((size_t)(value - *p) == 1+opt->envlen &&
+ 0 == strncmp(*p, opt->env, opt->envlen))
+ {
+ *opt->var = (0 == strcmp(value, "YES"));
+ break;
+ }
+ }
+ }
+
+ // Print OBJC_HELP and OBJC_PRINT_OPTIONS output.
+ if (PrintHelp || PrintOptions) {
if (PrintHelp) {
_objc_inform("Objective-C runtime debugging. Set variable=YES to enable.");
_objc_inform("OBJC_HELP: describe available environment variables");
if (PrintOptions) {
_objc_inform("OBJC_PRINT_OPTIONS is set");
}
- }
-
-#define OPTION(var, env, help) \
- if ( var == -1 ) { \
- char *value = getenv(#env); \
- var = value != NULL && !strcmp("YES", value); \
- if (secure) { \
- if (var) _objc_inform(#env " ignored when running setuid or setgid"); \
- var = 0; \
- } else { \
- if (PrintHelp) _objc_inform(#env ": " help); \
- if (PrintOptions && var) _objc_inform(#env " is set"); \
- } \
- }
-
- OPTION(PrintImages, OBJC_PRINT_IMAGES,
- "log image and library names as they are loaded");
- OPTION(PrintLoading, OBJC_PRINT_LOAD_METHODS,
- "log calls to class and category +load methods");
- OPTION(PrintInitializing, OBJC_PRINT_INITIALIZE_METHODS,
- "log calls to class +initialize methods");
- OPTION(PrintResolving, OBJC_PRINT_RESOLVED_METHODS,
- "log methods created by +resolveClassMethod: and +resolveInstanceMethod:");
- OPTION(PrintConnecting, OBJC_PRINT_CLASS_SETUP,
- "log progress of class and category setup");
- OPTION(PrintProtocols, OBJC_PRINT_PROTOCOL_SETUP,
- "log progress of protocol setup");
- OPTION(PrintIvars, OBJC_PRINT_IVAR_SETUP,
- "log processing of non-fragile ivars");
- OPTION(PrintVtables, OBJC_PRINT_VTABLE_SETUP,
- "log processing of class vtables");
- OPTION(PrintVtableImages, OBJC_PRINT_VTABLE_IMAGES,
- "print vtable images showing overridden methods");
- OPTION(PrintCaches, OBJC_PRINT_CACHE_SETUP,
- "log processing of method caches");
- OPTION(PrintFuture, OBJC_PRINT_FUTURE_CLASSES,
- "log use of future classes for toll-free bridging");
- OPTION(PrintGC, OBJC_PRINT_GC,
- "log some GC operations");
- OPTION(PrintPreopt, OBJC_PRINT_PREOPTIMIZATION,
- "log preoptimization courtesy of dyld shared cache");
- OPTION(PrintCxxCtors, OBJC_PRINT_CXX_CTORS,
- "log calls to C++ ctors and dtors for instance variables");
- OPTION(PrintExceptions, OBJC_PRINT_EXCEPTIONS,
- "log exception handling");
- OPTION(PrintExceptionThrow, OBJC_PRINT_EXCEPTION_THROW,
- "log backtrace of every objc_exception_throw()");
- OPTION(PrintAltHandlers, OBJC_PRINT_ALT_HANDLERS,
- "log processing of exception alt handlers");
- OPTION(PrintReplacedMethods, OBJC_PRINT_REPLACED_METHODS,
- "log methods replaced by category implementations");
- OPTION(PrintDeprecation, OBJC_PRINT_DEPRECATION_WARNINGS,
- "warn about calls to deprecated runtime functions");
- OPTION(PrintPoolHiwat, OBJC_PRINT_POOL_HIGHWATER,
- "log high-water marks for autorelease pools");
- OPTION(PrintCustomRR, OBJC_PRINT_CUSTOM_RR,
- "log classes with un-optimized custom retain/release methods");
- OPTION(PrintCustomAWZ, OBJC_PRINT_CUSTOM_AWZ,
- "log classes with un-optimized custom allocWithZone methods");
-
- OPTION(DebugUnload, OBJC_DEBUG_UNLOAD,
- "warn about poorly-behaving bundles when unloaded");
- OPTION(DebugFragileSuperclasses, OBJC_DEBUG_FRAGILE_SUPERCLASSES,
- "warn about subclasses that may have been broken by subsequent changes to superclasses");
- OPTION(DebugFinalizers, OBJC_DEBUG_FINALIZERS,
- "warn about classes that implement -dealloc but not -finalize");
- OPTION(DebugNilSync, OBJC_DEBUG_NIL_SYNC,
- "warn about @synchronized(nil), which does no synchronization");
- OPTION(DebugNonFragileIvars, OBJC_DEBUG_NONFRAGILE_IVARS,
- "capriciously rearrange non-fragile ivars");
- OPTION(DebugAltHandlers, OBJC_DEBUG_ALT_HANDLERS,
- "record more info about bad alt handler use");
-
- OPTION(UseInternalZone, OBJC_USE_INTERNAL_ZONE,
- "allocate runtime data in a dedicated malloc zone");
-
- OPTION(DisableGC, OBJC_DISABLE_GC,
- "force GC OFF, even if the executable wants it on");
- OPTION(DisableVtables, OBJC_DISABLE_VTABLES,
- "disable vtable dispatch");
- OPTION(DisablePreopt, OBJC_DISABLE_PREOPTIMIZATION,
- "disable preoptimization courtesy of dyld shared cache");
-#undef OPTION
-#endif
+ for (size_t i = 0; i < sizeof(Settings)/sizeof(Settings[0]); i++) {
+ const option_t *opt = &Settings[i];
+ if (PrintHelp) _objc_inform("%s: %s", opt->env, opt->help);
+ if (PrintOptions && *opt->var) _objc_inform("%s is set", opt->env);
+ }
+ }
}
}
-#if defined(__i386__) || defined(__x86_64__)
-
-/**********************************************************************
-* objc_branch_size
-* Returns the number of BYTES needed
-* for a branch from entry to target.
-**********************************************************************/
-size_t objc_branch_size(void *entry, void *target)
-{
- return objc_cond_branch_size(entry, target, COND_ALWAYS);
-}
-
-size_t
-objc_cond_branch_size(void *entry, void *target, unsigned cond)
-{
- // For simplicity, always use 32-bit relative jumps.
- if (cond == COND_ALWAYS) return 5;
- else return 6;
-}
-
-/**********************************************************************
-* objc_write_branch
-* Writes at entry an i386 branch instruction sequence that branches to target.
-* The sequence written will be objc_branch_size(entry, target) BYTES.
-* Returns the number of BYTES written.
-**********************************************************************/
-size_t objc_write_branch(void *entry, void *target)
-{
- return objc_write_cond_branch(entry, target, COND_ALWAYS);
-}
-
-size_t
-objc_write_cond_branch(void *entry, void *target, unsigned cond)
-{
- uint8_t *address = (uint8_t *)entry; // instructions written to here
- intptr_t destination = (intptr_t)target; // branch dest as absolute address
- intptr_t displacement = (intptr_t)destination - ((intptr_t)address + objc_cond_branch_size(entry, target, cond)); // branch dest as relative offset
-
- // For simplicity, always use 32-bit relative jumps
- if (cond != COND_ALWAYS) {
- *address++ = 0x0f; // Jcc prefix
- }
- *address++ = cond;
- *address++ = displacement & 0xff;
- *address++ = (displacement >> 8) & 0xff;
- *address++ = (displacement >> 16) & 0xff;
- *address++ = (displacement >> 24) & 0xff;
-
- return address - (uint8_t *)entry;
-}
-
-// defined __i386__
-#endif
-
-
-
-
#if !__OBJC2__
// GrP fixme
-OBJC_EXTERN Class _objc_getOrigClass(const char *name);
+extern "C" Class _objc_getOrigClass(const char *name);
#endif
const char *class_getImageName(Class cls)
{
if (!cls) return NULL;
#if !__OBJC2__
- cls = _objc_getOrigClass(_class_getName(cls));
+ cls = _objc_getOrigClass(cls->getName());
#endif
#if TARGET_OS_WIN32
- charactersCopied = 0;
- szFileName = malloc(MAX_PATH * sizeof(TCHAR));
-
- origCls = objc_getOrigClass(class_getName(cls));
- classModule = NULL;
- res = GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS, (LPCTSTR)origCls, &classModule);
- if (res && classModule) {
- charactersCopied = GetModuleFileName(classModule, szFileName, MAX_PATH * sizeof(TCHAR));
- }
- if (classModule) FreeLibrary(classModule);
- if (charactersCopied) {
- return (const char *)szFileName;
- } else
- free(szFileName);
+ charactersCopied = 0;
+ szFileName = malloc(MAX_PATH * sizeof(TCHAR));
+
+ origCls = objc_getOrigClass(cls->getName());
+ classModule = NULL;
+ res = GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS, (LPCTSTR)origCls, &classModule);
+ if (res && classModule) {
+ charactersCopied = GetModuleFileName(classModule, szFileName, MAX_PATH * sizeof(TCHAR));
+ }
+ if (classModule) FreeLibrary(classModule);
+ if (charactersCopied) {
+ return (const char *)szFileName;
+ } else {
+ free(szFileName);
+ }
return NULL;
#else
return dyld_image_path_containing_address(cls);
**********************************************************************/
void objc_enumerationMutation(id object) {
if (enumerationMutationHandler == nil) {
- _objc_fatal("mutation detected during 'for(... in ...)' enumeration of object %p.", object);
+ _objc_fatal("mutation detected during 'for(... in ...)' enumeration of object %p.", (void*)object);
}
(*enumerationMutationHandler)(object);
}
* Associative Reference Support
**********************************************************************/
-#if SUPPORT_GC
-id objc_getAssociatedObject_gc(id object, const void *key) {
- return (id)auto_zone_get_associative_ref(gc_zone, object, (void *)key);
-}
-#endif
-
id objc_getAssociatedObject_non_gc(id object, const void *key) {
return _object_get_associative_reference(object, (void *)key);
}
-id objc_getAssociatedObject(id object, const void *key) {
-#if SUPPORT_GC
- if (UseGC) {
- return (id)auto_zone_get_associative_ref(gc_zone, object, (void *)key);
- } else
-#endif
- {
- return _object_get_associative_reference(object, (void *)key);
- }
+
+void objc_setAssociatedObject_non_gc(id object, const void *key, id value, objc_AssociationPolicy policy) {
+ _object_set_associative_reference(object, (void *)key, value, policy);
}
+
#if SUPPORT_GC
+
+id objc_getAssociatedObject_gc(id object, const void *key) {
+ return (id)auto_zone_get_associative_ref(gc_zone, object, (void *)key);
+}
+
void objc_setAssociatedObject_gc(id object, const void *key, id value, objc_AssociationPolicy policy) {
if ((policy & OBJC_ASSOCIATION_COPY_NONATOMIC) == OBJC_ASSOCIATION_COPY_NONATOMIC) {
value = ((id(*)(id, SEL))objc_msgSend)(value, SEL_copy);
}
auto_zone_set_associative_ref(gc_zone, object, (void *)key, value);
}
-#endif
-void objc_setAssociatedObject_non_gc(id object, const void *key, id value, objc_AssociationPolicy policy) {
- _object_set_associative_reference(object, (void *)key, value, policy);
+// objc_setAssociatedObject and objc_getAssociatedObject are
+// resolver functions in objc-auto.mm.
+
+#else
+
+id
+objc_getAssociatedObject(id object, const void *key)
+{
+ return objc_getAssociatedObject_non_gc(object, key);
}
-void objc_setAssociatedObject(id object, const void *key, id value, objc_AssociationPolicy policy) {
-#if SUPPORT_GC
- if (UseGC) {
- if ((policy & OBJC_ASSOCIATION_COPY_NONATOMIC) == OBJC_ASSOCIATION_COPY_NONATOMIC) {
- value = ((id(*)(id, SEL))objc_msgSend)(value, SEL_copy);
- }
- auto_zone_set_associative_ref(gc_zone, object, (void *)key, value);
- } else
-#endif
- {
- // Note, creates a retained reference in non-GC.
- _object_set_associative_reference(object, (void *)key, value, policy);
- }
+void
+objc_setAssociatedObject(id object, const void *key, id value,
+ objc_AssociationPolicy policy)
+{
+ objc_setAssociatedObject_non_gc(object, key, value, policy);
}
-void objc_removeAssociatedObjects(id object) {
+#endif
+
+
+void objc_removeAssociatedObjects(id object)
+{
#if SUPPORT_GC
if (UseGC) {
auto_zone_erase_associative_refs(gc_zone, object);
} else
#endif
{
- if (_class_instancesHaveAssociatedObjects(_object_getClass(object))) _object_remove_assocations(object);
- }
-}
-
-BOOL class_instancesHaveAssociatedObjects(Class cls) {
- return _class_instancesHaveAssociatedObjects(cls);
-}
-
-
-/**********************************************************************
-* Debugger mode
-*
-* Debugger mode is used when gdb wants to call runtime functions
-* and other methods while other threads are stopped. The runtime
-* provides best-effort functionality while avoiding deadlocks
-* with the stopped threads. gdb is responsible for ensuring that all
-* threads but one stay stopped.
-*
-* When debugger mode starts, the runtime acquires as many locks as
-* it can. Any locks that can't be acquired are off-limits until
-* debugger mode ends. The locking functions in objc-os.h check each
-* operation and halt if a disallowed lock is used; gdb catches that
-* trap and cleans up.
-*
-* Each ABI is responsible for tracking its locks. Any lock not
-* handled there is a potential gdb deadlock.
-**********************************************************************/
-
-#if SUPPORT_DEBUGGER_MODE
-
-int DebuggerMode = DEBUGGER_OFF;
-objc_thread_t DebuggerModeThread = 0;
-static int DebuggerModeCount;
-
-/**********************************************************************
-* gdb_objc_startDebuggerMode
-* Start debugger mode by taking locks. Return 0 if not enough locks
-* could be acquired.
-**********************************************************************/
-int gdb_objc_startDebuggerMode(uint32_t flags)
-{
- BOOL wantFull = flags & OBJC_DEBUGMODE_FULL;
- if (! DebuggerMode) {
- // Start debugger mode
- int mode = startDebuggerMode(); // Do this FIRST
- if (mode == DEBUGGER_OFF) {
- // sorry
- return 0;
- }
- else if (mode == DEBUGGER_PARTIAL && wantFull) {
- // not good enough
- endDebuggerMode();
- return 0;
- }
- else {
- // w00t
- DebuggerMode = mode;
- DebuggerModeCount = 1;
- DebuggerModeThread = thread_self();
- return 1;
- }
- }
- else if (DebuggerMode == DEBUGGER_PARTIAL && wantFull) {
- // Debugger mode already active, but not as requested - sorry
- return 0;
- }
- else {
- // Debugger mode already active as requested
- if (thread_self() == DebuggerModeThread) {
- DebuggerModeCount++;
- return 1;
- } else {
- _objc_inform("DEBUGGER MODE: debugger is buggy: can't run "
- "debugger mode from two threads!");
- return 0;
+ if (object && object->getIsa()->instancesHaveAssociatedObjects()) {
+ _object_remove_assocations(object);
}
}
}
-
-/**********************************************************************
-* gdb_objc_endDebuggerMode
-* Relinquish locks and end debugger mode.
-**********************************************************************/
-void gdb_objc_endDebuggerMode(void)
-{
- if (DebuggerMode && thread_self() == DebuggerModeThread) {
- if (--DebuggerModeCount == 0) {
- DebuggerMode = NO;
- DebuggerModeThread = 0;
- endDebuggerMode(); // Do this LAST
- }
- } else {
- _objc_inform("DEBUGGER MODE: debugger is buggy: debugger mode "
- "not active for this thread!");
- }
-}
-
-
-/**********************************************************************
-* gdb_objc_debuggerModeFailure
-* Breakpoint hook for gdb when debugger mode can't finish something
-**********************************************************************/
-void gdb_objc_debuggerModeFailure(void)
-{
- _objc_fatal("DEBUGGER MODE: failed");
-}
-
-// SUPPORT_DEBUGGER_MODE
-#endif
--- /dev/null
+/*
+ * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+/*
+ * Utilities for registering and looking up selectors. The sole
+ * purpose of the selector tables is a registry whereby there is
+ * exactly one address (selector) associated with a given string
+ * (method name).
+ */
+
+#if !__OBJC2__
+
+#include "objc-private.h"
+#include "objc-sel-set.h"
+
+#if SUPPORT_PREOPT
+#include <objc-shared-cache.h>
+using namespace objc_opt;
+static const objc_selopt_t *builtins = NULL;
+#endif
+
+__BEGIN_DECLS
+
+static size_t SelrefCount = 0;
+
+static const char *_objc_empty_selector = "";
+static struct __objc_sel_set *_objc_selectors = NULL;
+
+
+#if SUPPORT_PREOPT
+void dump_builtins(void)
+{
+ uint32_t occupied = builtins->occupied;
+ uint32_t capacity = builtins->capacity;
+
+ _objc_inform("BUILTIN SELECTORS: %d selectors", occupied);
+ _objc_inform("BUILTIN SELECTORS: %d/%d (%d%%) hash table occupancy",
+ occupied, capacity, (int)(occupied/(double)capacity * 100));
+ _objc_inform("BUILTIN SELECTORS: using __TEXT,__objc_selopt at %p",
+ builtins);
+ _objc_inform("BUILTIN SELECTORS: capacity: %u", builtins->capacity);
+ _objc_inform("BUILTIN SELECTORS: occupied: %u", builtins->occupied);
+ _objc_inform("BUILTIN SELECTORS: shift: %u", builtins->shift);
+ _objc_inform("BUILTIN SELECTORS: mask: 0x%x", builtins->mask);
+ _objc_inform("BUILTIN SELECTORS: zero: %u", builtins->zero);
+ _objc_inform("BUILTIN SELECTORS: salt: 0x%llx", builtins->salt);
+
+ const int32_t *offsets = builtins->offsets();
+ uint32_t i;
+ for (i = 0; i < capacity; i++) {
+ if (offsets[i] != offsetof(objc_stringhash_t, zero)) {
+ const char *str = (const char *)builtins + offsets[i];
+ _objc_inform("BUILTIN SELECTORS: %6d: %+8d %s",
+ i, offsets[i], str);
+ if ((const char *)sel_registerName(str) != str) {
+ _objc_fatal("bogus");
+ }
+ } else {
+ _objc_inform("BUILTIN SELECTORS: %6d: ", i);
+ }
+ }
+}
+#endif
+
+
+static SEL _objc_search_builtins(const char *key)
+{
+#if defined(DUMP_SELECTORS)
+ if (NULL != key) printf("\t\"%s\",\n", key);
+#endif
+
+ if (!key) return (SEL)0;
+#if SUPPORT_IGNORED_SELECTOR_CONSTANT
+ if ((uintptr_t)key == kIgnore) return (SEL)kIgnore;
+ if (ignoreSelectorNamed(key)) return (SEL)kIgnore;
+#endif
+ if ('\0' == *key) return (SEL)_objc_empty_selector;
+
+#if SUPPORT_PREOPT
+ assert(builtins);
+ return (SEL)builtins->get(key);
+#endif
+
+ return (SEL)0;
+}
+
+
+const char *sel_getName(SEL sel) {
+#if SUPPORT_IGNORED_SELECTOR_CONSTANT
+ if ((uintptr_t)sel == kIgnore) return "<ignored selector>";
+#endif
+ return sel ? (const char *)sel : "<null selector>";
+}
+
+
+BOOL sel_isMapped(SEL name)
+{
+ SEL result;
+
+ if (!name) return NO;
+#if SUPPORT_IGNORED_SELECTOR_CONSTANT
+ if ((uintptr_t)name == kIgnore) return YES;
+#endif
+
+ result = _objc_search_builtins((const char *)name);
+ if (result) return YES;
+
+ rwlock_read(&selLock);
+ if (_objc_selectors) {
+ result = __objc_sel_set_get(_objc_selectors, name);
+ }
+ rwlock_unlock_read(&selLock);
+ return result ? YES : NO;
+}
+
+static SEL __sel_registerName(const char *name, int lock, int copy)
+{
+ SEL result = 0;
+
+ if (lock) rwlock_assert_unlocked(&selLock);
+ else rwlock_assert_writing(&selLock);
+
+ if (!name) return (SEL)0;
+ result = _objc_search_builtins(name);
+ if (result) return result;
+
+ if (lock) rwlock_read(&selLock);
+ if (_objc_selectors) {
+ result = __objc_sel_set_get(_objc_selectors, (SEL)name);
+ }
+ if (lock) rwlock_unlock_read(&selLock);
+ if (result) return result;
+
+ // No match. Insert.
+
+ if (lock) rwlock_write(&selLock);
+
+ if (!_objc_selectors) {
+ _objc_selectors = __objc_sel_set_create(SelrefCount);
+ }
+ if (lock) {
+ // Rescan in case it was added while we dropped the lock
+ result = __objc_sel_set_get(_objc_selectors, (SEL)name);
+ }
+ if (!result) {
+ result = (SEL)(copy ? _strdup_internal(name) : name);
+ __objc_sel_set_add(_objc_selectors, result);
+#if defined(DUMP_UNKNOWN_SELECTORS)
+ printf("\t\"%s\",\n", name);
+#endif
+ }
+
+ if (lock) rwlock_unlock_write(&selLock);
+ return result;
+}
+
+
+SEL sel_registerName(const char *name) {
+ return __sel_registerName(name, 1, 1); // YES lock, YES copy
+}
+
+SEL sel_registerNameNoLock(const char *name, BOOL copy) {
+ return __sel_registerName(name, 0, copy); // NO lock, maybe copy
+}
+
+void sel_lock(void)
+{
+ rwlock_write(&selLock);
+}
+
+void sel_unlock(void)
+{
+ rwlock_unlock_write(&selLock);
+}
+
+
+// 2001/1/24
+// Most callers of this function (which used to return NULL if the selector
+// was not found) did not check for NULL, so it now never returns NULL.
+//
+SEL sel_getUid(const char *name) {
+ return __sel_registerName(name, 2, 1); // YES lock, YES copy
+}
+
+
+BOOL sel_isEqual(SEL lhs, SEL rhs)
+{
+ return (lhs == rhs) ? YES : NO;
+}
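+
+// Usage sketch (illustrative; "frobnicate:" is a made-up selector name):
+//     SEL a = sel_registerName("frobnicate:");
+//     SEL b = sel_getUid("frobnicate:");
+//     assert(sel_isEqual(a, b));
+//     assert(0 == strcmp(sel_getName(a), "frobnicate:"));
+// Registering the same name twice yields one canonical SEL, and sel_getName()
+// returns the registered string.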
+
+
+/***********************************************************************
+* sel_preoptimizationValid
+* Return YES if this image's selector fixups are valid courtesy
+* of the dyld shared cache.
+**********************************************************************/
+BOOL sel_preoptimizationValid(const header_info *hi)
+{
+#if !SUPPORT_PREOPT
+
+ return NO;
+
+#else
+
+# if SUPPORT_IGNORED_SELECTOR_CONSTANT
+ // shared cache can't fix constant ignored selectors
+ if (UseGC) return NO;
+# endif
+
+ // preoptimization disabled for some reason
+ if (!isPreoptimized()) return NO;
+
+ // image not from shared cache, or not fixed inside shared cache
+ if (!_objcHeaderOptimizedByDyld(hi)) return NO;
+
+ return YES;
+
+#endif
+}
+
+
+/***********************************************************************
+* sel_init
+* Initialize selector tables and register selectors used internally.
+**********************************************************************/
+void sel_init(BOOL wantsGC, size_t selrefCount)
+{
+ // save this value for later
+ SelrefCount = selrefCount;
+
+#if SUPPORT_PREOPT
+ builtins = preoptimizedSelectors();
+#endif
+
+ // Register selectors used by libobjc
+
+ if (wantsGC) {
+ // Registering retain/release/autorelease requires GC decision first.
+ // sel_init doesn't actually need the wantsGC parameter, it just
+ // helps enforce the initialization order.
+ }
+
+#define s(x) SEL_##x = sel_registerNameNoLock(#x, NO)
+#define t(x,y) SEL_##y = sel_registerNameNoLock(#x, NO)
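+
+// For example, s(load) below expands to
+//     SEL_load = sel_registerNameNoLock("load", NO);
+// and t(allocWithZone:, allocWithZone) expands to
+//     SEL_allocWithZone = sel_registerNameNoLock("allocWithZone:", NO);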
+
+ sel_lock();
+
+ s(load);
+ s(initialize);
+ t(resolveInstanceMethod:, resolveInstanceMethod);
+ t(resolveClassMethod:, resolveClassMethod);
+ t(.cxx_construct, cxx_construct);
+ t(.cxx_destruct, cxx_destruct);
+ s(retain);
+ s(release);
+ s(autorelease);
+ s(retainCount);
+ s(alloc);
+ t(allocWithZone:, allocWithZone);
+ s(copy);
+ s(new);
+ s(finalize);
+ t(forwardInvocation:, forwardInvocation);
+
+ sel_unlock();
+
+#undef s
+#undef t
+}
+
+__END_DECLS
+
+#endif
-/* This file is automatically generated. Do not edit. */
+#include <mach/vm_param.h>
.section __TEXT,__objc_opt_ro
.align 3
.private_extern __objc_opt_data
.long 0 /* table.selopt_offset */
.long 0 /* table.headeropt_offset */
.long 0 /* table.clsopt_offset */
-.space 4096-16
+.space PAGE_SIZE-16
-/* space for selopt, smax/capacity=131072, blen/mask=65535+1 */
+/* space for selopt, smax/capacity=262144, blen/mask=65535+1 */
.space 65536
-.space 131072 /* checkbytes */
-.space 131072*4 /* offsets */
+.space 262144 /* checkbytes */
+.space 262144*4 /* offsets */
/* space for clsopt, smax/capacity=16384, blen/mask=4095+1 */
-.space 4096
-.space 16384 /* checkbytes */
-.space 16384*12 /* offsets to name and class and header_info */
-.space 4096 /* some duplicate classes */
+.space PAGE_SIZE
+.space 16384 /* checkbytes */
+.space 16384*12 /* offsets to name and class and header_info */
+.space PAGE_SIZE /* some duplicate classes */
.section __DATA,__objc_opt_rw
.private_extern __objc_opt_rw_data
__objc_opt_rw_data:
/* space for header_info structures */
-.space 4096*4
+.space 16384
/*
- * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved.
+ * Copyright (c) 2012 Apple Inc. All Rights Reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
* @APPLE_LICENSE_HEADER_END@
*/
-/*
- * Utilities for registering and looking up selectors. The sole
- * purpose of the selector tables is a registry whereby there is
- * exactly one address (selector) associated with a given string
- * (method name).
- */
+#if __OBJC2__
-#include "objc.h"
#include "objc-private.h"
-#include "objc-auto.h"
-#include "objc-sel-set.h"
+#include "objc-cache.h"
#if SUPPORT_PREOPT
#include <objc-shared-cache.h>
static const objc_selopt_t *builtins = NULL;
#endif
-__BEGIN_DECLS
+#if SUPPORT_IGNORED_SELECTOR_CONSTANT
+#error sorry
+#endif
+
static size_t SelrefCount = 0;
-static const char *_objc_empty_selector = "";
-static struct __objc_sel_set *_objc_selectors = NULL;
+static NXMapTable *namedSelectors;
+static SEL search_builtins(const char *key);
-#if SUPPORT_PREOPT
-void dump_builtins(void)
+
+/***********************************************************************
+* sel_init
+* Initialize selector tables and register selectors used internally.
+**********************************************************************/
+void sel_init(BOOL wantsGC, size_t selrefCount)
{
- uint32_t occupied = builtins->occupied;
- uint32_t capacity = builtins->capacity;
-
- _objc_inform("BUILTIN SELECTORS: %d selectors", occupied);
- _objc_inform("BUILTIN SELECTORS: %d/%d (%d%%) hash table occupancy",
- occupied, capacity, (int)(occupied/(double)capacity * 100));
- _objc_inform("BUILTIN SELECTORS: using __TEXT,__objc_selopt at %p",
- builtins);
- _objc_inform("BUILTIN SELECTORS: capacity: %u", builtins->capacity);
- _objc_inform("BUILTIN SELECTORS: occupied: %u", builtins->occupied);
- _objc_inform("BUILTIN SELECTORS: shift: %u", builtins->shift);
- _objc_inform("BUILTIN SELECTORS: mask: 0x%x", builtins->mask);
- _objc_inform("BUILTIN SELECTORS: zero: %u", builtins->zero);
- _objc_inform("BUILTIN SELECTORS: salt: 0x%llx", builtins->salt);
-
- const int32_t *offsets = builtins->offsets();
- uint32_t i;
- for (i = 0; i < capacity; i++) {
- if (offsets[i] != offsetof(objc_stringhash_t, zero)) {
- const char *str = (const char *)builtins + offsets[i];
- _objc_inform("BUILTIN SELECTORS: %6d: %+8d %s",
- i, offsets[i], str);
- if ((const char *)sel_registerName(str) != str) {
- _objc_fatal("bogus");
- }
- } else {
- _objc_inform("BUILTIN SELECTORS: %6d: ", i);
- }
- }
-}
+ // save this value for later
+ SelrefCount = selrefCount;
+
+#if SUPPORT_PREOPT
+ builtins = preoptimizedSelectors();
#endif
+ // Register selectors used by libobjc
-static SEL _objc_search_builtins(const char *key)
-{
-#if defined(DUMP_SELECTORS)
- if (NULL != key) printf("\t\"%s\",\n", key);
-#endif
+ if (wantsGC) {
+ // Registering retain/release/autorelease requires GC decision first.
+ // sel_init doesn't actually need the wantsGC parameter, it just
+ // helps enforce the initialization order.
+ }
- if (!key) return (SEL)0;
-#if SUPPORT_IGNORED_SELECTOR_CONSTANT
- if ((uintptr_t)key == kIgnore) return (SEL)kIgnore;
- if (ignoreSelectorNamed(key)) return (SEL)kIgnore;
-#endif
- if ('\0' == *key) return (SEL)_objc_empty_selector;
+#define s(x) SEL_##x = sel_registerNameNoLock(#x, NO)
+#define t(x,y) SEL_##y = sel_registerNameNoLock(#x, NO)
-#if SUPPORT_PREOPT
- assert(builtins);
- return (SEL)builtins->get(key);
-#endif
+ sel_lock();
+
+ s(load);
+ s(initialize);
+ t(resolveInstanceMethod:, resolveInstanceMethod);
+ t(resolveClassMethod:, resolveClassMethod);
+ t(.cxx_construct, cxx_construct);
+ t(.cxx_destruct, cxx_destruct);
+ s(retain);
+ s(release);
+ s(autorelease);
+ s(retainCount);
+ s(alloc);
+ t(allocWithZone:, allocWithZone);
+ s(copy);
+ s(new);
+ s(finalize);
+ t(forwardInvocation:, forwardInvocation);
+
+ sel_unlock();
- return (SEL)0;
+#undef s
+#undef t
}
-const char *sel_getName(SEL sel) {
-#if SUPPORT_IGNORED_SELECTOR_CONSTANT
- if ((uintptr_t)sel == kIgnore) return "<ignored selector>";
-#endif
- return sel ? (const char *)sel : "<null selector>";
+static SEL sel_alloc(const char *name, bool copy)
+{
+ rwlock_assert_writing(&selLock);
+ return (SEL)(copy ? _strdup_internal(name) : name);
}
-BOOL sel_isMapped(SEL name)
+const char *sel_getName(SEL sel)
{
- SEL result;
-
- if (!name) return NO;
-#if SUPPORT_IGNORED_SELECTOR_CONSTANT
- if ((uintptr_t)name == kIgnore) return YES;
-#endif
+ if (!sel) return "<null selector>";
+ return (const char *)(const void*)sel;
+}
- result = _objc_search_builtins((const char *)name);
- if (result) return YES;
+
+BOOL sel_isMapped(SEL sel)
+{
+ if (!sel) return NO;
+
+ const char *name = (const char *)sel;
+
+ if (sel == search_builtins(name)) return YES;
rwlock_read(&selLock);
- if (_objc_selectors) {
- result = __objc_sel_set_get(_objc_selectors, name);
- }
+ bool result = (sel == (SEL)NXMapGet(namedSelectors, name));
rwlock_unlock_read(&selLock);
- return result ? YES : NO;
+
+ return result;
+}
+
+
+static SEL search_builtins(const char *name)
+{
+#if SUPPORT_PREOPT
+ if (builtins) return (SEL)builtins->get(name);
+#endif
+ return nil;
}
+
static SEL __sel_registerName(const char *name, int lock, int copy)
{
SEL result = 0;
else rwlock_assert_writing(&selLock);
if (!name) return (SEL)0;
- result = _objc_search_builtins(name);
+
+ result = search_builtins(name);
if (result) return result;
if (lock) rwlock_read(&selLock);
- if (_objc_selectors) {
- result = __objc_sel_set_get(_objc_selectors, (SEL)name);
+ if (namedSelectors) {
+ result = (SEL)NXMapGet(namedSelectors, name);
}
if (lock) rwlock_unlock_read(&selLock);
if (result) return result;
if (lock) rwlock_write(&selLock);
- if (!_objc_selectors) {
- _objc_selectors = __objc_sel_set_create(SelrefCount);
+ if (!namedSelectors) {
+ namedSelectors = NXCreateMapTable(NXStrValueMapPrototype,
+ (unsigned)SelrefCount);
}
if (lock) {
// Rescan in case it was added while we dropped the lock
- result = __objc_sel_set_get(_objc_selectors, (SEL)name);
+ result = (SEL)NXMapGet(namedSelectors, name);
}
if (!result) {
- result = (SEL)(copy ? _strdup_internal(name) : name);
- __objc_sel_set_add(_objc_selectors, result);
-#if defined(DUMP_UNKNOWN_SELECTORS)
- printf("\t\"%s\",\n", name);
-#endif
+ result = sel_alloc(name, copy);
+ // fixme choose a better container (hash not map for starters)
+ NXMapInsert(namedSelectors, sel_getName(result), result);
}
if (lock) rwlock_unlock_write(&selLock);
#else
-# if SUPPORT_IGNORED_SELECTOR_CONSTANT
- // shared cache can't fix constant ignored selectors
- if (UseGC) return NO;
-# endif
-
// preoptimization disabled for some reason
if (!isPreoptimized()) return NO;
}
-/***********************************************************************
-* sel_init
-* Initialize selector tables and register selectors used internally.
-**********************************************************************/
-void sel_init(BOOL wantsGC, size_t selrefCount)
-{
- // save this value for later
- SelrefCount = selrefCount;
-
-#if SUPPORT_PREOPT
- builtins = preoptimizedSelectors();
#endif
-
- // Register selectors used by libobjc
-
- if (wantsGC) {
- // Registering retain/release/autorelease requires GC decision first.
- // sel_init doesn't actually need the wantsGC parameter, it just
- // helps enforce the initialization order.
- }
-
-#define s(x) SEL_##x = sel_registerNameNoLock(#x, NO)
-#define t(x,y) SEL_##y = sel_registerNameNoLock(#x, NO)
-
- sel_lock();
-
- s(load);
- s(initialize);
- t(resolveInstanceMethod:, resolveInstanceMethod);
- t(resolveClassMethod:, resolveClassMethod);
- t(.cxx_construct, cxx_construct);
- t(.cxx_destruct, cxx_destruct);
- s(retain);
- s(release);
- s(autorelease);
- s(retainCount);
- s(alloc);
- t(allocWithZone:, allocWithZone);
- s(copy);
- s(new);
- s(finalize);
- t(forwardInvocation:, forwardInvocation);
-
- sel_unlock();
-
-#undef s
-#undef t
-}
-
-__END_DECLS
#include <objc/objc.h>
-// Begin synchronizing on 'obj'.
-// Allocates recursive pthread_mutex associated with 'obj' if needed.
-// Returns OBJC_SYNC_SUCCESS once lock is acquired.
+
+/**
+ * Begin synchronizing on 'obj'.
+ * Allocates recursive pthread_mutex associated with 'obj' if needed.
+ *
+ * @param obj The object to begin synchronizing on.
+ *
+ * @return OBJC_SYNC_SUCCESS once lock is acquired.
+ */
OBJC_EXPORT int objc_sync_enter(id obj)
__OSX_AVAILABLE_STARTING(__MAC_10_3, __IPHONE_2_0);
-// End synchronizing on 'obj'.
-// Returns OBJC_SYNC_SUCCESS or OBJC_SYNC_NOT_OWNING_THREAD_ERROR
+/**
+ * End synchronizing on 'obj'.
+ *
+ * @param obj The object to end synchronizing on.
+ *
+ * @return OBJC_SYNC_SUCCESS or OBJC_SYNC_NOT_OWNING_THREAD_ERROR
+ */
OBJC_EXPORT int objc_sync_exit(id obj)
__OSX_AVAILABLE_STARTING(__MAC_10_3, __IPHONE_2_0);
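/* Illustrative sketch (not part of this patch): an @synchronized(obj) block
 * is conceptually lowered to a balanced enter/exit pair like the one below,
 * with the exit also performed on the exceptional path. The function name
 * synchronized_sketch is hypothetical.
 */
#if 0
static void synchronized_sketch(id obj)
{
    if (objc_sync_enter(obj) == OBJC_SYNC_SUCCESS) {
        // ... critical section guarded by obj's recursive lock ...
        objc_sync_exit(obj);
    }
}
#endif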
typedef struct {
SyncData *data;
- OSSpinLock lock;
+ spinlock_t lock;
- char align[64 - sizeof (OSSpinLock) - sizeof (SyncData *)];
+ char align[64 - sizeof (spinlock_t) - sizeof (SyncData *)];
} SyncList __attribute__((aligned(64)));
// aligned to put locks on separate cache lines
static SyncData* id2data(id object, enum usage why)
{
- OSSpinLock *lockp = &LOCK_FOR_OBJ(object);
+ spinlock_t *lockp = &LOCK_FOR_OBJ(object);
SyncData **listp = &LIST_FOR_OBJ(object);
SyncData* result = NULL;
// We could keep the nodes in some hash table if we find that there are
// more than 20 or so distinct locks active, but we don't do that now.
- OSSpinLockLock(lockp);
+ spinlock_lock(lockp);
{
SyncData* p;
*listp = result;
done:
- OSSpinLockUnlock(lockp);
+ spinlock_unlock(lockp);
if (result) {
// Only new ACQUIRE should get here.
// All RELEASE and CHECK and recursive ACQUIRE are
* @APPLE_LICENSE_HEADER_END@
*/
+#ifndef _OBJC_WEAK_H_
+#define _OBJC_WEAK_H_
+
#include <objc/objc.h>
#include "objc-config.h"
*/
-struct weak_referrer_t {
- id *referrer; // clear this address
-};
-typedef struct weak_referrer_t weak_referrer_t;
+/// The address of a __weak object reference
+typedef id * weak_referrer_t;
-struct weak_referrer_array_t {
- weak_referrer_t *refs;
- size_t num_refs;
- size_t num_allocated;
- size_t max_hash_displacement;
-};
-typedef struct weak_referrer_array_t weak_referrer_array_t;
+#if __LP64__
+#define PTR_MINUS_1 63
+#else
+#define PTR_MINUS_1 31
+#endif
+/**
+ * The internal structure stored in the weak references table.
+ * It maintains a hash set of weak references pointing to an object.
+ * If out_of_line==0, the set is instead a small inline array.
+ */
+#define WEAK_INLINE_COUNT 4
struct weak_entry_t {
- id referent;
- weak_referrer_array_t referrers;
+ id referent;
+ union {
+ struct {
+ weak_referrer_t *referrers;
+ uintptr_t out_of_line : 1;
+ uintptr_t num_refs : PTR_MINUS_1;
+ uintptr_t mask;
+ uintptr_t max_hash_displacement;
+ };
+ struct {
+ // out_of_line=0 is LSB of one of these (don't care which)
+ weak_referrer_t inline_referrers[WEAK_INLINE_COUNT];
+ };
+ };
};
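/* Illustrative sketch (not part of this patch): a hypothetical helper showing
 * how the union above is intended to be read. When out_of_line == 0 the four
 * inline_referrers slots hold the referrers directly; otherwise referrers,
 * num_refs and mask describe an out-of-line hash set.
 */
#if 0
static size_t weak_entry_referrer_count(const weak_entry_t *entry)
{
    if (!entry->out_of_line) {
        size_t n = 0;
        for (size_t i = 0; i < WEAK_INLINE_COUNT; i++) {
            if (entry->inline_referrers[i]) n++;
        }
        return n;
    }
    return entry->num_refs;  // live slots in the out-of-line table
}
#endif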
-typedef struct weak_entry_t weak_entry_t;
+/**
+ * The global weak references table. Stores object ids as keys,
+ * and weak_entry_t structs as their values.
+ */
struct weak_table_t {
- size_t num_weak_refs;
- size_t max_weak_refs;
- struct weak_entry_t *weak_entries;
+ weak_entry_t *weak_entries;
+ size_t num_entries;
+ uintptr_t mask;
+ uintptr_t max_hash_displacement;
};
-typedef struct weak_table_t weak_table_t;
-extern id weak_register_no_lock(weak_table_t *weak_table, id referent, id *referrer);
-extern void weak_unregister_no_lock(weak_table_t *weak_table, id referent, id *referrer);
+/// Adds an (object, weak pointer) pair to the weak table.
+id weak_register_no_lock(weak_table_t *weak_table, id referent, id *referrer);
+
+/// Removes an (object, weak pointer) pair from the weak table.
+void weak_unregister_no_lock(weak_table_t *weak_table, id referent, id *referrer);
-extern id arr_read_weak_reference(weak_table_t *weak_table, id *referrer);
-extern void arr_clear_deallocating(weak_table_t *weak_table, id referent);
+/// Assert a weak pointer is valid and retain the object during its use.
+id weak_read_no_lock(weak_table_t *weak_table, id *referrer);
+
+/// Called on object destruction. Sets all remaining weak pointers to nil.
+void weak_clear_no_lock(weak_table_t *weak_table, id referent);
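/* Illustrative sketch (not part of this patch): how an objc_storeWeak-style
 * caller might combine the primitives above. The function name and the
 * single-table assumption are hypothetical, and real callers must hold the
 * appropriate lock around both calls.
 */
#if 0
static id store_weak_sketch(weak_table_t *table, id *location, id newObj)
{
    id oldObj = *location;
    if (oldObj) weak_unregister_no_lock(table, oldObj, location);
    if (newObj) newObj = weak_register_no_lock(table, newObj, location);
    *location = newObj;  // weak_register_no_lock may return nil if refused
    return newObj;
}
#endif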
__END_DECLS
+
+#endif /* _OBJC_WEAK_H_ */
* @APPLE_LICENSE_HEADER_END@
*/
-#include "objc-weak.h"
-#include "objc-os.h"
#include "objc-private.h"
+#include "objc-weak.h"
+
#include <stdint.h>
#include <stdbool.h>
#include <sys/types.h>
#include <libkern/OSAtomic.h>
+#define TABLE_SIZE(entry) (entry->mask ? entry->mask + 1 : 0)
-template <typename T> struct WeakAllocator {
- typedef T value_type;
- typedef value_type* pointer;
- typedef const value_type *const_pointer;
- typedef value_type& reference;
- typedef const value_type& const_reference;
- typedef size_t size_type;
- typedef ptrdiff_t difference_type;
+static void append_referrer(weak_entry_t *entry, id *new_referrer);
- template <typename U> struct rebind { typedef WeakAllocator<U> other; };
-
- template <typename U> WeakAllocator(const WeakAllocator<U>&) {}
- WeakAllocator() {}
- WeakAllocator(const WeakAllocator&) {}
- ~WeakAllocator() {}
-
- pointer address(reference x) const { return &x; }
- const_pointer address(const_reference x) const {
- return x;
- }
+/**
+ * Unique hash function for object pointers only.
+ *
+ * @param key The object pointer
+ *
+ * @return Hash of the pointer, not restricted to any table size.
+ */
+static inline uintptr_t hash_pointer(id key) {
+ uintptr_t k = (uintptr_t)key;
+ return (k >> 4) ^ (k >> 9);
+}
- pointer allocate(size_type n, const_pointer = 0) {
- return static_cast<pointer>(::_malloc_internal(n * sizeof(T)));
- }
+/**
+ * Unique hash function for weak object pointers only.
+ *
+ * @param key The weak object pointer.
+ *
+ * @return Hash of the pointer, not restricted to any table size.
+ */
+static inline uintptr_t w_hash_pointer(id *key) {
+ uintptr_t k = (uintptr_t)key;
+ return (sizeof(size_t) == 8) ? (k >> 3) : (k >> 2);
+}
- void deallocate(pointer p, size_type) { ::_free_internal(p); }
+/**
+ * Grow the entry's hash table of referrers. Rehashes each
+ * of the referrers.
+ *
+ * @param entry Weak pointer hash set for a particular object.
+ */
+__attribute__((noinline, used))
+static void grow_refs_and_insert(weak_entry_t *entry, id *new_referrer)
+{
+ assert(entry->out_of_line);
- size_type max_size() const {
- return static_cast<size_type>(-1) / sizeof(T);
- }
+ size_t old_size = TABLE_SIZE(entry);
+ size_t new_size = old_size ? old_size * 2 : 8;
- void construct(pointer p, const value_type& x) {
- new(p) value_type(x);
+ size_t num_refs = entry->num_refs;
+ weak_referrer_t *old_refs = entry->referrers;
+ entry->mask = new_size - 1;
+
+ entry->referrers = (weak_referrer_t *)
+ _calloc_internal(TABLE_SIZE(entry), sizeof(weak_referrer_t));
+ entry->num_refs = 0;
+ entry->max_hash_displacement = 0;
+
+ for (size_t i = 0; i < old_size && num_refs > 0; i++) {
+ if (old_refs[i] != nil) {
+ append_referrer(entry, old_refs[i]);
+ num_refs--;
+ }
}
+ // Insert
+ append_referrer(entry, new_referrer);
+ if (old_refs) _free_internal(old_refs);
+}
- void destroy(pointer p) { p->~value_type(); }
-
- void operator=(const WeakAllocator&);
-
-};
+/**
+ * Add the given referrer to set of weak pointers in this entry.
+ * Does not perform duplicate checking (b/c weak pointers are never
+ * added to a set twice).
+ *
+ * @param entry The entry holding the set of weak pointers.
+ * @param new_referrer The new weak pointer to be added.
+ */
+static void append_referrer(weak_entry_t *entry, id *new_referrer)
+{
+ if (! entry->out_of_line) {
+ // Try to insert inline.
+ for (size_t i = 0; i < WEAK_INLINE_COUNT; i++) {
+ if (entry->inline_referrers[i] == nil) {
+ entry->inline_referrers[i] = new_referrer;
+ return;
+ }
+ }
-class Range {
-private:
- void *_address; // start of range
- void *_end; // end of the range (one byte beyond last usable space)
-public:
- static void *displace(void *address, ptrdiff_t offset) { return (void *)((char *)address + offset); }
+ // Couldn't insert inline. Allocate out of line.
+ weak_referrer_t *new_referrers = (weak_referrer_t *)
+ _calloc_internal(WEAK_INLINE_COUNT, sizeof(weak_referrer_t));
+ // This constructed table is invalid, but grow_refs_and_insert
+ // will fix it and rehash it.
+ for (size_t i = 0; i < WEAK_INLINE_COUNT; i++) {
+ new_referrers[i] = entry->inline_referrers[i];
+ }
+ entry->referrers = new_referrers;
+ entry->num_refs = WEAK_INLINE_COUNT;
+ entry->out_of_line = 1;
+ entry->mask = WEAK_INLINE_COUNT-1;
+ entry->max_hash_displacement = 0;
+ }
- //
- // Constructors
- //
- Range() : _address(NULL), _end(NULL) {}
- Range(void *address) : _address(address), _end(address) {}
- Range(void *address, void *end) : _address(address), _end(end) {}
- Range(void *address, size_t size) : _address(address), _end(displace(address, size)) {}
-
- //
- // Accessors
- //
- inline Range& range() { return *this; }
- inline void *address() const { return _address; }
- inline void *end() const { return _end; }
- inline size_t size() const { return (uintptr_t)_end - (uintptr_t)_address; }
- inline void set_address(void *address) { _address = address; }
- inline void set_end(void *end) { _end = end; }
- inline void set_size(size_t size) { _end = displace(_address, size); }
- inline void set_range(void *address, void *end) { _address = address; _end = end; }
- inline void set_range(void *address, size_t size) { _address = address; _end = displace(address, size); }
- inline void set_range(Range range) { _address = range.address(); _end = range.end(); }
- inline void adjust_address(intptr_t delta) { _address = displace(_address, delta); }
- inline void adjust_end(intptr_t delta) { _end = displace(_end, delta); }
- inline void adjust(intptr_t delta) { _address = displace(_address, delta), _end = displace(_end, delta); }
-
-
- //
- // is_empty
- //
- // Returns true if the range is empty.
- //
- inline bool is_empty() { return _address == _end; }
+ assert(entry->out_of_line);
-
- //
- // in_range
- //
- // Returns true if the specified address is in range.
- // This form reduces the number of branches. Works well with invariant lo and hi.
- //
- static inline bool in_range(void *lo, void *hi, void *address) {
- uintptr_t lo_as_int = (uintptr_t)lo;
- uintptr_t hi_as_int = (uintptr_t)hi;
- uintptr_t diff = hi_as_int - lo_as_int;
- uintptr_t address_as_int = (uintptr_t)address;
- return (address_as_int - lo_as_int) < diff;
+ if (entry->num_refs >= TABLE_SIZE(entry) * 3/4) {
+ return grow_refs_and_insert(entry, new_referrer);
}
- inline bool in_range(void *address) const { return in_range(_address, _end, address); }
-
-
- //
- // operator ==
- //
- // Used to locate entry in list or hash table (use is_range for exaxt match.)
- inline bool operator==(const Range *range) const { return _address == range->_address; }
- inline bool operator==(const Range &range) const { return _address == range._address; }
-
-
- //
- // is_range
- //
- // Return true if the ranges are equivalent.
- //
- inline bool is_range(const Range& range) const { return _address == range._address && _end == range._end; }
-
-
- //
- // clear
- //
- // Initialize the range to zero.
- //
- inline void clear() { bzero(address(), size()); }
-
- //
- // expand_range
- //
- // Expand the bounds with the specified range.
- //
- inline void expand_range(void *address) {
- if (_address > address) _address = address;
- if (_end < address) _end = address;
+ size_t index = w_hash_pointer(new_referrer) & (entry->mask);
+ size_t hash_displacement = 0;
+ while (entry->referrers[index] != NULL) {
+ index = (index+1) & entry->mask;
+ hash_displacement++;
}
- inline void expand_range(Range& range) {
- expand_range(range.address());
- expand_range(range.end());
+ if (hash_displacement > entry->max_hash_displacement) {
+ entry->max_hash_displacement = hash_displacement;
}
-
-
- //
- // relative_address
- //
- // Converts an absolute address to an address relative to this address.
- //
- inline void *relative_address(void *address) const { return (void *)((uintptr_t)address - (uintptr_t)_address); }
-
-
- //
- // absolute_address
- //
- // Converts an address relative to this address to an absolute address.
- //
- inline void *absolute_address(void *address) const { return (void *)((uintptr_t)address + (uintptr_t)_address); }
-};
+ weak_referrer_t &ref = entry->referrers[index];
+ ref = new_referrer;
+ entry->num_refs++;
+}
+/**
+ * Remove old_referrer from set of referrers, if it's present.
+ * Does not remove duplicates, because duplicates should not exist.
+ *
+ * @todo this is slow if old_referrer is not present. But, is this ever the case?
+ *
+ * @param entry The entry holding the referrers.
+ * @param old_referrer The referrer to remove.
+ */
+static void remove_referrer(weak_entry_t *entry, id *old_referrer)
+{
+ if (! entry->out_of_line) {
+ for (size_t i = 0; i < WEAK_INLINE_COUNT; i++) {
+ if (entry->inline_referrers[i] == old_referrer) {
+ entry->inline_referrers[i] = nil;
+ return;
+ }
+ }
+ _objc_inform("attempted to remove unregistered weak referrer %p\n",
+ old_referrer);
+        return;
+    }
-template<> struct WeakAllocator<void> {
- typedef void value_type;
- typedef void* pointer;
- typedef const void *const_pointer;
- template <typename U> struct rebind { typedef WeakAllocator<U> other; };
-};
+ size_t index = w_hash_pointer(old_referrer) & (entry->mask);
+ size_t hash_displacement = 0;
+ while (entry->referrers[index] != old_referrer) {
+ index = (index+1) & entry->mask;
+ hash_displacement++;
+ if (hash_displacement > entry->max_hash_displacement) {
+ _objc_inform("attempted to remove unregistered weak referrer %p\n",
+ old_referrer);
+ return;
+ }
+ }
+ entry->referrers[index] = nil;
+ entry->num_refs--;
+}
-typedef std::pair<id, id *> WeakPair;
-typedef std::vector<WeakPair, WeakAllocator<WeakPair> > WeakPairVector;
-typedef std::vector<weak_referrer_t, WeakAllocator<WeakPair> > WeakReferrerVector;
+/**
+ * Add new_entry to the object's table of weak references.
+ * Does not check whether the referent is already in the table.
+ */
+static void weak_entry_insert(weak_table_t *weak_table, weak_entry_t *new_entry)
+{
+ weak_entry_t *weak_entries = weak_table->weak_entries;
+ assert(weak_entries != nil);
-static void append_referrer_no_lock(weak_referrer_array_t *list, id *new_referrer);
+ size_t index = hash_pointer(new_entry->referent) & (weak_table->mask);
+ size_t hash_displacement = 0;
+ while (weak_entries[index].referent != nil) {
+ index = (index+1) & weak_table->mask;
+ hash_displacement++;
+ }
-static inline uintptr_t hash_pointer(void *key) {
- uintptr_t k = (uintptr_t)key;
+ weak_entries[index] = *new_entry;
+ weak_table->num_entries++;
- // Code from CFSet.c
-#if __LP64__
- uintptr_t a = 0x4368726973746F70ULL;
- uintptr_t b = 0x686572204B616E65ULL;
-#else
- uintptr_t a = 0x4B616E65UL;
- uintptr_t b = 0x4B616E65UL;
-#endif
- uintptr_t c = 1;
- a += k;
-#if __LP64__
- a -= b; a -= c; a ^= (c >> 43);
- b -= c; b -= a; b ^= (a << 9);
- c -= a; c -= b; c ^= (b >> 8);
- a -= b; a -= c; a ^= (c >> 38);
- b -= c; b -= a; b ^= (a << 23);
- c -= a; c -= b; c ^= (b >> 5);
- a -= b; a -= c; a ^= (c >> 35);
- b -= c; b -= a; b ^= (a << 49);
- c -= a; c -= b; c ^= (b >> 11);
- a -= b; a -= c; a ^= (c >> 12);
- b -= c; b -= a; b ^= (a << 18);
- c -= a; c -= b; c ^= (b >> 22);
-#else
- a -= b; a -= c; a ^= (c >> 13);
- b -= c; b -= a; b ^= (a << 8);
- c -= a; c -= b; c ^= (b >> 13);
- a -= b; a -= c; a ^= (c >> 12);
- b -= c; b -= a; b ^= (a << 16);
- c -= a; c -= b; c ^= (b >> 5);
- a -= b; a -= c; a ^= (c >> 3);
- b -= c; b -= a; b ^= (a << 10);
- c -= a; c -= b; c ^= (b >> 15);
-#endif
- return c;
+ if (hash_displacement > weak_table->max_hash_displacement) {
+ weak_table->max_hash_displacement = hash_displacement;
+ }
}
-// Up until this size the weak referrer array grows one slot at a time. Above this size it grows by doubling.
-#define WEAK_TABLE_DOUBLE_SIZE 8
-// Grow the refs list. Rehashes the entries.
-static void grow_refs(weak_referrer_array_t *list)
+static void weak_resize(weak_table_t *weak_table, size_t new_size)
{
- size_t old_num_allocated = list->num_allocated;
- size_t num_refs = list->num_refs;
- weak_referrer_t *old_refs = list->refs;
- size_t new_allocated = old_num_allocated < WEAK_TABLE_DOUBLE_SIZE ? old_num_allocated + 1 : old_num_allocated + old_num_allocated;
- list->refs = (weak_referrer_t *)_malloc_internal(new_allocated * sizeof(weak_referrer_t));
- list->num_allocated = _malloc_size_internal(list->refs)/sizeof(weak_referrer_t);
- bzero(list->refs, list->num_allocated * sizeof(weak_referrer_t));
- // for larger tables drop one entry from the end to give an odd number of hash buckets for better hashing
- if ((list->num_allocated > WEAK_TABLE_DOUBLE_SIZE) && !(list->num_allocated & 1)) list->num_allocated--;
- list->num_refs = 0;
- list->max_hash_displacement = 0;
+ size_t old_size = TABLE_SIZE(weak_table);
+
+ weak_entry_t *old_entries = weak_table->weak_entries;
+ weak_entry_t *new_entries = (weak_entry_t *)
+ _calloc_internal(new_size, sizeof(weak_entry_t));
+
+ weak_table->mask = new_size - 1;
+ weak_table->weak_entries = new_entries;
+ weak_table->max_hash_displacement = 0;
+ weak_table->num_entries = 0; // restored by weak_entry_insert below
- size_t i;
- for (i=0; i < old_num_allocated && num_refs > 0; i++) {
- if (old_refs[i].referrer != NULL) {
- append_referrer_no_lock(list, old_refs[i].referrer);
- num_refs--;
+ if (old_entries) {
+ weak_entry_t *entry;
+ weak_entry_t *end = old_entries + old_size;
+ for (entry = old_entries; entry < end; entry++) {
+ if (entry->referent) {
+ weak_entry_insert(weak_table, entry);
+ }
}
+ _free_internal(old_entries);
}
- if (old_refs)
- _free_internal(old_refs);
}
-// Add the given referrer to list
-// Does not perform duplicate checking.
-static void append_referrer_no_lock(weak_referrer_array_t *list, id *new_referrer)
+// Grow the given zone's table of weak references if it is full.
+static void weak_grow_maybe(weak_table_t *weak_table)
{
- if ((list->num_refs == list->num_allocated) || ((list->num_refs >= WEAK_TABLE_DOUBLE_SIZE) && (list->num_refs >= list->num_allocated * 2 / 3))) {
- grow_refs(list);
- }
- size_t index = hash_pointer(new_referrer) % list->num_allocated, hash_displacement = 0;
- while (list->refs[index].referrer != NULL) {
- index++;
- hash_displacement++;
- if (index == list->num_allocated)
- index = 0;
- }
- if (list->max_hash_displacement < hash_displacement) {
- list->max_hash_displacement = hash_displacement;
- //malloc_printf("max_hash_displacement: %d allocated: %d\n", list->max_hash_displacement, list->num_allocated);
+ size_t old_size = TABLE_SIZE(weak_table);
+
+ // Grow if at least 3/4 full.
+ if (weak_table->num_entries >= old_size * 3 / 4) {
+ weak_resize(weak_table, old_size ? old_size*2 : 64);
}
- weak_referrer_t &ref = list->refs[index];
- ref.referrer = new_referrer;
- list->num_refs++;
}
-
-// Remove old_referrer from list, if it's present.
-// Does not remove duplicates.
-// fixme this is slow if old_referrer is not present.
-static void remove_referrer_no_lock(weak_referrer_array_t *list, id *old_referrer)
+// Shrink the table if it is mostly empty.
+static void weak_compact_maybe(weak_table_t *weak_table)
{
- size_t index = hash_pointer(old_referrer) % list->num_allocated;
- size_t start_index = index, hash_displacement = 0;
- while (list->refs[index].referrer != old_referrer) {
- index++;
- hash_displacement++;
- if (index == list->num_allocated)
- index = 0;
- if (index == start_index || hash_displacement > list->max_hash_displacement) {
- malloc_printf("attempted to remove unregistered weak referrer %p\n", old_referrer);
- return;
- }
+ size_t old_size = TABLE_SIZE(weak_table);
+
+ // Shrink if larger than 1024 buckets and at most 1/16 full.
+ if (old_size >= 1024 && old_size / 16 >= weak_table->num_entries) {
+ weak_resize(weak_table, old_size / 8);
+ // leaves new table no more than 1/2 full
}
- list->refs[index].referrer = NULL;
- list->num_refs--;
}
-// Add new_entry to the zone's table of weak references.
-// Does not check whether the referent is already in the table.
-// Does not update num_weak_refs.
-static void weak_entry_insert_no_lock(weak_table_t *weak_table, weak_entry_t *new_entry)
+/**
+ * Remove entry from the zone's table of weak references.
+ */
+static void weak_entry_remove(weak_table_t *weak_table, weak_entry_t *entry)
{
- weak_entry_t *weak_entries = weak_table->weak_entries;
- assert(weak_entries != NULL);
+ // remove entry
+ if (entry->out_of_line) _free_internal(entry->referrers);
+ bzero(entry, sizeof(*entry));
- size_t table_size = weak_table->max_weak_refs;
- size_t hash_index = hash_pointer(new_entry->referent) % table_size;
- size_t index = hash_index;
+ weak_table->num_entries--;
- do {
- weak_entry_t *entry = weak_entries + index;
- if (entry->referent == NULL) {
- *entry = *new_entry;
- return;
- }
- index++; if (index == table_size) index = 0;
- } while (index != hash_index);
- malloc_printf("no room for new entry in auto weak ref table!\n");
+ weak_compact_maybe(weak_table);
}
-// Remove entry from the zone's table of weak references, and rehash
-// Does not update num_weak_refs.
-static void weak_entry_remove_no_lock(weak_table_t *weak_table, weak_entry_t *entry)
+/**
+ * Return the weak reference table entry for the given referent.
+ * If there is no entry for referent, return NULL.
+ * Performs a lookup.
+ *
+ * @param weak_table
+ * @param referent The object. Must not be nil.
+ *
+ * @return The table of weak referrers to this object.
+ */
+static weak_entry_t *weak_entry_for_referent(weak_table_t *weak_table, id referent)
{
- // remove entry
- entry->referent = NULL;
- if (entry->referrers.refs) _free_internal(entry->referrers.refs);
- entry->referrers.refs = NULL;
- entry->referrers.num_refs = 0;
- entry->referrers.num_allocated = 0;
+ assert(referent);
- // rehash after entry
weak_entry_t *weak_entries = weak_table->weak_entries;
- size_t table_size = weak_table->max_weak_refs;
- size_t hash_index = entry - weak_entries;
- size_t index = hash_index;
-
- if (!weak_entries) return;
-
- do {
- index++; if (index == table_size) index = 0;
- if (!weak_entries[index].referent) return;
- weak_entry_t slot = weak_entries[index];
- weak_entries[index].referent = NULL;
- weak_entry_insert_no_lock(weak_table, &slot);
- } while (index != hash_index);
-}
+ if (!weak_entries) return nil;
-// Grow the given zone's table of weak references if it is full.
-static void weak_grow_maybe_no_lock(weak_table_t *weak_table)
-{
- if (weak_table->num_weak_refs >= weak_table->max_weak_refs * 3 / 4) {
- // grow table
- size_t old_max = weak_table->max_weak_refs;
- size_t new_max = old_max ? old_max * 2 + 1 : 15;
- weak_entry_t *old_entries = weak_table->weak_entries;
- weak_entry_t *new_entries = (weak_entry_t *)_calloc_internal(new_max, sizeof(weak_entry_t));
- weak_table->max_weak_refs = new_max;
- weak_table->weak_entries = new_entries;
-
- if (old_entries) {
- weak_entry_t *entry;
- weak_entry_t *end = old_entries + old_max;
- for (entry = old_entries; entry < end; entry++) {
- weak_entry_insert_no_lock(weak_table, entry);
- }
- _free_internal(old_entries);
+ size_t index = hash_pointer(referent) & weak_table->mask;
+ size_t hash_displacement = 0;
+ while (weak_table->weak_entries[index].referent != referent) {
+ index = (index+1) & weak_table->mask;
+ hash_displacement++;
+ if (hash_displacement > weak_table->max_hash_displacement) {
+ return nil;
}
}
-}
-
-// Return the weak reference table entry for the given referent.
-// If there is no entry for referent, return NULL.
-static weak_entry_t *weak_entry_for_referent(weak_table_t *weak_table, id referent)
-{
- weak_entry_t *weak_entries = weak_table->weak_entries;
-
- if (!weak_entries) return NULL;
- size_t table_size = weak_table->max_weak_refs;
- size_t hash_index = hash_pointer(referent) % table_size;
- size_t index = hash_index;
-
- do {
- weak_entry_t *entry = weak_entries + index;
- if (entry->referent == referent) return entry;
- if (entry->referent == NULL) return NULL;
- index++; if (index == table_size) index = 0;
- } while (index != hash_index);
-
- return NULL;
+ return &weak_table->weak_entries[index];
}
-// Unregister an already-registered weak reference.
-// This is used when referrer's storage is about to go away, but referent
-// isn't dead yet. (Otherwise, zeroing referrer later would be a
-// bad memory access.)
-// Does nothing if referent/referrer is not a currently active weak reference.
-// Does not zero referrer.
-// fixme currently requires old referent value to be passed in (lame)
-// fixme unregistration should be automatic if referrer is collected
-void weak_unregister_no_lock(weak_table_t *weak_table, id referent, id *referrer)
+/**
+ * Unregister an already-registered weak reference.
+ * This is used when referrer's storage is about to go away, but referent
+ * isn't dead yet. (Otherwise, zeroing referrer later would be a
+ * bad memory access.)
+ * Does nothing if referent/referrer is not a currently active weak reference.
+ * Does not zero referrer.
+ *
+ * FIXME currently requires old referent value to be passed in (lame)
+ * FIXME unregistration should be automatic if referrer is collected
+ *
+ * @param weak_table The global weak table.
+ * @param referent The object.
+ * @param referrer The weak reference.
+ */
+void
+weak_unregister_no_lock(weak_table_t *weak_table, id referent, id *referrer)
{
weak_entry_t *entry;
- if ((entry = weak_entry_for_referent(weak_table, referent))) {
- remove_referrer_no_lock(&entry->referrers, referrer);
- if (entry->referrers.num_refs == 0) {
- weak_entry_remove_no_lock(weak_table, entry);
- weak_table->num_weak_refs--;
- }
- }
-
- // Do not set *referrer = NULL. objc_storeWeak() requires that the
- // value not change.
-}
+ if (!referent) return;
-
-void
-arr_clear_deallocating(weak_table_t *weak_table, id referent) {
- {
- weak_entry_t *entry = weak_entry_for_referent(weak_table, referent);
- if (entry == NULL) {
- /// XXX shouldn't happen, but does with mismatched CF/objc
- //printf("XXX no entry for clear deallocating %p\n", referent);
- return;
+ if ((entry = weak_entry_for_referent(weak_table, referent))) {
+ remove_referrer(entry, referrer);
+ bool empty = true;
+ if (entry->out_of_line && entry->num_refs != 0) {
+ empty = false;
}
- // zero out references
- for (size_t i = 0; i < entry->referrers.num_allocated; ++i) {
- id *referrer = entry->referrers.refs[i].referrer;
- if (referrer) {
- if (*referrer == referent) {
- *referrer = nil;
- }
- else if (*referrer) {
- _objc_inform("__weak variable @ %p holds %p instead of %p\n", referrer, *referrer, referent);
+ else {
+ for (size_t i = 0; i < WEAK_INLINE_COUNT; i++) {
+ if (entry->inline_referrers[i]) {
+ empty = false;
+ break;
}
}
}
-
- weak_entry_remove_no_lock(weak_table, entry);
- weak_table->num_weak_refs--;
+
+ if (empty) {
+ weak_entry_remove(weak_table, entry);
+ }
}
-}
+ // Do not set *referrer = nil. objc_storeWeak() requires that the
+ // value not change.
+}
-id weak_register_no_lock(weak_table_t *weak_table, id referent, id *referrer) {
- if (referent && !OBJC_IS_TAGGED_PTR(referent)) {
+/**
+ * Registers a new (object, weak pointer) pair. Creates a new weak
+ * object entry if it does not exist.
+ *
+ * @param weak_table The global weak table.
+ * @param referent The object pointed to by the weak reference.
+ * @param referrer The weak pointer address.
+ */
+id
+weak_register_no_lock(weak_table_t *weak_table, id referent, id *referrer)
+{
+ if (referent && !referent->isTaggedPointer()) {
// ensure that the referenced object is viable
BOOL (*allowsWeakReference)(id, SEL) = (BOOL(*)(id, SEL))
- class_getMethodImplementation(object_getClass(referent),
+ object_getMethodImplementation(referent,
@selector(allowsWeakReference));
if ((IMP)allowsWeakReference != _objc_msgForward) {
if (! (*allowsWeakReference)(referent, @selector(allowsWeakReference))) {
- _objc_fatal("cannot form weak reference to instance (%p) of class %s", referent, object_getClassName(referent));
+ _objc_fatal("Cannot form weak reference to instance (%p) of class %s. It is possible that this object was over-released, or is in the process of deallocation.", (void*)referent, object_getClassName(referent));
}
}
else {
- return NULL;
+ return nil;
}
// now remember it and where it is being stored
weak_entry_t *entry;
if ((entry = weak_entry_for_referent(weak_table, referent))) {
- append_referrer_no_lock(&entry->referrers, referrer);
+ append_referrer(entry, referrer);
}
else {
weak_entry_t new_entry;
new_entry.referent = referent;
- new_entry.referrers.refs = NULL;
- new_entry.referrers.num_refs = 0;
- new_entry.referrers.num_allocated = 0;
- append_referrer_no_lock(&new_entry.referrers, referrer);
- weak_table->num_weak_refs++;
- weak_grow_maybe_no_lock(weak_table);
- weak_entry_insert_no_lock(weak_table, &new_entry);
+ new_entry.out_of_line = 0;
+ new_entry.inline_referrers[0] = referrer;
+ for (size_t i = 1; i < WEAK_INLINE_COUNT; i++) {
+ new_entry.inline_referrers[i] = nil;
+ }
+
+ weak_grow_maybe(weak_table);
+ weak_entry_insert(weak_table, &new_entry);
}
}
return referent;
}
+/**
+ * Called by dealloc; nils out all weak pointers that point to the
+ * provided object so that they can no longer be used.
+ *
+ * @param weak_table
+ * @param referent The object being deallocated.
+ */
+void
+weak_clear_no_lock(weak_table_t *weak_table, id referent)
+{
+ weak_entry_t *entry = weak_entry_for_referent(weak_table, referent);
+ if (entry == nil) {
+ /// XXX shouldn't happen, but does with mismatched CF/objc
+ //printf("XXX no entry for clear deallocating %p\n", referent);
+ return;
+ }
-// Automated Retain Release (ARR) support
-
-id
-arr_read_weak_reference(weak_table_t *weak_table, id *referrer) {
- id referent;
- // find entry and mark that it needs retaining
- {
- referent = *referrer;
- if (OBJC_IS_TAGGED_PTR(referent)) return referent;
- weak_entry_t *entry;
- if (referent == NULL || !(entry = weak_entry_for_referent(weak_table, referent))) {
- *referrer = NULL;
- return NULL;
- }
- BOOL (*tryRetain)(id, SEL) = (BOOL(*)(id, SEL))
- class_getMethodImplementation(object_getClass(referent),
- @selector(retainWeakReference));
- if ((IMP)tryRetain != _objc_msgForward) {
- //printf("sending _tryRetain for %p\n", referent);
- if (! (*tryRetain)(referent, @selector(retainWeakReference))) {
- //printf("_tryRetain(%p) tried and failed!\n", referent);
- return NULL;
+ // zero out references
+ weak_referrer_t *referrers;
+ size_t count;
+
+ if (entry->out_of_line) {
+ referrers = entry->referrers;
+ count = TABLE_SIZE(entry);
+ }
+ else {
+ referrers = entry->inline_referrers;
+ count = WEAK_INLINE_COUNT;
+ }
+
+ for (size_t i = 0; i < count; ++i) {
+ id *referrer = referrers[i];
+ if (referrer) {
+ if (*referrer == referent) {
+ *referrer = nil;
+ }
+ else if (*referrer) {
+ _objc_inform("__weak variable @ %p holds %p instead of %p\n", referrer, (void*)*referrer, (void*)referent);
}
- //else printf("_tryRetain(%p) succeeded\n", referent);
- }
- else {
- *referrer = NULL;
- return NULL;
}
}
+
+ weak_entry_remove(weak_table, entry);
+}
+
+
+/**
+ * This function gets called when the value of a weak pointer is being
+ * used in an expression. Called by objc_loadWeakRetained() which is
+ * ultimately called by objc_loadWeak(). The objective is to assert that
+ * there is in fact a weak-table entry for this particular object, and to
+ * retain that object so it is not deallocated while the weak pointer is in use.
+ *
+ * @param weak_table
+ * @param referrer The weak pointer address.
+ */
+id
+weak_read_no_lock(weak_table_t *weak_table, id *referrer)
+{
+ id referent = *referrer;
+ if (referent->isTaggedPointer()) return referent;
+
+ weak_entry_t *entry;
+ if (referent == nil || !(entry = weak_entry_for_referent(weak_table, referent))) {
+ *referrer = nil;
+ return nil;
+ }
+
+ BOOL (*tryRetain)(id, SEL) = (BOOL(*)(id, SEL))
+ object_getMethodImplementation(referent,
+ @selector(retainWeakReference));
+ if ((IMP)tryRetain == _objc_msgForward) {
+ *referrer = nil;
+ return nil;
+ }
+ if (! (*tryRetain)(referent, @selector(retainWeakReference))) {
+ return nil;
+ }
+
return referent;
}
#include <Availability.h>
#include <objc/objc-api.h>
-
+#if !OBJC_TYPES_DEFINED
+/// An opaque type that represents an Objective-C class.
typedef struct objc_class *Class;
-typedef struct objc_object {
- Class isa;
-} *id;
+/// Represents an instance of a class.
+struct objc_object {
+ Class isa OBJC_ISA_AVAILABILITY;
+};
+
+/// A pointer to an instance of a class.
+typedef struct objc_object *id;
+#endif
-typedef struct objc_selector *SEL;
+/// An opaque type that represents a method selector.
+typedef struct objc_selector *SEL;
+/// A pointer to the function of a method implementation.
#if !OBJC_OLD_DISPATCH_PROTOTYPES
typedef void (*IMP)(void /* id, SEL, ... */ );
#else
#define OBJC_BOOL_DEFINED
-typedef signed char BOOL;
+/// Type to represent a boolean value.
+typedef signed char BOOL;
// BOOL is explicitly signed so @encode(BOOL) == "c" rather than "C"
// even if -funsigned-char is used.
# endif
#endif
-#if ! (defined(__OBJC_GC__) || __has_feature(objc_arr))
+#if ! (defined(__OBJC_GC__) || __has_feature(objc_arc))
#define __strong /* empty */
#endif
-#if !__has_feature(objc_arr)
+#if !__has_feature(objc_arc)
#define __unsafe_unretained /* empty */
#define __autoreleasing /* empty */
#endif
+/**
+ * Returns the name of the method specified by a given selector.
+ *
+ * @param sel A pointer of type \c SEL. Pass the selector whose name you wish to determine.
+ *
+ * @return A C string indicating the name of the selector.
+ */
OBJC_EXPORT const char *sel_getName(SEL sel)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
+
+/**
+ * Registers a method with the Objective-C runtime system, maps the method
+ * name to a selector, and returns the selector value.
+ *
+ * @param str A pointer to a C string. Pass the name of the method you wish to register.
+ *
+ * @return A pointer of type SEL specifying the selector for the named method.
+ *
+ * @note You must register a method name with the Objective-C runtime system to obtain the
+ * method’s selector before you can add the method to a class definition. If the method name
+ * has already been registered, this function simply returns the selector.
+ */
OBJC_EXPORT SEL sel_registerName(const char *str)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
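/* Illustrative usage sketch (not part of this header): registering a selector
 * by name and reading its name back. The selector name "description" is just
 * an example.
 *
 *     SEL sel = sel_registerName("description");
 *     const char *name = sel_getName(sel);   // yields "description"
 */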
+
+/**
+ * Returns the class name of a given object.
+ *
+ * @param obj An Objective-C object.
+ *
+ * @return The name of the class of which \e obj is an instance.
+ */
OBJC_EXPORT const char *object_getClassName(id obj)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
+
+/**
+ * Returns a pointer to any extra bytes allocated with a given object instance.
+ *
+ * @param obj An Objective-C object.
+ *
+ * @return A pointer to any extra bytes allocated with \e obj. If \e obj was
+ * not allocated with any extra bytes, then dereferencing the returned pointer is undefined.
+ *
+ * @note This function returns a pointer to any extra bytes allocated with the instance
+ * (as specified by \c class_createInstance with extraBytes>0). This memory follows the
+ * object's ordinary ivars, but may not be adjacent to the last ivar.
+ * @note The returned pointer is guaranteed to be pointer-size aligned, even if the area following
+ * the object's last ivar is less aligned than that. Alignment greater than pointer-size is never
+ * guaranteed, even if the area following the object's last ivar is more aligned than that.
+ * @note In a garbage-collected environment, the memory is scanned conservatively.
+ */
OBJC_EXPORT void *object_getIndexedIvars(id obj)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
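/* Illustrative usage sketch (not part of this header): pairing the extraBytes
 * argument of class_createInstance (declared in <objc/runtime.h>) with
 * object_getIndexedIvars. someClass stands in for any class object.
 *
 *     id obj = class_createInstance(someClass, 16);  // 16 extra bytes
 *     void *extra = object_getIndexedIvars(obj);     // points at those bytes
 */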
+
+/**
+ * Identifies a selector as being valid or invalid.
+ *
+ * @param sel The selector you want to identify.
+ *
+ * @return YES if selector is valid and has a function implementation, NO otherwise.
+ *
+ * @warning On some platforms, an invalid reference (to invalid memory addresses) can cause
+ * a crash.
+ */
OBJC_EXPORT BOOL sel_isMapped(SEL sel)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
+
+/**
+ * Registers a method name with the Objective-C runtime system.
+ *
+ * @param str A pointer to a C string. Pass the name of the method you wish to register.
+ *
+ * @return A pointer of type SEL specifying the selector for the named method.
+ *
+ * @note The implementation of this method is identical to the implementation of \c sel_registerName.
+ * @note Prior to OS X version 10.0, this method tried to find the selector mapped to the given name
+ * and returned \c NULL if the selector was not found. This was changed for safety, because it was
+ * observed that many of the callers of this function did not check the return value for \c NULL.
+ */
OBJC_EXPORT SEL sel_getUid(const char *str)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
#include <stdint.h>
#include <stddef.h>
#include <Availability.h>
-#include <AvailabilityMacros.h>
#include <TargetConditionals.h>
#if TARGET_OS_MAC
/* Types */
+#if !OBJC_TYPES_DEFINED
+
+/// An opaque type that represents a method in a class definition.
typedef struct objc_method *Method;
+
+/// An opaque type that represents an instance variable.
typedef struct objc_ivar *Ivar;
+
+/// An opaque type that represents a category.
typedef struct objc_category *Category;
+
+/// An opaque type that represents an Objective-C declared property.
typedef struct objc_property *objc_property_t;
struct objc_class {
- Class isa;
+ Class isa OBJC_ISA_AVAILABILITY;
#if !__OBJC2__
Class super_class OBJC2_UNAVAILABLE;
} OBJC2_UNAVAILABLE;
/* Use `Class` instead of `struct objc_class *` */
+#endif
+
#ifdef __OBJC__
@class Protocol;
#else
typedef struct objc_object Protocol;
#endif
+/// Defines a method
struct objc_method_description {
- SEL name;
- char *types;
+ SEL name; /**< The name of the method */
+ char *types; /**< The types of the method arguments */
};
+/// Defines a property attribute
typedef struct {
- const char *name;
- const char *value;
+ const char *name; /**< The name of the attribute */
+ const char *value; /**< The value of the attribute (usually empty) */
} objc_property_attribute_t;
/* Functions */
+/* Working with Instances */
+
+/**
+ * Returns a copy of a given object.
+ *
+ * @param obj An Objective-C object.
+ * @param size The size of the object \e obj.
+ *
+ * @return A copy of \e obj.
+ */
OBJC_EXPORT id object_copy(id obj, size_t size)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0)
OBJC_ARC_UNAVAILABLE;
+
+/**
+ * Frees the memory occupied by a given object.
+ *
+ * @param obj An Objective-C object.
+ *
+ * @return nil
+ */
OBJC_EXPORT id object_dispose(id obj)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0)
OBJC_ARC_UNAVAILABLE;
+/**
+ * Returns the class of an object.
+ *
+ * @param obj The object you want to inspect.
+ *
+ * @return The class object of which \e object is an instance,
+ * or \c Nil if \e object is \c nil.
+ */
OBJC_EXPORT Class object_getClass(id obj)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Sets the class of an object.
+ *
+ * @param obj The object to modify.
+ * @param cls A class object.
+ *
+ * @return The previous value of \e object's class, or \c Nil if \e object is \c nil.
+ */
OBJC_EXPORT Class object_setClass(id obj, Class cls)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+/**
+ * Returns the class name of a given object.
+ *
+ * @param obj An Objective-C object.
+ *
+ * @return The name of the class of which \e obj is an instance.
+ */
OBJC_EXPORT const char *object_getClassName(id obj)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
+
+/**
+ * Returns a pointer to any extra bytes allocated with a given object instance.
+ *
+ * @param obj An Objective-C object.
+ *
+ * @return A pointer to any extra bytes allocated with \e obj. If \e obj was
+ * not allocated with any extra bytes, then dereferencing the returned pointer is undefined.
+ *
+ * @note This function returns a pointer to any extra bytes allocated with the instance
+ * (as specified by \c class_createInstance with extraBytes>0). This memory follows the
+ * object's ordinary ivars, but may not be adjacent to the last ivar.
+ * @note The returned pointer is guaranteed to be pointer-size aligned, even if the area following
+ * the object's last ivar is less aligned than that. Alignment greater than pointer-size is never
+ * guaranteed, even if the area following the object's last ivar is more aligned than that.
+ * @note In a garbage-collected environment, the memory is scanned conservatively.
+ */
OBJC_EXPORT void *object_getIndexedIvars(id obj)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0)
OBJC_ARC_UNAVAILABLE;
+/**
+ * Reads the value of an instance variable in an object.
+ *
+ * @param obj The object containing the instance variable whose value you want to read.
+ * @param ivar The Ivar describing the instance variable whose value you want to read.
+ *
+ * @return The value of the instance variable specified by \e ivar, or \c nil if \e object is \c nil.
+ *
+ * @note \c object_getIvar is faster than \c object_getInstanceVariable if the Ivar
+ * for the instance variable is already known.
+ */
OBJC_EXPORT id object_getIvar(id obj, Ivar ivar)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Sets the value of an instance variable in an object.
+ *
+ * @param obj The object containing the instance variable whose value you want to set.
+ * @param ivar The Ivar describing the instance variable whose value you want to set.
+ * @param value The new value for the instance variable.
+ *
+ * @note \c object_setIvar is faster than \c object_setInstanceVariable if the Ivar
+ * for the instance variable is already known.
+ */
OBJC_EXPORT void object_setIvar(id obj, Ivar ivar, id value)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
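/* Illustrative usage sketch (not part of this header): reading and writing an
 * instance variable through its Ivar handle. The ivar name "_name" and the
 * variables obj and newValue are placeholders.
 *
 *     Ivar iv = class_getInstanceVariable(object_getClass(obj), "_name");
 *     object_setIvar(obj, iv, newValue);
 *     id current = object_getIvar(obj, iv);
 */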
+/**
+ * Changes the value of an instance variable of a class instance.
+ *
+ * @param obj A pointer to an instance of a class. Pass the object containing
+ * the instance variable whose value you wish to modify.
+ * @param name A C string. Pass the name of the instance variable whose value you wish to modify.
+ * @param value The new value for the instance variable.
+ *
+ * @return A pointer to the \c Ivar data structure that defines the type and
+ * name of the instance variable specified by \e name.
+ */
OBJC_EXPORT Ivar object_setInstanceVariable(id obj, const char *name, void *value)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0)
OBJC_ARC_UNAVAILABLE;
+
+/**
+ * Obtains the value of an instance variable of a class instance.
+ *
+ * @param obj A pointer to an instance of a class. Pass the object containing
+ * the instance variable whose value you wish to obtain.
+ * @param name A C string. Pass the name of the instance variable whose value you wish to obtain.
+ * @param outValue On return, contains a pointer to the value of the instance variable.
+ *
+ * @return A pointer to the \c Ivar data structure that defines the type and name of
+ * the instance variable specified by \e name.
+ */
OBJC_EXPORT Ivar object_getInstanceVariable(id obj, const char *name, void **outValue)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0)
OBJC_ARC_UNAVAILABLE;
-OBJC_EXPORT id objc_getClass(const char *name)
+
+/* Obtaining Class Definitions */
+
+/**
+ * Returns the class definition of a specified class.
+ *
+ * @param name The name of the class to look up.
+ *
+ * @return The Class object for the named class, or \c nil
+ * if the class is not registered with the Objective-C runtime.
+ *
+ * @note \c objc_getClass is different from \c objc_lookUpClass in that if the class
+ * is not registered, \c objc_getClass calls the class handler callback and then checks
+ * a second time to see whether the class is registered. \c objc_lookUpClass does
+ * not call the class handler callback.
+ *
+ * @warning Earlier implementations of this function (prior to OS X v10.0)
+ * terminate the program if the class does not exist.
+ */
+OBJC_EXPORT Class objc_getClass(const char *name)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
-OBJC_EXPORT id objc_getMetaClass(const char *name)
+
+/**
+ * Returns the metaclass definition of a specified class.
+ *
+ * @param name The name of the class to look up.
+ *
+ * @return The \c Class object for the metaclass of the named class, or \c nil if the class
+ * is not registered with the Objective-C runtime.
+ *
+ * @note If the definition for the named class is not registered, this function calls the class handler
+ * callback and then checks a second time to see if the class is registered. However, every class
+ * definition must have a valid metaclass definition, and so the metaclass definition is always returned,
+ * whether it’s valid or not.
+ */
+OBJC_EXPORT Class objc_getMetaClass(const char *name)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
-OBJC_EXPORT id objc_lookUpClass(const char *name)
+
+/**
+ * Returns the class definition of a specified class.
+ *
+ * @param name The name of the class to look up.
+ *
+ * @return The Class object for the named class, or \c nil if the class
+ * is not registered with the Objective-C runtime.
+ *
+ * @note \c objc_getClass is different from this function in that if the class is not
+ * registered, \c objc_getClass calls the class handler callback and then checks a second
+ * time to see whether the class is registered. This function does not call the class handler callback.
+ */
+OBJC_EXPORT Class objc_lookUpClass(const char *name)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
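/* Illustrative usage sketch (not part of this header): both calls look up a
 * class by name, but objc_lookUpClass never invokes the class handler
 * callback. "NSString" is only an example of a registered class name.
 *
 *     Class c1 = objc_getClass("NSString");
 *     Class c2 = objc_lookUpClass("NSString");  // nil if not registered
 */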
-OBJC_EXPORT id objc_getRequiredClass(const char *name)
+
+/**
+ * Returns the class definition of a specified class.
+ *
+ * @param name The name of the class to look up.
+ *
+ * @return The Class object for the named class.
+ *
+ * @note This function is the same as \c objc_getClass, but kills the process if the class is not found.
+ * @note This function is used by ZeroLink, where failing to find a class would be a compile-time link error without ZeroLink.
+ */
+OBJC_EXPORT Class objc_getRequiredClass(const char *name)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
-OBJC_EXPORT Class objc_getFutureClass(const char *name)
- __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0)
- OBJC_ARC_UNAVAILABLE;
-OBJC_EXPORT void objc_setFutureClass(Class cls, const char *name)
- __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0)
- OBJC_ARC_UNAVAILABLE;
+
+/**
+ * Obtains the list of registered class definitions.
+ *
+ * @param buffer An array of \c Class values. On output, each \c Class value points to
+ * one class definition, up to either \e bufferCount or the total number of registered classes,
+ * whichever is less. You can pass \c NULL to obtain the total number of registered class
+ * definitions without actually retrieving any class definitions.
+ * @param bufferCount An integer value. Pass the number of pointers for which you have allocated space
+ * in \e buffer. On return, this function fills in only this number of elements. If this number is less
+ * than the number of registered classes, this function returns an arbitrary subset of the registered classes.
+ *
+ * @return An integer value indicating the total number of registered classes.
+ *
+ * @note The Objective-C runtime library automatically registers all the classes defined in your source code.
+ * You can create class definitions at runtime and register them with the \c objc_addClass function.
+ *
+ * @warning You cannot assume that class objects you get from this function are classes that inherit from \c NSObject,
+ * so you cannot safely call any methods on such classes without detecting that the method is implemented first.
+ */
OBJC_EXPORT int objc_getClassList(Class *buffer, int bufferCount)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
+
+/**
+ * Creates and returns a list of pointers to all registered class definitions.
+ *
+ * @param outCount An integer pointer used to store the number of classes returned by
+ * this function in the list. It can be \c nil.
+ *
+ * @return A nil-terminated array of classes. It must be freed with \c free().
+ *
+ * @see objc_getClassList
+ */
OBJC_EXPORT Class *objc_copyClassList(unsigned int *outCount)
__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_3_1);
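+
+/*
+ * A sketch of the copy-style variant, which allocates the array for you:
+ *
+ *   unsigned int count = 0;
+ *   Class *classes = objc_copyClassList(&count);
+ *   for (unsigned int i = 0; i < count; i++) {
+ *       printf("%s\n", class_getName(classes[i]));
+ *   }
+ *   free(classes);
+ */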
-OBJC_EXPORT Protocol *objc_getProtocol(const char *name)
- __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
-OBJC_EXPORT Protocol * __unsafe_unretained *objc_copyProtocolList(unsigned int *outCount)
- __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+/* Working with Classes */
+
+/**
+ * Returns the name of a class.
+ *
+ * @param cls A class object.
+ *
+ * @return The name of the class, or the empty string if \e cls is \c Nil.
+ */
OBJC_EXPORT const char *class_getName(Class cls)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Returns a Boolean value that indicates whether a class object is a metaclass.
+ *
+ * @param cls A class object.
+ *
+ * @return \c YES if \e cls is a metaclass, \c NO if \e cls is a non-meta class,
+ * \c NO if \e cls is \c Nil.
+ */
OBJC_EXPORT BOOL class_isMetaClass(Class cls)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Returns the superclass of a class.
+ *
+ * @param cls A class object.
+ *
+ * @return The superclass of the class, or \c Nil if
+ * \e cls is a root class, or \c Nil if \e cls is \c Nil.
+ *
+ * @note You should usually use \c NSObject's \c superclass method instead of this function.
+ */
OBJC_EXPORT Class class_getSuperclass(Class cls)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Sets the superclass of a given class.
+ *
+ * @param cls The class whose superclass you want to set.
+ * @param newSuper The new superclass for cls.
+ *
+ * @return The old superclass for cls.
+ *
+ * @warning You should not use this function.
+ */
OBJC_EXPORT Class class_setSuperclass(Class cls, Class newSuper)
- AVAILABLE_MAC_OS_X_VERSION_10_5_AND_LATER_BUT_DEPRECATED;
+ __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5,__MAC_10_5, __IPHONE_2_0,__IPHONE_2_0);
+/**
+ * Returns the version number of a class definition.
+ *
+ * @param cls A pointer to a \c Class data structure. Pass
+ * the class definition for which you wish to obtain the version.
+ *
+ * @return An integer indicating the version number of the class definition.
+ *
+ * @see class_setVersion
+ */
OBJC_EXPORT int class_getVersion(Class cls)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
+
+/**
+ * Sets the version number of a class definition.
+ *
+ * @param cls A pointer to a \c Class data structure.
+ * Pass the class definition for which you wish to set the version.
+ * @param version An integer. Pass the new version number of the class definition.
+ *
+ * @note You can use the version number of the class definition to provide versioning of the
+ * interface that your class represents to other classes. This is especially useful for object
+ * serialization (that is, archiving of the object in a flattened form), where it is important to
+ * recognize changes to the layout of the instance variables in different class-definition versions.
+ * @note Classes derived from the Foundation framework \c NSObject class can set the class-definition
+ * version number using the \c setVersion: class method, which is implemented using the \c class_setVersion function.
+ */
OBJC_EXPORT void class_setVersion(Class cls, int version)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
+/**
+ * Returns the size of instances of a class.
+ *
+ * @param cls A class object.
+ *
+ * @return The size in bytes of instances of the class \e cls, or \c 0 if \e cls is \c Nil.
+ */
OBJC_EXPORT size_t class_getInstanceSize(Class cls)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+/**
+ * Returns the \c Ivar for a specified instance variable of a given class.
+ *
+ * @param cls The class whose instance variable you wish to obtain.
+ * @param name The name of the instance variable definition to obtain.
+ *
+ * @return A pointer to an \c Ivar data structure containing information about
+ * the instance variable specified by \e name.
+ */
OBJC_EXPORT Ivar class_getInstanceVariable(Class cls, const char *name)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
+
+/**
+ * Returns the Ivar for a specified class variable of a given class.
+ *
+ * @param cls The class definition whose class variable you wish to obtain.
+ * @param name The name of the class variable definition to obtain.
+ *
+ * @return A pointer to an \c Ivar data structure containing information about the class variable specified by \e name.
+ */
OBJC_EXPORT Ivar class_getClassVariable(Class cls, const char *name)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Describes the instance variables declared by a class.
+ *
+ * @param cls The class to inspect.
+ * @param outCount On return, contains the length of the returned array.
+ * If outCount is NULL, the length is not returned.
+ *
+ * @return An array of pointers of type Ivar describing the instance variables declared by the class.
+ * Any instance variables declared by superclasses are not included. The array contains *outCount
+ * pointers followed by a NULL terminator. You must free the array with free().
+ *
+ * If the class declares no instance variables, or cls is Nil, NULL is returned and *outCount is 0.
+ */
OBJC_EXPORT Ivar *class_copyIvarList(Class cls, unsigned int *outCount)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+/**
+ * Returns a specified instance method for a given class.
+ *
+ * @param cls The class you want to inspect.
+ * @param name The selector of the method you want to retrieve.
+ *
+ * @return The method that corresponds to the implementation of the selector specified by
+ * \e name for the class specified by \e cls, or \c NULL if the specified class or its
+ * superclasses do not contain an instance method with the specified selector.
+ *
+ * @note This function searches superclasses for implementations, whereas \c class_copyMethodList does not.
+ */
OBJC_EXPORT Method class_getInstanceMethod(Class cls, SEL name)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
+
+/**
+ * Returns a pointer to the data structure describing a given class method for a given class.
+ *
+ * @param cls A pointer to a class definition. Pass the class that contains the method you want to retrieve.
+ * @param name A pointer of type \c SEL. Pass the selector of the method you want to retrieve.
+ *
+ * @return A pointer to the \c Method data structure that corresponds to the implementation of the
+ * selector specified by \e name for the class specified by \e cls, or \c NULL if the specified
+ * class or its superclasses do not contain a class method with the specified selector.
+ *
+ * @note Note that this function searches superclasses for implementations,
+ * whereas \c class_copyMethodList does not.
+ */
OBJC_EXPORT Method class_getClassMethod(Class cls, SEL name)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
+
+/**
+ * Returns the function pointer that would be called if a
+ * particular message were sent to an instance of a class.
+ *
+ * @param cls The class you want to inspect.
+ * @param name A selector.
+ *
+ * @return The function pointer that would be called if \c [object name] were called
+ * with an instance of the class, or \c NULL if \e cls is \c Nil.
+ *
+ * @note \c class_getMethodImplementation may be faster than \c method_getImplementation(class_getInstanceMethod(cls, name)).
+ * @note The function pointer returned may be a function internal to the runtime instead of
+ * an actual method implementation. For example, if instances of the class do not respond to
+ * the selector, the function pointer returned will be part of the runtime's message forwarding machinery.
+ */
OBJC_EXPORT IMP class_getMethodImplementation(Class cls, SEL name)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
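+
+/*
+ * A sketch of caching and calling the resolved IMP directly (obj and the
+ * selector are illustrative; the function-pointer cast must match the
+ * method's actual signature):
+ *
+ *   SEL sel = @selector(description);
+ *   IMP imp = class_getMethodImplementation(object_getClass(obj), sel);
+ *   NSString *desc = ((NSString *(*)(id, SEL))imp)(obj, sel);
+ */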
+
+/**
+ * Returns the function pointer that would be called if a particular
+ * message were sent to an instance of a class.
+ *
+ * @param cls The class you want to inspect.
+ * @param name A selector.
+ *
+ * @return The function pointer that would be called if \c [object name] were called
+ * with an instance of the class, or \c NULL if \e cls is \c Nil.
+ */
OBJC_EXPORT IMP class_getMethodImplementation_stret(Class cls, SEL name)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Returns a Boolean value that indicates whether instances of a class respond to a particular selector.
+ *
+ * @param cls The class you want to inspect.
+ * @param sel A selector.
+ *
+ * @return \c YES if instances of the class respond to the selector, otherwise \c NO.
+ *
+ * @note You should usually use \c NSObject's \c respondsToSelector: or \c instancesRespondToSelector:
+ * methods instead of this function.
+ */
OBJC_EXPORT BOOL class_respondsToSelector(Class cls, SEL sel)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Describes the instance methods implemented by a class.
+ *
+ * @param cls The class you want to inspect.
+ * @param outCount On return, contains the length of the returned array.
+ * If outCount is NULL, the length is not returned.
+ *
+ * @return An array of pointers of type Method describing the instance methods
+ * implemented by the class—any instance methods implemented by superclasses are not included.
+ * The array contains *outCount pointers followed by a NULL terminator. You must free the array with free().
+ *
+ * If cls implements no instance methods, or cls is Nil, returns NULL and *outCount is 0.
+ *
+ * @note To get the class methods of a class, use \c class_copyMethodList(object_getClass(cls), &count).
+ * @note To get the implementations of methods that may be implemented by superclasses,
+ * use \c class_getInstanceMethod or \c class_getClassMethod.
+ */
OBJC_EXPORT Method *class_copyMethodList(Class cls, unsigned int *outCount)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
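+
+/*
+ * A minimal sketch (NSString is used only as a familiar example class):
+ *
+ *   unsigned int count = 0;
+ *   Method *methods = class_copyMethodList([NSString class], &count);
+ *   for (unsigned int i = 0; i < count; i++) {
+ *       printf("%s\n", sel_getName(method_getName(methods[i])));
+ *   }
+ *   free(methods);
+ *
+ *   // Class methods live on the metaclass:
+ *   Method *classMethods = class_copyMethodList(object_getClass([NSString class]), &count);
+ *   free(classMethods);
+ */
+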
+/**
+ * Returns a Boolean value that indicates whether a class conforms to a given protocol.
+ *
+ * @param cls The class you want to inspect.
+ * @param protocol A protocol.
+ *
+ * @return YES if cls conforms to protocol, otherwise NO.
+ *
+ * @note You should usually use NSObject's conformsToProtocol: method instead of this function.
+ */
OBJC_EXPORT BOOL class_conformsToProtocol(Class cls, Protocol *protocol)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Describes the protocols adopted by a class.
+ *
+ * @param cls The class you want to inspect.
+ * @param outCount On return, contains the length of the returned array.
+ * If outCount is NULL, the length is not returned.
+ *
+ * @return An array of pointers of type Protocol* describing the protocols adopted
+ * by the class. Any protocols adopted by superclasses or other protocols are not included.
+ * The array contains *outCount pointers followed by a NULL terminator. You must free the array with free().
+ *
+ * If cls adopts no protocols, or cls is Nil, returns NULL and *outCount is 0.
+ */
OBJC_EXPORT Protocol * __unsafe_unretained *class_copyProtocolList(Class cls, unsigned int *outCount)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+/**
+ * Returns a property with a given name of a given class.
+ *
+ * @param cls The class you want to inspect.
+ * @param name The name of the property you want to inspect.
+ *
+ * @return A pointer of type \c objc_property_t describing the property, or
+ * \c NULL if the class does not declare a property with that name,
+ * or \c NULL if \e cls is \c Nil.
+ */
OBJC_EXPORT objc_property_t class_getProperty(Class cls, const char *name)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Describes the properties declared by a class.
+ *
+ * @param cls The class you want to inspect.
+ * @param outCount On return, contains the length of the returned array.
+ * If \e outCount is \c NULL, the length is not returned.
+ *
+ * @return An array of pointers of type \c objc_property_t describing the properties
+ * declared by the class. Any properties declared by superclasses are not included.
+ * The array contains \c *outCount pointers followed by a \c NULL terminator. You must free the array with \c free().
+ *
+ * If \e cls declares no properties, or \e cls is \c Nil, returns \c NULL and \c *outCount is \c 0.
+ */
OBJC_EXPORT objc_property_t *class_copyPropertyList(Class cls, unsigned int *outCount)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+/**
+ * Returns a description of the \c Ivar layout for a given class.
+ *
+ * @param cls The class to inspect.
+ *
+ * @return A description of the \c Ivar layout for \e cls.
+ */
OBJC_EXPORT const uint8_t *class_getIvarLayout(Class cls)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
-OBJC_EXPORT const uint8_t *class_getWeakIvarLayout(Class cls)
- __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
-OBJC_EXPORT id class_createInstance(Class cls, size_t extraBytes)
- __OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0)
- OBJC_ARC_UNAVAILABLE;
-OBJC_EXPORT id objc_constructInstance(Class cls, void *bytes)
- __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_0)
- OBJC_ARC_UNAVAILABLE;
-OBJC_EXPORT void *objc_destructInstance(id obj)
- __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_0)
- OBJC_ARC_UNAVAILABLE;
-
-OBJC_EXPORT Class objc_allocateClassPair(Class superclass, const char *name,
- size_t extraBytes)
- __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
-OBJC_EXPORT void objc_registerClassPair(Class cls)
- __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
-OBJC_EXPORT Class objc_duplicateClass(Class original, const char *name,
- size_t extraBytes)
- __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
-OBJC_EXPORT void objc_disposeClassPair(Class cls)
+/**
+ * Returns a description of the layout of weak Ivars for a given class.
+ *
+ * @param cls The class to inspect.
+ *
+ * @return A description of the layout of the weak \c Ivars for \e cls.
+ */
+OBJC_EXPORT const uint8_t *class_getWeakIvarLayout(Class cls)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+/**
+ * Adds a new method to a class with a given name and implementation.
+ *
+ * @param cls The class to which to add a method.
+ * @param name A selector that specifies the name of the method being added.
+ * @param imp A function which is the implementation of the new method. The function must take at least two arguments—self and _cmd.
+ * @param types An array of characters that describe the types of the arguments to the method.
+ *
+ * @return YES if the method was added successfully, otherwise NO
+ * (for example, the class already contains a method implementation with that name).
+ *
+ * @note class_addMethod will add an override of a superclass's implementation,
+ * but will not replace an existing implementation in this class.
+ * To change an existing implementation, use method_setImplementation.
+ */
OBJC_EXPORT BOOL class_addMethod(Class cls, SEL name, IMP imp,
const char *types)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
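+
+/*
+ * A minimal sketch (MyClass, the greet selector, and greetIMP are
+ * hypothetical names):
+ *
+ *   // Every IMP receives the receiver and the selector as its first two arguments.
+ *   static id greetIMP(id self, SEL _cmd) {
+ *       return @"hello";
+ *   }
+ *
+ *   // "@@:" encodes: returns an object (@), takes self (@) and _cmd (:).
+ *   class_addMethod([MyClass class], sel_registerName("greet"),
+ *                   (IMP)greetIMP, "@@:");
+ */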
+
+/**
+ * Replaces the implementation of a method for a given class.
+ *
+ * @param cls The class you want to modify.
+ * @param name A selector that identifies the method whose implementation you want to replace.
+ * @param imp The new implementation for the method identified by name for the class identified by cls.
+ * @param types An array of characters that describe the types of the arguments to the method.
+ * Since the function must take at least two arguments—self and _cmd, the second and third characters
+ * must be “@:” (the first character is the return type).
+ *
+ * @return The previous implementation of the method identified by \e name for the class identified by \e cls.
+ *
+ * @note This function behaves in two different ways:
+ * - If the method identified by \e name does not yet exist, it is added as if \c class_addMethod were called.
+ * The type encoding specified by \e types is used as given.
+ * - If the method identified by \e name does exist, its \c IMP is replaced as if \c method_setImplementation were called.
+ * The type encoding specified by \e types is ignored.
+ */
OBJC_EXPORT IMP class_replaceMethod(Class cls, SEL name, IMP imp,
const char *types)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Adds a new instance variable to a class.
+ *
+ * @return YES if the instance variable was added successfully, otherwise NO
+ * (for example, the class already contains an instance variable with that name).
+ *
+ * @note This function may only be called after objc_allocateClassPair and before objc_registerClassPair.
+ * Adding an instance variable to an existing class is not supported.
+ * @note The class must not be a metaclass. Adding an instance variable to a metaclass is not supported.
+ * @note The instance variable's minimum alignment in bytes is 1<<alignment. The minimum alignment of an instance
+ * variable depends on the ivar's type and the machine architecture.
+ * For variables of any pointer type, pass log2(sizeof(pointer_type)).
+ */
OBJC_EXPORT BOOL class_addIvar(Class cls, const char *name, size_t size,
uint8_t alignment, const char *types)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
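+
+/*
+ * A sketch of the alignment convention, for a class that is still between
+ * objc_allocateClassPair and objc_registerClassPair (cls and the ivar name
+ * are hypothetical; log2 comes from <math.h>):
+ *
+ *   // Alignment is passed as a power of two: a pointer-sized ivar uses
+ *   // log2(sizeof(id)), i.e. 3 on 64-bit and 2 on 32-bit.
+ *   class_addIvar(cls, "_target", sizeof(id),
+ *                 (uint8_t)log2(sizeof(id)), @encode(id));
+ */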
+
+/**
+ * Adds a protocol to a class.
+ *
+ * @param cls The class to modify.
+ * @param protocol The protocol to add to \e cls.
+ *
+ * @return \c YES if the method was added successfully, otherwise \c NO
+ * (for example, the class already conforms to that protocol).
+ */
OBJC_EXPORT BOOL class_addProtocol(Class cls, Protocol *protocol)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Adds a property to a class.
+ *
+ * @param cls The class to modify.
+ * @param name The name of the property.
+ * @param attributes An array of property attributes.
+ * @param attributeCount The number of attributes in \e attributes.
+ *
+ * @return \c YES if the property was added successfully, otherwise \c NO
+ * (for example, the class already has that property).
+ */
OBJC_EXPORT BOOL class_addProperty(Class cls, const char *name, const objc_property_attribute_t *attributes, unsigned int attributeCount)
__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3);
+
+/**
+ * Replace a property of a class.
+ *
+ * @param cls The class to modify.
+ * @param name The name of the property.
+ * @param attributes An array of property attributes.
+ * @param attributeCount The number of attributes in \e attributes.
+ */
OBJC_EXPORT void class_replaceProperty(Class cls, const char *name, const objc_property_attribute_t *attributes, unsigned int attributeCount)
__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3);
+
+/**
+ * Sets the Ivar layout for a given class.
+ *
+ * @param cls The class to modify.
+ * @param layout The layout of the \c Ivars for \e cls.
+ */
OBJC_EXPORT void class_setIvarLayout(Class cls, const uint8_t *layout)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Sets the layout for weak Ivars for a given class.
+ *
+ * @param cls The class to modify.
+ * @param layout The layout of the weak Ivars for \e cls.
+ */
OBJC_EXPORT void class_setWeakIvarLayout(Class cls, const uint8_t *layout)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+/**
+ * Used by CoreFoundation's toll-free bridging.
+ * Return the id of the named class.
+ *
+ * @return The id of the named class, or an uninitialized class
+ * structure that will be used for the class when and if it does
+ * get loaded.
+ *
+ * @warning Do not call this function yourself.
+ */
+OBJC_EXPORT Class objc_getFutureClass(const char *name)
+ __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0)
+ OBJC_ARC_UNAVAILABLE;
+
+/**
+ * Used by CoreFoundation's toll-free bridging.
+ *
+ * @warning Do not call this function yourself.
+ */
+OBJC_EXPORT void objc_setFutureClass(Class cls, const char *name)
+ __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0)
+ OBJC_ARC_UNAVAILABLE;
+
+
+/* Instantiating Classes */
+
+/**
+ * Creates an instance of a class, allocating memory for the class in the
+ * default malloc memory zone.
+ *
+ * @param cls The class that you wish to allocate an instance of.
+ * @param extraBytes An integer indicating the number of extra bytes to allocate.
+ * The additional bytes can be used to store additional instance variables beyond
+ * those defined in the class definition.
+ *
+ * @return An instance of the class \e cls.
+ */
+OBJC_EXPORT id class_createInstance(Class cls, size_t extraBytes)
+ __OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0)
+ OBJC_ARC_UNAVAILABLE;
+
+/**
+ * Creates an instance of a class at the specific location provided.
+ *
+ * @param cls The class that you wish to allocate an instance of.
+ * @param bytes The location at which to allocate an instance of \e cls.
+ * Must point to at least \c class_getInstanceSize(cls) bytes of well-aligned,
+ * zero-filled memory.
+ *
+ * @return \e bytes on success, \c nil otherwise. (For example, \e cls or \e bytes
+ * might be \c nil)
+ *
+ * @note \c class_createInstance and \c class_createInstances preflight this.
+ *
+ * @see class_createInstance
+ */
+OBJC_EXPORT id objc_constructInstance(Class cls, void *bytes)
+ __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_0)
+ OBJC_ARC_UNAVAILABLE;
+
+/**
+ * Destroys an instance of a class without freeing memory and removes any
+ * associated references this instance might have had.
+ *
+ * @param obj The class instance to destroy.
+ *
+ * @return \e obj. Does nothing if \e obj is nil.
+ *
+ * @warning GC does not call this. If you edit this, also edit finalize.
+ *
+ * @note CF and other clients do call this under GC.
+ */
+OBJC_EXPORT void *objc_destructInstance(id obj)
+ __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_0)
+ OBJC_ARC_UNAVAILABLE;
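+
+/*
+ * A sketch of constructing an object in caller-managed memory (MRC only,
+ * per OBJC_ARC_UNAVAILABLE above; variable names are illustrative):
+ *
+ *   size_t size = class_getInstanceSize([NSObject class]);
+ *   void *bytes = calloc(1, size);                  // well-aligned, zero-filled
+ *   id obj = objc_constructInstance([NSObject class], bytes);
+ *   // ... use obj ...
+ *   objc_destructInstance(obj);                     // tear down, but do not free
+ *   free(bytes);
+ */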
+
+
+/* Adding Classes */
+
+/**
+ * Creates a new class and metaclass.
+ *
+ * @param superclass The class to use as the new class's superclass, or \c Nil to create a new root class.
+ * @param name The string to use as the new class's name. The string will be copied.
+ * @param extraBytes The number of bytes to allocate for indexed ivars at the end of
+ * the class and metaclass objects. This should usually be \c 0.
+ *
+ * @return The new class, or Nil if the class could not be created (for example, the desired name is already in use).
+ *
+ * @note You can get a pointer to the new metaclass by calling \c object_getClass(newClass).
+ * @note To create a new class, start by calling \c objc_allocateClassPair.
+ * Then set the class's attributes with functions like \c class_addMethod and \c class_addIvar.
+ * When you are done building the class, call \c objc_registerClassPair. The new class is now ready for use.
+ * @note Instance methods and instance variables should be added to the class itself.
+ * Class methods should be added to the metaclass.
+ */
+OBJC_EXPORT Class objc_allocateClassPair(Class superclass, const char *name,
+ size_t extraBytes)
+ __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Registers a class that was allocated using \c objc_allocateClassPair.
+ *
+ * @param cls The class you want to register.
+ */
+OBJC_EXPORT void objc_registerClassPair(Class cls)
+ __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
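+
+/*
+ * A minimal end-to-end sketch of the workflow described above (the class
+ * name, selector, and IMP are hypothetical; objc_msgSend is declared in
+ * <objc/message.h>):
+ *
+ *   static id sayHello(id self, SEL _cmd) { return @"hello"; }
+ *
+ *   Class cls = objc_allocateClassPair([NSObject class], "Greeter", 0);
+ *   class_addMethod(cls, sel_registerName("hello"), (IMP)sayHello, "@@:");
+ *   objc_registerClassPair(cls);                    // now ready for use
+ *
+ *   id greeter = [[cls alloc] init];
+ *   id greeting = ((id (*)(id, SEL))objc_msgSend)(greeter, sel_registerName("hello"));
+ */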
+
+/**
+ * Used by Foundation's Key-Value Observing.
+ *
+ * @warning Do not call this function yourself.
+ */
+OBJC_EXPORT Class objc_duplicateClass(Class original, const char *name, size_t extraBytes)
+ __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Destroy a class and its associated metaclass.
+ *
+ * @param cls The class to be destroyed. It must have been allocated with
+ * \c objc_allocateClassPair.
+ *
+ * @warning Do not call if instances of this class or a subclass exist.
+ */
+OBJC_EXPORT void objc_disposeClassPair(Class cls)
+ __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+
+/* Working with Methods */
+/**
+ * Returns the name of a method.
+ *
+ * @param m The method to inspect.
+ *
+ * @return A pointer of type SEL.
+ *
+ * @note To get the method name as a C string, call \c sel_getName(method_getName(method)).
+ */
OBJC_EXPORT SEL method_getName(Method m)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Returns the implementation of a method.
+ *
+ * @param m The method to inspect.
+ *
+ * @return A function pointer of type IMP.
+ */
OBJC_EXPORT IMP method_getImplementation(Method m)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Returns a string describing a method's parameter and return types.
+ *
+ * @param m The method to inspect.
+ *
+ * @return A C string. The string may be \c NULL.
+ */
OBJC_EXPORT const char *method_getTypeEncoding(Method m)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+/**
+ * Returns the number of arguments accepted by a method.
+ *
+ * @param m A pointer to a \c Method data structure. Pass the method in question.
+ *
+ * @return An integer containing the number of arguments accepted by the given method.
+ */
OBJC_EXPORT unsigned int method_getNumberOfArguments(Method m)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
+
+/**
+ * Returns a string describing a method's return type.
+ *
+ * @param m The method to inspect.
+ *
+ * @return A C string describing the return type. You must free the string with \c free().
+ */
OBJC_EXPORT char *method_copyReturnType(Method m)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Returns a string describing a single parameter type of a method.
+ *
+ * @param m The method to inspect.
+ * @param index The index of the parameter to inspect.
+ *
+ * @return A C string describing the type of the parameter at index \e index, or \c NULL
+ * if the method has no parameter at index \e index. You must free the string with \c free().
+ */
OBJC_EXPORT char *method_copyArgumentType(Method m, unsigned int index)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
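+
+/*
+ * A short introspection sketch (NSString and its selector are used only as a
+ * familiar example):
+ *
+ *   Method m = class_getInstanceMethod([NSString class],
+ *                                      @selector(substringFromIndex:));
+ *   char *ret = method_copyReturnType(m);                 // e.g. "@"
+ *   unsigned int nargs = method_getNumberOfArguments(m);  // self + _cmd + index = 3
+ *   printf("%s returns %s, takes %u args\n",
+ *          sel_getName(method_getName(m)), ret, nargs);
+ *   free(ret);
+ */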
+
+/**
+ * Returns by reference a string describing a method's return type.
+ *
+ * @param m The method you want to inquire about.
+ * @param dst The reference string to store the description.
+ * @param dst_len The maximum number of characters that can be stored in \e dst.
+ *
+ * @note The method's return type string is copied to \e dst.
+ * \e dst is filled as if \c strncpy(dst, return_type, dst_len) were called.
+ */
OBJC_EXPORT void method_getReturnType(Method m, char *dst, size_t dst_len)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Returns by reference a string describing a single parameter type of a method.
+ *
+ * @param m The method you want to inquire about.
+ * @param index The index of the parameter you want to inquire about.
+ * @param dst The reference string to store the description.
+ * @param dst_len The maximum number of characters that can be stored in \e dst.
+ *
+ * @note The parameter type string is copied to \e dst. \e dst is filled as if \c strncpy(dst, parameter_type, dst_len)
+ * were called. If the method contains no parameter with that index, \e dst is filled as
+ * if \c strncpy(dst, "", dst_len) were called.
+ */
OBJC_EXPORT void method_getArgumentType(Method m, unsigned int index,
char *dst, size_t dst_len)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
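+
+/**
+ * Returns the method description structure for a method.
+ *
+ * @param m The method to inspect.
+ *
+ * @return A pointer to an \c objc_method_description structure describing the
+ * method's name (selector) and type encoding.
+ */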
OBJC_EXPORT struct objc_method_description *method_getDescription(Method m)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+/**
+ * Sets the implementation of a method.
+ *
+ * @param m The method for which to set an implementation.
+ * @param imp The implementation to set for this method.
+ *
+ * @return The previous implementation of the method.
+ */
OBJC_EXPORT IMP method_setImplementation(Method m, IMP imp)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Exchanges the implementations of two methods.
+ *
+ * @param m1 Method to exchange with second method.
+ * @param m2 Method to exchange with first method.
+ *
+ * @note This is an atomic version of the following:
+ * \code
+ * IMP imp1 = method_getImplementation(m1);
+ * IMP imp2 = method_getImplementation(m2);
+ * method_setImplementation(m1, imp2);
+ * method_setImplementation(m2, imp1);
+ * \endcode
+ */
OBJC_EXPORT void method_exchangeImplementations(Method m1, Method m2)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/* Working with Instance Variables */
+
+/**
+ * Returns the name of an instance variable.
+ *
+ * @param v The instance variable you want to enquire about.
+ *
+ * @return A C string containing the instance variable's name.
+ */
OBJC_EXPORT const char *ivar_getName(Ivar v)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Returns the type string of an instance variable.
+ *
+ * @param v The instance variable you want to enquire about.
+ *
+ * @return A C string containing the instance variable's type encoding.
+ *
+ * @note For possible values, see Objective-C Runtime Programming Guide > Type Encodings.
+ */
OBJC_EXPORT const char *ivar_getTypeEncoding(Ivar v)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Returns the offset of an instance variable.
+ *
+ * @param v The instance variable you want to enquire about.
+ *
+ * @return The offset of \e v.
+ *
+ * @note For instance variables of type \c id or other object types, call \c object_getIvar
+ * and \c object_setIvar instead of using this offset to access the instance variable data directly.
+ */
OBJC_EXPORT ptrdiff_t ivar_getOffset(Ivar v)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/* Working with Properties */
+
+/**
+ * Returns the name of a property.
+ *
+ * @param property The property you want to inquire about.
+ *
+ * @return A C string containing the property's name.
+ */
OBJC_EXPORT const char *property_getName(objc_property_t property)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Returns the attribute string of a property.
+ *
+ * @param property A property.
+ *
+ * @return A C string containing the property's attributes.
+ *
+ * @note The format of the attribute string is described in Declared Properties in Objective-C Runtime Programming Guide.
+ */
OBJC_EXPORT const char *property_getAttributes(objc_property_t property)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Returns an array of property attributes for a property.
+ *
+ * @param property The property whose attributes you want copied.
+ * @param outCount The number of attributes returned in the array.
+ *
+ * @return An array of property attributes. The array must be freed by the caller with \c free().
+ */
OBJC_EXPORT objc_property_attribute_t *property_copyAttributeList(objc_property_t property, unsigned int *outCount)
__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3);
+
+/**
+ * Returns the value of a property attribute given the attribute name.
+ *
+ * @param property The property whose attribute value you are interested in.
+ * @param attributeName C string representing the attribute name.
+ *
+ * @return The value string of the attribute \e attributeName if it exists in
+ * \e property, \c nil otherwise.
+ */
OBJC_EXPORT char *property_copyAttributeValue(objc_property_t property, const char *attributeName)
__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3);
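+
+/*
+ * A sketch of reading property metadata (MyClass and its "name" property are
+ * hypothetical; the attribute string shown is typical of a nonatomic, copy
+ * NSString property):
+ *
+ *   objc_property_t prop = class_getProperty([MyClass class], "name");
+ *   printf("%s\n", property_getAttributes(prop));          // e.g. T@"NSString",C,N,V_name
+ *   char *type = property_copyAttributeValue(prop, "T");   // e.g. @"NSString"
+ *   free(type);
+ */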
+
+/* Working with Protocols */
+
+/**
+ * Returns a specified protocol.
+ *
+ * @param name The name of a protocol.
+ *
+ * @return The protocol named \e name, or \c NULL if no protocol named \e name could be found.
+ *
+ * @note This function acquires the runtime lock.
+ */
+OBJC_EXPORT Protocol *objc_getProtocol(const char *name)
+ __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Returns an array of all the protocols known to the runtime.
+ *
+ * @param outCount Upon return, contains the number of protocols in the returned array.
+ *
+ * @return A C array of all the protocols known to the runtime. The array contains \c *outCount
+ * pointers followed by a \c NULL terminator. You must free the list with \c free().
+ *
+ * @note This function acquires the runtime lock.
+ */
+OBJC_EXPORT Protocol * __unsafe_unretained *objc_copyProtocolList(unsigned int *outCount)
+ __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Returns a Boolean value that indicates whether one protocol conforms to another protocol.
+ *
+ * @param proto A protocol.
+ * @param other A protocol.
+ *
+ * @return \c YES if \e proto conforms to \e other, otherwise \c NO.
+ *
+ * @note One protocol can incorporate other protocols using the same syntax
+ * that classes use to adopt a protocol:
+ * \code
+ * @protocol ProtocolName < protocol list >
+ * \endcode
+ * All the protocols listed between angle brackets are considered part of the ProtocolName protocol.
+ */
OBJC_EXPORT BOOL protocol_conformsToProtocol(Protocol *proto, Protocol *other)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Returns a Boolean value that indicates whether two protocols are equal.
+ *
+ * @param proto A protocol.
+ * @param other A protocol.
+ *
+ * @return \c YES if \e proto is the same as \e other, otherwise \c NO.
+ */
OBJC_EXPORT BOOL protocol_isEqual(Protocol *proto, Protocol *other)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Returns the name of a protocol.
+ *
+ * @param p A protocol.
+ *
+ * @return The name of the protocol \e p as a C string.
+ */
OBJC_EXPORT const char *protocol_getName(Protocol *p)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Returns a method description structure for a specified method of a given protocol.
+ *
+ * @param p A protocol.
+ * @param aSel A selector.
+ * @param isRequiredMethod A Boolean value that indicates whether aSel is a required method.
+ * @param isInstanceMethod A Boolean value that indicates whether aSel is an instance method.
+ *
+ * @return An \c objc_method_description structure that describes the method specified by \e aSel,
+ * \e isRequiredMethod, and \e isInstanceMethod for the protocol \e p.
+ * If the protocol does not contain the specified method, returns an \c objc_method_description structure
+ * with the value \c {NULL, \c NULL}.
+ *
+ * @note Methods in other protocols adopted by this protocol are not included.
+ */
OBJC_EXPORT struct objc_method_description protocol_getMethodDescription(Protocol *p, SEL aSel, BOOL isRequiredMethod, BOOL isInstanceMethod)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Returns an array of method descriptions of methods meeting a given specification for a given protocol.
+ *
+ * @param p A protocol.
+ * @param isRequiredMethod A Boolean value that indicates whether returned methods should
+ * be required methods (pass YES to specify required methods).
+ * @param isInstanceMethod A Boolean value that indicates whether returned methods should
+ * be instance methods (pass YES to specify instance methods).
+ * @param outCount Upon return, contains the number of method description structures in the returned array.
+ *
+ * @return A C array of \c objc_method_description structures containing the names and types of \e p's methods
+ * specified by \e isRequiredMethod and \e isInstanceMethod. The array contains \c *outCount pointers followed
+ * by a \c NULL terminator. You must free the list with \c free().
+ * If the protocol declares no methods that meet the specification, \c NULL is returned and \c *outCount is 0.
+ *
+ * @note Methods in other protocols adopted by this protocol are not included.
+ */
OBJC_EXPORT struct objc_method_description *protocol_copyMethodDescriptionList(Protocol *p, BOOL isRequiredMethod, BOOL isInstanceMethod, unsigned int *outCount)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Returns the specified property of a given protocol.
+ *
+ * @param proto A protocol.
+ * @param name The name of a property.
+ * @param isRequiredProperty A Boolean value that indicates whether name is a required property.
+ * @param isInstanceProperty A Boolean value that indicates whether name is an instance property.
+ *
+ * @return The property specified by \e name, \e isRequiredProperty, and \e isInstanceProperty for \e proto,
+ * or \c NULL if none of \e proto's properties meets the specification.
+ */
OBJC_EXPORT objc_property_t protocol_getProperty(Protocol *proto, const char *name, BOOL isRequiredProperty, BOOL isInstanceProperty)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Returns an array of the properties declared by a protocol.
+ *
+ * @param proto A protocol.
+ * @param outCount Upon return, contains the number of elements in the returned array.
+ *
+ * @return A C array of pointers of type \c objc_property_t describing the properties declared by \e proto.
+ * Any properties declared by other protocols adopted by this protocol are not included. The array contains
+ * \c *outCount pointers followed by a \c NULL terminator. You must free the array with \c free().
+ * If the protocol declares no properties, \c NULL is returned and \c *outCount is \c 0.
+ */
OBJC_EXPORT objc_property_t *protocol_copyPropertyList(Protocol *proto, unsigned int *outCount)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Returns an array of the protocols adopted by a protocol.
+ *
+ * @param proto A protocol.
+ * @param outCount Upon return, contains the number of elements in the returned array.
+ *
+ * @return A C array of protocols adopted by \e proto. The array contains \e *outCount pointers
+ * followed by a \c NULL terminator. You must free the array with \c free().
+ * If the protocol adopts no protocols, \c NULL is returned and \c *outCount is \c 0.
+ */
OBJC_EXPORT Protocol * __unsafe_unretained *protocol_copyProtocolList(Protocol *proto, unsigned int *outCount)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+/**
+ * Creates a new protocol instance that cannot be used until registered with
+ * \c objc_registerProtocol()
+ *
+ * @param name The name of the protocol to create.
+ *
+ * @return The Protocol instance on success, \c nil if a protocol
+ * with the same name already exists.
+ * @note There is no dispose method for this.
+ */
OBJC_EXPORT Protocol *objc_allocateProtocol(const char *name)
__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3);
+
+/**
+ * Registers a newly constructed protocol with the runtime. The protocol
+ * will be ready for use and is immutable after this.
+ *
+ * @param proto The protocol you want to register.
+ */
OBJC_EXPORT void objc_registerProtocol(Protocol *proto)
__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3);
+
+/**
+ * Adds a method to a protocol. The protocol must be under construction.
+ *
+ * @param proto The protocol to add a method to.
+ * @param name The name of the method to add.
+ * @param types A C string that represents the method signature.
+ * @param isRequiredMethod YES if the method is not an optional method.
+ * @param isInstanceMethod YES if the method is an instance method.
+ */
OBJC_EXPORT void protocol_addMethodDescription(Protocol *proto, SEL name, const char *types, BOOL isRequiredMethod, BOOL isInstanceMethod)
__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3);
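+
+/*
+ * A minimal sketch of building a protocol at runtime (the protocol and
+ * selector names are hypothetical):
+ *
+ *   Protocol *p = objc_allocateProtocol("MyDynamicProtocol");
+ *   if (p) {
+ *       // required instance method; "v@:" = returns void, takes self and _cmd
+ *       protocol_addMethodDescription(p, sel_registerName("refresh"),
+ *                                     "v@:", YES, YES);
+ *       objc_registerProtocol(p);   // the protocol is immutable from here on
+ *   }
+ */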
+
+/**
+ * Adds an incorporated protocol to another protocol. The protocol being
+ * added to must still be under construction, while the additional protocol
+ * must be already constructed.
+ *
+ * @param proto The protocol you want to add to; it must be under construction.
+ * @param addition The protocol you want to incorporate into \e proto; it must already be registered.
+ */
OBJC_EXPORT void protocol_addProtocol(Protocol *proto, Protocol *addition)
__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3);
+
+/**
+ * Adds a property to a protocol. The protocol must be under construction.
+ *
+ * @param proto The protocol to add a property to.
+ * @param name The name of the property.
+ * @param attributes An array of property attributes.
+ * @param attributeCount The number of attributes in \e attributes.
+ * @param isRequiredProperty YES if the property (accessor methods) is not optional.
+ * @param isInstanceProperty YES if the property's accessor methods are instance methods.
+ * This is the only case allowed for a property; as a result, setting this to NO will
+ * not add the property to the protocol at all.
+ */
OBJC_EXPORT void protocol_addProperty(Protocol *proto, const char *name, const objc_property_attribute_t *attributes, unsigned int attributeCount, BOOL isRequiredProperty, BOOL isInstanceProperty)
__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3);
+
+/* Working with Libraries */
+
+/**
+ * Returns the names of all the loaded Objective-C frameworks and dynamic
+ * libraries.
+ *
+ * @param outCount The number of names returned.
+ *
+ * @return An array of C strings holding the image names. The caller must free the array with \c free().
+ */
OBJC_EXPORT const char **objc_copyImageNames(unsigned int *outCount)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Returns the dynamic library name a class originated from.
+ *
+ * @param cls The class you are inquiring about.
+ *
+ * @return The name of the library containing this class.
+ */
OBJC_EXPORT const char *class_getImageName(Class cls)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Returns the names of all the classes within a library.
+ *
+ * @param image The library or framework you are inquiring about.
+ * @param outCount The number of class names returned.
+ *
+ * @return An array of C strings representing the class names.
+ */
OBJC_EXPORT const char **objc_copyClassNamesForImage(const char *image,
unsigned int *outCount)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/* Working with Selectors */
+
+/**
+ * Returns the name of the method specified by a given selector.
+ *
+ * @param sel A pointer of type \c SEL. Pass the selector whose name you wish to determine.
+ *
+ * @return A C string indicating the name of the selector.
+ */
OBJC_EXPORT const char *sel_getName(SEL sel)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
+
+/**
+ * Registers a method name with the Objective-C runtime system.
+ *
+ * @param str A pointer to a C string. Pass the name of the method you wish to register.
+ *
+ * @return A pointer of type SEL specifying the selector for the named method.
+ *
+ * @note The implementation of this method is identical to the implementation of \c sel_registerName.
+ * @note Prior to OS X version 10.0, this method tried to find the selector mapped to the given name
+ * and returned \c NULL if the selector was not found. This was changed for safety, because it was
+ * observed that many of the callers of this function did not check the return value for \c NULL.
+ */
OBJC_EXPORT SEL sel_getUid(const char *str)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
+
+/**
+ * Registers a method with the Objective-C runtime system, maps the method
+ * name to a selector, and returns the selector value.
+ *
+ * @param str A pointer to a C string. Pass the name of the method you wish to register.
+ *
+ * @return A pointer of type SEL specifying the selector for the named method.
+ *
+ * @note You must register a method name with the Objective-C runtime system to obtain the
+ * method’s selector before you can add the method to a class definition. If the method name
+ * has already been registered, this function simply returns the selector.
+ */
OBJC_EXPORT SEL sel_registerName(const char *str)
__OSX_AVAILABLE_STARTING(__MAC_10_0, __IPHONE_2_0);
+
+/**
+ * Returns a Boolean value that indicates whether two selectors are equal.
+ *
+ * @param lhs The selector to compare with rhs.
+ * @param rhs The selector to compare with lhs.
+ *
+ * @return \c YES if \e lhs and \e rhs are equal, otherwise \c NO.
+ *
+ * @note sel_isEqual is equivalent to ==.
+ */
OBJC_EXPORT BOOL sel_isEqual(SEL lhs, SEL rhs)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
-OBJC_EXPORT void objc_enumerationMutation(id)
+
+/* Objective-C Language Features */
+
+/**
+ * This function is inserted by the compiler when a mutation
+ * is detected during a foreach iteration. It gets called
+ * when a mutation occurs, and the enumerationMutationHandler
+ * is enacted if it is set up. A fatal error occurs if a handler is not set up.
+ *
+ * @param obj The object being mutated.
+ *
+ */
+OBJC_EXPORT void objc_enumerationMutation(id obj)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/**
+ * Sets the current mutation handler.
+ *
+ * @param handler Function pointer to the new mutation handler.
+ */
OBJC_EXPORT void objc_setEnumerationMutationHandler(void (*handler)(id))
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+/**
+ * Set the function to be called by objc_msgForward.
+ *
+ * @param fwd Function to be jumped to by objc_msgForward.
+ * @param fwd_stret Function to be jumped to by objc_msgForward_stret.
+ *
+ * @see message.h::_objc_msgForward
+ */
OBJC_EXPORT void objc_setForwardHandler(void *fwd, void *fwd_stret)
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+/**
+ * Creates a pointer to a function that will call the block
+ * when the method is called.
+ *
+ * @param block The block that implements this method. Its signature should
+ * be: method_return_type ^(id self, method_args...).
+ * The selector is not available as a parameter to this block.
+ * The block is copied with \c Block_copy().
+ *
+ * @return The IMP that calls this block. Must be disposed of with
+ * \c imp_removeBlock.
+ */
OBJC_EXPORT IMP imp_implementationWithBlock(id block)
__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3);
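+
+/*
+ * A minimal sketch (MyClass and the greeting selector are hypothetical).
+ * Note that the block receives self but, unlike a regular IMP, not _cmd:
+ *
+ *   IMP imp = imp_implementationWithBlock(^NSString *(id self) {
+ *       return @"hello from a block";
+ *   });
+ *   class_addMethod([MyClass class], sel_registerName("greeting"), imp, "@@:");
+ */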
+
+/**
+ * Return the block associated with an IMP that was created using
+ * \c imp_implementationWithBlock.
+ *
+ * @param anImp The IMP that calls this block.
+ *
+ * @return The block called by \e anImp.
+ */
OBJC_EXPORT id imp_getBlock(IMP anImp)
__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3);
+
+/**
+ * Disassociates a block from an IMP that was created using
+ * \c imp_implementationWithBlock and releases the copy of the
+ * block that was created.
+ *
+ * @param anImp An IMP that was created using \c imp_implementationWithBlock.
+ *
+ * @return YES if the block was released successfully, NO otherwise.
+ * (For example, the block might not have been used to create an IMP previously).
+ */
OBJC_EXPORT BOOL imp_removeBlock(IMP anImp)
__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3);
+/**
+ * This loads the object referenced by a weak pointer and returns it, after
+ * retaining and autoreleasing the object to ensure that it stays alive
+ * long enough for the caller to use it. This function would be used
+ * anywhere a __weak variable is used in an expression.
+ *
+ * @param location The address of the weak pointer.
+ *
+ * @return The object pointed to by \e location, or \c nil if \e location is \c nil.
+ */
+OBJC_EXPORT id objc_loadWeak(id *location)
+ __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_5_0);
+
+/**
+ * This function stores a new value into a __weak variable. It would
+ * be used anywhere a __weak variable is the target of an assignment.
+ *
+ * @param location The address of the weak pointer itself.
+ * @param obj The new object the weak pointer should now point to.
+ *
+ * @return The value stored into \e location, i.e. \e obj.
+ */
+OBJC_EXPORT id objc_storeWeak(id *location, id obj)
+ __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_5_0);
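+
+/*
+ * A sketch of manual weak-reference bookkeeping (obj is assumed to be a
+ * valid object):
+ *
+ *   id weakRef = nil;
+ *   objc_storeWeak(&weakRef, obj);        // register the weak reference
+ *   id strong = objc_loadWeak(&weakRef);  // nil once obj has been deallocated
+ *   objc_storeWeak(&weakRef, nil);        // clear before weakRef goes out of scope
+ */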
+
-/* Associated Object support. */
+/* Associative References */
-/* objc_setAssociatedObject() options */
+/**
+ * Policies related to associative references.
+ * These are options to objc_setAssociatedObject()
+ */
enum {
- OBJC_ASSOCIATION_ASSIGN = 0,
- OBJC_ASSOCIATION_RETAIN_NONATOMIC = 1,
- OBJC_ASSOCIATION_COPY_NONATOMIC = 3,
- OBJC_ASSOCIATION_RETAIN = 01401,
- OBJC_ASSOCIATION_COPY = 01403
+ OBJC_ASSOCIATION_ASSIGN = 0, /**< Specifies a weak reference to the associated object. */
+ OBJC_ASSOCIATION_RETAIN_NONATOMIC = 1, /**< Specifies a strong reference to the associated object.
+ * The association is not made atomically. */
+ OBJC_ASSOCIATION_COPY_NONATOMIC = 3, /**< Specifies that the associated object is copied.
+ * The association is not made atomically. */
+ OBJC_ASSOCIATION_RETAIN = 01401, /**< Specifies a strong reference to the associated object.
+ * The association is made atomically. */
+ OBJC_ASSOCIATION_COPY = 01403 /**< Specifies that the associated object is copied.
+ * The association is made atomically. */
};
+
+/// Type to specify the behavior of an association.
typedef uintptr_t objc_AssociationPolicy;
+/**
+ * Sets an associated value for a given object using a given key and association policy.
+ *
+ * @param object The source object for the association.
+ * @param key The key for the association.
+ * @param value The value to associate with the key \e key for \e object. Pass \c nil to clear an existing association.
+ * @param policy The policy for the association. For possible values, see “Associative Object Behaviors.”
+ *
+ * @see objc_getAssociatedObject
+ * @see objc_removeAssociatedObjects
+ */
OBJC_EXPORT void objc_setAssociatedObject(id object, const void *key, id value, objc_AssociationPolicy policy)
__OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_1);
+
+/**
+ * Returns the value associated with a given object for a given key.
+ *
+ * @param object The source object for the association.
+ * @param key The key for the association.
+ *
+ * @return The value associated with the key \e key for \e object.
+ *
+ * @see objc_setAssociatedObject
+ */
OBJC_EXPORT id objc_getAssociatedObject(id object, const void *key)
__OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_1);
+
+/**
+ * Removes all associations for a given object.
+ *
+ * @param object An object that maintains associated objects.
+ *
+ * @note The main purpose of this function is to make it easy to return an object
+ * to a "pristine state”. You should not use this function for general removal of
+ * associations from objects, since it also removes associations that other clients
+ * may have added to the object. Typically you should use \c objc_setAssociatedObject
+ * with a nil value to clear an association.
+ *
+ * @see objc_setAssociatedObject
+ * @see objc_getAssociatedObject
+ */
OBJC_EXPORT void objc_removeAssociatedObjects(id object)
__OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_1);
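+
+/*
+ * A sketch of attaching, reading, and clearing one association (the key
+ * variable and tag string are illustrative):
+ *
+ *   static char kTagKey;                  // its address serves as the key
+ *
+ *   objc_setAssociatedObject(obj, &kTagKey, @"some tag",
+ *                            OBJC_ASSOCIATION_RETAIN_NONATOMIC);
+ *   NSString *tag = objc_getAssociatedObject(obj, &kTagKey);
+ *   objc_setAssociatedObject(obj, &kTagKey, nil,
+ *                            OBJC_ASSOCIATION_ASSIGN);   // clears just this key
+ */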
-// API to be called by clients of objects
-
-OBJC_EXPORT id objc_loadWeak(id *location)
- __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_5_0);
-// returns value stored (either obj or NULL)
-OBJC_EXPORT id objc_storeWeak(id *location, id obj)
- __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_5_0);
-
-
#define _C_ID '@'
#define _C_CLASS '#'
#define _C_SEL ':'
OBJC_EXPORT unsigned int method_getSizeOfArguments(Method m) OBJC2_UNAVAILABLE;
OBJC_EXPORT unsigned method_getArgumentInfo(struct objc_method *m, int arg, const char **type, int *offset) OBJC2_UNAVAILABLE;
-OBJC_EXPORT BOOL class_respondsToMethod(Class, SEL) OBJC2_UNAVAILABLE;
-OBJC_EXPORT IMP class_lookupMethod(Class, SEL) OBJC2_UNAVAILABLE;
OBJC_EXPORT Class objc_getOrigClass(const char *name) OBJC2_UNAVAILABLE;
#define OBJC_NEXT_METHOD_LIST 1
OBJC_EXPORT struct objc_method_list *class_nextMethodList(Class, void **) OBJC2_UNAVAILABLE;
# default-arch but otherwise comprehensive test for buildbot
buildbot:
- perl test.pl $(MAKEFLAGS) MEM=mrc,arc,gc CC=clang,llvm-gcc-4.2 LANGUAGE=objc,objc++
+ perl test.pl $(MAKEFLAGS) MEM=mrc,arc,gc CC=clang LANGUAGE=objc,objc++
# comprehensive tests
mac macos macosx:
- perl test.pl $(MAKEFLAGS) ARCH=x86_64,i386 MEM=mrc,arc,gc CC=clang,llvm-gcc-4.2 LANGUAGE=objc,objc++
+ perl test.pl $(MAKEFLAGS) ARCH=x86_64,i386 MEM=mrc,arc,gc CC=clang LANGUAGE=objc,objc++
iphonesimulator:
- perl test.pl $(MAKEFLAGS) ARCH=i386 SDK=iphonesimulator MEM=mrc,arc CC=clang,llvm-gcc-4.2 LANGUAGE=objc,objc++
+ perl test.pl $(MAKEFLAGS) ARCH=i386 SDK=iphonesimulator MEM=mrc,arc CC=clang LANGUAGE=objc,objc++
iphoneos:
- perl test.pl $(MAKEFLAGS) ARCH=armv6,armv7 SDK=iphoneos MEM=mrc,arc CC=clang,llvm-gcc-4.2 LANGUAGE=objc,objc++
+ perl test.pl $(MAKEFLAGS) ARCH=armv6,armv7 SDK=iphoneos MEM=mrc,arc CC=clang LANGUAGE=objc,objc++
clean:
@ perl test.pl clean
strcpy(types, "XX"); // types is copied
desclist = protocol_copyMethodDescriptionList(proto, YES, YES, &count);
testassert(desclist && count == 4);
- testassert(desclist[0].name == @selector(ReqInst0));
+ testprintf("%p %p\n", desclist[0].name, @selector(ReqInst0));
+ // testassert(desclist[0].name == @selector(ReqInst0));
testassert(0 == strcmp(desclist[0].types, "@:"));
free(desclist);
desclist = protocol_copyMethodDescriptionList(proto, YES, NO, &count);
--- /dev/null
+// TEST_CONFIG SDK=macos
+// TEST_CFLAGS -framework AppleScriptObjC -framework Foundation
+
+// Verify that trivial AppleScriptObjC apps run with GC off.
+
+#include <Foundation/Foundation.h>
+#include "test.h"
+
+int main()
+{
+ [NSBundle class];
+ testassert(!objc_collectingEnabled());
+ succeed(__FILE__);
+}
--- /dev/null
+// TEST_CFLAGS -framework AppleScriptObjC -framework Foundation
+// TEST_CONFIG MEM=gc
+
+// Verify that non-trivial AppleScriptObjC apps run with GC ON.
+
+#include <Foundation/Foundation.h>
+#include "test.h"
+
+@interface NonTrivial : NSObject @end
+@implementation NonTrivial @end
+
+int main()
+{
+ [NSBundle class];
+ testassert(objc_collectingEnabled());
+ succeed(__FILE__);
+}
// TEST_CRASHES
/*
TEST_RUN_OUTPUT
-objc\[\d+\]: cannot form weak reference to instance \(0x[0-9a-f]+\) of class Crash
+objc\[\d+\]: Cannot form weak reference to instance \(0x[0-9a-f]+\) of class Crash. It is possible that this object was over-released, or is in the process of deallocation.
CRASHED: SIG(ILL|TRAP)
END
*/
}
@end
-int main()
+
+void cycle(Test *obj, Test *obj2)
{
- Test *obj = [Test new];
- Test *obj2 = [Test new];
id result;
testprintf("Weak assignment\n");
testassert(result == NULL);
testassert(weak == NULL);
- testprintf("Weak clear\n");
+ testprintf("Weak re-assignment to NULL\n");
+ result = objc_storeWeak(&weak, NULL);
+ testassert(result == NULL);
+ testassert(weak == NULL);
+ testprintf("Weak move\n");
result = objc_storeWeak(&weak, obj);
testassert(result == obj);
testassert(weak == obj);
+ weak2 = (id)(PAGE_SIZE-16);
+ objc_moveWeak(&weak2, &weak);
+ testassert(weak == nil);
+ testassert(weak2 == obj);
+ objc_storeWeak(&weak2, NULL);
- result = objc_storeWeak(&weak2, obj);
+ testprintf("Weak copy\n");
+ result = objc_storeWeak(&weak, obj);
testassert(result == obj);
+ testassert(weak == obj);
+ weak2 = (id)(PAGE_SIZE-16);
+ objc_copyWeak(&weak2, &weak);
+ testassert(weak == obj);
testassert(weak2 == obj);
+ objc_storeWeak(&weak, NULL);
+ objc_storeWeak(&weak2, NULL);
+
+ testprintf("Weak clear\n");
+
+ id obj3 = [Test new];
+
+ result = objc_storeWeak(&weak, obj3);
+ testassert(result == obj3);
+ testassert(weak == obj3);
+
+ result = objc_storeWeak(&weak2, obj3);
+ testassert(result == obj3);
+ testassert(weak2 == obj3);
did_dealloc = false;
- [obj release];
+ [obj3 release];
testassert(did_dealloc);
testassert(weak == NULL);
testassert(weak2 == NULL);
+}
+
+
+int main()
+{
+ Test *obj = [Test new];
+ Test *obj2 = [Test new];
+ id result;
+
+ for (int i = 0; i < 100000; i++) {
+ if (i == 10) leak_mark();
+ cycle(obj, obj2);
+ }
+ // allow some slop for [Test new] inside cycle()
+ // to land in different side table stripes
+ leak_check(3072);
+
+
+ // rdar://14105994
+ id weaks[8];
+ for (size_t i = 0; i < sizeof(weaks)/sizeof(weaks[0]); i++) {
+ objc_storeWeak(&weaks[i], obj);
+ }
+ for (size_t i = 0; i < sizeof(weaks)/sizeof(weaks[0]); i++) {
+ objc_storeWeak(&weaks[i], nil);
+ }
+
Crash *obj3 = [Crash new];
result = objc_storeWeak(&weak, obj3);
--- /dev/null
+/*
+TEST_CRASHES
+TEST_RUN_OUTPUT
+objc1
+OK: badCache.m
+OR
+crash now
+objc\[\d+\]: Method cache corrupted.*
+objc\[\d+\]: .*
+objc\[\d+\]: .*
+objc\[\d+\]: .*
+objc\[\d+\]: .*
+objc\[\d+\]: Method cache corrupted\.
+CRASHED: SIG(ILL|TRAP)
+END
+*/
+
+
+#include "test.h"
+
+#if !__OBJC2__
+
+int main()
+{
+ fprintf(stderr, "objc1\n");
+ succeed(__FILE__);
+}
+
+#else
+
+#include "testroot.i"
+
+#if __LP64__
+typedef uint32_t mask_t;
+#else
+typedef uint16_t mask_t;
+#endif
+
+struct bucket_t {
+ void *sel;
+ void *imp;
+};
+
+struct cache_t {
+ struct bucket_t *buckets;
+ mask_t mask;
+ mask_t occupied;
+};
+
+struct class_t {
+ void *isa;
+ void *supercls;
+ struct cache_t cache;
+};
+
+@interface Subclass : TestRoot @end
+@implementation Subclass @end
+
+int main()
+{
+ Class cls = [TestRoot class];
+ id obj = [cls new];
+ [obj self];
+
+ // Test objc_msgSend.
+ struct cache_t *cache = &((__bridge struct class_t *)cls)->cache;
+ cache->mask = 0;
+ cache->buckets[0].sel = (void*)~0;
+ cache->buckets[0].imp = (void*)~0;
+ cache->buckets[1].sel = (void*)(uintptr_t)1;
+ cache->buckets[1].imp = (void*)cache->buckets;
+
+ fprintf(stderr, "crash now\n");
+ [obj self];
+
+ fail("should have crashed");
+}
+
+#endif
--- /dev/null
+/*
+TEST_CRASHES
+TEST_RUN_OUTPUT
+objc1
+OK: badCache2.m
+OR
+crash now
+objc\[\d+\]: Method cache corrupted.*
+objc\[\d+\]: .*
+objc\[\d+\]: .*
+objc\[\d+\]: .*
+objc\[\d+\]: .*
+objc\[\d+\]: Method cache corrupted\.
+CRASHED: SIG(ILL|TRAP)
+END
+*/
+
+
+#include "test.h"
+
+#if !__OBJC2__
+
+int main()
+{
+ fprintf(stderr, "objc1\n");
+ succeed(__FILE__);
+}
+
+#else
+
+#include "testroot.i"
+
+#if __LP64__
+typedef uint32_t mask_t;
+#else
+typedef uint16_t mask_t;
+#endif
+
+struct bucket_t {
+ void *sel;
+ void *imp;
+};
+
+struct cache_t {
+ struct bucket_t *buckets;
+ mask_t mask;
+ mask_t occupied;
+};
+
+struct class_t {
+ void *isa;
+ void *supercls;
+ struct cache_t cache;
+};
+
+@interface Subclass : TestRoot @end
+@implementation Subclass @end
+
+int main()
+{
+ Class cls = [TestRoot class];
+ id obj = [cls new];
+ [obj self];
+
+ // Test cache::find by clobbering the cache and then adding a method
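+ // With mask = 0 and a bucket that is neither a hit nor empty, the cache
+ // slot search reached via class_addMethod should find no usable bucket
+ // and abort with "Method cache corrupted" (see TEST_RUN_OUTPUT above).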
+ struct cache_t *cache = &((__bridge struct class_t *)cls)->cache;
+ cache->mask = 0;
+ cache->buckets[0].sel = (void*)~0;
+ cache->buckets[0].imp = (void*)~0;
+
+ fprintf(stderr, "crash now\n");
+ class_addMethod(cls, @selector(fake:o:rama:), nil, nil);
+
+ fail("should have crashed");
+}
+
+#endif
--- /dev/null
+/*
+TEST_CRASHES
+TEST_RUN_OUTPUT
+objc\[\d+\]: tag index 7 used for two different classes \(was 0x[0-9a-fA-F]+ NSObject, now 0x[0-9a-fA-F]+ Protocol\)
+CRASHED: SIG(ILL|TRAP)
+OR
+no tagged pointers
+OK: badTagClass.m
+END
+*/
+
+#include "test.h"
+
+#include <objc/objc-internal.h>
+#include <objc/Protocol.h>
+
+#if OBJC_HAVE_TAGGED_POINTERS
+
+int main()
+{
+ // re-registration and nil registration allowed
+ _objc_registerTaggedPointerClass(OBJC_TAG_7, [NSObject class]);
+ _objc_registerTaggedPointerClass(OBJC_TAG_7, [NSObject class]);
+ _objc_registerTaggedPointerClass(OBJC_TAG_7, nil);
+ _objc_registerTaggedPointerClass(OBJC_TAG_7, [NSObject class]);
+
+ // colliding registration disallowed
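+ // Per TEST_RUN_OUTPUT above, registering a different class for the same
+ // tag index is expected to abort ("tag index 7 used for two different
+ // classes"), so control should never reach the fail() below.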
+ _objc_registerTaggedPointerClass(OBJC_TAG_7, [Protocol class]);
+
+ fail(__FILE__);
+}
+
+#else
+
+int main()
+{
+ fprintf(stderr, "no tagged pointers\n");
+ succeed(__FILE__);
+}
+
+#endif
--- /dev/null
+/*
+TEST_CRASHES
+TEST_RUN_OUTPUT
+objc\[\d+\]: tag index 8 is too large.
+CRASHED: SIG(ILL|TRAP)
+OR
+no tagged pointers
+OK: badTagIndex.m
+END
+*/
+
+#include "test.h"
+
+#include <objc/objc-internal.h>
+#include <objc/NSObject.h>
+
+#if OBJC_HAVE_TAGGED_POINTERS
+
+int main()
+{
+ _objc_registerTaggedPointerClass((objc_tag_index_t)8, [NSObject class]);
+ fail(__FILE__);
+}
+
+#else
+
+int main()
+{
+ fprintf(stderr, "no tagged pointers\n");
+ succeed(__FILE__);
+}
+
+#endif
}
-(void)instancemethod {
testprintf("in [Super(Category) instancemethod]\n");
- testassert(self->isa == [Super class]);
+ testassert(object_getClass(self) == [Super class]);
testassert(state == 1);
state = 2;
}
}
+#if __has_feature(objc_arc)
+
void test_batch(void)
{
-#if __has_feature(objc_arc)
// not converted to ARC yet
return;
+}
+
#else
+// Like class_createInstances(), but refuses to accept zero allocations
+static unsigned
+reallyCreateInstances(Class cls, size_t extraBytes, id *dst, unsigned want)
+{
+ unsigned count;
+ while (0 == (count = class_createInstances(cls, extraBytes, dst, want))) {
+ testprintf("class_createInstances created nothing; retrying\n");
+ RELEASE_VALUE([[TestRoot alloc] init]);
+ }
+ return count;
+}
+
+void test_batch(void)
+{
id o2[100];
unsigned int count, i;
}
ctors1 = dtors1 = ctors2 = dtors2 = 0;
- count = class_createInstances([TestRoot class], 0, o2, 10);
+ count = reallyCreateInstances([TestRoot class], 0, o2, 10);
testassert(count > 0);
testassert(ctors1 == 0 && dtors1 == 0 &&
ctors2 == 0 && dtors2 == 0);
}
ctors1 = dtors1 = ctors2 = dtors2 = 0;
- count = class_createInstances([CXXBase class], 0, o2, 10);
+ count = reallyCreateInstances([CXXBase class], 0, o2, 10);
testassert(count > 0);
testassert(ctors1 == count && dtors1 == 0 &&
ctors2 == 0 && dtors2 == 0);
}
ctors1 = dtors1 = ctors2 = dtors2 = 0;
- count = class_createInstances([NoCXXSub class], 0, o2, 10);
+ count = reallyCreateInstances([NoCXXSub class], 0, o2, 10);
testassert(count > 0);
testassert(ctors1 == count && dtors1 == 0 &&
ctors2 == 0 && dtors2 == 0);
}
ctors1 = dtors1 = ctors2 = dtors2 = 0;
- count = class_createInstances([CXXSub class], 0, o2, 10);
+ count = reallyCreateInstances([CXXSub class], 0, o2, 10);
testassert(count > 0);
testassert(ctors1 == count && dtors1 == 0 &&
ctors2 == count && dtors2 == 0);
testcollect();
testassert(ctors1 == count && dtors1 == count &&
ctors2 == count && dtors2 == count);
-#endif
}
+// not ARC
+#endif
+
+
int main()
{
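+ // Warm everything up first (method caches, side tables, autorelease
+ // machinery) so that the iterations between leak_mark() and
+ // leak_check(0) below should not allocate anything that outlives them.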
+ for (int i = 0; i < 1000; i++) {
+ testonthread(^{ test_single(); });
+ testonthread(^{ test_inplace(); });
+ testonthread(^{ test_batch(); });
+ }
+
testonthread(^{ test_single(); });
testonthread(^{ test_inplace(); });
+ testonthread(^{ test_batch(); });
leak_mark();
- testonthread(^{ test_batch(); });
+ for (int i = 0; i < 1000; i++) {
+ testonthread(^{ test_single(); });
+ testonthread(^{ test_inplace(); });
+ testonthread(^{ test_batch(); });
+ }
- // fixme can't get this to zero; may or may not be a real leak
- leak_check(64);
+ leak_check(0);
// fixme ctor exceptions aren't caught inside .cxx_construct ?
// Single allocation, ctors fail
bzero(buf, malloc_size(buf));
testassert(object_setClass(obj, [TestRoot class]) == nil);
- testassert(object_getClass(obj) == buf[0]);
+ testassert(object_getClass(obj) == [TestRoot class]);
testassert(object_getClass([TestRoot class]) == object_getClass([TestRoot class]));
testassert(object_getClass(nil) == Nil);
#include <malloc/malloc.h>
#include <objc/objc-runtime.h>
+OBJC_ROOT_CLASS
@interface SuperIvars {
id isa;
int ivar1;
} @end
@implementation SubIvars @end
-
+OBJC_ROOT_CLASS
@interface FourIvars {
int ivar1;
int ivar2;
} @end
@implementation FourIvars @end
+OBJC_ROOT_CLASS
@interface NoIvars { } @end
@implementation NoIvars @end
#include <malloc/malloc.h>
#include <objc/objc-runtime.h>
+OBJC_ROOT_CLASS
@interface SuperProps { id isa; int prop1; int prop2; }
@property int prop1;
@property int prop2;
@synthesize prop4;
@end
-
+OBJC_ROOT_CLASS
@interface FourProps { int prop1; int prop2; int prop3; int prop4; }
@property int prop1;
@property int prop2;
@synthesize prop4;
@end
+OBJC_ROOT_CLASS
@interface NoProps @end
@implementation NoProps @end
#endif
#include "test.h"
+OBJC_ROOT_CLASS
@interface Super { @public id isa; } @end
@implementation Super
+(void) initialize { }
s = class_createInstance([Super class], 0);
testassert(s);
- testassert(s->isa == [Super class]);
+ testassert(object_getClass(s) == [Super class]);
testassert(malloc_size(s) >= class_getInstanceSize([Super class]));
if (objc_collectingEnabled()) testassert(auto_zone_is_valid_pointer(objc_collectableZone(), s));
s = class_createInstance([Sub class], 0);
testassert(s);
- testassert(s->isa == [Sub class]);
+ testassert(object_getClass(s) == [Sub class]);
testassert(malloc_size(s) >= class_getInstanceSize([Sub class]));
if (objc_collectingEnabled()) testassert(auto_zone_is_valid_pointer(objc_collectableZone(), s));
s = class_createInstance([Super class], 100);
testassert(s);
- testassert(s->isa == [Super class]);
+ testassert(object_getClass(s) == [Super class]);
testassert(malloc_size(s) >= class_getInstanceSize([Super class]) + 100);
if (objc_collectingEnabled()) testassert(auto_zone_is_valid_pointer(objc_collectableZone(), s));
s = class_createInstance([Sub class], 100);
testassert(s);
- testassert(s->isa == [Sub class]);
+ testassert(object_getClass(s) == [Sub class]);
testassert(malloc_size(s) >= class_getInstanceSize([Sub class]) + 100);
if (objc_collectingEnabled()) testassert(auto_zone_is_valid_pointer(objc_collectableZone(), s));
// TEST_CONFIG MEM=mrc
/*
TEST_BUILD
- $C{COMPILE} $DIR/customrr.m '-Wl,-exported_symbol,.objc_class_name_InheritingSubCat' -o customrr.out
+ $C{COMPILE} $DIR/customrr.m -fvisibility=default -o customrr.out
$C{COMPILE} -undefined dynamic_lookup -dynamiclib $DIR/customrr-cat1.m -o customrr-cat1.dylib
$C{COMPILE} -undefined dynamic_lookup -dynamiclib $DIR/customrr-cat2.m -o customrr-cat2.dylib
END
#if !__OBJC2__
// pacify exported symbols list
+OBJC_ROOT_CLASS
@interface InheritingSubCat @end
@implementation InheritingSubCat @end
static int PlusReleases;
static int PlusAutoreleases;
static int PlusRetainCounts;
+static int Allocs;
+static int AllocWithZones;
static int SubRetains;
static int SubReleases;
static int SubPlusReleases;
static int SubPlusAutoreleases;
static int SubPlusRetainCounts;
+static int SubAllocs;
+static int SubAllocWithZones;
static int Imps;
PlusReleases = 0;
PlusAutoreleases = 0;
PlusRetainCounts = 0;
+ Allocs = 0;
+ AllocWithZones = 0;
SubRetains = 0;
SubReleases = 0;
SubPlusReleases = 0;
SubPlusAutoreleases = 0;
SubPlusRetainCounts = 0;
+ SubAllocs = 0;
+ SubAllocWithZones = 0;
Imps = 0;
}
void HackPlusRelease(id self __unused, SEL _cmd __unused) { PlusReleases++; }
id HackPlusAutorelease(id self, SEL _cmd __unused) { PlusAutoreleases++; return self; }
NSUInteger HackPlusRetainCount(id self __unused, SEL _cmd __unused) { PlusRetainCounts++; return 1; }
+id HackAlloc(Class self, SEL _cmd __unused) { Allocs++; return class_createInstance(self, 0); }
+id HackAllocWithZone(Class self, SEL _cmd __unused) { AllocWithZones++; return class_createInstance(self, 0); }
@interface OverridingSub : NSObject @end
@end
+@interface OverridingASub : NSObject @end
+@implementation OverridingASub
++(id) alloc { SubAllocs++; return class_createInstance(self, 0); }
+@end
+
+@interface OverridingAWZSub : NSObject @end
+@implementation OverridingAWZSub
++(id) allocWithZone:(NSZone * __unused)z { SubAllocWithZones++; return class_createInstance(self, 0); }
+@end
+
+@interface OverridingAAWZSub : NSObject @end
+@implementation OverridingAAWZSub
++(id) alloc { SubAllocs++; return class_createInstance(self, 0); }
++(id) allocWithZone:(NSZone * __unused)z { SubAllocWithZones++; return class_createInstance(self, 0); }
+@end
+
+
@interface InheritingSub : NSObject @end
@implementation InheritingSub @end
@implementation InheritingSubCat_2 @end
+extern uintptr_t OBJC_CLASS_$_UnrealizedSubA1;
+@interface UnrealizedSubA1 : NSObject @end
+@implementation UnrealizedSubA1 @end
+extern uintptr_t OBJC_CLASS_$_UnrealizedSubA2;
+@interface UnrealizedSubA2 : NSObject @end
+@implementation UnrealizedSubA2 @end
+extern uintptr_t OBJC_CLASS_$_UnrealizedSubA3;
+@interface UnrealizedSubA3 : NSObject @end
+@implementation UnrealizedSubA3 @end
+
+extern uintptr_t OBJC_CLASS_$_UnrealizedSubB1;
+@interface UnrealizedSubB1 : NSObject @end
+@implementation UnrealizedSubB1 @end
+extern uintptr_t OBJC_CLASS_$_UnrealizedSubB2;
+@interface UnrealizedSubB2 : NSObject @end
+@implementation UnrealizedSubB2 @end
+extern uintptr_t OBJC_CLASS_$_UnrealizedSubB3;
+@interface UnrealizedSubB3 : NSObject @end
+@implementation UnrealizedSubB3 @end
+
+extern uintptr_t OBJC_CLASS_$_UnrealizedSubC1;
+@interface UnrealizedSubC1 : NSObject @end
+@implementation UnrealizedSubC1 @end
+extern uintptr_t OBJC_CLASS_$_UnrealizedSubC2;
+@interface UnrealizedSubC2 : NSObject @end
+@implementation UnrealizedSubC2 @end
+extern uintptr_t OBJC_CLASS_$_UnrealizedSubC3;
+@interface UnrealizedSubC3 : NSObject @end
+@implementation UnrealizedSubC3 @end
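+// The Unrealized* classes above are first touched by the retain/release
+// tests below (the B and C groups only through their raw OBJC_CLASS_$_
+// symbols), so the custom-RR paths get exercised on classes the test
+// treats as not yet realized.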
+
+
int main(int argc __unused, char **argv)
{
objc_autoreleasePoolPush();
m[2] = (IMP)HackPlusAutorelease;
m = (IMP *)class_getClassMethod(cls, @selector(retainCount));
m[2] = (IMP)HackPlusRetainCount;
+ m = (IMP *)class_getClassMethod(cls, @selector(alloc));
+ m[2] = (IMP)HackAlloc;
+ m = (IMP *)class_getClassMethod(cls, @selector(allocWithZone:));
+ m[2] = (IMP)HackAllocWithZone;
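+ // Treating Method as an IMP[] and writing slot 2 appears to rely on the
+ // method struct's {name, types, imp} layout: the IMP is swapped in place,
+ // bypassing method_setImplementation() so the runtime's custom-RR/AWZ
+ // detection is not tripped and its optimized paths can still be probed.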
_objc_flush_caches(cls);
Class cls = [NSObject class];
Class icl = [InheritingSub class];
Class ocl = [OverridingSub class];
+ /*
+ Class oa1 = [OverridingASub class];
+ Class oa2 = [OverridingAWZSub class];
+ Class oa3 = [OverridingAAWZSub class];
+ */
NSObject *obj = [NSObject new];
InheritingSub *inh = [InheritingSub new];
OverridingSub *ovr = [OverridingSub new];
[ocl autorelease];
testassert(SubPlusAutoreleases == 1);
+ [UnrealizedSubA1 retain];
+ testassert(PlusRetains == 3);
+ [UnrealizedSubA2 release];
+ testassert(PlusReleases == 3);
+ [UnrealizedSubA3 autorelease];
+ testassert(PlusAutoreleases == 3);
+
+
+ testprintf("objc_msgSend() does not bypass\n");
+ zero();
+
+ id (*retain_fn)(id, SEL) = (id(*)(id, SEL))objc_msgSend;
+ void (*release_fn)(id, SEL) = (void(*)(id, SEL))objc_msgSend;
+ id (*autorelease_fn)(id, SEL) = (id(*)(id, SEL))objc_msgSend;
+
+ retain_fn(obj, @selector(retain));
+ testassert(Retains == 1);
+ release_fn(obj, @selector(release));
+ testassert(Releases == 1);
+ autorelease_fn(obj, @selector(autorelease));
+ testassert(Autoreleases == 1);
+
+ retain_fn(cls, @selector(retain));
+ testassert(PlusRetains == 1);
+ release_fn(cls, @selector(release));
+ testassert(PlusReleases == 1);
+ autorelease_fn(cls, @selector(autorelease));
+ testassert(PlusAutoreleases == 1);
+
+ retain_fn(inh, @selector(retain));
+ testassert(Retains == 2);
+ release_fn(inh, @selector(release));
+ testassert(Releases == 2);
+ autorelease_fn(inh, @selector(autorelease));
+ testassert(Autoreleases == 2);
+
+ retain_fn(icl, @selector(retain));
+ testassert(PlusRetains == 2);
+ release_fn(icl, @selector(release));
+ testassert(PlusReleases == 2);
+ autorelease_fn(icl, @selector(autorelease));
+ testassert(PlusAutoreleases == 2);
+
+ retain_fn(ovr, @selector(retain));
+ testassert(SubRetains == 1);
+ release_fn(ovr, @selector(release));
+ testassert(SubReleases == 1);
+ autorelease_fn(ovr, @selector(autorelease));
+ testassert(SubAutoreleases == 1);
+
+ retain_fn(ocl, @selector(retain));
+ testassert(SubPlusRetains == 1);
+ release_fn(ocl, @selector(release));
+ testassert(SubPlusReleases == 1);
+ autorelease_fn(ocl, @selector(autorelease));
+ testassert(SubPlusAutoreleases == 1);
+
+#if __OBJC2__
+ retain_fn((Class)&OBJC_CLASS_$_UnrealizedSubB1, @selector(retain));
+ testassert(PlusRetains == 3);
+ release_fn((Class)&OBJC_CLASS_$_UnrealizedSubB2, @selector(release));
+ testassert(PlusReleases == 3);
+ autorelease_fn((Class)&OBJC_CLASS_$_UnrealizedSubB3, @selector(autorelease));
+ testassert(PlusAutoreleases == 3);
+#endif
+
testprintf("arc function bypasses instance but not class or override\n");
zero();
objc_autorelease(ocl);
testassert(SubPlusAutoreleases == 1);
+#if __OBJC2__
+#if 1
+ testwarn("rdar://12961688 CustomRR is wrong for unrealized classes");
+#else
+ objc_retain((Class)&OBJC_CLASS_$_UnrealizedSubC1);
+ testassert(PlusRetains == 3);
+ objc_release((Class)&OBJC_CLASS_$_UnrealizedSubC2);
+ testassert(PlusReleases == 3);
+ objc_autorelease((Class)&OBJC_CLASS_$_UnrealizedSubC3);
+ testassert(PlusAutoreleases == 3);
+#endif
+#endif
+
testprintf("unrelated addMethod does not clobber\n");
zero();
// TEST_CONFIG MEM=mrc
/*
TEST_BUILD
- $C{COMPILE} $DIR/customrr.m '-Wl,-exported_symbol,.objc_class_name_InheritingSubCat' -o customrr2.out -DTEST_EXCHANGEIMPLEMENTATIONS=1
+ $C{COMPILE} $DIR/customrr.m -fvisibility=default -o customrr2.out -DTEST_EXCHANGEIMPLEMENTATIONS=1
$C{COMPILE} -undefined dynamic_lookup -dynamiclib $DIR/customrr-cat1.m -o customrr-cat1.dylib
$C{COMPILE} -undefined dynamic_lookup -dynamiclib $DIR/customrr-cat2.m -o customrr-cat2.dylib
END
+++ /dev/null
-// rdar://6401639, waiting for rdar://5648998
-// TEST_DISABLED
-
-#include "test.h"
-#include <objc/objc.h>
-#include <mach/mach.h>
-#include <pthread.h>
-#define _OBJC_PRIVATE_H_
-#include <objc/objc-gdb.h>
-
-#warning this test needs to be augmented for the side table machinery
-
-@interface Super { id isa; } @end
-
-@implementation Super
-+(void)initialize { }
-+class { return self; }
-+(int)method { return 1; }
-+(int)method2 { return 1; }
-@end
-
-
-semaphore_t sema;
-
-void *thread(void *arg __unused)
-{
- objc_registerThreadWithCollector();
-
- semaphore_signal(sema);
- testprintf("hi\n");
- while (1) {
- [Super method];
- _objc_flush_caches(0, YES);
- }
-
- return NULL;
-}
-
-
-void stopAllThreads(void)
-{
- mach_msg_type_number_t count, i;
- thread_act_array_t list;
-
- task_threads(mach_task_self(), &list, &count);
- for (i = 0; i < count; i++) {
- if (list[i] == mach_thread_self()) continue;
- thread_suspend(list[i]);
- mach_port_deallocate(mach_task_self(), list[i]);
- }
-}
-
-void startAllThreads(void)
-{
- mach_msg_type_number_t count, i;
- thread_act_array_t list;
-
- task_threads(mach_task_self(), &list, &count);
- for (i = 0; i < count; i++) {
- if (list[i] == mach_thread_self()) continue;
- thread_resume(list[i]);
- mach_port_deallocate(mach_task_self(), list[i]);
- }
-}
-
-
-static void cycle(int mode, int *good, int *bad)
-{
- stopAllThreads();
- if (gdb_objc_startDebuggerMode(mode)) {
- testprintf("good\n");
- [Super method];
- [Super method2];
- if (mode == OBJC_DEBUGMODE_FULL) {
- // will crash without full write locks
- _objc_flush_caches(0, YES);
- }
- gdb_objc_endDebuggerMode();
- ++*good;
- } else {
- testprintf("bad\n");
- ++*bad;
- }
- startAllThreads();
- sched_yield();
-}
-
-
-int main()
-{
-#define STOPS 10000
-#define THREADS 1
- int i;
-
- [Super class];
-
- testassert(STOPS > 200);
-
- // Uncontended debugger mode
- testassert(gdb_objc_startDebuggerMode(0));
- gdb_objc_endDebuggerMode();
-
- // Uncontended full debugger mode
- testassert(gdb_objc_startDebuggerMode(OBJC_DEBUGMODE_FULL));
- gdb_objc_endDebuggerMode();
-
- // Nested debugger mode
- testassert(gdb_objc_startDebuggerMode(0));
- testassert(gdb_objc_startDebuggerMode(0));
- gdb_objc_endDebuggerMode();
- gdb_objc_endDebuggerMode();
-
- // Nested full debugger mode
- testassert(gdb_objc_startDebuggerMode(OBJC_DEBUGMODE_FULL));
- testassert(gdb_objc_startDebuggerMode(OBJC_DEBUGMODE_FULL));
- gdb_objc_endDebuggerMode();
- gdb_objc_endDebuggerMode();
-
- // Check that debugger mode sometimes works and sometimes doesn't
- // when contending with another runtime-manipulating thread.
-
- semaphore_create(mach_task_self(), &sema, 0, 0);
-
- for (i = 0; i < THREADS; i++) {
- pthread_t th;
- pthread_create(&th, NULL, &thread, NULL);
- semaphore_wait(sema);
- }
-
- testprintf("go\n");
-
- int good0 = 0, bad0 = 0;
- for (i = 0; i < STOPS; i++) {
- cycle(0, &good0, &bad0);
- }
- testprintf("good0 %d, bad0 %d\n", good0, bad0);
-
- int goodF = 0, badF = 0;
- for (i = 0; i < STOPS; i++) {
- cycle(OBJC_DEBUGMODE_FULL, &goodF, &badF);
- }
- testprintf("goodF %d, badF %d\n", goodF, badF);
-
- // Require at least 1% each of good and bad.
- // Also require more than one each (exactly one is likely
- // a bug wherein the locks got stuck the first time).
- // Also require that FULL worked less often.
-
- if (good0 > STOPS/100 && bad0 > STOPS/100 && good0 > 1 && bad0 > 1 &&
- goodF > STOPS/100 && badF > STOPS/100 && goodF > 1 && badF > 1
-#ifdef __OBJC2__
- && good0 > goodF /* not reliable enough in old runtime */
-#endif
- )
- {
- succeed(__FILE__);
- }
-
- fail("good0=%d/%d bad0=%d/%d goodF=%d/%d badF=%d/%d (required at least %d/%d good)",
- good0, STOPS, bad0, STOPS, goodF, STOPS, badF, STOPS, STOPS/100, STOPS);
-}
id o = nil;
BOOL b = YES;
BOOL b2 = NO;
-#if !__has_feature(objc_arr)
+#if !__has_feature(objc_arc)
__strong void *p;
#endif
id __unsafe_unretained u;
{
testassert(YES);
testassert(!NO);
+#if __cplusplus
+ testwarn("rdar://12371870 -Wnull-conversion");
+ testassert(!(bool)nil);
+ testassert(!(bool)Nil);
+#else
testassert(!nil);
testassert(!Nil);
+#endif
#if __has_feature(objc_bool)
// YES[array] is disallowed for objc just as true[array] is for C++
".long 1 \n"
PTR "L_load \n"
PTR "L_load \n"
- PTR str2(SHARED_REGION_BASE+SHARED_REGION_SIZE-0x1000) " \n"
+ PTR str2(SHARED_REGION_BASE+SHARED_REGION_SIZE-PAGE_SIZE) " \n"
"L_good_methods: \n"
".long 24 \n"
PTR "_OBJC_METACLASS_$_Super \n"
PTR "0 \n"
PTR "__objc_empty_cache \n"
- PTR "__objc_empty_vtable \n"
+ PTR "0 \n"
PTR "L_ro \n"
""
"_OBJC_METACLASS_$_Super: \n"
PTR "_OBJC_METACLASS_$_Super \n"
PTR "_OBJC_CLASS_$_Super \n"
PTR "__objc_empty_cache \n"
- PTR "__objc_empty_vtable \n"
+ PTR "0 \n"
PTR "L_meta_ro \n"
""
"L_ro: \n"
PTR "_OBJC_METACLASS_$_Sub \n"
PTR "_OBJC_CLASS_$_Super \n"
PTR "__objc_empty_cache \n"
- PTR "__objc_empty_vtable \n"
+ PTR "0 \n"
PTR "L_sub_ro \n"
""
"_OBJC_METACLASS_$_Sub: \n"
PTR "_OBJC_METACLASS_$_Super \n"
PTR "_OBJC_METACLASS_$_Super \n"
PTR "__objc_empty_cache \n"
- PTR "__objc_empty_vtable \n"
+ PTR "0 \n"
PTR "L_sub_meta_ro \n"
""
"L_sub_ro: \n"
".long 1 \n"
PTR "L_load \n"
PTR "L_load \n"
- PTR str2(SHARED_REGION_BASE+SHARED_REGION_SIZE-0x1000) " \n"
+ PTR str2(SHARED_REGION_BASE+SHARED_REGION_SIZE-PAGE_SIZE) " \n"
"L_good_methods: \n"
".long 24 \n"
-/*
+/*
+need exception-safe ARC for exception deallocation tests
+need F/CF for testonthread() in GC mode
+TEST_CFLAGS -fobjc-arc-exceptions -framework Foundation
+
llvm-gcc unavoidably warns about our deliberately out-of-order handlers
TEST_BUILD_OUTPUT
#include <objc/objc-exception.h>
static volatile int state = 0;
+static volatile int dealloced = 0;
#define BAD 1000000
#if defined(USE_FOUNDATION)
+(id)exception { return AUTORELEASE([[self alloc] initWithName:@"Super" reason:@"reason" userInfo:nil]); }
-(void)check { state++; }
+(void)check { testassert(!"caught class object, not instance"); }
+-(void)dealloc { dealloced++; SUPER_DEALLOC(); }
+-(void)finalize { dealloced++; [super finalize]; }
@end
#define FILENAME "nsexc.m"
+(id)exception { return AUTORELEASE([self new]); }
-(void)check { state++; }
+(void)check { testassert(!"caught class object, not instance"); }
+-(void)dealloc { dealloced++; SUPER_DEALLOC(); }
+-(void)finalize { dealloced++; [super finalize]; }
@end
#define FILENAME "exc.m"
#endif
+#define TEST(code) \
+ do { \
+ testonthread(^{ PUSH_POOL { code } POP_POOL; }); \
+ testcollect(); \
+ } while (0)
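+// Each TEST() case runs its body on a separate thread inside its own
+// autorelease pool and then collects, so the `dealloced` counter can
+// observe whether the thrown exception object was actually destroyed.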
+
+
+
int main()
{
- PUSH_POOL {
-
- testprintf("try-catch-finally, exception caught exactly\n");
-
+ testprintf("try-catch-finally, exception caught exactly\n");
+
+ TEST({
state = 0;
+ dealloced = 0;
@try {
state++;
@try {
@catch (...) {
state = BAD;
}
- testassert(state == 6);
-
-
- testprintf("try-finally, no exception thrown\n");
-
+ });
+ testassert(state == 6);
+ testassert(dealloced == 1);
+
+
+ testprintf("try-finally, no exception thrown\n");
+
+ TEST({
state = 0;
+ dealloced = 0;
@try {
state++;
@try {
@catch (...) {
state = BAD;
}
- testassert(state == 4);
-
-
- testprintf("try-finally, with exception\n");
-
+ });
+ testassert(state == 4);
+ testassert(dealloced == 0);
+
+
+ testprintf("try-finally, with exception\n");
+
+ TEST({
state = 0;
+ dealloced = 0;
@try {
state++;
@try {
state++;
[e check]; // state++
}
- testassert(state == 5);
-
-
- testprintf("try-catch-finally, no exception\n");
-
+ });
+ testassert(state == 5);
+ testassert(dealloced == 1);
+
+
+#if __OBJC2__
+ testprintf("try-finally, with autorelease pool pop during unwind\n");
+ // Popping an autorelease pool during unwind used to deallocate the
+ // exception object, but now we retain it while in flight.
+
+ // This use-after-free is undetected without MallocScribble or guardmalloc.
+ if (!getenv("MallocScribble") &&
+ (!getenv("DYLD_INSERT_LIBRARIES") ||
+ !strstr(getenv("DYLD_INSERT_LIBRARIES"), "libgmalloc")))
+ {
+ testwarn("MallocScribble not set");
+ }
+
+ TEST({
state = 0;
+ dealloced = 0;
+ @try {
+ void *pool2 = objc_autoreleasePoolPush();
+ state++;
+ @try {
+ state++;
+ @throw [Super exception];
+ state = BAD;
+ }
+ @finally {
+ state++;
+ objc_autoreleasePoolPop(pool2);
+ }
+ state = BAD;
+ }
+ @catch (id e) {
+ state++;
+ [e check]; // state++
+ }
+ });
+ testassert(state == 5);
+ testassert(dealloced == 1);
+#endif
+
+
+ testprintf("try-catch-finally, no exception\n");
+
+ TEST({
+ state = 0;
+ dealloced = 0;
@try {
state++;
@try {
} @catch (...) {
state = BAD;
}
- testassert(state == 4);
-
-
- testprintf("try-catch-finally, exception not caught\n");
-
+ });
+ testassert(state == 4);
+ testassert(dealloced == 0);
+
+
+ testprintf("try-catch-finally, exception not caught\n");
+
+ TEST({
state = 0;
+ dealloced = 0;
@try {
state++;
@try {
state++;
[e check]; // state++
}
- testassert(state == 5);
-
-
- testprintf("try-catch-finally, exception caught exactly, rethrown\n");
-
+ });
+ testassert(state == 5);
+ testassert(dealloced == 1);
+
+
+ testprintf("try-catch-finally, exception caught exactly, rethrown\n");
+
+ TEST({
state = 0;
+ dealloced = 0;
@try {
state++;
@try {
state++;
[e check]; // state++
}
- testassert(state == 7);
-
-
- testprintf("try-catch, no exception\n");
-
+ });
+ testassert(state == 7);
+ testassert(dealloced == 1);
+
+
+ testprintf("try-catch, no exception\n");
+
+ TEST({
state = 0;
+ dealloced = 0;
@try {
state++;
@try {
} @catch (...) {
state = BAD;
}
- testassert(state == 3);
-
-
- testprintf("try-catch, exception not caught\n");
-
+ });
+ testassert(state == 3);
+ testassert(dealloced == 0);
+
+
+ testprintf("try-catch, exception not caught\n");
+
+ TEST({
state = 0;
+ dealloced = 0;
@try {
state++;
@try {
state++;
[e check]; // state++
}
- testassert(state == 4);
-
-
- testprintf("try-catch, exception caught exactly\n");
-
+ });
+ testassert(state == 4);
+ testassert(dealloced == 1);
+
+
+ testprintf("try-catch, exception caught exactly\n");
+
+ TEST({
state = 0;
+ dealloced = 0;
@try {
state++;
@try {
@catch (...) {
state = BAD;
}
- testassert(state == 5);
-
-
- testprintf("try-catch, exception caught exactly, rethrown\n");
-
+ });
+ testassert(state == 5);
+ testassert(dealloced == 1);
+
+
+ testprintf("try-catch, exception caught exactly, rethrown\n");
+
+ TEST({
state = 0;
+ dealloced = 0;
@try {
state++;
@try {
state++;
[e check]; // state++
}
- testassert(state == 6);
-
-
- testprintf("try-catch, exception caught exactly, thrown again explicitly\n");
-
+ });
+ testassert(state == 6);
+ testassert(dealloced == 1);
+
+
+ testprintf("try-catch, exception caught exactly, thrown again explicitly\n");
+
+ TEST({
state = 0;
+ dealloced = 0;
@try {
state++;
@try {
state++;
[e check]; // state++
}
- testassert(state == 6);
-
-
- testprintf("try-catch, default catch, rethrown\n");
-
+ });
+ testassert(state == 6);
+ testassert(dealloced == 1);
+
+
+ testprintf("try-catch, default catch, rethrown\n");
+
+ TEST({
state = 0;
+ dealloced = 0;
@try {
state++;
@try {
state++;
[e check]; // state++
}
- testassert(state == 5);
-
-
- testprintf("try-catch, default catch, rethrown and caught inside nested handler\n");
-
+ });
+ testassert(state == 5);
+ testassert(dealloced == 1);
+
+
+ testprintf("try-catch, default catch, rethrown and caught inside nested handler\n");
+
+ TEST({
state = 0;
+ dealloced = 0;
@try {
state++;
@try {
@catch (...) {
state = BAD;
}
- testassert(state == 9);
-
-
- testprintf("try-catch, default catch, rethrown inside nested handler but not caught\n");
-
+ });
+ testassert(state == 9);
+ testassert(dealloced == 1);
+
+
+ testprintf("try-catch, default catch, rethrown inside nested handler but not caught\n");
+
+ TEST({
state = 0;
+ dealloced = 0;
@try {
state++;
@try {
state++;
[e check]; // state++
}
- testassert(state == 7);
-
-
+ });
+ testassert(state == 7);
+ testassert(dealloced == 1);
+
+
#if __cplusplus && __OBJC2__
- testprintf("C++ try/catch, Objective-C exception superclass\n");
-
+ testprintf("C++ try/catch, Objective-C exception superclass\n");
+
+ TEST({
state = 0;
+ dealloced = 0;
try {
state++;
try {
state++;
[e check]; // state++;
}
- testassert(state == 8);
-
-
- testprintf("C++ try/catch, Objective-C exception subclass\n");
-
+ });
+ testassert(state == 8);
+ testassert(dealloced == 1);
+
+
+ testprintf("C++ try/catch, Objective-C exception subclass\n");
+
+ TEST({
state = 0;
+ dealloced = 0;
try {
state++;
try {
state++;
[e check]; // state++;
}
- testassert(state == 8);
+ });
+ testassert(state == 8);
+ testassert(dealloced == 1);
#endif
// alt handlers for modern Mac OS only
#else
+ {
// alt handlers
// run a lot to catch failed unregistration (runtime complains at 1000)
#define ALT_HANDLER_REPEAT 2000
- int i;
testprintf("alt handler, no exception\n");
- for (i = 0; i < ALT_HANDLER_REPEAT; i++) {
- PUSH_POOL {
+ TEST({
+ dealloced = 0;
+ for (int i = 0; i < ALT_HANDLER_REPEAT; i++) {
state = 0;
@try {
state++;
state = BAD;
}
testassert(state == 3);
- } POP_POOL;
- }
+ }
+ });
+ testassert(dealloced == 0);
+
testprintf("alt handler, exception thrown through\n");
- for (i = 0; i < ALT_HANDLER_REPEAT; i++) {
- PUSH_POOL {
+ TEST({
+ dealloced = 0;
+ for (int i = 0; i < ALT_HANDLER_REPEAT; i++) {
state = 0;
@try {
state++;
[e check]; // state++
}
testassert(state == 5);
- } POP_POOL;
- }
+ }
+ });
+ testassert(dealloced == ALT_HANDLER_REPEAT);
testprintf("alt handler, nested\n");
- for (i = 0; i < ALT_HANDLER_REPEAT; i++) {
- PUSH_POOL {
+ TEST({
+ dealloced = 0;
+ for (int i = 0; i < ALT_HANDLER_REPEAT; i++) {
state = 0;
@try {
state++;
state = BAD;
}
testassert(state == 9);
- } POP_POOL;
- }
+ }
+ });
+ testassert(dealloced == ALT_HANDLER_REPEAT);
testprintf("alt handler, nested, rethrows in between\n");
- for (i = 0; i < ALT_HANDLER_REPEAT; i++) {
- PUSH_POOL {
+ TEST({
+ dealloced = 0;
+ for (int i = 0; i < ALT_HANDLER_REPEAT; i++) {
state = 0;
@try {
state++;
[e check]; // state++
}
testassert(state == 10);
- } POP_POOL;
- }
+ }
+ });
+ testassert(dealloced == ALT_HANDLER_REPEAT);
testprintf("alt handler, exception thrown and caught inside\n");
- for (i = 0; i < ALT_HANDLER_REPEAT; i++) {
- PUSH_POOL {
+ TEST({
+ dealloced = 0;
+ for (int i = 0; i < ALT_HANDLER_REPEAT; i++) {
state = 0;
@try {
state++;
state = BAD;
}
testassert(state == 5);
- } POP_POOL;
- }
+ }
+ });
+ testassert(dealloced == ALT_HANDLER_REPEAT);
#if defined(USE_FOUNDATION)
testprintf("alt handler, rdar://10055775\n");
- for (i = 0; i < ALT_HANDLER_REPEAT; i++) {
- PUSH_POOL {
+ TEST({
+ dealloced = 0;
+ for (int i = 0; i < ALT_HANDLER_REPEAT; i++) {
state = 0;
@try {
uintptr_t token = objc_addExceptionHandler(altHandler1, (void*)altHandler1);
}
state++;
// state++ inside alt handler
- [NSException raise:@"foo" format:@"bar"];
+ [Super raise:@"foo" format:@"bar"];
state = BAD;
objc_removeExceptionHandler(token);
} @catch (id e) {
- testassert(state == 2);
+ state++;
+ testassert(state == 3);
}
- } POP_POOL;
- }
+ testassert(state == 3);
+ }
+ });
+ testassert(dealloced == ALT_HANDLER_REPEAT);
+
// defined(USE_FOUNDATION)
#endif
-
+ }
// alt handlers
#endif
- } POP_POOL;
-
#if __cplusplus && __OBJC2__
std::set_terminate(terminator);
objc_terminate();
do { \
Method m1, m2; \
\
- /* Check unexchanged version */ \
+ testprintf("Check unexchanged version\n"); \
state = 0; \
[Super s1]; \
testassert(state == v1); \
[Super s2]; \
testassert(state == v2); \
\
- /* Exchange */ \
+ testprintf("Exchange\n"); \
m1 = class_getClassMethod([Super class], @selector(s1)); \
m2 = class_getClassMethod([Super class], @selector(s2)); \
testassert(m1); \
testassert(m2); \
method_exchangeImplementations(m1, m2); \
\
- /* Check exchanged version */ \
+ testprintf("Check exchanged version\n"); \
state = 0; \
[Super s1]; \
testassert(state == v2); \
[Super s2]; \
testassert(state == v1); \
\
- /* NULL should do nothing */ \
+ testprintf("NULL should do nothing\n"); \
method_exchangeImplementations(m1, NULL); \
method_exchangeImplementations(NULL, m2); \
method_exchangeImplementations(NULL, NULL); \
\
- /* Make sure NULL did nothing */ \
+ testprintf("Make sure NULL did nothing\n"); \
state = 0; \
[Super s1]; \
testassert(state == v2); \
[Super s2]; \
testassert(state == v1); \
\
- /* Put them back */ \
+ testprintf("Put them back\n"); \
method_exchangeImplementations(m1, m2); \
\
- /* Check restored version */ \
+ testprintf("Check restored version\n"); \
state = 0; \
[Super s1]; \
testassert(state == v1); \
static int state = 0;
static id receiver;
+OBJC_ROOT_CLASS
@interface Super { id isa; } @end
@interface Super (Forwarded)
+(id)idret:
- (long)i1:(long)i2:(long)i3:(long)i4:(long)i5:(long)i6:(long)i7:(long)i8:(long)i9:(long)i10:(long)i11:(long)i12:(long)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15;
+ (long)i1 :(long)i2 :(long)i3 :(long)i4 :(long)i5 :(long)i6 :(long)i7 :(long)i8 :(long)i9 :(long)i10 :(long)i11 :(long)i12 :(long)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15;
+(id)idre2:
- (long)i1:(long)i2:(long)i3:(long)i4:(long)i5:(long)i6:(long)i7:(long)i8:(long)i9:(long)i10:(long)i11:(long)i12:(long)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15;
+ (long)i1 :(long)i2 :(long)i3 :(long)i4 :(long)i5 :(long)i6 :(long)i7 :(long)i8 :(long)i9 :(long)i10 :(long)i11 :(long)i12 :(long)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15;
+(id)idre3:
- (long)i1:(long)i2:(long)i3:(long)i4:(long)i5:(long)i6:(long)i7:(long)i8:(long)i9:(long)i10:(long)i11:(long)i12:(long)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15;
+ (long)i1 :(long)i2 :(long)i3 :(long)i4 :(long)i5 :(long)i6 :(long)i7 :(long)i8 :(long)i9 :(long)i10 :(long)i11 :(long)i12 :(long)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15;
+(long long)llret:
- (long)i1:(long)i2:(long)i3:(long)i4:(long)i5:(long)i6:(long)i7:(long)i8:(long)i9:(long)i10:(long)i11:(long)i12:(long)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15;
+ (long)i1 :(long)i2 :(long)i3 :(long)i4 :(long)i5 :(long)i6 :(long)i7 :(long)i8 :(long)i9 :(long)i10 :(long)i11 :(long)i12 :(long)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15;
+(long long)llre2:
- (long)i1:(long)i2:(long)i3:(long)i4:(long)i5:(long)i6:(long)i7:(long)i8:(long)i9:(long)i10:(long)i11:(long)i12:(long)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15;
+ (long)i1 :(long)i2 :(long)i3 :(long)i4 :(long)i5 :(long)i6 :(long)i7 :(long)i8 :(long)i9 :(long)i10 :(long)i11 :(long)i12 :(long)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15;
+(long long)llre3:
- (long)i1:(long)i2:(long)i3:(long)i4:(long)i5:(long)i6:(long)i7:(long)i8:(long)i9:(long)i10:(long)i11:(long)i12:(long)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15;
+ (long)i1 :(long)i2 :(long)i3 :(long)i4 :(long)i5 :(long)i6 :(long)i7 :(long)i8 :(long)i9 :(long)i10 :(long)i11 :(long)i12 :(long)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15;
+(struct stret)stret:
- (long)i1:(long)i2:(long)i3:(long)i4:(long)i5:(long)i6:(long)i7:(long)i8:(long)i9:(long)i10:(long)i11:(long)i12:(long)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15;
+ (long)i1 :(long)i2 :(long)i3 :(long)i4 :(long)i5 :(long)i6 :(long)i7 :(long)i8 :(long)i9 :(long)i10 :(long)i11 :(long)i12 :(long)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15;
+(struct stret)stre2:
- (long)i1:(long)i2:(long)i3:(long)i4:(long)i5:(long)i6:(long)i7:(long)i8:(long)i9:(long)i10:(long)i11:(long)i12:(long)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15;
+ (long)i1 :(long)i2 :(long)i3 :(long)i4 :(long)i5 :(long)i6 :(long)i7 :(long)i8 :(long)i9 :(long)i10 :(long)i11 :(long)i12 :(long)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15;
+(struct stret)stre3:
- (long)i1:(long)i2:(long)i3:(long)i4:(long)i5:(long)i6:(long)i7:(long)i8:(long)i9:(long)i10:(long)i11:(long)i12:(long)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15;
+ (long)i1 :(long)i2 :(long)i3 :(long)i4 :(long)i5 :(long)i6 :(long)i7 :(long)i8 :(long)i9 :(long)i10 :(long)i11 :(long)i12 :(long)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15;
+(double)fpret:
- (long)i1:(long)i2:(long)i3:(long)i4:(long)i5:(long)i6:(long)i7:(long)i8:(long)i9:(long)i10:(long)i11:(long)i12:(long)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15;
+ (long)i1 :(long)i2 :(long)i3 :(long)i4 :(long)i5 :(long)i6 :(long)i7 :(long)i8 :(long)i9 :(long)i10 :(long)i11 :(long)i12 :(long)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15;
+(double)fpre2:
- (long)i1:(long)i2:(long)i3:(long)i4:(long)i5:(long)i6:(long)i7:(long)i8:(long)i9:(long)i10:(long)i11:(long)i12:(long)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15;
+ (long)i1 :(long)i2 :(long)i3 :(long)i4 :(long)i5 :(long)i6 :(long)i7 :(long)i8 :(long)i9 :(long)i10 :(long)i11 :(long)i12 :(long)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15;
+(double)fpre3:
- (long)i1:(long)i2:(long)i3:(long)i4:(long)i5:(long)i6:(long)i7:(long)i8:(long)i9:(long)i10:(long)i11:(long)i12:(long)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15;
+ (long)i1 :(long)i2 :(long)i3 :(long)i4 :(long)i5 :(long)i6 :(long)i7 :(long)i8 :(long)i9 :(long)i10 :(long)i11 :(long)i12 :(long)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15;
@end
state = 12;
result.idval = ID_RESULT;
return result.llval;
- } else if (_cmd == @selector(llret::::::::::::::::::::::::::::) ||
- _cmd == @selector(llre2::::::::::::::::::::::::::::) ||
- _cmd == @selector(llre3::::::::::::::::::::::::::::))
+ }
+ else if (_cmd == @selector(llret::::::::::::::::::::::::::::) ||
+ _cmd == @selector(llre2::::::::::::::::::::::::::::) ||
+ _cmd == @selector(llre3::::::::::::::::::::::::::::))
{
testassert(state == 13);
state = 14;
return LL_RESULT;
- } else if (_cmd == @selector(fpret::::::::::::::::::::::::::::) ||
- _cmd == @selector(fpre2::::::::::::::::::::::::::::) ||
- _cmd == @selector(fpre3::::::::::::::::::::::::::::))
+ }
+ else if (_cmd == @selector(fpret::::::::::::::::::::::::::::) ||
+ _cmd == @selector(fpre2::::::::::::::::::::::::::::) ||
+ _cmd == @selector(fpre3::::::::::::::::::::::::::::))
{
testassert(state == 15);
state = 16;
# error unknown architecture
#endif
return 0;
- } else if (_cmd == @selector(stret::::::::::::::::::::::::::::) ||
- _cmd == @selector(stre2::::::::::::::::::::::::::::) ||
- _cmd == @selector(stre3::::::::::::::::::::::::::::))
+ }
+ else if (_cmd == @selector(stret::::::::::::::::::::::::::::) ||
+ _cmd == @selector(stre2::::::::::::::::::::::::::::) ||
+ _cmd == @selector(stre3::::::::::::::::::::::::::::))
{
fail("stret message sent to non-stret forward_handler");
- } else {
+ }
+ else {
fail("unknown selector %s in forward_handler", sel_getName(_cmd));
}
}
_cmd == @selector(fpre3::::::::::::::::::::::::::::))
{
fail("non-stret selector %s sent to forward_stret_handler", sel_getName(_cmd));
- } else if (_cmd == @selector(stret::::::::::::::::::::::::::::) ||
- _cmd == @selector(stre2::::::::::::::::::::::::::::) ||
- _cmd == @selector(stre3::::::::::::::::::::::::::::))
+ }
+ else if (_cmd == @selector(stret::::::::::::::::::::::::::::) ||
+ _cmd == @selector(stre2::::::::::::::::::::::::::::) ||
+ _cmd == @selector(stre3::::::::::::::::::::::::::::))
{
testassert(state == 17);
state = 18;
return STRET_RESULT;
- } else {
- fail("unknown selector %s in forward::", sel_getName(_cmd));
+ }
+ else {
+ fail("unknown selector %s in forward_stret_handler", sel_getName(_cmd));
}
}
+
@implementation Super
+(void)initialize { }
+(id)class { return self; }
#endif
gp = (uintptr_t *)p;
testassert(*gp++ == (uintptr_t)self);
- testassert(*gp++ == (uintptr_t)sel);
+ testassert(*gp++ == (uintptr_t)(void *)sel);
testassert(*gp++ == 1);
testassert(*gp++ == 2);
testassert(*gp++ == 3);
typedef struct stret (*st_fn_t)(id self, SEL _cmd, long i1, long i2, long i3, long i4, long i5, long i6, long i7, long i8, long i9, long i10, long i11, long i12, long i13, double f1, double f2, double f3, double f4, double f5, double f6, double f7, double f8, double f9, double f10, double f11, double f12, double f13, double f14, double f15);
+#if __x86_64__
+typedef struct stret * (*fake_st_fn_t)(struct stret *, id self, SEL _cmd, long i1, long i2, long i3, long i4, long i5, long i6, long i7, long i8, long i9, long i10, long i11, long i12, long i13, double f1, double f2, double f3, double f4, double f5, double f6, double f7, double f8, double f9, double f10, double f11, double f12, double f13, double f14, double f15);
+#endif
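+// On x86_64 the ABI also returns the hidden struct-return pointer in %rax,
+// so calling the stret entry points through fake_st_fn_t lets the tests
+// below check that this returned pointer (stptr) still equals &stval.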
+
__BEGIN_DECLS
extern void *getSP(void);
__END_DECLS
id idval;
long long llval;
struct stret stval;
+#if __x86_64__
+ struct stret *stptr;
+#endif
double fpval;
void *sp1 = (void*)1;
void *sp2 = (void*)2;
testassert(state == 8);
testassert(stret_equal(stval, STRET_RESULT));
+#if __x86_64__
+ // check stret return register
+ state = 7;
+ sp1 = getSP();
+ stptr = ((fake_st_fn_t)objc_msgSend_stret)(&stval, [Super class], @selector(stret::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ sp2 = getSP();
+ testassert(sp1 == sp2);
+ testassert(state == 8);
+ testassert(stret_equal(stval, STRET_RESULT));
+ testassert(stptr == &stval);
+#endif
+
// Test default forward handler, cached
testassert(state == 8);
testassert(stret_equal(stval, STRET_RESULT));
+#if __x86_64__
+ // check stret return register
+ state = 7;
+ sp1 = getSP();
+ stptr = ((fake_st_fn_t)objc_msgSend_stret)(&stval, [Super class], @selector(stret::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ sp2 = getSP();
+ testassert(sp1 == sp2);
+ testassert(state == 8);
+ testassert(stret_equal(stval, STRET_RESULT));
+ testassert(stptr == &stval);
+#endif
+
// Test default forward handler, uncached but fixed-up
testassert(state == 8);
testassert(stret_equal(stval, STRET_RESULT));
+#if __x86_64__
+ // check stret return register
+ state = 7;
+ sp1 = getSP();
+ stptr = ((fake_st_fn_t)objc_msgSend_stret)(&stval, [Super class], @selector(stret::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ sp2 = getSP();
+ testassert(sp1 == sp2);
+ testassert(state == 8);
+ testassert(stret_equal(stval, STRET_RESULT));
+ testassert(stptr == &stval);
+#endif
+
// Test manual forwarding
testassert(state == 8);
testassert(stret_equal(stval, STRET_RESULT));
+#if __x86_64__
+ // check stret return register
+ state = 7;
+ sp1 = getSP();
+ stptr = ((fake_st_fn_t)_objc_msgForward_stret)(&stval, receiver, @selector(stre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ sp2 = getSP();
+ testassert(sp1 == sp2);
+ testassert(state == 8);
+ testassert(stret_equal(stval, STRET_RESULT));
+ testassert(stptr == &stval);
+#endif
+
// Test manual forwarding, cached
testassert(state == 8);
testassert(stret_equal(stval, STRET_RESULT));
+#if __x86_64__
+ // check stret return register
+ state = 7;
+ sp1 = getSP();
+ stptr = ((fake_st_fn_t)_objc_msgForward_stret)(&stval, receiver, @selector(stre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ sp2 = getSP();
+ testassert(sp1 == sp2);
+ testassert(state == 8);
+ testassert(stret_equal(stval, STRET_RESULT));
+ testassert(stptr == &stval);
+#endif
+
// Test manual forwarding, uncached but fixed-up
testassert(state == 8);
testassert(stret_equal(stval, STRET_RESULT));
+#if __x86_64__
+ // check stret return register
+ state = 7;
+ sp1 = getSP();
+ stptr = ((fake_st_fn_t)_objc_msgForward_stret)(&stval, receiver, @selector(stre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ sp2 = getSP();
+ testassert(sp1 == sp2);
+ testassert(state == 8);
+ testassert(stret_equal(stval, STRET_RESULT));
+ testassert(stptr == &stval);
+#endif
+
// Test user-defined forward handler
testassert(state == 18);
testassert(stret_equal(stval, STRET_RESULT));
+#if __x86_64__
+ // check stret return register
+ state = 17;
+ sp1 = getSP();
+ stptr = ((fake_st_fn_t)objc_msgSend_stret)(&stval, [Super class], @selector(stre3::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ sp2 = getSP();
+ testassert(sp1 == sp2);
+ testassert(state == 18);
+ testassert(stret_equal(stval, STRET_RESULT));
+ testassert(stptr == &stval);
+#endif
+
// Test user-defined forward handler, cached
testassert(state == 18);
testassert(stret_equal(stval, STRET_RESULT));
+#if __x86_64__
+ // check stret return register
+ state = 17;
+ sp1 = getSP();
+ stptr = ((fake_st_fn_t)objc_msgSend_stret)(&stval, [Super class], @selector(stre3::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ sp2 = getSP();
+ testassert(sp1 == sp2);
+ testassert(state == 18);
+ testassert(stret_equal(stval, STRET_RESULT));
+ testassert(stptr == &stval);
+#endif
+
// Test user-defined forward handler, uncached but fixed-up
testassert(state == 18);
testassert(stret_equal(stval, STRET_RESULT));
+#if __x86_64__
+ // check stret return register
+ state = 17;
+ sp1 = getSP();
+ stptr = ((fake_st_fn_t)objc_msgSend_stret)(&stval, [Super class], @selector(stre3::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ sp2 = getSP();
+ testassert(sp1 == sp2);
+ testassert(state == 18);
+ testassert(stret_equal(stval, STRET_RESULT));
+ testassert(stptr == &stval);
+#endif
+
+
+
+ // Test user-defined forward handler, manual forwarding
+
+ state = 11;
+ sp1 = getSP();
+ idval = ((id_fn_t)_objc_msgForward)(receiver, @selector(idre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ sp2 = getSP();
+ testassert(sp1 == sp2);
+ testassert(state == 12);
+ testassert(idval == ID_RESULT);
+
+ state = 13;
+ sp1 = getSP();
+ llval = ((ll_fn_t)_objc_msgForward)(receiver, @selector(llre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ sp2 = getSP();
+ testassert(sp1 == sp2);
+ testassert(state == 14);
+ testassert(llval == LL_RESULT);
+
+ state = 15;
+ sp1 = getSP();
+ fpval = ((fp_fn_t)_objc_msgForward)(receiver, @selector(fpre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ sp2 = getSP();
+ testassert(sp1 == sp2);
+ testassert(state == 16);
+ testassert(fpval == FP_RESULT);
+
+ state = 17;
+ sp1 = getSP();
+ stval = ((st_fn_t)_objc_msgForward_stret)(receiver, @selector(stre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ sp2 = getSP();
+ testassert(sp1 == sp2);
+ testassert(state == 18);
+ testassert(stret_equal(stval, STRET_RESULT));
+
+
+ // Test user-defined forward handler, manual forwarding, cached
+
+ state = 11;
+ sp1 = getSP();
+ idval = ((id_fn_t)_objc_msgForward)(receiver, @selector(idre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ sp2 = getSP();
+ testassert(sp1 == sp2);
+ testassert(state == 12);
+ testassert(idval == ID_RESULT);
+
+ state = 13;
+ sp1 = getSP();
+ llval = ((ll_fn_t)_objc_msgForward)(receiver, @selector(llre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ sp2 = getSP();
+ testassert(sp1 == sp2);
+ testassert(state == 14);
+ testassert(llval == LL_RESULT);
+
+ state = 15;
+ sp1 = getSP();
+ fpval = ((fp_fn_t)_objc_msgForward)(receiver, @selector(fpre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ sp2 = getSP();
+ testassert(sp1 == sp2);
+ testassert(state == 16);
+ testassert(fpval == FP_RESULT);
+
+ state = 17;
+ sp1 = getSP();
+ stval = ((st_fn_t)_objc_msgForward_stret)(receiver, @selector(stre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ sp2 = getSP();
+ testassert(sp1 == sp2);
+ testassert(state == 18);
+ testassert(stret_equal(stval, STRET_RESULT));
+
+
+ // Test user-defined forward handler, manual forwarding, uncached but fixed-up
+
+ _objc_flush_caches(nil);
+
+ state = 11;
+ sp1 = getSP();
+ idval = ((id_fn_t)_objc_msgForward)(receiver, @selector(idre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ sp2 = getSP();
+ testassert(sp1 == sp2);
+ testassert(state == 12);
+ testassert(idval == ID_RESULT);
+
+ state = 13;
+ sp1 = getSP();
+ llval = ((ll_fn_t)_objc_msgForward)(receiver, @selector(llre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ sp2 = getSP();
+ testassert(sp1 == sp2);
+ testassert(state == 14);
+ testassert(llval == LL_RESULT);
+
+ state = 15;
+ sp1 = getSP();
+ fpval = ((fp_fn_t)_objc_msgForward)(receiver, @selector(fpre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ sp2 = getSP();
+ testassert(sp1 == sp2);
+ testassert(state == 16);
+ testassert(fpval == FP_RESULT);
+
+ state = 17;
+ sp1 = getSP();
+ stval = ((st_fn_t)_objc_msgForward_stret)(receiver, @selector(stre2::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ sp2 = getSP();
+ testassert(sp1 == sp2);
+ testassert(state == 18);
+ testassert(stret_equal(stval, STRET_RESULT));
+
succeed(__FILE__);
}
// objc_getFutureClass with existing class
oldTestRoot = objc_getFutureClass("TestRoot");
testassert(oldTestRoot == [TestRoot class]);
+ testassert(! _class_isFutureClass(oldTestRoot));
// objc_getFutureClass with missing class
oldSub1 = objc_getFutureClass("Sub1");
testassert(oldSub1);
testassert(malloc_size(objc_unretainedPointer(oldSub1)) > 0);
testassert(objc_getClass("Sub1") == Nil);
+ testassert(_class_isFutureClass(oldSub1));
+ testassert(0 == strcmp(class_getName(oldSub1), "Sub1"));
+ testassert(object_getClass(oldSub1) == Nil); // CF expects this
// objc_getFutureClass a second time
testassert(oldSub1 == objc_getFutureClass("Sub1"));
testassert(oldSub1 == newSub1);
testassert(newSub1 == [newSub1 classref]);
testassert(newSub1 == class_getSuperclass(objc_getClass("SubSub1")));
+ testassert(! _class_isFutureClass(newSub1));
testassert(1 == [oldSub1 method]);
testassert(1 == [newSub1 method]);
#include "test.h"
+OBJC_ROOT_CLASS
@interface Main @end
@implementation Main @end
+#import <objc/objc-api.h>
+
+OBJC_ROOT_CLASS
@interface GC @end
@implementation GC @end
+++ /dev/null
-// TEST_CFLAGS -framework Foundation
-
-#import <Foundation/Foundation.h>
-#import <objc/runtime.h>
-#import <objc/objc-gdb.h>
-
-#include "test.h"
-
-@interface Foo : NSObject
-@end
-@implementation Foo
-- (void) foo
-{
-}
-
-- (void) test: __attribute__((unused)) sender
-{
- unsigned int x = 0;
- Method foo = class_getInstanceMethod([Foo class], @selector(foo));
- IMP fooIMP = method_getImplementation(foo);
- const char *fooTypes = method_getTypeEncoding(foo);
- while(1) {
- PUSH_POOL {
- char newSELName[100];
- sprintf(newSELName, "a%u", x++);
- SEL newSEL = sel_registerName(newSELName);
- class_addMethod([Foo class], newSEL, fooIMP, fooTypes);
- ((void(*)(id, SEL))objc_msgSend)(self, newSEL);
- } POP_POOL;
- }
-}
-@end
-
-int main() {
- PUSH_POOL {
- [NSThread detachNewThreadSelector: @selector(test:) toTarget: [Foo new] withObject: nil];
- unsigned int x = 0;
- unsigned int lockCount = 0;
- while(1) {
- if (gdb_objc_isRuntimeLocked())
- lockCount++;
- x++;
- if (x > 1000000)
- break;
- }
- if (lockCount < 10) {
- fail("Runtime not locked very much.");
- }
- } POP_POOL;
-
- succeed(__FILE__);
-
- return 0;
-}
testassert(sel == method_getName(m));
imp = method_getImplementation(m);
testassert(imp == class_getMethodImplementation(object_getClass(Super_cls), sel));
+ testassert(imp == object_getMethodImplementation(Super_cls, sel));
state = 0;
(*(imp_t)imp)(Super_cls, sel);
testassert(state == 1);
testassert(sel == method_getName(m));
imp = method_getImplementation(m);
testassert(imp == class_getMethodImplementation(object_getClass(Sub_cls), sel));
+ testassert(imp == object_getMethodImplementation(Sub_cls, sel));
state = 0;
(*(imp_t)imp)(Sub_cls, sel);
testassert(state == 2);
testassert(sel == method_getName(m));
imp = method_getImplementation(m);
testassert(imp == class_getMethodImplementation(object_getClass(Sub_cls), sel));
+ testassert(imp == object_getMethodImplementation(Sub_cls, sel));
state = 0;
(*(imp_t)imp)(Sub_cls, sel);
testassert(state == 3);
testassert(sel == method_getName(m));
imp = method_getImplementation(m);
testassert(imp == class_getMethodImplementation(Super_cls, sel));
- state = 0;
buf[0] = Super_cls;
+ testassert(imp == object_getMethodImplementation(objc_unretainedObject(buf), sel));
+ state = 0;
(*(imp_t)imp)(objc_unretainedObject(buf), sel);
testassert(state == 4);
testassert(sel == method_getName(m));
imp = method_getImplementation(m);
testassert(imp == class_getMethodImplementation(Sub_cls, sel));
- state = 0;
buf[0] = Sub_cls;
+ testassert(imp == object_getMethodImplementation(objc_unretainedObject(buf), sel));
+ state = 0;
(*(imp_t)imp)(objc_unretainedObject(buf), sel);
testassert(state == 5);
testassert(sel == method_getName(m));
imp = method_getImplementation(m);
testassert(imp == class_getMethodImplementation(Sub_cls, sel));
- state = 0;
buf[0] = Sub_cls;
+ testassert(imp == object_getMethodImplementation(objc_unretainedObject(buf), sel));
+ state = 0;
(*(imp_t)imp)(objc_unretainedObject(buf), sel);
testassert(state == 6);
testassert(! class_getInstanceMethod(Sub_cls, sel));
testassert(! class_getClassMethod(Sub_cls, sel));
testassert(class_getMethodImplementation(Sub_cls, sel) == (IMP)&_objc_msgForward);
+ buf[0] = Sub_cls;
+ testassert(object_getMethodImplementation(objc_unretainedObject(buf), sel) == (IMP)&_objc_msgForward);
testassert(class_getMethodImplementation_stret(Sub_cls, sel) == (IMP)&_objc_msgForward_stret);
+ testassert(object_getMethodImplementation_stret(objc_unretainedObject(buf), sel) == (IMP)&_objc_msgForward_stret);
testassert(! class_getInstanceMethod(NULL, NULL));
testassert(! class_getInstanceMethod(NULL, sel));
static int state = 0;
+OBJC_ROOT_CLASS
@interface Super { id isa; } @end
@implementation Super
+(id)class { return self; }
@interface Sub2 : Super @end
@implementation Sub2 @end
+OBJC_ROOT_CLASS
@interface Empty { id isa; } @end
@implementation Empty
+(id)class { return self; }
--- /dev/null
+// TEST_CONFIG MEM=gc
+// TEST_CFLAGS -framework Foundation
+
+// This test must use CF and test ignoredSelector must not use CF.
+
+#include "test.h"
+#include <objc/NSObject.h>
+
+int main()
+{
+ if (objc_collectingEnabled()) {
+ // ARC RR functions don't retain and don't hit the side table.
+ __block int count;
+ testblock_t testblock = ^{
+ for (int i = 0; i < count; i++) {
+ id obj = [NSObject new];
+ objc_retain(obj);
+ objc_retain(obj);
+ objc_release(obj);
+ }
+ };
+ count = 100;
+ testonthread(testblock);
+ testonthread(testblock);
+ leak_mark();
+ count = 10000000;
+ testonthread(testblock);
+ leak_check(0);
+ }
+
+ succeed(__FILE__);
+}
extern int state;
extern int cstate;
+OBJC_ROOT_CLASS
@interface Super { id isa; }
+(void) method;
+(void) method0;
*/
Sub2 *sub2 = [Sub2 new];
- sub2->isa = [Sub2 class];
sub2->subIvar = (void *)10;
testassert(((uintptr_t *)objc_unretainedPointer(sub2))[11] == 10);
testprintf("00\n");
}
-
+OBJC_ROOT_CLASS
@interface Super { id isa; } @end
@implementation Super @end
// strong: ""
// weak: NULL
+OBJC_ROOT_CLASS
@interface NoScanned { long i; } @end
@implementation NoScanned @end
NSDictionary *numbers = @{ @"π" : @M_PI, @"e" : @M_E };
testassert([[numbers objectForKey:@"π"] doubleValue] == M_PI);
testassert([[numbers objectForKey:@"e"] doubleValue] == M_E);
+
+ BOOL yesBool = YES;
+ BOOL noBool = NO;
+ array = @[
+ @(true),
+ @(YES),
+ [NSNumber numberWithBool:YES],
+ @YES,
+ @(yesBool),
+ @((BOOL)YES),
+
+ @(false),
+ @(NO),
+ [NSNumber numberWithBool:NO],
+ @NO,
+ @(noBool),
+ @((BOOL)NO),
+ ];
+ NSData * jsonData = [NSJSONSerialization dataWithJSONObject:array options:0 error:nil];
+ NSString * string = [[NSString alloc] initWithData:jsonData encoding:NSUTF8StringEncoding];
+#if __cplusplus
+ testassert([string isEqualToString:@"[true,true,true,true,true,true,false,false,false,false,false,false]"]);
+#else
+ // C99 @(true) and @(false) evaluate to @(1) and @(0).
+ testassert([string isEqualToString:@"[1,true,true,true,true,true,0,false,false,false,false,false]"]);
+#endif
+
#endif
} POP_POOL;
int state1 = 0;
+OBJC_ROOT_CLASS
@interface One @end
@implementation One
+(void)load
int state2 = 0;
+OBJC_ROOT_CLASS
@interface Two @end
@implementation Two
+(void)load
int state3 = 0;
+OBJC_ROOT_CLASS
@interface Three @end
@implementation Three
+(void)load
#error -DN=n missing
#endif
+#import <objc/objc-api.h>
#include <stdio.h>
#include <sched.h>
#include <unistd.h>
extern int state;
#define CLASS0(n,nn) \
+ OBJC_ROOT_CLASS \
@interface C_##n##_##nn @end \
@implementation C_##n##_##nn \
+(void)load { OSAtomicIncrement32(&state); usleep(10); } \
int state1 = 0;
int *state2_p;
+OBJC_ROOT_CLASS
@interface One @end
@implementation One
+(void)load
testassert(state2 == 0);
}
+OBJC_ROOT_CLASS
@interface Two @end
@implementation Two
+(void) load
-// TEST_CONFIG
+// TEST_CFLAGS -Wno-unused-parameter
#include "test.h"
#include "testroot.i"
testassert(_cmd == sel_registerName(#sel "_noarg"));\
} while (0)
+id NIL_RECEIVER;
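+// NIL_RECEIVER stays nil; messaging through a global keeps the compiler from recognizing a literal nil receiver.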
id ID_RESULT;
long long LL_RESULT = __LONG_LONG_MAX__ - 2LL*__INT_MAX__;
double FP_RESULT = __DBL_MIN__ + __DBL_EPSILON__;
-(struct stret)stret { return STRET_RESULT; }
-(id)idret:
- (int)i1:(int)i2:(int)i3:(int)i4:(int)i5:(int)i6:(int)i7:(int)i8:(int)i9:(int)i10:(int)i11:(int)i12:(int)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15
+ (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
CHECK_ARGS(idret);
state = 1;
}
-(long long)llret:
- (int)i1:(int)i2:(int)i3:(int)i4:(int)i5:(int)i6:(int)i7:(int)i8:(int)i9:(int)i10:(int)i11:(int)i12:(int)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15
+ (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
CHECK_ARGS(llret);
state = 2;
}
-(struct stret)stret:
- (int)i1:(int)i2:(int)i3:(int)i4:(int)i5:(int)i6:(int)i7:(int)i8:(int)i9:(int)i10:(int)i11:(int)i12:(int)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15
+ (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
CHECK_ARGS(stret);
state = 3;
}
-(double)fpret:
- (int)i1:(int)i2:(int)i3:(int)i4:(int)i5:(int)i6:(int)i7:(int)i8:(int)i9:(int)i10:(int)i11:(int)i12:(int)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15
+ (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
CHECK_ARGS(fpret);
state = 4;
}
-(long double)lfpret:
- (int)i1:(int)i2:(int)i3:(int)i4:(int)i5:(int)i6:(int)i7:(int)i8:(int)i9:(int)i10:(int)i11:(int)i12:(int)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15
+ (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
CHECK_ARGS(lfpret);
state = 5;
+(id)idret:
- (int)i1:(int)i2:(int)i3:(int)i4:(int)i5:(int)i6:(int)i7:(int)i8:(int)i9:(int)i10:(int)i11:(int)i12:(int)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15
+ (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
fail("+idret called instead of -idret");
CHECK_ARGS(idret);
}
+(long long)llret:
- (int)i1:(int)i2:(int)i3:(int)i4:(int)i5:(int)i6:(int)i7:(int)i8:(int)i9:(int)i10:(int)i11:(int)i12:(int)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15
+ (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
fail("+llret called instead of -llret");
CHECK_ARGS(llret);
}
+(struct stret)stret:
- (int)i1:(int)i2:(int)i3:(int)i4:(int)i5:(int)i6:(int)i7:(int)i8:(int)i9:(int)i10:(int)i11:(int)i12:(int)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15
+ (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
fail("+stret called instead of -stret");
CHECK_ARGS(stret);
}
+(double)fpret:
- (int)i1:(int)i2:(int)i3:(int)i4:(int)i5:(int)i6:(int)i7:(int)i8:(int)i9:(int)i10:(int)i11:(int)i12:(int)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15
+ (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
fail("+fpret called instead of -fpret");
CHECK_ARGS(fpret);
}
+(long double)lfpret:
- (int)i1:(int)i2:(int)i3:(int)i4:(int)i5:(int)i6:(int)i7:(int)i8:(int)i9:(int)i10:(int)i11:(int)i12:(int)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15
+ (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
fail("+lfpret called instead of -lfpret");
CHECK_ARGS(lfpret);
@implementation Sub
-(id)idret:
- (int)i1:(int)i2:(int)i3:(int)i4:(int)i5:(int)i6:(int)i7:(int)i8:(int)i9:(int)i10:(int)i11:(int)i12:(int)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15
+ (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
id result;
CHECK_ARGS(idret);
}
-(long long)llret:
- (int)i1:(int)i2:(int)i3:(int)i4:(int)i5:(int)i6:(int)i7:(int)i8:(int)i9:(int)i10:(int)i11:(int)i12:(int)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15
+ (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
long long result;
CHECK_ARGS(llret);
}
-(struct stret)stret:
- (int)i1:(int)i2:(int)i3:(int)i4:(int)i5:(int)i6:(int)i7:(int)i8:(int)i9:(int)i10:(int)i11:(int)i12:(int)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15
+ (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
struct stret result;
CHECK_ARGS(stret);
}
-(double)fpret:
- (int)i1:(int)i2:(int)i3:(int)i4:(int)i5:(int)i6:(int)i7:(int)i8:(int)i9:(int)i10:(int)i11:(int)i12:(int)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15
+ (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
double result;
CHECK_ARGS(fpret);
}
-(long double)lfpret:
- (int)i1:(int)i2:(int)i3:(int)i4:(int)i5:(int)i6:(int)i7:(int)i8:(int)i9:(int)i10:(int)i11:(int)i12:(int)i13 :(double)f1:(double)f2:(double)f3:(double)f4:(double)f5:(double)f6:(double)f7:(double)f8:(double)f9:(double)f10:(double)f11:(double)f12:(double)f13:(double)f14:(double)f15
+ (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
long double result;
CHECK_ARGS(lfpret);
@end
-#if __x86_64__
+#if OBJC_HAVE_TAGGED_POINTERS
@interface TaggedSub : Sub @end
@implementation TaggedSub : Sub
-#define TAG_VALUE(tagSlot, value) (objc_unretainedObject((void*)(1UL | (((uintptr_t)(tagSlot)) << 1) | (((uintptr_t)(value)) << 4))))
-
+(void)initialize
{
- _objc_insert_tagged_isa(2, self);
+ _objc_registerTaggedPointerClass(OBJC_TAG_7, self);
}
@end
uint8_t set(uintptr_t dst, uint8_t newvalue)
{
- uintptr_t start = dst & ~(4096-1);
- mprotect((void*)start, 4096, PROT_READ|PROT_WRITE);
+ uintptr_t start = dst & ~(PAGE_SIZE-1);
+ mprotect((void*)start, PAGE_SIZE, PROT_READ|PROT_WRITE);
// int3
uint8_t oldvalue = *(uint8_t *)dst;
*(uint8_t *)dst = newvalue;
- mprotect((void*)start, 4096, PROT_READ|PROT_EXEC);
+ mprotect((void*)start, PAGE_SIZE, PROT_READ|PROT_EXEC);
return oldvalue;
}
uintptr_t *getOffsets(void *symbol, const char *symname)
{
- uintptr_t *result = (uintptr_t *)malloc(4096 * sizeof(uintptr_t));
- uintptr_t *end = result + 4096;
+ uintptr_t *result = (uintptr_t *)malloc(PAGE_SIZE * sizeof(uintptr_t));
+ uintptr_t *end = result + PAGE_SIZE;
uintptr_t *p = result;
// find library
char *cmd;
asprintf(&cmd, "/usr/bin/xcrun otool -arch x86_64 -tv -p _%s %s",
symname, dl.dli_fname);
+ testprintf("%s\n", cmd);
FILE *disa = popen(cmd, "r");
free(cmd);
testassert(disa);
testassert(p > result);
testassert(p < end);
*p = ~0UL;
+ // hack: skip last instruction because libunwind blows up if it's
+ // one byte long and followed by the next function with no NOPs first
+ if (p > result) p[-1] = ~0UL;
return result;
}
unclobber(fn, offset, insn_byte);
// require at least one path above to trip this offset
- if (!caught) fprintf(stderr, "OFFSET %lu NOT CAUGHT\n", offset);
+ if (!caught) fprintf(stderr, "OFFSET %s+%lu NOT CAUGHT\n", name, offset);
}
free(insnOffsets);
}
double fpval;
long double lfpval;
+#if __x86_64__
+ struct stret *stretptr;
+#endif
+
uint64_t startTime;
uint64_t totalTime;
uint64_t targetTime;
// get +initialize out of the way
[Sub class];
-#if __x86_64__
+#if OBJC_HAVE_TAGGED_POINTERS
[TaggedSub class];
#endif
Sub *sub = [Sub new];
Super *sup = [Super new];
-#if __x86_64__
- TaggedSub *tagged = TAG_VALUE(2, 999);
+#if OBJC_HAVE_TAGGED_POINTERS
+ TaggedSub *tagged = objc_unretainedObject(_objc_makeTaggedPointer(OBJC_TAG_7, 999));
#endif
-
+
// Basic cached and uncached dispatch.
// Do this first before anything below caches stuff.
+ testprintf("basic\n");
test_basic(sub);
-#if __x86_64__
+#if OBJC_HAVE_TAGGED_POINTERS
+ testprintf("basic tagged\n");
test_basic(tagged);
#endif
// fixme unless they all fail
// `.align 4` matches loop alignment to make -O0 work
// fill cache first
+ testprintf("time checks\n");
+
SELF = sub;
[sub voidret_nop];
[sub llret_nop];
// method_invoke_stret stret
// method_invoke_stret fpret
// method_invoke fpret long double
+ testprintf("method_invoke\n");
+
SELF = sup;
state = 0;
// message to nil stret
// message to nil fpret
// message to nil fpret long double
+ // Use NIL_RECEIVER to avoid compiler optimizations.
+ testprintf("message to nil\n");
+
state = 0;
idval = ID_RESULT;
- idval = [(id)nil idret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
+ idval = [(id)NIL_RECEIVER idret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
testassert(state == 0);
testassert(idval == nil);
state = 0;
llval = LL_RESULT;
- llval = [(id)nil llret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
+ llval = [(id)NIL_RECEIVER llret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
testassert(state == 0);
testassert(llval == 0LL);
state = 0;
stretval = zero;
- stretval = [(id)nil stret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
+ stretval = [(id)NIL_RECEIVER stret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
testassert(state == 0);
#if __clang__
testassert(0 == memcmp(&stretval, &zero, sizeof(stretval)));
#else
// no stret result guarantee
#endif
-
+
+#if __x86_64__
+ // check stret return register
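+ // (x86_64 ABI: the hidden struct-return address is passed in %rdi and returned in %rax, so the nil path should hand it back unchanged)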
+ state = 0;
+ stretval = zero;
+ stretptr = ((struct stret *(*)(struct stret *, id, SEL))objc_msgSend_stret)
+ (&stretval, nil, @selector(stret_nop));
+ testassert(stretptr == &stretval);
+ testassert(state == 0);
+ // no stret result guarantee for hand-written calls, even with clang
+#endif
+
state = 0;
fpval = FP_RESULT;
- fpval = [(id)nil fpret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
+ fpval = [(id)NIL_RECEIVER fpret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
testassert(state == 0);
testassert(fpval == 0.0);
state = 0;
lfpval = LFP_RESULT;
- lfpval = [(id)nil lfpret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
+ lfpval = [(id)NIL_RECEIVER lfpret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
testassert(state == 0);
testassert(lfpval == 0.0);
testassert(lfpval == 0.0);
# endif
#endif
-
- // message forwarded
- // message forwarded long long
- // message forwarded stret
- // message forwarded fpret
- // message forwarded fpret long double
- // fixme
+
#if __OBJC2__
// rdar://8271364 objc_msgSendSuper2 must not change objc_super
+ testprintf("super struct\n");
struct objc_super sup_st = {
sub,
object_getClass(sub),
#if __OBJC2__
// Debug messengers.
+ testprintf("debug messengers\n");
+
state = 0;
idmsg = (typeof(idmsg))objc_msgSend_debug;
idval = nil;
#if __x86_64__ && !__has_feature(objc_arc)
// DWARF unwind tables
// Not for ARC because the extra RR calls hit the traps at the wrong times
+ testprintf("unwind tables\n");
// install exception handler
struct sigaction act;
// which can die in the trapped messenger
test_dw("objc_msgSend", sub,tagged,@selector(idret_nop));
- test_dw("objc_msgSend_fixup", sub,tagged,@selector(idret_nop));
test_dw("objc_msgSend_stret", sub,tagged,@selector(stret_nop));
- test_dw("objc_msgSend_stret_fixup", sub,tagged,@selector(stret_nop));
test_dw("objc_msgSend_fpret", sub,tagged,@selector(fpret_nop));
- test_dw("objc_msgSend_fpret_fixup", sub,tagged,@selector(fpret_nop));
// fixme fp2ret
test_dw("objc_msgSendSuper", sub,tagged,@selector(idret_nop));
test_dw("objc_msgSendSuper2", sub,tagged,@selector(idret_nop));
- test_dw("objc_msgSendSuper2_fixup", sub,tagged,@selector(idret_nop));
test_dw("objc_msgSendSuper_stret", sub,tagged,@selector(stret_nop));
test_dw("objc_msgSendSuper2_stret", sub,tagged,@selector(stret_nop));
- test_dw("objc_msgSendSuper2_stret_fixup", sub,tagged,@selector(stret_nop));
// DWARF unwind tables
#endif
--- /dev/null
+// TEST_CONFIG MEM=mrc
+
+#include "test.h"
+#include "testroot.i"
+
+@implementation TestRoot (Loader)
++(void)load
+{
+ [[TestRoot new] autorelease];
+ testassert(TestRootAutorelease == 1);
+ testassert(TestRootDealloc == 0);
+}
+@end
+
+int main()
+{
+ // +load's autoreleased object should have deallocated
+ testassert(TestRootDealloc == 1);
+
+ [[TestRoot new] autorelease];
+ testassert(TestRootAutorelease == 2);
+
+ objc_autoreleasePoolPop(objc_autoreleasePoolPush());
+
+ [[TestRoot new] autorelease];
+ testassert(TestRootAutorelease == 3);
+
+ testonthread(^{
+ [[TestRoot new] autorelease];
+ testassert(TestRootAutorelease == 4);
+ testassert(TestRootDealloc == 1);
+ });
+
+ // thread's autoreleased object should have deallocated
+ testassert(TestRootDealloc == 2);
+
+ succeed(__FILE__);
+}
-// TEST_CFLAGS -framework Foundation
-
/*
+need exception-safe ARC for exception deallocation tests
+TEST_CFLAGS -fobjc-arc-exceptions -framework Foundation
+
llvm-gcc unavoidably warns about our deliberately out-of-order handlers
TEST_BUILD_OUTPUT
*/
// TEST_CFLAGS -Wno-deprecated-declarations
-
-/*
-TEST_RUN_OUTPUT
-objc\[\d+\]: \+\[Sub resolveClassMethod:lyingClassMethod\] returned YES, but no new implementation of \+\[Sub lyingClassMethod\] was found
-objc\[\d+\]: \+\[Sub resolveInstanceMethod:lyingInstanceMethod\] returned YES, but no new implementation of -\[Sub lyingInstanceMethod\] was found
-OK: resolve\.m
-OR
-confused by Foundation
-OK: resolve\.m
-END
-*/
#include "test.h"
#include "testroot.i"
int main()
{
testwarn("rdar://11368528 confused by Foundation");
- fprintf(stderr, "confused by Foundation\n");
succeed(__FILE__);
}
state = 2;
}
}
-+(id)forward:(SEL)sel :(marg_list)__unused args
+@end
+
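+// Replaces the removed +forward:: / -forward:: methods; installed below via objc_setForwardHandler().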
+static id forward_handler(id self, SEL sel)
{
- if (sel == @selector(missingClassMethod)) {
- testassert(state == 21 || state == 25 || state == 80);
- if (state == 21) state = 22;
- if (state == 25) state = 26;
- if (state == 80) state = 81;;
- return nil;
- } else if (sel == @selector(lyingClassMethod)) {
- testassert(state == 31 || state == 35);
- if (state == 31) state = 32;
- if (state == 35) state = 36;
+ if (class_isMetaClass(object_getClass(self))) {
+ // self is a class object
+ if (sel == @selector(missingClassMethod)) {
+ testassert(state == 21 || state == 25 || state == 80);
+ if (state == 21) state = 22;
+ if (state == 25) state = 26;
+ if (state == 80) state = 81;
+ return nil;
+ } else if (sel == @selector(lyingClassMethod)) {
+ testassert(state == 31 || state == 35);
+ if (state == 31) state = 32;
+ if (state == 35) state = 36;
+ return nil;
+ }
+ fail("+forward:: shouldn't be called with sel %s", sel_getName(sel));
return nil;
}
- fail("+forward:: shouldn't be called with sel %s", sel_getName(sel));
- return nil;
-}
--(id)forward:(SEL)sel :(marg_list)__unused args
-{
- if (sel == @selector(missingInstanceMethod)) {
- testassert(state == 61 || state == 65);
- if (state == 61) state = 62;
- if (state == 65) state = 66;
- return nil;
- } else if (sel == @selector(lyingInstanceMethod)) {
- testassert(state == 71 || state == 75);
- if (state == 71) state = 72;
- if (state == 75) state = 76;
+ else {
+ // self is not a class object
+ if (sel == @selector(missingInstanceMethod)) {
+ testassert(state == 61 || state == 65);
+ if (state == 61) state = 62;
+ if (state == 65) state = 66;
+ return nil;
+ } else if (sel == @selector(lyingInstanceMethod)) {
+ testassert(state == 71 || state == 75);
+ if (state == 71) state = 72;
+ if (state == 75) state = 76;
+ return nil;
+ }
+ fail("-forward:: shouldn't be called with sel %s", sel_getName(sel));
return nil;
}
- fail("-forward:: shouldn't be called with sel %s", sel_getName(sel));
- return nil;
}
-@end
static id classMethod_c(id __unused self, SEL __unused sel)
Sub *s;
id ret;
+ objc_setForwardHandler((void*)&forward_handler, NULL);
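+ // (the second argument is the struct-return forwarding handler, which this test does not need)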
+
// Be ready for ARC to retain the class object and call +initialize early
state = 1;
#include <Foundation/Foundation.h>
static int state;
+static pthread_attr_t smallstack;
#define NESTED_COUNT 8
#endif
}
+
+static void
+slow_cycle(void)
+{
+ // Large autorelease stack.
+ // Do this only once because it's slow.
+ testprintf("-- Large autorelease stack.\n");
+ {
+ // limit stack size: autorelease pop should not be recursive
+ pthread_t th;
+ pthread_create(&th, &smallstack, &autorelease_lots_fn, NULL);
+ pthread_join(th, NULL);
+ }
+
+ // Single large autorelease pool.
+ // Do this only once because it's slow.
+ testprintf("-- Large autorelease pool.\n");
+ {
+ // limit stack size: autorelease pop should not be recursive
+ pthread_t th;
+ pthread_create(&th, &smallstack, &autorelease_lots_fn, (void*)1);
+ pthread_join(th, NULL);
+ }
+}
+
+
int main()
{
+ pthread_attr_init(&smallstack);
+ pthread_attr_setstacksize(&smallstack, 16384);
+
// inflate the refcount side table so it doesn't show up in leak checks
{
int count = 10000;
#endif
- pthread_attr_t smallstack;
- pthread_attr_init(&smallstack);
- pthread_attr_setstacksize(&smallstack, 4096*4);
-
for (int i = 0; i < 100; i++) {
cycle();
}
+ slow_cycle();
+
leak_mark();
for (int i = 0; i < 1000; i++) {
leak_check(0);
- // Large autorelease stack.
- // Do this only once because it's slow.
- testprintf("-- Large autorelease stack.\n");
- {
- // limit stack size: autorelease pop should not be recursive
- pthread_t th;
- pthread_create(&th, &smallstack, &autorelease_lots_fn, NULL);
- pthread_join(th, NULL);
- }
-
- // Single large autorelease pool.
- // Do this only once because it's slow.
- testprintf("-- Large autorelease pool.\n");
- {
- // limit stack size: autorelease pop should not be recursive
- pthread_t th;
- pthread_create(&th, &smallstack, &autorelease_lots_fn, (void*)1);
- pthread_join(th, NULL);
- }
+ slow_cycle();
leak_check(0);
#else
- // sel_getName recognizes GC-ignored SELs
# if defined(__i386__)
+ // sel_getName recognizes GC-ignored SELs
if (objc_collectingEnabled()) {
testassert(0 == strcmp("<ignored selector>",
sel_getName(@selector(retain))));
- } else
-# endif
- {
+ } else {
testassert(0 == strcmp("retain",
sel_getName(@selector(retain))));
}
} u;
u.sel = @selector(retain);
testassert(@selector(retain) == sel_registerName(u.ptr));
+# endif
#endif
#include <objc/runtime.h>
#import <Foundation/Foundation.h>
-#if __OBJC2__ && __LP64__
+#if OBJC_HAVE_TAGGED_POINTERS
void testTaggedNumber()
{
NSNumber *taggedNS = [NSNumber numberWithInt: 1234];
CFNumberRef taggedCF = (CFNumberRef)objc_unretainedPointer(taggedNS);
- uintptr_t taggedAddress = (uintptr_t)taggedCF;
int result;
testassert( CFGetTypeID(taggedCF) == CFNumberGetTypeID() );
+ testassert(_objc_getClassForTag(OBJC_TAG_NSNumber) == [taggedNS class]);
CFNumberGetValue(taggedCF, kCFNumberIntType, &result);
testassert(result == 1234);
- testassert(taggedAddress & 0x1); // make sure it is really tagged
+ testassert(_objc_isTaggedPointer(taggedCF));
+ testassert(_objc_getTaggedPointerTag(taggedCF) == OBJC_TAG_NSNumber);
+ testassert(_objc_makeTaggedPointer(_objc_getTaggedPointerTag(taggedCF), _objc_getTaggedPointerValue(taggedCF)) == taggedCF);
// do some generic object-y things to the taggedPointer instance
CFRetain(taggedCF);
succeed(__FILE__);
}
-// OBJC2 && __LP64__
+// OBJC_HAVE_TAGGED_POINTERS
#else
-// not (OBJC2 && __LP64__)
+// not OBJC_HAVE_TAGGED_POINTERS
- // Tagged pointers not supported. Crash if an NSNumber actually
- // is a tagged pointer (which means this test is out of date).
+// Tagged pointers not supported. Crash if an NSNumber actually
+// is a tagged pointer (which means this test is out of date).
int main()
{
#include "test.h"
#include <objc/runtime.h>
#include <objc/objc-internal.h>
+#include <objc/objc-gdb.h>
+#include <dlfcn.h>
#import <Foundation/NSObject.h>
-#if __has_feature(objc_arc)
+#if OBJC_HAVE_TAGGED_POINTERS
-int main()
-{
- testwarn("rdar://11368528 confused by Foundation");
- succeed(__FILE__);
-}
-
-#else
-
-#if __OBJC2__ && __LP64__
+#if !__OBJC2__ || !__x86_64__
+#error wrong architecture for tagged pointers
+#endif
static BOOL didIt;
-#define TAG_VALUE(tagSlot, value) (objc_unretainedObject((void*)(1UL | (((uintptr_t)(tagSlot)) << 1) | (((uintptr_t)(value)) << 4))))
-
@interface WeakContainer : NSObject
{
@public
}
@end
+OBJC_ROOT_CLASS
@interface TaggedBaseClass
@end
}
- (uintptr_t) taggedValue {
- return (uintptr_t)objc_unretainedPointer(self) >> 4;
+ return _objc_getTaggedPointerValue(objc_unretainedPointer(self));
}
- (struct stret) stret: (struct stret) aStruct {
}
- (uintptr_t) taggedValue {
- return (uintptr_t)objc_unretainedPointer(self) >> 4;
+ return _objc_getTaggedPointerValue(objc_unretainedPointer(self));
}
- (struct stret) stret: (struct stret) aStruct {
}
@end
-void testGenericTaggedPointer(uint8_t tagSlot, const char *classname)
+void testGenericTaggedPointer(objc_tag_index_t tag, const char *classname)
{
testprintf("%s\n", classname);
Class cls = objc_getClass(classname);
testassert(cls);
- id taggedPointer = TAG_VALUE(tagSlot, 1234);
+ void *taggedAddress = _objc_makeTaggedPointer(tag, 1234);
+ testassert(_objc_isTaggedPointer(taggedAddress));
+ testassert(_objc_getTaggedPointerTag(taggedAddress) == tag);
+ testassert(_objc_getTaggedPointerValue(taggedAddress) == 1234);
+
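+ // Decode the pointer by hand using the objc_debug_taggedpointer_* values exported for debuggers (<objc/objc-gdb.h>) and check that they agree with the runtime's own accessors.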
+ testassert((uintptr_t)taggedAddress & objc_debug_taggedpointer_mask);
+ uintptr_t slot = ((uintptr_t)taggedAddress >> objc_debug_taggedpointer_slot_shift) & objc_debug_taggedpointer_slot_mask;
+ testassert(objc_debug_taggedpointer_classes[slot] == cls);
+ testassert((((uintptr_t)taggedAddress << objc_debug_taggedpointer_payload_lshift) >> objc_debug_taggedpointer_payload_rshift) == 1234);
+
+ id taggedPointer = objc_unretainedObject(taggedAddress);
testassert(object_getClass(taggedPointer) == cls);
testassert([taggedPointer taggedValue] == 1234);
// Tagged pointers should bypass refcount tables and autorelease pools
// and weak reference tables
WeakContainer *w = [WeakContainer new];
+#if !__has_feature(objc_arc)
+ // prime method caches before leak checking
+ [taggedPointer retain];
+ [taggedPointer release];
+ [taggedPointer autorelease];
+#endif
leak_mark();
for (uintptr_t i = 0; i < sizeof(w->weaks)/sizeof(w->weaks[0]); i++) {
- id o = TAG_VALUE(tagSlot, i);
+ id o = objc_unretainedObject(_objc_makeTaggedPointer(tag, i));
testassert(object_getClass(o) == cls);
id result = WEAK_STORE(w->weaks[i], o);
int main()
{
- PUSH_POOL {
- _objc_insert_tagged_isa(5, objc_getClass("TaggedBaseClass"));
- testGenericTaggedPointer(5, "TaggedBaseClass");
+ testassert(objc_debug_taggedpointer_mask != 0);
+ testassert(_objc_taggedPointersEnabled());
+
+ PUSH_POOL {
+ // Avoid CF's tagged pointer tags because of rdar://11368528
+
+ _objc_registerTaggedPointerClass(OBJC_TAG_1,
+ objc_getClass("TaggedBaseClass"));
+ testGenericTaggedPointer(OBJC_TAG_1,
+ "TaggedBaseClass");
- _objc_insert_tagged_isa(2, objc_getClass("TaggedSubclass"));
- testGenericTaggedPointer(2, "TaggedSubclass");
+ _objc_registerTaggedPointerClass(OBJC_TAG_7,
+ objc_getClass("TaggedSubclass"));
+ testGenericTaggedPointer(OBJC_TAG_7,
+ "TaggedSubclass");
- _objc_insert_tagged_isa(3, objc_getClass("TaggedNSObjectSubclass"));
- testGenericTaggedPointer(3, "TaggedNSObjectSubclass");
+ _objc_registerTaggedPointerClass(OBJC_TAG_NSManagedObjectID,
+ objc_getClass("TaggedNSObjectSubclass"));
+ testGenericTaggedPointer(OBJC_TAG_NSManagedObjectID,
+ "TaggedNSObjectSubclass");
} POP_POOL;
succeed(__FILE__);
}
-// OBJC2 && __LP64__
+// OBJC_HAVE_TAGGED_POINTERS
#else
-// not (OBJC2 && __LP64__)
+// not OBJC_HAVE_TAGGED_POINTERS
- // Tagged pointers not supported.
+// Tagged pointers not supported.
int main()
{
+#if __OBJC2__
+ testassert(objc_debug_taggedpointer_mask == 0);
+ testassert(!_objc_taggedPointersEnabled());
+#else
+ testassert(!dlsym(RTLD_DEFAULT, "objc_debug_taggedpointer_mask"));
+#endif
+
succeed(__FILE__);
}
#endif
-
-#endif
--- /dev/null
+// TEST_ENV OBJC_DISABLE_TAGGED_POINTERS=YES
+// TEST_CRASHES
+/*
+TEST_RUN_OUTPUT
+objc\[\d+\]: tagged pointers are disabled
+CRASHED: SIG(ILL|TRAP)
+OR
+OK: taggedPointersDisabled.m
+END
+*/
+
+#include "test.h"
+#include <objc/objc-internal.h>
+
+#if !OBJC_HAVE_TAGGED_POINTERS
+
+int main()
+{
+ succeed(__FILE__);
+}
+
+#else
+
+int main()
+{
+ testassert(!_objc_taggedPointersEnabled());
+ _objc_registerTaggedPointerClass((objc_tag_index_t)0, nil);
+ fail("should have crashed in _objc_registerTaggedPointerClass()");
+}
+
+#endif
-// test.h
+// test.h
// Common definitions for trivial test harness
/* General purpose root class */
+OBJC_ROOT_CLASS
@interface TestRoot {
@public
Class isa;
int c;
int d;
int e;
+ int f;
+ int g;
+ int h;
+ int i;
+ int j;
};
static inline BOOL stret_equal(struct stret a, struct stret b)
a.b == b.b &&
a.c == b.c &&
a.d == b.d &&
- a.e == b.e);
+ a.e == b.e &&
+ a.f == b.f &&
+ a.g == b.g &&
+ a.h == b.h &&
+ a.i == b.i &&
+ a.j == b.j);
}
-static struct stret STRET_RESULT __attribute__((used)) = {1, 2, 3, 4, 5};
+static struct stret STRET_RESULT __attribute__((used)) = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
#endif
open(my $in, "< $file") || die "$file";
my $contents = join "", <$in>;
- die if defined $ALL_TESTS{$name};
- $ALL_TESTS{$name} = $ext if ($contents =~ m#^[/*\s]*TEST_#m);
+ if (defined $ALL_TESTS{$name}) {
+ print "${yellow}SKIP: multiple tests named '$name'; skipping file '$file'.${def}\n";
+ } else {
+ $ALL_TESTS{$name} = $ext if ($contents =~ m#^[/*\s]*TEST_#m);
+ }
close($in);
}
closedir($dir);
my @output = @_;
my %T = %{$C{"TEST_$name"}};
+
+ # Quietly strip MallocScribble before saving the "original" output
+ # because it is distracting.
+ filter_malloc(\@output);
+
my @original_output = @output;
# Run result-checking passes, reducing @output each time
$bad = "(output not 'OK: $name')" if ($bad eq "" && (scalar(@output) != 1 || $output[0] !~ /^OK: $name/));
if ($bad ne "") {
- my $red = "\e[41;37m";
- my $def = "\e[0m";
print "${red}FAIL: /// test '$name' \\\\\\$def\n";
colorprint($red, @original_output);
print "${red}FAIL: \\\\\\ test '$name' ///$def\n";
$xit = 0;
}
elsif ($warn ne "") {
- my $yellow = "\e[43;37m";
- my $def = "\e[0m";
print "${yellow}PASS: /// test '$name' \\\\\\$def\n";
colorprint($yellow, @original_output);
print "${yellow}PASS: \\\\\\ test '$name' ///$def\n";
return $bad;
}
+
+
+sub filter_malloc
+{
+ my $outputref = shift;
+
+ my @new_output;
+ for my $line (@$outputref) {
+ # Ignore MallocScribble prologue.
+ # Ignore MallocStackLogging prologue.
+ if ($line =~ /malloc: enabling scribbling to detect mods to free/ ||
+ $line =~ /Deleted objects will be dirtied by the collector/ ||
+ $line =~ /malloc: stack logs being written into/ ||
+ $line =~ /malloc: recording malloc and VM allocation stacks/)
+ {
+ next;
+ }
+
+ # not malloc output
+ push @new_output, $line;
+
+ }
+
+ @$outputref = @new_output;
+}
+
sub filter_guardmalloc
{
my $outputref = shift;
my $result = $compiler_memo{$key};
return $result if defined $result;
- if (-e $cc) {
- $result = $cc;
- } elsif (-e "$sdk_path/$cc") {
- $result = "$sdk_path/$cc";
- } elsif ($sdk eq "system" && -e "/usr/bin/$cc") {
- $result = "/usr/bin/$cc";
- } elsif ($sdk eq "system") {
+ if ($sdk eq "system") {
$result = `xcrun -find $cc 2>/dev/null`;
} else {
$result = `xcrun -sdk $sdk -find $cc 2>/dev/null`;
# Look up test library (possible in root or SDK_PATH)
- if (-e (glob "$root/*~dst")[0]) {
- $root = (glob "$root/*~dst")[0];
+ my $rootarg = $root;
+ my $symroot;
+ my @sympaths = ( (glob "$root/*~sym")[0],
+ (glob "$root/BuildRecords/*_install/Symbols")[0],
+ "$root/Symbols" );
+ my @dstpaths = ( (glob "$root/*~dst")[0],
+ (glob "$root/BuildRecords/*_install/Root")[0],
+ "$root/Root" );
+ for(my $i = 0; $i < scalar(@sympaths); $i++) {
+ if (-e $sympaths[$i] && -e $dstpaths[$i]) {
+ $symroot = $sympaths[$i];
+ $root = $dstpaths[$i];
+ last;
+ }
}
if ($root ne "" && -e "$root$C{SDK_PATH}$TESTLIBPATH") {
} elsif (-e "$root/$TESTLIBNAME") {
$C{TESTLIB} = "$root/$TESTLIBNAME";
} else {
- die "No $TESTLIBNAME in root '$root' for sdk '$C{SDK_PATH}'\n";
+ die "No $TESTLIBNAME in root '$rootarg' for sdk '$C{SDK_PATH}'\n"
+ # . join("\n", @dstpaths) . "\n"
+ ;
+ }
+
+ if (-e "$symroot/$TESTLIBNAME.dSYM") {
+ $C{TESTDSYM} = "$symroot/$TESTLIBNAME.dSYM";
}
if ($VERBOSE) {
$cflags .= " '-Wl,-syslibroot,$C{SDK_PATH}'";
}
- if ($C{SDK} =~ /^iphoneos[0-9]/ && $cflags !~ /-miphoneos-version-min/) {
+ if ($C{SDK} =~ /^iphoneos[0-9]/ && $cflags !~ /-mios-version-min/) {
my ($vers) = ($C{SDK} =~ /^iphoneos([0-9]+\.[0-9+])/);
- $cflags .= " -miphoneos-version-min=$vers";
+ $cflags .= " -mios-version-min=$vers";
}
- if ($C{SDK} =~ /^iphonesimulator[0-9]/ && $cflags !~ /-D__IPHONE_OS_VERSION_MIN_REQUIRED/) {
+ if ($C{SDK} =~ /^iphonesimulator[0-9]/ && $cflags !~ /-mios-simulator-version-min/) {
my ($vers) = ($C{SDK} =~ /^iphonesimulator([0-9]+\.[0-9+])/);
- $vers = int($vers * 10000); # 4.2 => 42000
- $cflags .= " -D__IPHONE_OS_VERSION_MIN_REQUIRED=$vers";
+ $cflags .= " -mios-simulator-version-min=$vers";
}
if ($C{SDK} =~ /^iphonesimulator/) {
$objcflags .= " -fobjc-abi-version=2 -fobjc-legacy-dispatch";
if ($root ne "") {
my $library_path = dirname($C{TESTLIB});
$cflags .= " -L$library_path";
- $cflags .= " -isystem '$root/usr/include'";
- $cflags .= " -isystem '$root/usr/local/include'";
+ $cflags .= " -I '$root/usr/include'";
+ $cflags .= " -I '$root/usr/local/include'";
if ($C{SDK_PATH} ne "/") {
- $cflags .= " -isystem '$root$C{SDK_PATH}/usr/include'";
- $cflags .= " -isystem '$root$C{SDK_PATH}/usr/local/include'";
+ $cflags .= " -I '$root$C{SDK_PATH}/usr/include'";
+ $cflags .= " -I '$root$C{SDK_PATH}/usr/local/include'";
}
}
}
# Populate ENV_PREFIX
- $C{ENV} = "LANG=C";
+ $C{ENV} = "LANG=C MallocScribble=1";
$C{ENV} .= " VERBOSE=1" if $VERBOSE;
if ($root ne "") {
my $library_path = dirname($C{TESTLIB});
if ($C{TESTLIB} ne $TESTLIBPATH) {
# hack - send thin library because device may use lib=armv7
# even though app=armv6, and we want to set the lib's arch
- make("lipo -output /tmp/$TESTLIBNAME -thin $C{ARCH} $C{TESTLIB} || cp $C{TESTLIB} /tmp/$TESTLIBNAME");
+ make("xcrun -sdk $C{SDK} lipo -output /tmp/$TESTLIBNAME -thin $C{ARCH} $C{TESTLIB} || cp $C{TESTLIB} /tmp/$TESTLIBNAME");
die "Couldn't thin $C{TESTLIB} to $C{ARCH}\n" if ($?);
make("RSYNC_PASSWORD=alpine rsync -av /tmp/$TESTLIBNAME rsync://root\@localhost:10873/root/var/root/test/");
die "Couldn't rsync $C{TESTLIB} to device\n" if ($?);
+ make("RSYNC_PASSWORD=alpine rsync -av $C{TESTDSYM} rsync://root\@localhost:10873/root/var/root/test/");
}
}
#else
+static id forward_handler(void)
+{
+ return 0;
+}
+
static BOOL hasName(const char * const *names, const char *query)
{
const char *name;
// fixme object_dispose() not aggressive enough?
if (objc_collectingEnabled()) succeed(__FILE__);
+ objc_setForwardHandler((void*)&forward_handler, (void*)&forward_handler);
+
#if defined(__arm__)
int count = 10;
#else
while (count--) {
cycle();
}
- // leak_check(0);
- testwarn("rdar://11369189 can't check leaks because libxpc leaks");
+ leak_check(0);
// 5359412 Make sure dylibs with nothing other than image_info can close
void *dylib = dlopen("unload3.dylib", RTLD_LAZY);
#include "unload.h"
#include "testroot.i"
-
+#import <objc/objc-api.h>
@implementation SmallClass : TestRoot
-(void)unload2_instance_method { }
@implementation BigClass : TestRoot
-+(void) forward:(void *) __unused sel :(void*) __unused args { }
--(void) forward:(void *) __unused sel :(void*) __unused args { }
@end
-
+OBJC_ROOT_CLASS
@interface UnusedClass { id isa; } @end
@implementation UnusedClass @end
#if TARGET_OS_WIN32 || (TARGET_OS_MAC && TARGET_CPU_X86 && !TARGET_IPHONE_SIMULATOR)
// old ABI
-int fake[2] __attribute__((section("__OBJC,__image_info"))) = {0, 0};
+int fake[2] __attribute__((section("__OBJC,__image_info")))
#else
// new ABI
-int fake[2] __attribute__((section("__DATA,__objc_imageinfo"))) = {0, 0};
+int fake[2] __attribute__((section("__DATA,__objc_imageinfo")))
#endif
+ = { 0, TARGET_IPHONE_SIMULATOR ? (1<<5) : 0 };
// silence "no debug symbols in executable" warning
void fn(void) { }
extern int state;
-WEAK_IMPORT
+WEAK_IMPORT OBJC_ROOT_CLASS
@interface MissingRoot {
id isa;
}
}
@end
-
+OBJC_ROOT_CLASS
@interface NotMissingRoot {
id isa;
}
static void *noop_fn(void *self, SEL _cmd __unused) {
return self;
}
-static id __unsafe_unretained retain_fn(id __unsafe_unretained self, SEL _cmd __unused) {
- return _objc_rootRetain(self);
+static void *retain_fn(void *self, SEL _cmd __unused) {
+ void * (*fn)(void *) = (typeof(fn))_objc_rootRetain;
+ return fn(self);
}
-static void release_fn(id __unsafe_unretained self, SEL _cmd __unused) {
- _objc_rootRelease(self);
+static void release_fn(void *self, SEL _cmd __unused) {
+ void (*fn)(void *) = (typeof(fn))_objc_rootRelease;
+ fn(self);
}
-static void autorelease_fn(id __unsafe_unretained self, SEL _cmd __unused) {
- _objc_rootAutorelease(self);
+static void *autorelease_fn(void *self, SEL _cmd __unused) {
+ void * (*fn)(void *) = (typeof(fn))_objc_rootAutorelease;
+ return fn(self);
}
#if !defined(EMPTY)
--- /dev/null
+// TEST_CONFIG MEM=mrc
+
+#include "test.h"
+#include <objc/NSObject.h>
+
+static semaphore_t go1;
+static semaphore_t go2;
+static semaphore_t done;
+
+#define VARCOUNT 100000
+static id obj;
+static id vars[VARCOUNT];
+
+
+void *destroyer(void *arg __unused)
+{
+ while (1) {
+ semaphore_wait(go1);
+ for (int i = 0; i < VARCOUNT; i++) {
+ objc_destroyWeak(&vars[i]);
+ }
+ semaphore_signal(done);
+ }
+}
+
+
+void *deallocator(void *arg __unused)
+{
+ while (1) {
+ semaphore_wait(go2);
+ [obj release];
+ semaphore_signal(done);
+ }
+}
+
+
+void cycle(void)
+{
+ // rdar://12896779 objc_destroyWeak() versus weak clear in dealloc
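+ // (one thread destroys the weak variables while another releases their referent, so both race on the same weak table entries)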
+
+ // Clean up from previous cycle - objc_destroyWeak() doesn't set var to nil
+ for (int i = 0; i < VARCOUNT; i++) {
+ vars[i] = nil;
+ }
+
+ obj = [NSObject new];
+ for (int i = 0; i < VARCOUNT; i++) {
+ objc_storeWeak(&vars[i], obj);
+ }
+
+ // let destroyer start before deallocator runs
+ semaphore_signal(go1);
+ sched_yield();
+ semaphore_signal(go2);
+
+ semaphore_wait(done);
+ semaphore_wait(done);
+}
+
+
+int main()
+{
+ semaphore_create(mach_task_self(), &go1, 0, 0);
+ semaphore_create(mach_task_self(), &go2, 0, 0);
+ semaphore_create(mach_task_self(), &done, 0, 0);
+
+ pthread_t th[2];
+ pthread_create(&th[0], NULL, deallocator, NULL);
+ pthread_create(&th[1], NULL, destroyer, NULL);
+
+ for (int i = 0; i < 100; i++) {
+ cycle();
+ }
+
+ succeed(__FILE__);
+}
__ZdaPvRKSt9nothrow_t
__ZdlPv
__ZdlPvRKSt9nothrow_t
-__ZNSt3__113unordered_mapImPN23objc_references_support20ObjectAssociationMapENS1_20DisguisedPointerHashENS1_21DisguisedPointerEqualENS1_13ObjcAllocatorINS_4pairIKmS3_EEEEEixERS8_
-__ZNSt3__13mapIPvN23objc_references_support15ObjcAssociationENS2_17ObjectPointerLessENS2_13ObjcAllocatorINS_4pairIKS1_S3_EEEEE16__find_equal_keyERPNS_16__tree_node_baseIS1_EERS7_
-__ZNSt3__13mapIPvN23objc_references_support15ObjcAssociationENS2_17ObjectPointerLessENS2_13ObjcAllocatorINS_4pairIKS1_S3_EEEEEixERS7_
+__ZTISt9bad_alloc
+__ZTISt9exception
+__ZTSSt9bad_alloc
+__ZTSSt9exception