_tls_init
_lock_init
_recursive_mutex_init
-__malloc_internal
-__objc_internal_zone
_exception_init
_map_images
_map_images_nolock
__getObjcImageInfo
-__calloc_internal
__hasObjcContents
__objc_appendHeader
_verify_gc_readiness
__Z9protocolsv
__getObjc2ProtocolList
_NXMapKeyCopyingInsert
-__strdup_internal
__NXMapRehash
__getObjc2ProtocolRefs
__Z13remapProtocolm
__Z11addSubclassP7class_tS0_
__Z17attachMethodListsP7class_tPP13method_list_tiaPa
__Z15fixupMethodListP13method_list_ta
-__memdup_internal
+_memdup
__ZNSt3__113__stable_sortIRN8method_t16SortBySELAddressEN13method_list_t15method_iteratorEEEvT0_S6_T_NS_15iterator_traitsIS6_E1
__Z9addMethodP7class_tP13objc_selectorPFP11objc_objectS3_S1_zEPKca
__Z23getMethodNoSuper_nolockP7class_tP13objc_selector
_layout_bitmap_create
_set_bits
_layout_bitmap_free
-__free_internal
__ZNSt3__116__insertion_sortIRN8method_t16SortBySELAddressEN13method_list_t15method_iteratorEEEvT0_S6_T_
__Z17buildProtocolListP13category_listPK15protocol_list_tPS3_
__Z17buildPropertyListPK15property_list_tP13category_lista
__Z11flushCachesP7class_t
_flush_cache
__class_getCache
-__realloc_internal
_load_images
_load_images_nolock
_prepare_load_methods
+++ /dev/null
-/*
- * Copyright (c) 2007-2009 Apple Inc. All Rights Reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-
-#include <stdlib.h>
-#include <unistd.h>
-#include <string.h>
-#include <stdio.h>
-#include <stdbool.h>
-#include <fcntl.h>
-#include <limits.h>
-#include <sys/stat.h>
-#include <mach-o/fat.h>
-#include <mach-o/arch.h>
-#include <mach-o/loader.h>
-
-// from "objc-private.h"
-// masks for objc_image_info.flags
-#define OBJC_IMAGE_IS_REPLACEMENT (1<<0)
-#define OBJC_IMAGE_SUPPORTS_GC (1<<1)
-#define OBJC_IMAGE_REQUIRES_GC (1<<2)
-#define OBJC_IMAGE_OPTIMIZED_BY_DYLD (1<<3)
-
-bool debug;
-bool verbose;
-bool quiet;
-bool rrOnly;
-bool patch = true;
-bool unpatch = false;
-
-struct gcinfo {
- bool hasObjC;
- bool hasInfo;
- uint32_t flags;
- char *arch;
-} GCInfo[4];
-
-void dumpinfo(char *filename);
-
-int Errors = 0;
-char *FileBase;
-size_t FileSize;
-const char *FileName;
-
-int main(int argc, char *argv[]) {
- //NSAutoreleasePool *pool = [NSAutoreleasePool new];
- int i;
- //dumpinfo("/System/Library/Frameworks/AppKit.framework/AppKit");
- if (argc == 1) {
- printf("Usage: markgc [-v] [-r] [--] library_or_executable_image [image2 ...]\n");
- printf(" changes Garbage Collection readiness of named images, ignoring those without ObjC segments\n");
- printf(" -p - patch RR binary to (apparently) support GC (default)\n");
- printf(" -u - unpatch GC binary to RR only\n");
- printf("\nAuthor: blaine@apple.com\n");
- exit(0);
- }
- for (i = 1; i < argc; ++i) {
- if (!strcmp(argv[i], "-v")) {
- verbose = true;
- continue;
- }
- if (!strcmp(argv[i], "-d")) {
- debug = true;
- continue;
- }
- if (!strcmp(argv[i], "-q")) {
- quiet = true;
- continue;
- }
- if (!strcmp(argv[i], "-p")) {
- patch = true;
- continue;
- }
- if (!strcmp(argv[i], "-u")) {
- unpatch = true;
- patch = false;
- continue;
- }
- dumpinfo(argv[i]);
- }
- return Errors;
-}
-
-struct imageInfo {
- uint32_t version;
- uint32_t flags;
-};
-
-void patchFile(uint32_t value, size_t offset) {
- int fd = open(FileName, 1);
- off_t lresult = lseek(fd, offset, SEEK_SET);
- if (lresult == -1) {
- printf("couldn't seek to 0x%lx position on fd %d\n", offset, fd);
- ++Errors;
- return;
- }
- size_t wresult = write(fd, &value, 4);
- if (wresult != 4) {
- ++Errors;
- printf("didn't write new value\n");
- }
- else {
- printf("patched %s at offset 0x%lx\n", FileName, offset);
- }
- close(fd);
-}
-
-uint32_t iiflags(struct imageInfo *ii, size_t size, bool needsFlip) {
- if (needsFlip) {
- ii->flags = OSSwapInt32(ii->flags);
- }
- if (debug) printf("flags->%x, nitems %lu\n", ii->flags, size/sizeof(struct imageInfo));
- uint32_t support_mask = OBJC_IMAGE_SUPPORTS_GC;
- uint32_t flags = ii->flags;
- if (patch && (flags & support_mask) != support_mask) {
- //printf("will patch %s at offset %p\n", FileName, (char*)(&ii->flags) - FileBase);
- uint32_t newvalue = flags | support_mask;
- if (needsFlip) newvalue = OSSwapInt32(newvalue);
- patchFile(newvalue, (char*)(&ii->flags) - FileBase);
- }
- if (unpatch && (flags & support_mask) == support_mask) {
- uint32_t newvalue = flags & ~support_mask;
- if (needsFlip) newvalue = OSSwapInt32(newvalue);
- patchFile(newvalue, (char*)(&ii->flags) - FileBase);
- }
- for(unsigned niis = 1; niis < size/sizeof(struct imageInfo); ++niis) {
- if (needsFlip) ii[niis].flags = OSSwapInt32(ii[niis].flags);
- if (ii[niis].flags != flags) {
- // uh, oh.
- printf("XXX ii[%d].flags %x != ii[0].flags %x\n", niis, ii[niis].flags, flags);
- ++Errors;
- }
- }
- return flags;
-}
-
-void printflags(uint32_t flags) {
- if (flags & 0x1) printf(" F&C");
- if (flags & 0x2) printf(" GC");
- if (flags & 0x4) printf(" GC-only");
- else printf(" RR");
-}
-
-/*
-void doimageinfo(struct imageInfo *ii, uint32_t size, bool needsFlip) {
- uint32_t flags = iiflags(ii, size, needsFlip);
- printflags(flags);
-}
-*/
-
-
-void dosect32(void *start, struct section *sect, bool needsFlip, struct gcinfo *gcip) {
- if (debug) printf("section %s from segment %s\n", sect->sectname, sect->segname);
- if (strcmp(sect->segname, "__OBJC")) return;
- gcip->hasObjC = true;
- if (strcmp(sect->sectname, "__image_info")) return;
- gcip->hasInfo = true;
- if (needsFlip) {
- sect->offset = OSSwapInt32(sect->offset);
- sect->size = OSSwapInt32(sect->size);
- }
- // these guys aren't inline - they point elsewhere
- gcip->flags = iiflags(start + sect->offset, sect->size, needsFlip);
-}
-
-void dosect64(void *start, struct section_64 *sect, bool needsFlip, struct gcinfo *gcip) {
- if (debug) printf("section %s from segment %s\n", sect->sectname, sect->segname);
- if (strcmp(sect->segname, "__OBJC") && strcmp(sect->segname, "__DATA")) return;
- if (strcmp(sect->sectname, "__image_info") && strncmp(sect->sectname, "__objc_imageinfo", 16)) return;
- gcip->hasObjC = true;
- gcip->hasInfo = true;
- if (needsFlip) {
- sect->offset = OSSwapInt32(sect->offset);
- sect->size = OSSwapInt64(sect->size);
- }
- // these guys aren't inline - they point elsewhere
- gcip->flags = iiflags(start + sect->offset, (size_t)sect->size, needsFlip);
-}
-
-void doseg32(void *start, struct segment_command *seg, bool needsFlip, struct gcinfo *gcip) {
- // lets do sections
- if (needsFlip) {
- seg->fileoff = OSSwapInt32(seg->fileoff);
- seg->nsects = OSSwapInt32(seg->nsects);
- }
- if (debug) printf("segment name: %s, nsects %d\n", seg->segname, seg->nsects);
- if (seg->segname[0]) {
- if (strcmp("__OBJC", seg->segname)) return;
- }
- struct section *sect = (struct section *)(seg + 1);
- for (uint32_t nsects = 0; nsects < seg->nsects; ++nsects) {
- // sections directly follow
-
- dosect32(start, sect + nsects, needsFlip, gcip);
- }
-}
-void doseg64(void *start, struct segment_command_64 *seg, bool needsFlip, struct gcinfo *gcip) {
- if (debug) printf("segment name: %s\n", seg->segname);
- if (seg->segname[0] && strcmp("__OBJC", seg->segname) && strcmp("__DATA", seg->segname)) return;
- gcip->hasObjC = true;
- // lets do sections
- if (needsFlip) {
- seg->fileoff = OSSwapInt64(seg->fileoff);
- seg->nsects = OSSwapInt32(seg->nsects);
- }
- struct section_64 *sect = (struct section_64 *)(seg + 1);
- for (uint32_t nsects = 0; nsects < seg->nsects; ++nsects) {
- // sections directly follow
-
- dosect64(start, sect + nsects, needsFlip, gcip);
- }
-}
-
-#if 0
-/*
- * A variable length string in a load command is represented by an lc_str
- * union. The strings are stored just after the load command structure and
- * the offset is from the start of the load command structure. The size
- * of the string is reflected in the cmdsize field of the load command.
- * Once again any padded bytes to bring the cmdsize field to a multiple
- * of 4 bytes must be zero.
- */
-union lc_str {
- uint32_t offset; /* offset to the string */
-#ifndef __LP64__
- char *ptr; /* pointer to the string */
-#endif
-};
-
-struct dylib {
- union lc_str name; /* library's path name */
- uint32_t timestamp; /* library's build time stamp */
- uint32_t current_version; /* library's current version number */
- uint32_t compatibility_version; /* library's compatibility vers number*/
-};
-
- * A dynamically linked shared library (filetype == MH_DYLIB in the mach header)
- * contains a dylib_command (cmd == LC_ID_DYLIB) to identify the library.
- * An object that uses a dynamically linked shared library also contains a
- * dylib_command (cmd == LC_LOAD_DYLIB, LC_LOAD_WEAK_DYLIB, or
- * LC_REEXPORT_DYLIB) for each library it uses.
-
-struct dylib_command {
- uint32_t cmd; /* LC_ID_DYLIB, LC_LOAD_{,WEAK_}DYLIB,
- LC_REEXPORT_DYLIB */
- uint32_t cmdsize; /* includes pathname string */
- struct dylib dylib; /* the library identification */
-};
-#endif
-
-void dodylib(void *start, struct dylib_command *dylibCmd, bool needsFlip) {
- if (!verbose) return;
- if (needsFlip) {
- }
- size_t count = dylibCmd->cmdsize - sizeof(struct dylib_command);
- //printf("offset is %d, count is %d\n", dylibCmd->dylib.name.offset, count);
- if (dylibCmd->dylib.name.offset > count) return;
- //printf("-->%.*s<---", count, ((void *)dylibCmd)+dylibCmd->dylib.name.offset);
- if (verbose) printf("load %s\n", ((char *)dylibCmd)+dylibCmd->dylib.name.offset);
-}
-
-struct load_command *doloadcommand(void *start, struct load_command *lc, bool needsFlip, bool is32, struct gcinfo *gcip) {
- if (needsFlip) {
- lc->cmd = OSSwapInt32(lc->cmd);
- lc->cmdsize = OSSwapInt32(lc->cmdsize);
- }
-
- switch(lc->cmd) {
- case LC_SEGMENT_64:
- if (debug) printf("...segment64\n");
- if (is32) printf("XXX we have a 64-bit segment in a 32-bit mach-o\n");
- doseg64(start, (struct segment_command_64 *)lc, needsFlip, gcip);
- break;
- case LC_SEGMENT:
- if (debug) printf("...segment32\n");
- doseg32(start, (struct segment_command *)lc, needsFlip, gcip);
- break;
- case LC_SYMTAB: if (debug) printf("...dynamic symtab\n"); break;
- case LC_DYSYMTAB: if (debug) printf("...symtab\n"); break;
- case LC_LOAD_DYLIB:
- dodylib(start, (struct dylib_command *)lc, needsFlip);
- break;
- case LC_SUB_UMBRELLA: if (debug) printf("...load subumbrella\n"); break;
- default: if (debug) printf("cmd is %x\n", lc->cmd); break;
- }
-
- return (struct load_command *)((void *)lc + lc->cmdsize);
-}
-
-void doofile(void *start, size_t size, struct gcinfo *gcip) {
- struct mach_header *mh = (struct mach_header *)start;
- bool isFlipped = false;
- if (mh->magic == MH_CIGAM || mh->magic == MH_CIGAM_64) {
- if (debug) printf("(flipping)\n");
- mh->magic = OSSwapInt32(mh->magic);
- mh->cputype = OSSwapInt32(mh->cputype);
- mh->cpusubtype = OSSwapInt32(mh->cpusubtype);
- mh->filetype = OSSwapInt32(mh->filetype);
- mh->ncmds = OSSwapInt32(mh->ncmds);
- mh->sizeofcmds = OSSwapInt32(mh->sizeofcmds);
- mh->flags = OSSwapInt32(mh->flags);
- isFlipped = true;
- }
- if (rrOnly && mh->filetype != MH_DYLIB) return; // ignore executables
- NXArchInfo *info = (NXArchInfo *)NXGetArchInfoFromCpuType(mh->cputype, mh->cpusubtype);
- //printf("%s:", info->description);
- gcip->arch = (char *)info->description;
- //if (debug) printf("...description is %s\n", info->description);
- bool is32 = !(mh->cputype & CPU_ARCH_ABI64);
- if (debug) printf("is 32? %d\n", is32);
- if (debug) printf("filetype -> %d\n", mh->filetype);
- if (debug) printf("ncmds -> %d\n", mh->ncmds);
- struct load_command *lc = (is32 ? (struct load_command *)(mh + 1) : (struct load_command *)((struct mach_header_64 *)start + 1));
- unsigned ncmds;
- for (ncmds = 0; ncmds < mh->ncmds; ++ncmds) {
- lc = doloadcommand(start, lc, isFlipped, is32, gcip);
- }
- //printf("\n");
-}
-
-void initGCInfo() {
- bzero((void *)GCInfo, sizeof(GCInfo));
-}
-
-void printGCInfo(char *filename) {
- if (!GCInfo[0].hasObjC) return; // don't bother
- // verify that flags are all the same
- uint32_t flags = GCInfo[0].flags;
- bool allSame = true;
- for (int i = 1; i < 4 && GCInfo[i].arch; ++i) {
- if (flags != GCInfo[i].flags) {
- allSame = false;
- }
- }
- if (rrOnly) {
- if (allSame && (flags & 0x2))
- return;
- printf("*** not all GC in %s:\n", filename);
- }
- if (allSame && !verbose) {
- printf("%s:", filename);
- printflags(flags);
- printf("\n");
- }
- else {
- printf("%s:\n", filename);
- for (int i = 0; i < 4 && GCInfo[i].arch; ++i) {
- printf("%s:", GCInfo[i].arch);
- printflags(GCInfo[i].flags);
- printf("\n");
- }
- printf("\n");
- }
-}
-
-void dofat(void *start) {
- struct fat_header *fh = start;
- bool needsFlip = false;
- if (fh->magic == FAT_CIGAM) {
- fh->nfat_arch = OSSwapInt32(fh->nfat_arch);
- needsFlip = true;
- }
- if (debug) printf("%d architectures\n", fh->nfat_arch);
- unsigned narchs;
- struct fat_arch *arch_ptr = (struct fat_arch *)(fh + 1);
- for (narchs = 0; narchs < fh->nfat_arch; ++narchs) {
- if (debug) printf("doing arch %d\n", narchs);
- if (needsFlip) {
- arch_ptr->offset = OSSwapInt32(arch_ptr->offset);
- arch_ptr->size = OSSwapInt32(arch_ptr->size);
- }
- doofile(start+arch_ptr->offset, arch_ptr->size, &GCInfo[narchs]);
- arch_ptr++;
- }
-}
-
-bool openFile(const char *filename) {
- FileName = filename;
- // get size
- struct stat statb;
- int fd = open(filename, 0);
- if (fd < 0) {
- printf("couldn't open %s for reading\n", filename);
- return false;
- }
- int osresult = fstat(fd, &statb);
- if (osresult != 0) {
- printf("couldn't get size of %s\n", filename);
- close(fd);
- return false;
- }
- if ((sizeof(size_t) == 4) && ((size_t)statb.st_size > SIZE_T_MAX)) {
- printf("couldn't malloc %llu bytes\n", statb.st_size);
- close(fd);
- return false;
- }
- FileSize = (size_t)statb.st_size;
- FileBase = malloc(FileSize);
- if (!FileBase) {
- printf("couldn't malloc %lu bytes\n", FileSize);
- close(fd);
- return false;
- }
- ssize_t readsize = read(fd, FileBase, FileSize);
- if ((readsize == -1) || ((size_t)readsize != FileSize)) {
- printf("read %ld bytes, wanted %ld\n", (size_t)readsize, FileSize);
- close(fd);
- return false;
- }
- close(fd);
- return true;
-}
-
-void closeFile() {
- free(FileBase);
-}
-
-void dumpinfo(char *filename) {
- initGCInfo();
- if (!openFile(filename)) exit(1);
- struct fat_header *fh = (struct fat_header *)FileBase;
- if (fh->magic == FAT_MAGIC || fh->magic == FAT_CIGAM) {
- dofat((void *)FileBase);
- //printGCInfo(filename);
- }
- else if (fh->magic == MH_MAGIC || fh->magic == MH_CIGAM || fh->magic == MH_MAGIC_64 || fh->magic == MH_CIGAM_64) {
- doofile((void *)FileBase, FileSize, &GCInfo[0]);
- //printGCInfo(filename);
- }
- else if (!quiet) {
- printf("don't understand %s!\n", filename);
- }
- closeFile();
- }
-
--- /dev/null
+/*
+ * Copyright (c) 2007-2009 Apple Inc. All Rights Reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/errno.h>
+#include <mach-o/fat.h>
+#include <mach-o/arch.h>
+#include <mach-o/loader.h>
+
+// from "objc-private.h"
+// masks for objc_image_info.flags
+#define OBJC_IMAGE_SUPPORTS_GC (1<<1)
+
+// Some OS X SDKs don't define these.
+#ifndef CPU_TYPE_ARM
+#define CPU_TYPE_ARM ((cpu_type_t) 12)
+#endif
+#ifndef CPU_ARCH_ABI64
+#define CPU_ARCH_ABI64 0x01000000 /* 64 bit ABI */
+#endif
+#ifndef CPU_TYPE_ARM64
+#define CPU_TYPE_ARM64 (CPU_TYPE_ARM | CPU_ARCH_ABI64)
+#endif
+
+// File abstraction taken from ld64/FileAbstraction.hpp
+// and ld64/MachOFileAbstraction.hpp.
+
+#ifdef __OPTIMIZE__
+#define INLINE __attribute__((always_inline))
+#else
+#define INLINE
+#endif
+
+//
+// This abstraction layer is for use with file formats that have 64-bit/32-bit and Big-Endian/Little-Endian variants
+//
+// For example: to make a utility that handles 32-bit little enidan files use: Pointer32<LittleEndian>
+//
+//
+// get16() read a 16-bit number from an E endian struct
+// set16() write a 16-bit number to an E endian struct
+// get32() read a 32-bit number from an E endian struct
+// set32() write a 32-bit number to an E endian struct
+// get64() read a 64-bit number from an E endian struct
+// set64() write a 64-bit number to an E endian struct
+//
+// getBits() read a bit field from an E endian struct (bitCount=number of bits in field, firstBit=bit index of field)
+// setBits() write a bit field to an E endian struct (bitCount=number of bits in field, firstBit=bit index of field)
+//
+// getBitsRaw() read a bit field from a struct with native endianness
+// setBitsRaw() write a bit field from a struct with native endianness
+//
+
+class BigEndian
+{
+public:
+ static uint16_t get16(const uint16_t& from) INLINE { return OSReadBigInt16(&from, 0); }
+ static void set16(uint16_t& into, uint16_t value) INLINE { OSWriteBigInt16(&into, 0, value); }
+
+ static uint32_t get32(const uint32_t& from) INLINE { return OSReadBigInt32(&from, 0); }
+ static void set32(uint32_t& into, uint32_t value) INLINE { OSWriteBigInt32(&into, 0, value); }
+
+ static uint64_t get64(const uint64_t& from) INLINE { return OSReadBigInt64(&from, 0); }
+ static void set64(uint64_t& into, uint64_t value) INLINE { OSWriteBigInt64(&into, 0, value); }
+
+ static uint32_t getBits(const uint32_t& from,
+ uint8_t firstBit, uint8_t bitCount) INLINE { return getBitsRaw(get32(from), firstBit, bitCount); }
+ static void setBits(uint32_t& into, uint32_t value,
+ uint8_t firstBit, uint8_t bitCount) INLINE { uint32_t temp = get32(into); setBitsRaw(temp, value, firstBit, bitCount); set32(into, temp); }
+
+ static uint32_t getBitsRaw(const uint32_t& from,
+ uint8_t firstBit, uint8_t bitCount) INLINE { return ((from >> (32-firstBit-bitCount)) & ((1<<bitCount)-1)); }
+ static void setBitsRaw(uint32_t& into, uint32_t value,
+ uint8_t firstBit, uint8_t bitCount) INLINE { uint32_t temp = into;
+ const uint32_t mask = ((1<<bitCount)-1);
+ temp &= ~(mask << (32-firstBit-bitCount));
+ temp |= ((value & mask) << (32-firstBit-bitCount));
+ into = temp; }
+ enum { little_endian = 0 };
+};
+
+
+class LittleEndian
+{
+public:
+ static uint16_t get16(const uint16_t& from) INLINE { return OSReadLittleInt16(&from, 0); }
+ static void set16(uint16_t& into, uint16_t value) INLINE { OSWriteLittleInt16(&into, 0, value); }
+
+ static uint32_t get32(const uint32_t& from) INLINE { return OSReadLittleInt32(&from, 0); }
+ static void set32(uint32_t& into, uint32_t value) INLINE { OSWriteLittleInt32(&into, 0, value); }
+
+ static uint64_t get64(const uint64_t& from) INLINE { return OSReadLittleInt64(&from, 0); }
+ static void set64(uint64_t& into, uint64_t value) INLINE { OSWriteLittleInt64(&into, 0, value); }
+
+ static uint32_t getBits(const uint32_t& from,
+ uint8_t firstBit, uint8_t bitCount) INLINE { return getBitsRaw(get32(from), firstBit, bitCount); }
+ static void setBits(uint32_t& into, uint32_t value,
+ uint8_t firstBit, uint8_t bitCount) INLINE { uint32_t temp = get32(into); setBitsRaw(temp, value, firstBit, bitCount); set32(into, temp); }
+
+ static uint32_t getBitsRaw(const uint32_t& from,
+ uint8_t firstBit, uint8_t bitCount) INLINE { return ((from >> firstBit) & ((1<<bitCount)-1)); }
+ static void setBitsRaw(uint32_t& into, uint32_t value,
+ uint8_t firstBit, uint8_t bitCount) INLINE { uint32_t temp = into;
+ const uint32_t mask = ((1<<bitCount)-1);
+ temp &= ~(mask << firstBit);
+ temp |= ((value & mask) << firstBit);
+ into = temp; }
+ enum { little_endian = 1 };
+};
+
+#if __BIG_ENDIAN__
+typedef BigEndian CurrentEndian;
+typedef LittleEndian OtherEndian;
+#elif __LITTLE_ENDIAN__
+typedef LittleEndian CurrentEndian;
+typedef BigEndian OtherEndian;
+#else
+#error unknown endianness
+#endif
+
+
+template <typename _E>
+class Pointer32
+{
+public:
+ typedef uint32_t uint_t;
+ typedef int32_t sint_t;
+ typedef _E E;
+
+ static uint64_t getP(const uint_t& from) INLINE { return _E::get32(from); }
+ static void setP(uint_t& into, uint64_t value) INLINE { _E::set32(into, value); }
+};
+
+
+template <typename _E>
+class Pointer64
+{
+public:
+ typedef uint64_t uint_t;
+ typedef int64_t sint_t;
+ typedef _E E;
+
+ static uint64_t getP(const uint_t& from) INLINE { return _E::get64(from); }
+ static void setP(uint_t& into, uint64_t value) INLINE { _E::set64(into, value); }
+};
+
+
+//
+// mach-o file header
+//
+template <typename P> struct macho_header_content {};
+template <> struct macho_header_content<Pointer32<BigEndian> > { mach_header fields; };
+template <> struct macho_header_content<Pointer64<BigEndian> > { mach_header_64 fields; };
+template <> struct macho_header_content<Pointer32<LittleEndian> > { mach_header fields; };
+template <> struct macho_header_content<Pointer64<LittleEndian> > { mach_header_64 fields; };
+
+template <typename P>
+class macho_header {
+public:
+ uint32_t magic() const INLINE { return E::get32(header.fields.magic); }
+ void set_magic(uint32_t value) INLINE { E::set32(header.fields.magic, value); }
+
+ uint32_t cputype() const INLINE { return E::get32(header.fields.cputype); }
+ void set_cputype(uint32_t value) INLINE { E::set32((uint32_t&)header.fields.cputype, value); }
+
+ uint32_t cpusubtype() const INLINE { return E::get32(header.fields.cpusubtype); }
+ void set_cpusubtype(uint32_t value) INLINE { E::set32((uint32_t&)header.fields.cpusubtype, value); }
+
+ uint32_t filetype() const INLINE { return E::get32(header.fields.filetype); }
+ void set_filetype(uint32_t value) INLINE { E::set32(header.fields.filetype, value); }
+
+ uint32_t ncmds() const INLINE { return E::get32(header.fields.ncmds); }
+ void set_ncmds(uint32_t value) INLINE { E::set32(header.fields.ncmds, value); }
+
+ uint32_t sizeofcmds() const INLINE { return E::get32(header.fields.sizeofcmds); }
+ void set_sizeofcmds(uint32_t value) INLINE { E::set32(header.fields.sizeofcmds, value); }
+
+ uint32_t flags() const INLINE { return E::get32(header.fields.flags); }
+ void set_flags(uint32_t value) INLINE { E::set32(header.fields.flags, value); }
+
+ uint32_t reserved() const INLINE { return E::get32(header.fields.reserved); }
+ void set_reserved(uint32_t value) INLINE { E::set32(header.fields.reserved, value); }
+
+ typedef typename P::E E;
+private:
+ macho_header_content<P> header;
+};
+
+
+//
+// mach-o load command
+//
+template <typename P>
+class macho_load_command {
+public:
+ uint32_t cmd() const INLINE { return E::get32(command.cmd); }
+ void set_cmd(uint32_t value) INLINE { E::set32(command.cmd, value); }
+
+ uint32_t cmdsize() const INLINE { return E::get32(command.cmdsize); }
+ void set_cmdsize(uint32_t value) INLINE { E::set32(command.cmdsize, value); }
+
+ typedef typename P::E E;
+private:
+ load_command command;
+};
+
+
+
+
+//
+// mach-o segment load command
+//
+template <typename P> struct macho_segment_content {};
+template <> struct macho_segment_content<Pointer32<BigEndian> > { segment_command fields; enum { CMD = LC_SEGMENT }; };
+template <> struct macho_segment_content<Pointer64<BigEndian> > { segment_command_64 fields; enum { CMD = LC_SEGMENT_64 }; };
+template <> struct macho_segment_content<Pointer32<LittleEndian> > { segment_command fields; enum { CMD = LC_SEGMENT }; };
+template <> struct macho_segment_content<Pointer64<LittleEndian> > { segment_command_64 fields; enum { CMD = LC_SEGMENT_64 }; };
+
+template <typename P>
+class macho_segment_command {
+public:
+ uint32_t cmd() const INLINE { return E::get32(segment.fields.cmd); }
+ void set_cmd(uint32_t value) INLINE { E::set32(segment.fields.cmd, value); }
+
+ uint32_t cmdsize() const INLINE { return E::get32(segment.fields.cmdsize); }
+ void set_cmdsize(uint32_t value) INLINE { E::set32(segment.fields.cmdsize, value); }
+
+ const char* segname() const INLINE { return segment.fields.segname; }
+ void set_segname(const char* value) INLINE { strncpy(segment.fields.segname, value, 16); }
+
+ uint64_t vmaddr() const INLINE { return P::getP(segment.fields.vmaddr); }
+ void set_vmaddr(uint64_t value) INLINE { P::setP(segment.fields.vmaddr, value); }
+
+ uint64_t vmsize() const INLINE { return P::getP(segment.fields.vmsize); }
+ void set_vmsize(uint64_t value) INLINE { P::setP(segment.fields.vmsize, value); }
+
+ uint64_t fileoff() const INLINE { return P::getP(segment.fields.fileoff); }
+ void set_fileoff(uint64_t value) INLINE { P::setP(segment.fields.fileoff, value); }
+
+ uint64_t filesize() const INLINE { return P::getP(segment.fields.filesize); }
+ void set_filesize(uint64_t value) INLINE { P::setP(segment.fields.filesize, value); }
+
+ uint32_t maxprot() const INLINE { return E::get32(segment.fields.maxprot); }
+ void set_maxprot(uint32_t value) INLINE { E::set32((uint32_t&)segment.fields.maxprot, value); }
+
+ uint32_t initprot() const INLINE { return E::get32(segment.fields.initprot); }
+ void set_initprot(uint32_t value) INLINE { E::set32((uint32_t&)segment.fields.initprot, value); }
+
+ uint32_t nsects() const INLINE { return E::get32(segment.fields.nsects); }
+ void set_nsects(uint32_t value) INLINE { E::set32(segment.fields.nsects, value); }
+
+ uint32_t flags() const INLINE { return E::get32(segment.fields.flags); }
+ void set_flags(uint32_t value) INLINE { E::set32(segment.fields.flags, value); }
+
+ enum {
+ CMD = macho_segment_content<P>::CMD
+ };
+
+ typedef typename P::E E;
+private:
+ macho_segment_content<P> segment;
+};
+
+
+//
+// mach-o section
+//
+template <typename P> struct macho_section_content {};
+template <> struct macho_section_content<Pointer32<BigEndian> > { section fields; };
+template <> struct macho_section_content<Pointer64<BigEndian> > { section_64 fields; };
+template <> struct macho_section_content<Pointer32<LittleEndian> > { section fields; };
+template <> struct macho_section_content<Pointer64<LittleEndian> > { section_64 fields; };
+
+template <typename P>
+class macho_section {
+public:
+ const char* sectname() const INLINE { return section.fields.sectname; }
+ void set_sectname(const char* value) INLINE { strncpy(section.fields.sectname, value, 16); }
+
+ const char* segname() const INLINE { return section.fields.segname; }
+ void set_segname(const char* value) INLINE { strncpy(section.fields.segname, value, 16); }
+
+ uint64_t addr() const INLINE { return P::getP(section.fields.addr); }
+ void set_addr(uint64_t value) INLINE { P::setP(section.fields.addr, value); }
+
+ uint64_t size() const INLINE { return P::getP(section.fields.size); }
+ void set_size(uint64_t value) INLINE { P::setP(section.fields.size, value); }
+
+ uint32_t offset() const INLINE { return E::get32(section.fields.offset); }
+ void set_offset(uint32_t value) INLINE { E::set32(section.fields.offset, value); }
+
+ uint32_t align() const INLINE { return E::get32(section.fields.align); }
+ void set_align(uint32_t value) INLINE { E::set32(section.fields.align, value); }
+
+ uint32_t reloff() const INLINE { return E::get32(section.fields.reloff); }
+ void set_reloff(uint32_t value) INLINE { E::set32(section.fields.reloff, value); }
+
+ uint32_t nreloc() const INLINE { return E::get32(section.fields.nreloc); }
+ void set_nreloc(uint32_t value) INLINE { E::set32(section.fields.nreloc, value); }
+
+ uint32_t flags() const INLINE { return E::get32(section.fields.flags); }
+ void set_flags(uint32_t value) INLINE { E::set32(section.fields.flags, value); }
+
+ uint32_t reserved1() const INLINE { return E::get32(section.fields.reserved1); }
+ void set_reserved1(uint32_t value) INLINE { E::set32(section.fields.reserved1, value); }
+
+ uint32_t reserved2() const INLINE { return E::get32(section.fields.reserved2); }
+ void set_reserved2(uint32_t value) INLINE { E::set32(section.fields.reserved2, value); }
+
+ typedef typename P::E E;
+private:
+ macho_section_content<P> section;
+};
+
+
+
+
+static bool debug = true;
+
+bool processFile(const char *filename);
+
+int main(int argc, const char *argv[]) {
+ for (int i = 1; i < argc; ++i) {
+ if (!processFile(argv[i])) return 1;
+ }
+ return 0;
+}
+
+struct imageinfo {
+ uint32_t version;
+ uint32_t flags;
+};
+
+
+// Segment and section names are 16 bytes and may be un-terminated.
+bool segnameEquals(const char *lhs, const char *rhs)
+{
+ return 0 == strncmp(lhs, rhs, 16);
+}
+
+bool segnameStartsWith(const char *segname, const char *prefix)
+{
+ return 0 == strncmp(segname, prefix, strlen(prefix));
+}
+
+bool sectnameEquals(const char *lhs, const char *rhs)
+{
+ return segnameEquals(lhs, rhs);
+}
+
+
+template <typename P>
+void dosect(uint8_t *start, macho_section<P> *sect, bool isOldABI, bool isOSX)
+{
+ if (debug) printf("section %.16s from segment %.16s\n",
+ sect->sectname(), sect->segname());
+
+ if (isOSX) {
+ // Add "supports GC" flag to objc image info
+ if ((segnameStartsWith(sect->segname(), "__DATA") &&
+ sectnameEquals(sect->sectname(), "__objc_imageinfo")) ||
+ (segnameEquals(sect->segname(), "__OBJC") &&
+ sectnameEquals(sect->sectname(), "__image_info")))
+ {
+ imageinfo *ii = (imageinfo*)(start + sect->offset());
+ P::E::set32(ii->flags, P::E::get32(ii->flags) | OBJC_IMAGE_SUPPORTS_GC);
+ if (debug) printf("added GC support flag\n");
+ }
+ }
+
+ if (isOldABI) {
+ // Keep init funcs because libSystem doesn't call _objc_init().
+ } else {
+ // Strip S_MOD_INIT/TERM_FUNC_POINTERS. We don't want dyld to call
+ // our init funcs because it is too late, and we don't want anyone to
+ // call our term funcs ever.
+ if (segnameStartsWith(sect->segname(), "__DATA") &&
+ sectnameEquals(sect->sectname(), "__mod_init_func"))
+ {
+ // section type 0 is S_REGULAR
+ sect->set_flags(sect->flags() & ~SECTION_TYPE);
+ sect->set_sectname("__objc_init_func");
+ if (debug) printf("disabled __mod_init_func section\n");
+ }
+ if (segnameStartsWith(sect->segname(), "__DATA") &&
+ sectnameEquals(sect->sectname(), "__mod_term_func"))
+ {
+ // section type 0 is S_REGULAR
+ sect->set_flags(sect->flags() & ~SECTION_TYPE);
+ sect->set_sectname("__objc_term_func");
+ if (debug) printf("disabled __mod_term_func section\n");
+ }
+ }
+}
+
+template <typename P>
+void doseg(uint8_t *start, macho_segment_command<P> *seg,
+ bool isOldABI, bool isOSX)
+{
+ if (debug) printf("segment name: %.16s, nsects %u\n",
+ seg->segname(), seg->nsects());
+ macho_section<P> *sect = (macho_section<P> *)(seg + 1);
+ for (uint32_t i = 0; i < seg->nsects(); ++i) {
+ dosect(start, §[i], isOldABI, isOSX);
+ }
+}
+
+
+template<typename P>
+bool parse_macho(uint8_t *buffer)
+{
+ macho_header<P>* mh = (macho_header<P>*)buffer;
+ uint8_t *cmds;
+
+ bool isOldABI = false;
+ bool isOSX = false;
+ cmds = (uint8_t *)(mh + 1);
+ for (uint32_t c = 0; c < mh->ncmds(); c++) {
+ macho_load_command<P>* cmd = (macho_load_command<P>*)cmds;
+ cmds += cmd->cmdsize();
+ if (cmd->cmd() == LC_SEGMENT || cmd->cmd() == LC_SEGMENT_64) {
+ macho_segment_command<P>* seg = (macho_segment_command<P>*)cmd;
+ if (segnameEquals(seg->segname(), "__OBJC")) isOldABI = true;
+ }
+ else if (cmd->cmd() == LC_VERSION_MIN_MACOSX) {
+ isOSX = true;
+ }
+ }
+
+ if (debug) printf("ABI=%s, OS=%s\n",
+ isOldABI ? "old" : "new", isOSX ? "osx" : "ios");
+
+ cmds = (uint8_t *)(mh + 1);
+ for (uint32_t c = 0; c < mh->ncmds(); c++) {
+ macho_load_command<P>* cmd = (macho_load_command<P>*)cmds;
+ cmds += cmd->cmdsize();
+ if (cmd->cmd() == LC_SEGMENT || cmd->cmd() == LC_SEGMENT_64) {
+ doseg(buffer, (macho_segment_command<P>*)cmd, isOldABI, isOSX);
+ }
+ }
+
+ return true;
+}
+
+
+bool parse_macho(uint8_t *buffer)
+{
+ uint32_t magic = *(uint32_t *)buffer;
+
+ switch (magic) {
+ case MH_MAGIC_64:
+ return parse_macho<Pointer64<CurrentEndian>>(buffer);
+ case MH_MAGIC:
+ return parse_macho<Pointer32<CurrentEndian>>(buffer);
+ case MH_CIGAM_64:
+ return parse_macho<Pointer64<OtherEndian>>(buffer);
+ case MH_CIGAM:
+ return parse_macho<Pointer32<OtherEndian>>(buffer);
+ default:
+ printf("file is not mach-o (magic %x)\n", magic);
+ return false;
+ }
+}
+
+
+bool parse_fat(uint8_t *buffer, size_t size)
+{
+ uint32_t magic;
+
+ if (size < sizeof(magic)) {
+ printf("file is too small\n");
+ return false;
+ }
+
+ magic = *(uint32_t *)buffer;
+ if (magic != FAT_MAGIC && magic != FAT_CIGAM) {
+ /* Not a fat file */
+ return parse_macho(buffer);
+ } else {
+ struct fat_header *fh;
+ uint32_t fat_magic, fat_nfat_arch;
+ struct fat_arch *archs;
+
+ if (size < sizeof(struct fat_header)) {
+ printf("file is too small\n");
+ return false;
+ }
+
+ fh = (struct fat_header *)buffer;
+ fat_magic = OSSwapBigToHostInt32(fh->magic);
+ fat_nfat_arch = OSSwapBigToHostInt32(fh->nfat_arch);
+
+ if (size < (sizeof(struct fat_header) + fat_nfat_arch * sizeof(struct fat_arch))) {
+ printf("file is too small\n");
+ return false;
+ }
+
+ archs = (struct fat_arch *)(buffer + sizeof(struct fat_header));
+
+ /* Special case hidden CPU_TYPE_ARM64 */
+ if (size >= (sizeof(struct fat_header) + (fat_nfat_arch + 1) * sizeof(struct fat_arch))) {
+ if (fat_nfat_arch > 0
+ && OSSwapBigToHostInt32(archs[fat_nfat_arch].cputype) == CPU_TYPE_ARM64) {
+ fat_nfat_arch++;
+ }
+ }
+ /* End special case hidden CPU_TYPE_ARM64 */
+
+ if (debug) printf("%d fat architectures\n",
+ fat_nfat_arch);
+
+ for (uint32_t i = 0; i < fat_nfat_arch; i++) {
+ uint32_t arch_cputype = OSSwapBigToHostInt32(archs[i].cputype);
+ uint32_t arch_cpusubtype = OSSwapBigToHostInt32(archs[i].cpusubtype);
+ uint32_t arch_offset = OSSwapBigToHostInt32(archs[i].offset);
+ uint32_t arch_size = OSSwapBigToHostInt32(archs[i].size);
+
+ if (debug) printf("cputype %d cpusubtype %d\n",
+ arch_cputype, arch_cpusubtype);
+
+ /* Check that slice data is after all fat headers and archs */
+ if (arch_offset < (sizeof(struct fat_header) + fat_nfat_arch * sizeof(struct fat_arch))) {
+ printf("file is badly formed\n");
+ return false;
+ }
+
+ /* Check that the slice ends before the file does */
+ if (arch_offset > size) {
+ printf("file is badly formed\n");
+ return false;
+ }
+
+ if (arch_size > size) {
+ printf("file is badly formed\n");
+ return false;
+ }
+
+ if (arch_offset > (size - arch_size)) {
+ printf("file is badly formed\n");
+ return false;
+ }
+
+ bool ok = parse_macho(buffer + arch_offset);
+ if (!ok) return false;
+ }
+ return true;
+ }
+}
+
+bool processFile(const char *filename)
+{
+ if (debug) printf("file %s\n", filename);
+ int fd = open(filename, O_RDWR);
+ if (fd < 0) {
+ printf("open %s: %s\n", filename, strerror(errno));
+ return false;
+ }
+
+ struct stat st;
+ if (fstat(fd, &st) < 0) {
+ printf("fstat %s: %s\n", filename, strerror(errno));
+ return false;
+ }
+
+ void *buffer = mmap(NULL, (size_t)st.st_size, PROT_READ|PROT_WRITE,
+ MAP_FILE|MAP_SHARED, fd, 0);
+ if (buffer == MAP_FAILED) {
+ printf("mmap %s: %s\n", filename, strerror(errno));
+ return false;
+ }
+
+ bool result = parse_fat((uint8_t *)buffer, (size_t)st.st_size);
+ munmap(buffer, (size_t)st.st_size);
+ close(fd);
+ return result;
+}
objectVersion = 46;
objects = {
+/* Begin PBXAggregateTarget section */
+ 837F67A81A771F63004D34FA /* objc-simulator */ = {
+ isa = PBXAggregateTarget;
+ buildConfigurationList = 837F67A91A771F63004D34FA /* Build configuration list for PBXAggregateTarget "objc-simulator" */;
+ buildPhases = (
+ );
+ dependencies = (
+ 837F67AD1A771F6E004D34FA /* PBXTargetDependency */,
+ );
+ name = "objc-simulator";
+ productName = objc_simulator;
+ };
+/* End PBXAggregateTarget section */
+
/* Begin PBXBuildFile section */
393CEAC00DC69E3E000B69DE /* objc-references.mm in Sources */ = {isa = PBXBuildFile; fileRef = 393CEABF0DC69E3E000B69DE /* objc-references.mm */; };
393CEAC60DC69E67000B69DE /* objc-references.h in Headers */ = {isa = PBXBuildFile; fileRef = 393CEAC50DC69E67000B69DE /* objc-references.h */; };
399BC72E1224831B007FBDF0 /* objc-externalref.mm in Sources */ = {isa = PBXBuildFile; fileRef = 399BC72D1224831B007FBDF0 /* objc-externalref.mm */; };
39ABD72312F0B61800D1054C /* objc-weak.h in Headers */ = {isa = PBXBuildFile; fileRef = 39ABD71F12F0B61800D1054C /* objc-weak.h */; };
39ABD72412F0B61800D1054C /* objc-weak.mm in Sources */ = {isa = PBXBuildFile; fileRef = 39ABD72012F0B61800D1054C /* objc-weak.mm */; };
- 39ABD72512F0B61800D1054C /* objc-weak.h in Headers */ = {isa = PBXBuildFile; fileRef = 39ABD71F12F0B61800D1054C /* objc-weak.h */; };
- 39ABD72612F0B61800D1054C /* objc-weak.mm in Sources */ = {isa = PBXBuildFile; fileRef = 39ABD72012F0B61800D1054C /* objc-weak.mm */; };
830F2A740D737FB800392440 /* objc-msg-arm.s in Sources */ = {isa = PBXBuildFile; fileRef = 830F2A690D737FB800392440 /* objc-msg-arm.s */; };
830F2A750D737FB900392440 /* objc-msg-i386.s in Sources */ = {isa = PBXBuildFile; fileRef = 830F2A6A0D737FB800392440 /* objc-msg-i386.s */; };
830F2A7D0D737FBB00392440 /* objc-msg-x86_64.s in Sources */ = {isa = PBXBuildFile; fileRef = 830F2A720D737FB800392440 /* objc-msg-x86_64.s */; };
830F2A940D73876100392440 /* objc-accessors.h in Headers */ = {isa = PBXBuildFile; fileRef = 830F2A920D73876100392440 /* objc-accessors.h */; };
830F2A950D73876100392440 /* objc-accessors.mm in Sources */ = {isa = PBXBuildFile; fileRef = 830F2A930D73876100392440 /* objc-accessors.mm */; };
830F2A980D738DC200392440 /* hashtable.h in Headers */ = {isa = PBXBuildFile; fileRef = 830F2A970D738DC200392440 /* hashtable.h */; settings = {ATTRIBUTES = (Public, ); }; };
- 830F2AB10D73962200392440 /* markgc.c in Sources */ = {isa = PBXBuildFile; fileRef = 830F2AA50D7394C200392440 /* markgc.c */; };
83112ED40F00599600A5FBAF /* objc-internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 83112ED30F00599600A5FBAF /* objc-internal.h */; settings = {ATTRIBUTES = (Private, ); }; };
831C85D50E10CF850066E64C /* objc-os.h in Headers */ = {isa = PBXBuildFile; fileRef = 831C85D30E10CF850066E64C /* objc-os.h */; };
831C85D60E10CF850066E64C /* objc-os.mm in Sources */ = {isa = PBXBuildFile; fileRef = 831C85D40E10CF850066E64C /* objc-os.mm */; };
834DF8B715993EE1002F2BC9 /* objc-sel-old.mm in Sources */ = {isa = PBXBuildFile; fileRef = 834DF8B615993EE1002F2BC9 /* objc-sel-old.mm */; };
834EC0A411614167009B2563 /* objc-abi.h in Headers */ = {isa = PBXBuildFile; fileRef = 834EC0A311614167009B2563 /* objc-abi.h */; settings = {ATTRIBUTES = (Private, ); }; };
83725F4A14CA5BFA0014370E /* objc-opt.mm in Sources */ = {isa = PBXBuildFile; fileRef = 83725F4914CA5BFA0014370E /* objc-opt.mm */; };
- 83725F4C14CA5C210014370E /* objc-opt.mm in Sources */ = {isa = PBXBuildFile; fileRef = 83725F4914CA5BFA0014370E /* objc-opt.mm */; };
8379996E13CBAF6F007C2B5F /* a1a2-blocktramps-arm64.s in Sources */ = {isa = PBXBuildFile; fileRef = 8379996D13CBAF6F007C2B5F /* a1a2-blocktramps-arm64.s */; };
8383A3A3122600E9009290B8 /* a1a2-blocktramps-arm.s in Sources */ = {isa = PBXBuildFile; fileRef = 8383A3A1122600E9009290B8 /* a1a2-blocktramps-arm.s */; };
8383A3A4122600E9009290B8 /* a2a3-blocktramps-arm.s in Sources */ = {isa = PBXBuildFile; fileRef = 8383A3A2122600E9009290B8 /* a2a3-blocktramps-arm.s */; };
- 8383A3AC122600FB009290B8 /* a1a2-blocktramps-arm.s in Sources */ = {isa = PBXBuildFile; fileRef = 8383A3A1122600E9009290B8 /* a1a2-blocktramps-arm.s */; settings = {COMPILER_FLAGS = " -Qunused-arguments"; }; };
- 8383A3AD122600FB009290B8 /* a2a3-blocktramps-arm.s in Sources */ = {isa = PBXBuildFile; fileRef = 8383A3A2122600E9009290B8 /* a2a3-blocktramps-arm.s */; settings = {COMPILER_FLAGS = " -Qunused-arguments"; }; };
- 8383A3AE122600FB009290B8 /* hashtable2.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485B80D6D687300CEA253 /* hashtable2.mm */; };
- 8383A3AF122600FB009290B8 /* maptable.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485BC0D6D687300CEA253 /* maptable.mm */; };
- 8383A3B0122600FB009290B8 /* objc-accessors.mm in Sources */ = {isa = PBXBuildFile; fileRef = 830F2A930D73876100392440 /* objc-accessors.mm */; };
- 8383A3B1122600FB009290B8 /* objc-auto.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485CA0D6D68A200CEA253 /* objc-auto.mm */; };
- 8383A3B2122600FB009290B8 /* objc-auto-dump.mm in Sources */ = {isa = PBXBuildFile; fileRef = BC07A0100EF72D9C0014EC61 /* objc-auto-dump.mm */; };
- 8383A3B3122600FB009290B8 /* objc-block-trampolines.mm in Sources */ = {isa = PBXBuildFile; fileRef = E8923DA0116AB2820071B552 /* objc-block-trampolines.mm */; };
- 8383A3B4122600FB009290B8 /* objc-cache.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485CB0D6D68A200CEA253 /* objc-cache.mm */; };
- 8383A3B5122600FB009290B8 /* objc-class-old.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485CC0D6D68A200CEA253 /* objc-class-old.mm */; };
- 8383A3B6122600FB009290B8 /* objc-class.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485CE0D6D68A200CEA253 /* objc-class.mm */; };
- 8383A3B7122600FB009290B8 /* objc-errors.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485D00D6D68A200CEA253 /* objc-errors.mm */; };
- 8383A3B8122600FB009290B8 /* objc-exception.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485D20D6D68A200CEA253 /* objc-exception.mm */; settings = {COMPILER_FLAGS = "-fexceptions"; }; };
- 8383A3B9122600FB009290B8 /* objc-file.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485D30D6D68A200CEA253 /* objc-file.mm */; };
- 8383A3BA122600FB009290B8 /* objc-file-old.mm in Sources */ = {isa = PBXBuildFile; fileRef = 83BE02E30FCCB23400661494 /* objc-file-old.mm */; };
- 8383A3BB122600FB009290B8 /* objc-initialize.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485D50D6D68A200CEA253 /* objc-initialize.mm */; };
- 8383A3BC122600FB009290B8 /* objc-layout.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485D60D6D68A200CEA253 /* objc-layout.mm */; };
- 8383A3BD122600FB009290B8 /* objc-load.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485D80D6D68A200CEA253 /* objc-load.mm */; };
- 8383A3BE122600FB009290B8 /* objc-loadmethod.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485DA0D6D68A200CEA253 /* objc-loadmethod.mm */; };
- 8383A3BF122600FB009290B8 /* objc-lockdebug.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485DB0D6D68A200CEA253 /* objc-lockdebug.mm */; };
- 8383A3C0122600FB009290B8 /* objc-os.mm in Sources */ = {isa = PBXBuildFile; fileRef = 831C85D40E10CF850066E64C /* objc-os.mm */; };
- 8383A3C1122600FB009290B8 /* objc-references.mm in Sources */ = {isa = PBXBuildFile; fileRef = 393CEABF0DC69E3E000B69DE /* objc-references.mm */; };
- 8383A3C3122600FB009290B8 /* objc-runtime-new.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485E10D6D68A200CEA253 /* objc-runtime-new.mm */; };
- 8383A3C4122600FB009290B8 /* objc-runtime-old.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485E20D6D68A200CEA253 /* objc-runtime-old.mm */; };
- 8383A3C5122600FB009290B8 /* objc-runtime.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485E40D6D68A200CEA253 /* objc-runtime.mm */; };
- 8383A3C6122600FB009290B8 /* objc-sel-set.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485E60D6D68A200CEA253 /* objc-sel-set.mm */; };
- 8383A3C7122600FB009290B8 /* objc-sel-table.s in Sources */ = {isa = PBXBuildFile; fileRef = 83EB007A121C9EC200B92C16 /* objc-sel-table.s */; settings = {COMPILER_FLAGS = " -Qunused-arguments"; }; };
- 8383A3C8122600FB009290B8 /* objc-sel.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485E80D6D68A200CEA253 /* objc-sel.mm */; };
- 8383A3C9122600FB009290B8 /* objc-sync.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485EA0D6D68A200CEA253 /* objc-sync.mm */; };
- 8383A3CA122600FB009290B8 /* objc-typeencoding.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485EB0D6D68A200CEA253 /* objc-typeencoding.mm */; };
- 8383A3CB122600FB009290B8 /* a1a2-blocktramps-i386.s in Sources */ = {isa = PBXBuildFile; fileRef = E8923D9C116AB2820071B552 /* a1a2-blocktramps-i386.s */; settings = {COMPILER_FLAGS = " -Qunused-arguments"; }; };
- 8383A3CC122600FB009290B8 /* a1a2-blocktramps-x86_64.s in Sources */ = {isa = PBXBuildFile; fileRef = E8923D9D116AB2820071B552 /* a1a2-blocktramps-x86_64.s */; settings = {COMPILER_FLAGS = " -Qunused-arguments"; }; };
- 8383A3CD122600FB009290B8 /* a2a3-blocktramps-i386.s in Sources */ = {isa = PBXBuildFile; fileRef = E8923D9E116AB2820071B552 /* a2a3-blocktramps-i386.s */; settings = {COMPILER_FLAGS = " -Qunused-arguments"; }; };
- 8383A3CE122600FB009290B8 /* a2a3-blocktramps-x86_64.s in Sources */ = {isa = PBXBuildFile; fileRef = E8923D9F116AB2820071B552 /* a2a3-blocktramps-x86_64.s */; settings = {COMPILER_FLAGS = " -Qunused-arguments"; }; };
- 8383A3D0122600FB009290B8 /* objc-msg-arm.s in Sources */ = {isa = PBXBuildFile; fileRef = 830F2A690D737FB800392440 /* objc-msg-arm.s */; settings = {COMPILER_FLAGS = " -Qunused-arguments"; }; };
- 8383A3D1122600FB009290B8 /* objc-msg-i386.s in Sources */ = {isa = PBXBuildFile; fileRef = 830F2A6A0D737FB800392440 /* objc-msg-i386.s */; settings = {COMPILER_FLAGS = " -Qunused-arguments"; }; };
- 8383A3D2122600FB009290B8 /* objc-msg-simulator-i386.s in Sources */ = {isa = PBXBuildFile; fileRef = 83B1A8BC0FF1AC0D0019EA5B /* objc-msg-simulator-i386.s */; settings = {COMPILER_FLAGS = " -Qunused-arguments"; }; };
- 8383A3D3122600FB009290B8 /* objc-msg-x86_64.s in Sources */ = {isa = PBXBuildFile; fileRef = 830F2A720D737FB800392440 /* objc-msg-x86_64.s */; settings = {COMPILER_FLAGS = " -Qunused-arguments"; }; };
- 8383A3D4122600FB009290B8 /* objc-probes.d in Sources */ = {isa = PBXBuildFile; fileRef = 87BB4E900EC39633005D08E1 /* objc-probes.d */; };
- 8383A3DC1226291C009290B8 /* objc-externalref.mm in Sources */ = {isa = PBXBuildFile; fileRef = 399BC72D1224831B007FBDF0 /* objc-externalref.mm */; };
838485BF0D6D687300CEA253 /* hashtable2.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485B70D6D687300CEA253 /* hashtable2.h */; settings = {ATTRIBUTES = (Public, ); }; };
838485C00D6D687300CEA253 /* hashtable2.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485B80D6D687300CEA253 /* hashtable2.mm */; };
838485C30D6D687300CEA253 /* maptable.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485BB0D6D687300CEA253 /* maptable.h */; settings = {ATTRIBUTES = (Private, ); }; };
838485C40D6D687300CEA253 /* maptable.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485BC0D6D687300CEA253 /* maptable.mm */; };
838485EF0D6D68A200CEA253 /* objc-api.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485C80D6D68A200CEA253 /* objc-api.h */; settings = {ATTRIBUTES = (Public, ); }; };
838485F00D6D68A200CEA253 /* objc-auto.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485C90D6D68A200CEA253 /* objc-auto.h */; settings = {ATTRIBUTES = (Public, ); }; };
- 838485F10D6D68A200CEA253 /* objc-auto.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485CA0D6D68A200CEA253 /* objc-auto.mm */; };
+ 838485F10D6D68A200CEA253 /* objc-auto.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485CA0D6D68A200CEA253 /* objc-auto.mm */; settings = {COMPILER_FLAGS = "-fexceptions"; }; };
838485F20D6D68A200CEA253 /* objc-cache.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485CB0D6D68A200CEA253 /* objc-cache.mm */; };
838485F30D6D68A200CEA253 /* objc-class-old.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485CC0D6D68A200CEA253 /* objc-class-old.mm */; };
838485F40D6D68A200CEA253 /* objc-class.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485CD0D6D68A200CEA253 /* objc-class.h */; settings = {ATTRIBUTES = (Public, ); }; };
83BE02E90FCCB24D00661494 /* objc-file.h in Headers */ = {isa = PBXBuildFile; fileRef = 83BE02E60FCCB24D00661494 /* objc-file.h */; };
83BE02EA0FCCB24D00661494 /* objc-runtime-old.h in Headers */ = {isa = PBXBuildFile; fileRef = 83BE02E70FCCB24D00661494 /* objc-runtime-old.h */; };
83C9C3391668B50E00F4E544 /* objc-msg-simulator-x86_64.s in Sources */ = {isa = PBXBuildFile; fileRef = 83C9C3381668B50E00F4E544 /* objc-msg-simulator-x86_64.s */; };
- 83C9C33A1668B56300F4E544 /* objc-msg-simulator-x86_64.s in Sources */ = {isa = PBXBuildFile; fileRef = 83C9C3381668B50E00F4E544 /* objc-msg-simulator-x86_64.s */; };
83D49E4F13C7C84F0057F1DD /* objc-msg-arm64.s in Sources */ = {isa = PBXBuildFile; fileRef = 83D49E4E13C7C84F0057F1DD /* objc-msg-arm64.s */; };
- 83D49E5013C7C84F0057F1DD /* objc-msg-arm64.s in Sources */ = {isa = PBXBuildFile; fileRef = 83D49E4E13C7C84F0057F1DD /* objc-msg-arm64.s */; };
- 83E50CDB0FF19E8200D74C19 /* hashtable2.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485B70D6D687300CEA253 /* hashtable2.h */; settings = {ATTRIBUTES = (Public, ); }; };
- 83E50CDC0FF19E8200D74C19 /* maptable.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485BB0D6D687300CEA253 /* maptable.h */; settings = {ATTRIBUTES = (Private, ); }; };
- 83E50CDD0FF19E8200D74C19 /* objc-api.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485C80D6D68A200CEA253 /* objc-api.h */; settings = {ATTRIBUTES = (Public, ); }; };
- 83E50CDE0FF19E8200D74C19 /* objc-auto.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485C90D6D68A200CEA253 /* objc-auto.h */; settings = {ATTRIBUTES = (Public, ); }; };
- 83E50CDF0FF19E8200D74C19 /* objc-auto-dump.h in Headers */ = {isa = PBXBuildFile; fileRef = BC07A00B0EF72D360014EC61 /* objc-auto-dump.h */; settings = {ATTRIBUTES = (Private, ); }; };
- 83E50CE00FF19E8200D74C19 /* objc-class.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485CD0D6D68A200CEA253 /* objc-class.h */; settings = {ATTRIBUTES = (Public, ); }; };
- 83E50CE10FF19E8200D74C19 /* objc-config.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485CF0D6D68A200CEA253 /* objc-config.h */; };
- 83E50CE20FF19E8200D74C19 /* objc-exception.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485D10D6D68A200CEA253 /* objc-exception.h */; settings = {ATTRIBUTES = (Public, ); }; };
- 83E50CE30FF19E8200D74C19 /* objc-initialize.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485D40D6D68A200CEA253 /* objc-initialize.h */; };
- 83E50CE40FF19E8200D74C19 /* objc-load.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485D70D6D68A200CEA253 /* objc-load.h */; settings = {ATTRIBUTES = (Public, ); }; };
- 83E50CE50FF19E8200D74C19 /* objc-loadmethod.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485D90D6D68A200CEA253 /* objc-loadmethod.h */; };
- 83E50CE60FF19E8200D74C19 /* objc-private.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485DC0D6D68A200CEA253 /* objc-private.h */; };
- 83E50CE80FF19E8200D74C19 /* objc-runtime-new.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485E00D6D68A200CEA253 /* objc-runtime-new.h */; };
- 83E50CE90FF19E8200D74C19 /* objc-runtime.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485E30D6D68A200CEA253 /* objc-runtime.h */; settings = {ATTRIBUTES = (Public, ); }; };
- 83E50CEB0FF19E8200D74C19 /* objc-sel-set.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485E50D6D68A200CEA253 /* objc-sel-set.h */; };
- 83E50CEC0FF19E8200D74C19 /* objc-sync.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485E90D6D68A200CEA253 /* objc-sync.h */; settings = {ATTRIBUTES = (Public, ); }; };
- 83E50CED0FF19E8200D74C19 /* objc.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485EC0D6D68A200CEA253 /* objc.h */; settings = {ATTRIBUTES = (Public, ); }; };
- 83E50CEE0FF19E8200D74C19 /* Object.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485ED0D6D68A200CEA253 /* Object.h */; settings = {ATTRIBUTES = (Public, ); }; };
- 83E50CEF0FF19E8200D74C19 /* Protocol.h in Headers */ = {isa = PBXBuildFile; fileRef = 838486180D6D68A800CEA253 /* Protocol.h */; settings = {ATTRIBUTES = (Public, ); }; };
- 83E50CF00FF19E8200D74C19 /* runtime.h in Headers */ = {isa = PBXBuildFile; fileRef = 8384861A0D6D68A800CEA253 /* runtime.h */; settings = {ATTRIBUTES = (Public, ); }; };
- 83E50CF10FF19E8200D74C19 /* List.h in Headers */ = {isa = PBXBuildFile; fileRef = 838486240D6D68F000CEA253 /* List.h */; settings = {ATTRIBUTES = (Public, ); }; };
- 83E50CF20FF19E8200D74C19 /* message.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485BD0D6D687300CEA253 /* message.h */; settings = {ATTRIBUTES = (Public, ); }; };
- 83E50CF30FF19E8200D74C19 /* objc-accessors.h in Headers */ = {isa = PBXBuildFile; fileRef = 830F2A920D73876100392440 /* objc-accessors.h */; };
- 83E50CF40FF19E8200D74C19 /* hashtable.h in Headers */ = {isa = PBXBuildFile; fileRef = 830F2A970D738DC200392440 /* hashtable.h */; settings = {ATTRIBUTES = (Public, ); }; };
- 83E50CF50FF19E8200D74C19 /* objc-references.h in Headers */ = {isa = PBXBuildFile; fileRef = 393CEAC50DC69E67000B69DE /* objc-references.h */; };
- 83E50CF60FF19E8200D74C19 /* objc-os.h in Headers */ = {isa = PBXBuildFile; fileRef = 831C85D30E10CF850066E64C /* objc-os.h */; };
- 83E50CF70FF19E8200D74C19 /* objc-gdb.h in Headers */ = {isa = PBXBuildFile; fileRef = 834266D70E665A8B002E4DA2 /* objc-gdb.h */; settings = {ATTRIBUTES = (Private, ); }; };
- 83E50CF80FF19E8200D74C19 /* objc-internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 83112ED30F00599600A5FBAF /* objc-internal.h */; settings = {ATTRIBUTES = (Private, ); }; };
- 83E50D130FF19E8200D74C19 /* Object.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485EE0D6D68A200CEA253 /* Object.mm */; };
- 83E50D140FF19E8200D74C19 /* Protocol.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838486190D6D68A800CEA253 /* Protocol.mm */; };
- 83E50D150FF19E8200D74C19 /* List.m in Sources */ = {isa = PBXBuildFile; fileRef = 838486230D6D68F000CEA253 /* List.m */; };
- 83E57595121E892100295464 /* objc-abi.h in Headers */ = {isa = PBXBuildFile; fileRef = 834EC0A311614167009B2563 /* objc-abi.h */; settings = {ATTRIBUTES = (Private, ); }; };
- 83E57596121E896200295464 /* objc-file-old.h in Headers */ = {isa = PBXBuildFile; fileRef = 83BE02E50FCCB24D00661494 /* objc-file-old.h */; };
- 83E57597121E8A0A00295464 /* objc-runtime-old.h in Headers */ = {isa = PBXBuildFile; fileRef = 83BE02E70FCCB24D00661494 /* objc-runtime-old.h */; };
- 83E57598121E8A1600295464 /* objc-file.h in Headers */ = {isa = PBXBuildFile; fileRef = 83BE02E60FCCB24D00661494 /* objc-file.h */; };
83EB007B121C9EC200B92C16 /* objc-sel-table.s in Sources */ = {isa = PBXBuildFile; fileRef = 83EB007A121C9EC200B92C16 /* objc-sel-table.s */; };
83F4B52815E843B100E0926F /* NSObjCRuntime.h in Headers */ = {isa = PBXBuildFile; fileRef = 83F4B52615E843B100E0926F /* NSObjCRuntime.h */; settings = {ATTRIBUTES = (Public, ); }; };
83F4B52915E843B100E0926F /* NSObject.h in Headers */ = {isa = PBXBuildFile; fileRef = 83F4B52715E843B100E0926F /* NSObject.h */; settings = {ATTRIBUTES = (Public, ); }; };
- 83F4B52B15E843C300E0926F /* NSObject.h in Headers */ = {isa = PBXBuildFile; fileRef = 83F4B52715E843B100E0926F /* NSObject.h */; settings = {ATTRIBUTES = (Public, ); }; };
- 83F4B52C15E843C800E0926F /* NSObjCRuntime.h in Headers */ = {isa = PBXBuildFile; fileRef = 83F4B52615E843B100E0926F /* NSObjCRuntime.h */; settings = {ATTRIBUTES = (Public, ); }; };
83F550E0155E030800E95D3B /* objc-cache-old.mm in Sources */ = {isa = PBXBuildFile; fileRef = 83F550DF155E030800E95D3B /* objc-cache-old.mm */; };
87BB4EA70EC39854005D08E1 /* objc-probes.d in Sources */ = {isa = PBXBuildFile; fileRef = 87BB4E900EC39633005D08E1 /* objc-probes.d */; };
9672F7EE14D5F488007CEC96 /* NSObject.mm in Sources */ = {isa = PBXBuildFile; fileRef = 9672F7ED14D5F488007CEC96 /* NSObject.mm */; };
- 9672F7EF14D5F488007CEC96 /* NSObject.mm in Sources */ = {isa = PBXBuildFile; fileRef = 9672F7ED14D5F488007CEC96 /* NSObject.mm */; };
BC07A00C0EF72D360014EC61 /* objc-auto-dump.h in Headers */ = {isa = PBXBuildFile; fileRef = BC07A00B0EF72D360014EC61 /* objc-auto-dump.h */; settings = {ATTRIBUTES = (Private, ); }; };
BC07A0110EF72D9C0014EC61 /* objc-auto-dump.mm in Sources */ = {isa = PBXBuildFile; fileRef = BC07A0100EF72D9C0014EC61 /* objc-auto-dump.mm */; };
E8923DA1116AB2820071B552 /* a1a2-blocktramps-i386.s in Sources */ = {isa = PBXBuildFile; fileRef = E8923D9C116AB2820071B552 /* a1a2-blocktramps-i386.s */; };
/* End PBXBuildFile section */
/* Begin PBXContainerItemProxy section */
- 835720F50F8BF8EE00BD4FAD /* PBXContainerItemProxy */ = {
+ 837F67AC1A771F6E004D34FA /* PBXContainerItemProxy */ = {
isa = PBXContainerItemProxy;
containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */;
proxyType = 1;
- remoteGlobalIDString = 830F2AA80D7394D000392440;
- remoteInfo = markgc;
+ remoteGlobalIDString = D2AAC0620554660B00DB518D;
+ remoteInfo = objc;
};
/* End PBXContainerItemProxy section */
830F2A690D737FB800392440 /* objc-msg-arm.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "objc-msg-arm.s"; path = "runtime/Messengers.subproj/objc-msg-arm.s"; sourceTree = "<group>"; };
830F2A6A0D737FB800392440 /* objc-msg-i386.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "objc-msg-i386.s"; path = "runtime/Messengers.subproj/objc-msg-i386.s"; sourceTree = "<group>"; };
830F2A720D737FB800392440 /* objc-msg-x86_64.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "objc-msg-x86_64.s"; path = "runtime/Messengers.subproj/objc-msg-x86_64.s"; sourceTree = "<group>"; tabWidth = 8; usesTabs = 1; };
- 830F2A920D73876100392440 /* objc-accessors.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-accessors.h"; path = "runtime/Accessors.subproj/objc-accessors.h"; sourceTree = "<group>"; };
- 830F2A930D73876100392440 /* objc-accessors.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-accessors.mm"; path = "runtime/Accessors.subproj/objc-accessors.mm"; sourceTree = "<group>"; };
+ 830F2A920D73876100392440 /* objc-accessors.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-accessors.h"; path = "runtime/objc-accessors.h"; sourceTree = "<group>"; };
+ 830F2A930D73876100392440 /* objc-accessors.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-accessors.mm"; path = "runtime/objc-accessors.mm"; sourceTree = "<group>"; };
830F2A970D738DC200392440 /* hashtable.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = hashtable.h; path = runtime/hashtable.h; sourceTree = "<group>"; };
- 830F2AA50D7394C200392440 /* markgc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = markgc.c; sourceTree = "<group>"; };
- 830F2AA90D7394D000392440 /* markgc */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = markgc; sourceTree = BUILT_PRODUCTS_DIR; };
+ 830F2AA50D7394C200392440 /* markgc.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = markgc.cpp; sourceTree = "<group>"; };
83112ED30F00599600A5FBAF /* objc-internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-internal.h"; path = "runtime/objc-internal.h"; sourceTree = "<group>"; };
831C85D30E10CF850066E64C /* objc-os.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-os.h"; path = "runtime/objc-os.h"; sourceTree = "<group>"; };
831C85D40E10CF850066E64C /* objc-os.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-os.mm"; path = "runtime/objc-os.mm"; sourceTree = "<group>"; };
83BE02E70FCCB24D00661494 /* objc-runtime-old.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-runtime-old.h"; path = "runtime/objc-runtime-old.h"; sourceTree = "<group>"; };
83C9C3381668B50E00F4E544 /* objc-msg-simulator-x86_64.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "objc-msg-simulator-x86_64.s"; path = "runtime/Messengers.subproj/objc-msg-simulator-x86_64.s"; sourceTree = "<group>"; };
83D49E4E13C7C84F0057F1DD /* objc-msg-arm64.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "objc-msg-arm64.s"; path = "runtime/Messengers.subproj/objc-msg-arm64.s"; sourceTree = "<group>"; };
- 83E50D2A0FF19E8200D74C19 /* libobjc.A.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libobjc.A.dylib; sourceTree = BUILT_PRODUCTS_DIR; };
- 83E50D2B0FF19E9E00D74C19 /* IndigoSDK.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; name = IndigoSDK.xcconfig; path = AppleInternal/XcodeConfig/IndigoSDK.xcconfig; sourceTree = DEVELOPER_DIR; };
83EB007A121C9EC200B92C16 /* objc-sel-table.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "objc-sel-table.s"; path = "runtime/objc-sel-table.s"; sourceTree = "<group>"; };
83F4B52615E843B100E0926F /* NSObjCRuntime.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = NSObjCRuntime.h; path = runtime/NSObjCRuntime.h; sourceTree = "<group>"; };
83F4B52715E843B100E0926F /* NSObject.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = NSObject.h; path = runtime/NSObject.h; sourceTree = "<group>"; };
/* End PBXFileReference section */
/* Begin PBXFrameworksBuildPhase section */
- 830F2AA70D7394D000392440 /* Frameworks */ = {
- isa = PBXFrameworksBuildPhase;
- buildActionMask = 2147483647;
- files = (
- );
- runOnlyForDeploymentPostprocessing = 0;
- };
- 83E50D240FF19E8200D74C19 /* Frameworks */ = {
- isa = PBXFrameworksBuildPhase;
- buildActionMask = 2147483647;
- files = (
- );
- runOnlyForDeploymentPostprocessing = 0;
- };
D289988505E68E00004EDB86 /* Frameworks */ = {
isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647;
838485DB0D6D68A200CEA253 /* objc-lockdebug.mm */,
83725F4914CA5BFA0014370E /* objc-opt.mm */,
831C85D40E10CF850066E64C /* objc-os.mm */,
- 831C85D40E10CF850066E64C /* objc-os.mm */,
393CEABF0DC69E3E000B69DE /* objc-references.mm */,
838485E10D6D68A200CEA253 /* objc-runtime-new.mm */,
838485E20D6D68A200CEA253 /* objc-runtime-old.mm */,
isa = PBXGroup;
children = (
D2AAC0630554660B00DB518D /* libobjc.A.dylib */,
- 830F2AA90D7394D000392440 /* markgc */,
- 83E50D2A0FF19E8200D74C19 /* libobjc.A.dylib */,
);
name = Products;
sourceTree = "<group>";
838485B20D6D67F900CEA253 /* Other */ = {
isa = PBXGroup;
children = (
- 830F2AA50D7394C200392440 /* markgc.c */,
+ 830F2AA50D7394C200392440 /* markgc.cpp */,
838485B40D6D683300CEA253 /* APPLE_LICENSE */,
838485B50D6D683300CEA253 /* ReleaseNotes.rtf */,
838485B30D6D682B00CEA253 /* libobjc.order */,
- 83E50D2B0FF19E9E00D74C19 /* IndigoSDK.xcconfig */,
);
name = Other;
sourceTree = "<group>";
/* End PBXGroup section */
/* Begin PBXHeadersBuildPhase section */
- 83E50CDA0FF19E8200D74C19 /* Headers */ = {
- isa = PBXHeadersBuildPhase;
- buildActionMask = 2147483647;
- files = (
- 83E50CF40FF19E8200D74C19 /* hashtable.h in Headers */,
- 83E50CDB0FF19E8200D74C19 /* hashtable2.h in Headers */,
- 83E50CF10FF19E8200D74C19 /* List.h in Headers */,
- 83E50CDC0FF19E8200D74C19 /* maptable.h in Headers */,
- 83E50CF20FF19E8200D74C19 /* message.h in Headers */,
- 83E57595121E892100295464 /* objc-abi.h in Headers */,
- 83E50CF30FF19E8200D74C19 /* objc-accessors.h in Headers */,
- 83E50CDD0FF19E8200D74C19 /* objc-api.h in Headers */,
- 83E50CDE0FF19E8200D74C19 /* objc-auto.h in Headers */,
- 83E50CDF0FF19E8200D74C19 /* objc-auto-dump.h in Headers */,
- 83E50CE00FF19E8200D74C19 /* objc-class.h in Headers */,
- 83E50CE10FF19E8200D74C19 /* objc-config.h in Headers */,
- 83E50CE20FF19E8200D74C19 /* objc-exception.h in Headers */,
- 83E57596121E896200295464 /* objc-file-old.h in Headers */,
- 83E57598121E8A1600295464 /* objc-file.h in Headers */,
- 83E50CF70FF19E8200D74C19 /* objc-gdb.h in Headers */,
- 83E50CE30FF19E8200D74C19 /* objc-initialize.h in Headers */,
- 83E50CF80FF19E8200D74C19 /* objc-internal.h in Headers */,
- 83E50CE40FF19E8200D74C19 /* objc-load.h in Headers */,
- 83E50CE50FF19E8200D74C19 /* objc-loadmethod.h in Headers */,
- 83E50CF60FF19E8200D74C19 /* objc-os.h in Headers */,
- 83E50CE60FF19E8200D74C19 /* objc-private.h in Headers */,
- 83E50CF50FF19E8200D74C19 /* objc-references.h in Headers */,
- 83E50CE80FF19E8200D74C19 /* objc-runtime-new.h in Headers */,
- 83E57597121E8A0A00295464 /* objc-runtime-old.h in Headers */,
- 83E50CE90FF19E8200D74C19 /* objc-runtime.h in Headers */,
- 83E50CEB0FF19E8200D74C19 /* objc-sel-set.h in Headers */,
- 83E50CEC0FF19E8200D74C19 /* objc-sync.h in Headers */,
- 83E50CED0FF19E8200D74C19 /* objc.h in Headers */,
- 83E50CEE0FF19E8200D74C19 /* Object.h in Headers */,
- 83E50CEF0FF19E8200D74C19 /* Protocol.h in Headers */,
- 83E50CF00FF19E8200D74C19 /* runtime.h in Headers */,
- 39ABD72512F0B61800D1054C /* objc-weak.h in Headers */,
- 83F4B52B15E843C300E0926F /* NSObject.h in Headers */,
- 83F4B52C15E843C800E0926F /* NSObjCRuntime.h in Headers */,
- );
- runOnlyForDeploymentPostprocessing = 0;
- };
D2AAC0600554660B00DB518D /* Headers */ = {
isa = PBXHeadersBuildPhase;
buildActionMask = 2147483647;
/* End PBXHeadersBuildPhase section */
/* Begin PBXNativeTarget section */
- 830F2AA80D7394D000392440 /* markgc */ = {
- isa = PBXNativeTarget;
- buildConfigurationList = 830F2AAE0D7394D600392440 /* Build configuration list for PBXNativeTarget "markgc" */;
- buildPhases = (
- 830F2AA60D7394D000392440 /* Sources */,
- 830F2AA70D7394D000392440 /* Frameworks */,
- );
- buildRules = (
- );
- dependencies = (
- );
- name = markgc;
- productName = markgc;
- productReference = 830F2AA90D7394D000392440 /* markgc */;
- productType = "com.apple.product-type.tool";
- };
- 83E50CD70FF19E8200D74C19 /* objc-simulator */ = {
- isa = PBXNativeTarget;
- buildConfigurationList = 83E50D270FF19E8200D74C19 /* Build configuration list for PBXNativeTarget "objc-simulator" */;
- buildPhases = (
- 83E50CDA0FF19E8200D74C19 /* Headers */,
- 83E50CFC0FF19E8200D74C19 /* Sources */,
- 83E50D240FF19E8200D74C19 /* Frameworks */,
- 83E50D260FF19E8200D74C19 /* Run Script (symlink) */,
- 96895502173DB369006D6747 /* Run Script (RC_HIDE_64) */,
- );
- buildRules = (
- );
- dependencies = (
- );
- name = "objc-simulator";
- productName = objc;
- productReference = 83E50D2A0FF19E8200D74C19 /* libobjc.A.dylib */;
- productType = "com.apple.product-type.library.dynamic";
- };
D2AAC0620554660B00DB518D /* objc */ = {
isa = PBXNativeTarget;
buildConfigurationList = 1DEB914A08733D8E0010E9CD /* Build configuration list for PBXNativeTarget "objc" */;
D289988505E68E00004EDB86 /* Frameworks */,
830F2AB60D739AB600392440 /* Run Script (markgc) */,
830F2AFA0D73BC5800392440 /* Run Script (symlink) */,
- 96BF404516F7DC5300DA41F6 /* Run Script (RC_HIDE_64) */,
);
buildRules = (
);
dependencies = (
- 835720F60F8BF8EE00BD4FAD /* PBXTargetDependency */,
);
name = objc;
productName = objc;
attributes = {
BuildIndependentTargetsInParallel = NO;
LastUpgradeCheck = 0440;
+ TargetAttributes = {
+ 837F67A81A771F63004D34FA = {
+ CreatedOnToolsVersion = 6.3;
+ };
+ };
};
buildConfigurationList = 1DEB914E08733D8E0010E9CD /* Build configuration list for PBXProject "objc" */;
compatibilityVersion = "Xcode 3.2";
projectRoot = "";
targets = (
D2AAC0620554660B00DB518D /* objc */,
- 830F2AA80D7394D000392440 /* markgc */,
- 83E50CD70FF19E8200D74C19 /* objc-simulator */,
+ 837F67A81A771F63004D34FA /* objc-simulator */,
);
};
/* End PBXProject section */
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
- shellScript = "if [ ${NATIVE_ARCH} = ${NATIVE_ARCH_32_BIT} -o ${NATIVE_ARCH} = ${NATIVE_ARCH_64_BIT} -o ${NATIVE_ARCH} = ${NATIVE_ARCH_ACTUAL} ]; then\n \"${BUILT_PRODUCTS_DIR}/markgc\" -p \"${BUILT_PRODUCTS_DIR}/libobjc.A.dylib\"\nelse\n echo \"Skipping markgc for cross compile.\"\nfi";
+ shellScript = "set -x\n/usr/bin/xcrun -toolchain XcodeDefault -sdk macosx clang++ -Wall -mmacosx-version-min=10.9 -arch x86_64 -std=c++11 \"${SRCROOT}/markgc.cpp\" -o \"${BUILT_PRODUCTS_DIR}/markgc\"\n\"${BUILT_PRODUCTS_DIR}/markgc\" \"${BUILT_PRODUCTS_DIR}/libobjc.A.dylib\"";
};
830F2AFA0D73BC5800392440 /* Run Script (symlink) */ = {
isa = PBXShellScriptBuildPhase;
shellPath = /bin/sh;
shellScript = "cd \"${INSTALL_DIR}\"\n/bin/ln -s libobjc.A.dylib libobjc.dylib\n";
};
- 83E50D260FF19E8200D74C19 /* Run Script (symlink) */ = {
- isa = PBXShellScriptBuildPhase;
- buildActionMask = 8;
- files = (
- );
- inputPaths = (
- );
- name = "Run Script (symlink)";
- outputPaths = (
- );
- runOnlyForDeploymentPostprocessing = 1;
- shellPath = /bin/sh;
- shellScript = "cd \"${INSTALL_DIR}\"\n/bin/ln -s libobjc.A.dylib libobjc.dylib\n";
- };
- 96895502173DB369006D6747 /* Run Script (RC_HIDE_64) */ = {
- isa = PBXShellScriptBuildPhase;
- buildActionMask = 8;
- files = (
- );
- inputPaths = (
- );
- name = "Run Script (RC_HIDE_64)";
- outputPaths = (
- );
- runOnlyForDeploymentPostprocessing = 1;
- shellPath = /bin/sh;
- shellScript = "cd \"${DSTROOT}\"\nif [ -n \"${RC_HIDE_64}\" ]\nthen\n find . -type f -name \"*.h\" | while read\n do\n unifdef -DOBJC_HIDE_64=1 -o \"$REPLY.tmp\" \"$REPLY\"\n sed 's/OBJC_ARM64_UNAVAILABLE//g' < \"$REPLY.tmp\" > \"$REPLY\"\n rm \"$REPLY.tmp\"\n done\nfi";
- };
- 96BF404516F7DC5300DA41F6 /* Run Script (RC_HIDE_64) */ = {
- isa = PBXShellScriptBuildPhase;
- buildActionMask = 8;
- files = (
- );
- inputPaths = (
- );
- name = "Run Script (RC_HIDE_64)";
- outputPaths = (
- );
- runOnlyForDeploymentPostprocessing = 1;
- shellPath = /bin/sh;
- shellScript = "cd \"${DSTROOT}\"\nif [ -n \"${RC_HIDE_64}\" ]\nthen\n find . -type f -name \"*.h\" | while read\n do\n unifdef -DOBJC_HIDE_64=1 -o \"$REPLY.tmp\" \"$REPLY\"\n sed 's/OBJC_ARM64_UNAVAILABLE//g' < \"$REPLY.tmp\" > \"$REPLY\"\n rm \"$REPLY.tmp\"\n done\nfi";
- };
/* End PBXShellScriptBuildPhase section */
/* Begin PBXSourcesBuildPhase section */
- 830F2AA60D7394D000392440 /* Sources */ = {
- isa = PBXSourcesBuildPhase;
- buildActionMask = 2147483647;
- files = (
- 830F2AB10D73962200392440 /* markgc.c in Sources */,
- );
- runOnlyForDeploymentPostprocessing = 0;
- };
- 83E50CFC0FF19E8200D74C19 /* Sources */ = {
- isa = PBXSourcesBuildPhase;
- buildActionMask = 2147483647;
- files = (
- 83E50D130FF19E8200D74C19 /* Object.mm in Sources */,
- 83E50D140FF19E8200D74C19 /* Protocol.mm in Sources */,
- 83E50D150FF19E8200D74C19 /* List.m in Sources */,
- 8383A3AC122600FB009290B8 /* a1a2-blocktramps-arm.s in Sources */,
- 8383A3AD122600FB009290B8 /* a2a3-blocktramps-arm.s in Sources */,
- 8383A3AE122600FB009290B8 /* hashtable2.mm in Sources */,
- 8383A3AF122600FB009290B8 /* maptable.mm in Sources */,
- 8383A3B0122600FB009290B8 /* objc-accessors.mm in Sources */,
- 8383A3B1122600FB009290B8 /* objc-auto.mm in Sources */,
- 8383A3B2122600FB009290B8 /* objc-auto-dump.mm in Sources */,
- 8383A3B3122600FB009290B8 /* objc-block-trampolines.mm in Sources */,
- 8383A3B4122600FB009290B8 /* objc-cache.mm in Sources */,
- 8383A3B5122600FB009290B8 /* objc-class-old.mm in Sources */,
- 8383A3B6122600FB009290B8 /* objc-class.mm in Sources */,
- 8383A3B7122600FB009290B8 /* objc-errors.mm in Sources */,
- 8383A3B8122600FB009290B8 /* objc-exception.mm in Sources */,
- 8383A3B9122600FB009290B8 /* objc-file.mm in Sources */,
- 8383A3BA122600FB009290B8 /* objc-file-old.mm in Sources */,
- 8383A3BB122600FB009290B8 /* objc-initialize.mm in Sources */,
- 8383A3BC122600FB009290B8 /* objc-layout.mm in Sources */,
- 8383A3BD122600FB009290B8 /* objc-load.mm in Sources */,
- 8383A3BE122600FB009290B8 /* objc-loadmethod.mm in Sources */,
- 8383A3BF122600FB009290B8 /* objc-lockdebug.mm in Sources */,
- 8383A3C0122600FB009290B8 /* objc-os.mm in Sources */,
- 8383A3C1122600FB009290B8 /* objc-references.mm in Sources */,
- 8383A3C3122600FB009290B8 /* objc-runtime-new.mm in Sources */,
- 8383A3C4122600FB009290B8 /* objc-runtime-old.mm in Sources */,
- 8383A3C5122600FB009290B8 /* objc-runtime.mm in Sources */,
- 8383A3C6122600FB009290B8 /* objc-sel-set.mm in Sources */,
- 8383A3C7122600FB009290B8 /* objc-sel-table.s in Sources */,
- 8383A3C8122600FB009290B8 /* objc-sel.mm in Sources */,
- 8383A3C9122600FB009290B8 /* objc-sync.mm in Sources */,
- 8383A3CA122600FB009290B8 /* objc-typeencoding.mm in Sources */,
- 8383A3CB122600FB009290B8 /* a1a2-blocktramps-i386.s in Sources */,
- 8383A3CC122600FB009290B8 /* a1a2-blocktramps-x86_64.s in Sources */,
- 8383A3CD122600FB009290B8 /* a2a3-blocktramps-i386.s in Sources */,
- 8383A3CE122600FB009290B8 /* a2a3-blocktramps-x86_64.s in Sources */,
- 8383A3D0122600FB009290B8 /* objc-msg-arm.s in Sources */,
- 8383A3D1122600FB009290B8 /* objc-msg-i386.s in Sources */,
- 8383A3D2122600FB009290B8 /* objc-msg-simulator-i386.s in Sources */,
- 8383A3D3122600FB009290B8 /* objc-msg-x86_64.s in Sources */,
- 8383A3D4122600FB009290B8 /* objc-probes.d in Sources */,
- 8383A3DC1226291C009290B8 /* objc-externalref.mm in Sources */,
- 39ABD72612F0B61800D1054C /* objc-weak.mm in Sources */,
- 83D49E5013C7C84F0057F1DD /* objc-msg-arm64.s in Sources */,
- 9672F7EF14D5F488007CEC96 /* NSObject.mm in Sources */,
- 83725F4C14CA5C210014370E /* objc-opt.mm in Sources */,
- 83C9C33A1668B56300F4E544 /* objc-msg-simulator-x86_64.s in Sources */,
- );
- runOnlyForDeploymentPostprocessing = 0;
- };
D2AAC0610554660B00DB518D /* Sources */ = {
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
/* End PBXSourcesBuildPhase section */
/* Begin PBXTargetDependency section */
- 835720F60F8BF8EE00BD4FAD /* PBXTargetDependency */ = {
+ 837F67AD1A771F6E004D34FA /* PBXTargetDependency */ = {
isa = PBXTargetDependency;
- target = 830F2AA80D7394D000392440 /* markgc */;
- targetProxy = 835720F50F8BF8EE00BD4FAD /* PBXContainerItemProxy */;
+ target = D2AAC0620554660B00DB518D /* objc */;
+ targetProxy = 837F67AC1A771F6E004D34FA /* PBXContainerItemProxy */;
};
/* End PBXTargetDependency section */
DYLIB_CURRENT_VERSION = 228;
EXECUTABLE_PREFIX = lib;
GCC_CW_ASM_SYNTAX = NO;
- GCC_DYNAMIC_NO_PIC = NO;
- GCC_MODEL_TUNING = G5;
GCC_OPTIMIZATION_LEVEL = 0;
GCC_THREADSAFE_STATICS = NO;
GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = NO;
);
INSTALL_PATH = /usr/lib;
ORDER_FILE = "$(SDKROOT)/AppleInternal/OrderFiles/libobjc.order";
+ "ORDER_FILE[sdk=iphonesimulator*]" = "";
OTHER_CFLAGS = (
"-fdollars-in-identifiers",
"$(OTHER_CFLAGS)",
"OTHER_LDFLAGS[sdk=iphoneos*][arch=*]" = (
"-lc++abi",
"-Wl,-segalign,0x4000",
- "-Xlinker -sectalign -Xlinker __DATA -Xlinker __objc_data -Xlinker 0x1000",
+ "-Xlinker",
+ "-sectalign",
+ "-Xlinker",
+ __DATA,
+ "-Xlinker",
+ __objc_data,
+ "-Xlinker",
+ 0x1000,
);
- "OTHER_LDFLAGS[sdk=iphonesimulator*][arch=*]" = "-l_BUILD_objc-simulator_TARGET_INSTEAD";
+ "OTHER_LDFLAGS[sdk=iphonesimulator*][arch=*]" = "-lc++abi";
"OTHER_LDFLAGS[sdk=macosx*]" = (
"-lCrashReporterClient",
"-lauto",
"-lc++abi",
- "-Xlinker -sectalign -Xlinker __DATA -Xlinker __objc_data -Xlinker 0x1000",
+ "-Xlinker",
+ "-sectalign",
+ "-Xlinker",
+ __DATA,
+ "-Xlinker",
+ __objc_data,
+ "-Xlinker",
+ 0x1000,
);
PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/objc;
PRODUCT_NAME = objc.A;
);
INSTALL_PATH = /usr/lib;
ORDER_FILE = "$(SDKROOT)/AppleInternal/OrderFiles/libobjc.order";
+ "ORDER_FILE[sdk=iphonesimulator*]" = "";
OTHER_CFLAGS = (
"-fdollars-in-identifiers",
"$(OTHER_CFLAGS)",
"OTHER_LDFLAGS[sdk=iphoneos*][arch=*]" = (
"-lc++abi",
"-Wl,-segalign,0x4000",
- "-Xlinker -sectalign -Xlinker __DATA -Xlinker __objc_data -Xlinker 0x1000",
+ "-Xlinker",
+ "-sectalign",
+ "-Xlinker",
+ __DATA,
+ "-Xlinker",
+ __objc_data,
+ "-Xlinker",
+ 0x1000,
);
- "OTHER_LDFLAGS[sdk=iphonesimulator*][arch=*]" = "-l_BUILD_objc-simulator_TARGET_INSTEAD";
+ "OTHER_LDFLAGS[sdk=iphonesimulator*][arch=*]" = "-lc++abi";
"OTHER_LDFLAGS[sdk=macosx*]" = (
"-lCrashReporterClient",
"-lauto",
"-lc++abi",
- "-Xlinker -sectalign -Xlinker __DATA -Xlinker __objc_data -Xlinker 0x1000",
+ "-Xlinker",
+ "-sectalign",
+ "-Xlinker",
+ __DATA,
+ "-Xlinker",
+ __objc_data,
+ "-Xlinker",
+ 0x1000,
);
PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/objc;
PRODUCT_NAME = objc.A;
"$(OTHER_CFLAGS)",
"-D_LIBCPP_VISIBLE=\"\"",
);
- STANDARD_C_PLUS_PLUS_LIBRARY_TYPE = dynamic;
WARNING_CFLAGS = (
"-Wall",
"-Wextra",
"$(OTHER_CFLAGS)",
"-D_LIBCPP_VISIBLE=\"\"",
);
- STANDARD_C_PLUS_PLUS_LIBRARY_TYPE = dynamic;
WARNING_CFLAGS = (
"-Wall",
"-Wextra",
};
name = Release;
};
- 830F2AAB0D7394D100392440 /* Debug */ = {
- isa = XCBuildConfiguration;
- buildSettings = {
- ALWAYS_SEARCH_USER_PATHS = NO;
- COPY_PHASE_STRIP = NO;
- GCC_C_LANGUAGE_STANDARD = gnu99;
- GCC_DYNAMIC_NO_PIC = NO;
- GCC_MODEL_TUNING = G5;
- GCC_OPTIMIZATION_LEVEL = 0;
- INSTALL_PATH = /usr/local/bin;
- PRODUCT_NAME = markgc;
- SKIP_INSTALL = YES;
- };
- name = Debug;
- };
- 830F2AAC0D7394D100392440 /* Release */ = {
- isa = XCBuildConfiguration;
- buildSettings = {
- ALWAYS_SEARCH_USER_PATHS = NO;
- COPY_PHASE_STRIP = YES;
- DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
- GCC_C_LANGUAGE_STANDARD = gnu99;
- GCC_DYNAMIC_NO_PIC = NO;
- GCC_MODEL_TUNING = G5;
- INSTALL_PATH = /usr/local/bin;
- PRODUCT_NAME = markgc;
- SKIP_INSTALL = YES;
- };
- name = Release;
- };
- 83E50D280FF19E8200D74C19 /* Debug */ = {
+ 837F67AA1A771F63004D34FA /* Debug */ = {
isa = XCBuildConfiguration;
- baseConfigurationReference = 83E50D2B0FF19E9E00D74C19 /* IndigoSDK.xcconfig */;
buildSettings = {
- ARCHS = i386;
- COPY_PHASE_STRIP = NO;
- DYLIB_CURRENT_VERSION = 227;
- EXECUTABLE_PREFIX = lib;
- GCC_CW_ASM_SYNTAX = NO;
- GCC_OPTIMIZATION_LEVEL = 0;
- GCC_THREADSAFE_STATICS = NO;
- GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = NO;
- HEADER_SEARCH_PATHS = (
- "$(DSTROOT)/$(INDIGO_INSTALL_PATH_PREFIX)/usr/include/**",
- "$(DSTROOT)/$(INDIGO_INSTALL_PATH_PREFIX)/usr/local/include/**",
- "$(CONFIGURATION_BUILD_DIR)/$(INDIGO_INSTALL_PATH_PREFIX)/usr/include/**",
- "$(CONFIGURATION_BUILD_DIR)/$(INDIGO_INSTALL_PATH_PREFIX)/usr/local/include/**",
- );
- INSTALL_PATH = "$(INDIGO_INSTALL_PATH_PREFIX)/usr/lib";
- LD_DYLIB_INSTALL_NAME_mh_dylib = "/usr/lib/$(EXECUTABLE_PATH)";
- OTHER_CFLAGS = (
- "-fobjc-legacy-dispatch",
- "-fobjc-abi-version=2",
- "-fdollars-in-identifiers",
- );
- OTHER_LDFLAGS = "-lc++abi";
- PRIVATE_HEADERS_FOLDER_PATH = "$(INDIGO_INSTALL_PATH_PREFIX)/usr/local/include/objc";
- PRODUCT_NAME = objc.A;
- PUBLIC_HEADERS_FOLDER_PATH = "$(INDIGO_INSTALL_PATH_PREFIX)/usr/include/objc";
- UNEXPORTED_SYMBOLS_FILE = unexported_symbols;
+ PRODUCT_NAME = "$(TARGET_NAME)";
};
name = Debug;
};
- 83E50D290FF19E8200D74C19 /* Release */ = {
+ 837F67AB1A771F63004D34FA /* Release */ = {
isa = XCBuildConfiguration;
- baseConfigurationReference = 83E50D2B0FF19E9E00D74C19 /* IndigoSDK.xcconfig */;
buildSettings = {
- ARCHS = i386;
- DYLIB_CURRENT_VERSION = 227;
- EXECUTABLE_PREFIX = lib;
- GCC_CW_ASM_SYNTAX = NO;
- GCC_THREADSAFE_STATICS = NO;
- GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = NO;
- HEADER_SEARCH_PATHS = (
- "$(DSTROOT)/$(INDIGO_INSTALL_PATH_PREFIX)/usr/include/**",
- "$(DSTROOT)/$(INDIGO_INSTALL_PATH_PREFIX)/usr/local/include/**",
- "$(CONFIGURATION_BUILD_DIR)/$(INDIGO_INSTALL_PATH_PREFIX)/usr/include/**",
- "$(CONFIGURATION_BUILD_DIR)/$(INDIGO_INSTALL_PATH_PREFIX)/usr/local/include/**",
- );
- INSTALL_PATH = "$(INDIGO_INSTALL_PATH_PREFIX)/usr/lib";
- LD_DYLIB_INSTALL_NAME_mh_dylib = "/usr/lib/$(EXECUTABLE_PATH)";
- OTHER_CFLAGS = (
- "-fobjc-legacy-dispatch",
- "-fobjc-abi-version=2",
- "-fdollars-in-identifiers",
- );
- OTHER_LDFLAGS = "-lc++abi";
- PRIVATE_HEADERS_FOLDER_PATH = "$(INDIGO_INSTALL_PATH_PREFIX)/usr/local/include/objc";
- PRODUCT_NAME = objc.A;
- PUBLIC_HEADERS_FOLDER_PATH = "$(INDIGO_INSTALL_PATH_PREFIX)/usr/include/objc";
- UNEXPORTED_SYMBOLS_FILE = unexported_symbols;
+ PRODUCT_NAME = "$(TARGET_NAME)";
};
name = Release;
};
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
- 830F2AAE0D7394D600392440 /* Build configuration list for PBXNativeTarget "markgc" */ = {
- isa = XCConfigurationList;
- buildConfigurations = (
- 830F2AAB0D7394D100392440 /* Debug */,
- 830F2AAC0D7394D100392440 /* Release */,
- );
- defaultConfigurationIsVisible = 0;
- defaultConfigurationName = Release;
- };
- 83E50D270FF19E8200D74C19 /* Build configuration list for PBXNativeTarget "objc-simulator" */ = {
+ 837F67A91A771F63004D34FA /* Build configuration list for PBXAggregateTarget "objc-simulator" */ = {
isa = XCConfigurationList;
buildConfigurations = (
- 83E50D280FF19E8200D74C19 /* Debug */,
- 83E50D290FF19E8200D74C19 /* Release */,
+ 837F67AA1A771F63004D34FA /* Debug */,
+ 837F67AB1A771F63004D34FA /* Release */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
+++ /dev/null
-#!/bin/sh
-# Simple script to run the libclosure tests
-# Note: to build the testing root, the makefile will ask to authenticate with sudo
-# Use the RootsDirectory environment variable to direct the build to somewhere other than /tmp/
-
-RootsDirectory=${RootsDirectory:-/tmp/}
-StartingDir="$PWD"
-ObjcDir="`dirname $0`"
-TestsDir="test/"
-cd "$ObjcDir"
-# <rdar://problem/6456031> ER: option to not require extra privileges (-nosudo or somesuch)
-Buildit="/Network/Servers/xs1/release/bin/buildit -rootsDirectory ${RootsDirectory} -arch i386 -arch x86_64 -project objc4 ."
-echo Sudoing for buildit:
-sudo $Buildit
-XIT=$?
-if [[ $XIT == 0 ]]; then
- cd "$TestsDir"
- #ObjcRootPath="$RootsDirectory/objc4.roots/objc4~dst/usr/lib/libobjc.A.dylib"
- #ObjcRootHeaders="$RootsDirectory/objc4.roots/objc4~dst/usr/include/"
- #make HALT=YES OBJC_LIB="$ObjcRootPath" OTHER_CFLAGS="-isystem $ObjcRootHeaders"
- perl test.pl ARCHS=x86_64 OBJC_ROOT="$RootsDirectory/objc4.roots/"
- XIT=`expr $XIT \| $?`
- perl test.pl ARCHS=i386 OBJC_ROOT="$RootsDirectory/objc4.roots/"
- XIT=`expr $XIT \| $?`
- perl test.pl ARCHS=x86_64 GUARDMALLOC=YES OBJC_ROOT="$RootsDirectory/objc4.roots/"
- XIT=`expr $XIT \| $?`
- perl test.pl ARCHS=i386 GUARDMALLOC=YES OBJC_ROOT="$RootsDirectory/objc4.roots/"
- XIT=`expr $XIT \| $?`
- perl test.pl clean
-fi
-cd "$StartingDir"
-exit $XIT
\ No newline at end of file
+++ /dev/null
-/*
- * Copyright (c) 2006-2007 Apple Inc. All Rights Reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-
-#ifndef _OBJC_ACCESSORS_H_
-#define _OBJC_ACCESSORS_H_
-
-#include <objc/objc.h>
-#include <stddef.h>
-
-__BEGIN_DECLS
-
-#if SUPPORT_GC
-
-extern void objc_setProperty_non_gc(id self, SEL _cmd, ptrdiff_t offset, id newValue, BOOL atomic, signed char shouldCopy);
-extern id objc_getProperty_non_gc(id self, SEL _cmd, ptrdiff_t offset, BOOL atomic);
-
-extern void objc_setProperty_gc(id self, SEL _cmd, ptrdiff_t offset, id newValue, BOOL atomic, signed char shouldCopy);
-extern id objc_getProperty_gc(id self, SEL _cmd, ptrdiff_t offset, BOOL atomic);
-
-#endif
-
-__END_DECLS
-
-#endif
+++ /dev/null
-/*
- * Copyright (c) 2006-2008 Apple Inc. All Rights Reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-
-#include <string.h>
-#include <stddef.h>
-
-#include <libkern/OSAtomic.h>
-
-#include "objc-private.h"
-#include "objc-auto.h"
-#include "runtime.h"
-#include "objc-accessors.h"
-
-// stub interface declarations to make compiler happy.
-
-@interface __NSCopyable
-- (id)copyWithZone:(void *)zone;
-@end
-
-@interface __NSMutableCopyable
-- (id)mutableCopyWithZone:(void *)zone;
-@end
-
-
-typedef uintptr_t spin_lock_t;
-OBJC_EXTERN void _spin_lock(spin_lock_t *lockp);
-OBJC_EXTERN int _spin_lock_try(spin_lock_t *lockp);
-OBJC_EXTERN void _spin_unlock(spin_lock_t *lockp);
-
-/* need to consider cache line contention - space locks out XXX */
-
-#define GOODPOWER 7
-#define GOODMASK ((1<<GOODPOWER)-1)
-#define GOODHASH(x) (((long)x >> 5) & GOODMASK)
-static spin_lock_t PropertyLocks[1 << GOODPOWER] = { 0 };
-
-#define MUTABLE_COPY 2
-
-id objc_getProperty_non_gc(id self, SEL _cmd, ptrdiff_t offset, BOOL atomic) {
- if (offset == 0) {
- return object_getClass(self);
- }
-
- // Retain release world
- id *slot = (id*) ((char*)self + offset);
- if (!atomic) return *slot;
-
- // Atomic retain release world
- spin_lock_t *slotlock = &PropertyLocks[GOODHASH(slot)];
- _spin_lock(slotlock);
- id value = objc_retain(*slot);
- _spin_unlock(slotlock);
-
- // for performance, we (safely) issue the autorelease OUTSIDE of the spinlock.
- return objc_autoreleaseReturnValue(value);
-}
-
-
-static inline void reallySetProperty(id self, SEL _cmd, id newValue, ptrdiff_t offset, bool atomic, bool copy, bool mutableCopy) __attribute__((always_inline));
-
-static inline void reallySetProperty(id self, SEL _cmd, id newValue, ptrdiff_t offset, bool atomic, bool copy, bool mutableCopy)
-{
- if (offset == 0) {
- object_setClass(self, newValue);
- return;
- }
-
- id oldValue;
- id *slot = (id*) ((char*)self + offset);
-
- if (copy) {
- newValue = [newValue copyWithZone:NULL];
- } else if (mutableCopy) {
- newValue = [newValue mutableCopyWithZone:NULL];
- } else {
- if (*slot == newValue) return;
- newValue = objc_retain(newValue);
- }
-
- if (!atomic) {
- oldValue = *slot;
- *slot = newValue;
- } else {
- spin_lock_t *slotlock = &PropertyLocks[GOODHASH(slot)];
- _spin_lock(slotlock);
- oldValue = *slot;
- *slot = newValue;
- _spin_unlock(slotlock);
- }
-
- objc_release(oldValue);
-}
-
-void objc_setProperty_non_gc(id self, SEL _cmd, ptrdiff_t offset, id newValue, BOOL atomic, signed char shouldCopy)
-{
- bool copy = (shouldCopy && shouldCopy != MUTABLE_COPY);
- bool mutableCopy = (shouldCopy == MUTABLE_COPY);
- reallySetProperty(self, _cmd, newValue, offset, atomic, copy, mutableCopy);
-}
-
-void objc_setProperty_atomic(id self, SEL _cmd, id newValue, ptrdiff_t offset)
-{
- reallySetProperty(self, _cmd, newValue, offset, true, false, false);
-}
-
-void objc_setProperty_nonatomic(id self, SEL _cmd, id newValue, ptrdiff_t offset)
-{
- reallySetProperty(self, _cmd, newValue, offset, false, false, false);
-}
-
-
-void objc_setProperty_atomic_copy(id self, SEL _cmd, id newValue, ptrdiff_t offset)
-{
- reallySetProperty(self, _cmd, newValue, offset, true, true, false);
-}
-
-void objc_setProperty_nonatomic_copy(id self, SEL _cmd, id newValue, ptrdiff_t offset)
-{
- reallySetProperty(self, _cmd, newValue, offset, false, true, false);
-}
-
-
-#if SUPPORT_GC
-
-id objc_getProperty_gc(id self, SEL _cmd, ptrdiff_t offset, BOOL atomic) {
- return *(id*) ((char*)self + offset);
-}
-
-void objc_setProperty_gc(id self, SEL _cmd, ptrdiff_t offset, id newValue, BOOL atomic, signed char shouldCopy) {
- if (shouldCopy) {
- newValue = (shouldCopy == MUTABLE_COPY ? [newValue mutableCopyWithZone:NULL] : [newValue copyWithZone:NULL]);
- }
- objc_assign_ivar(newValue, self, offset);
-}
-
-// objc_getProperty and objc_setProperty are resolver functions in objc-auto.mm
-
-#else
-
-id
-objc_getProperty(id self, SEL _cmd, ptrdiff_t offset, BOOL atomic)
-{
- return objc_getProperty_non_gc(self, _cmd, offset, atomic);
-}
-
-void
-objc_setProperty(id self, SEL _cmd, ptrdiff_t offset, id newValue,
- BOOL atomic, signed char shouldCopy)
-{
- objc_setProperty_non_gc(self, _cmd, offset, newValue, atomic, shouldCopy);
-}
-
-#endif
-
-
-// This entry point was designed wrong. When used as a getter, src needs to be locked so that
-// if simultaneously used for a setter then there would be contention on src.
-// So we need two locks - one of which will be contended.
-void objc_copyStruct(void *dest, const void *src, ptrdiff_t size, BOOL atomic, BOOL hasStrong) {
- static spin_lock_t StructLocks[1 << GOODPOWER] = { 0 };
- spin_lock_t *lockfirst = NULL;
- spin_lock_t *locksecond = NULL;
- if (atomic) {
- lockfirst = &StructLocks[GOODHASH(src)];
- locksecond = &StructLocks[GOODHASH(dest)];
- // order the locks by address so that we don't deadlock
- if (lockfirst > locksecond) {
- lockfirst = locksecond;
- locksecond = &StructLocks[GOODHASH(src)];
- }
- else if (lockfirst == locksecond) {
- // lucky - we only need one lock
- locksecond = NULL;
- }
- _spin_lock(lockfirst);
- if (locksecond) _spin_lock(locksecond);
- }
-#if SUPPORT_GC
- if (UseGC && hasStrong) {
- auto_zone_write_barrier_memmove(gc_zone, dest, src, size);
- } else
-#endif
- {
- memmove(dest, src, size);
- }
- if (atomic) {
- _spin_unlock(lockfirst);
- if (locksecond) _spin_unlock(locksecond);
- }
-}
-
-void objc_copyCppObjectAtomic(void *dest, const void *src, void (*copyHelper) (void *dest, const void *source)) {
- static spin_lock_t CppObjectLocks[1 << GOODPOWER] = { 0 };
- spin_lock_t *lockfirst = &CppObjectLocks[GOODHASH(src)], *locksecond = &CppObjectLocks[GOODHASH(dest)];
- // order the locks by address so that we don't deadlock
- if (lockfirst > locksecond) {
- spin_lock_t *temp = lockfirst;
- lockfirst = locksecond;
- locksecond = temp;
- } else if (lockfirst == locksecond) {
- // lucky - we only need one lock
- locksecond = NULL;
- }
- _spin_lock(lockfirst);
- if (locksecond) _spin_lock(locksecond);
-
- // let C++ code perform the actual copy.
- copyHelper(dest, src);
-
- _spin_unlock(lockfirst);
- if (locksecond) _spin_unlock(locksecond);
-}
# error requires armv7
#endif
+// Set FP=1 on architectures that pass parameters in floating-point registers
+#if __ARM_ARCH_7K__
+# define FP 1
+#else
+# define FP 0
+#endif
+
+#if FP
+
+# if !__ARM_NEON__
+# error sorry
+# endif
+
+# define FP_RETURN_ZERO \
+ vmov.i32 q0, #0 ; \
+ vmov.i32 q1, #0 ; \
+ vmov.i32 q2, #0 ; \
+ vmov.i32 q3, #0
+
+# define FP_SAVE \
+ vpush {q0-q3}
+
+# define FP_RESTORE \
+ vpop {q0-q3}
+
+#else
+
+# define FP_RETURN_ZERO
+# define FP_SAVE
+# define FP_RESTORE
+
+#endif
+
.syntax unified
#define MI_EXTERN(var) \
MI_EXTERN(___objc_error)
+.data
+
// _objc_entryPoints and _objc_exitPoints are used by method dispatch
// caching code to figure out whether any threads are actively
// in the cache for dispatching. The labels surround the asm code
// that do cache lookups. The tables are zero-terminated.
-.data
+
+.align 2
.private_extern _objc_entryPoints
_objc_entryPoints:
.long _cache_getImp
.long _objc_msgSendSuper2_stret
.long 0
-.data
.private_extern _objc_exitPoints
_objc_exitPoints:
.long LGetImpExit
b __objc_msgSend_uncached
LNilReceiver:
- mov r1, #0
+ // r0 is already zero
+ mov r1, #0
+ mov r2, #0
+ mov r3, #0
+ FP_RETURN_ZERO
MESSENGER_END_NIL
- bx lr
+ bx lr
LMsgSendExit:
END_ENTRY objc_msgSend
stmfd sp!, {r0-r3,r7,lr}
add r7, sp, #16
-
+ sub sp, #8 // align stack
+ FP_SAVE
// receiver already in r0
// selector already in r1
mov r2, r9 // class to search
mov r12, r0 // r12 = IMP
movs r9, #0 // r9=0, Z=1 for nonstret forwarding
+ FP_RESTORE
+ add sp, #8 // align stack
ldmfd sp!, {r0-r3,r7,lr}
bx r12
stmfd sp!, {r0-r3,r7,lr}
add r7, sp, #16
+ sub sp, #8 // align stack
+ FP_SAVE
mov r0, r1 // receiver
mov r1, r2 // selector
mov r12, r0 // r12 = IMP
movs r9, #1 // r9=1, Z=0 for stret forwarding
+ FP_RESTORE
+ add sp, #8 // align stack
ldmfd sp!, {r0-r3,r7,lr}
bx r12
#include <arm/arch.h>
+.data
+
// _objc_entryPoints and _objc_exitPoints are used by method dispatch
// caching code to figure out whether any threads are actively
// in the cache for dispatching. The labels surround the asm code
// that do cache lookups. The tables are zero-terminated.
-.data
+
+.align 4
.private_extern _objc_entryPoints
_objc_entryPoints:
.quad _cache_getImp
.quad _objc_msgSendSuper2
.quad 0
-.data
.private_extern _objc_exitPoints
_objc_exitPoints:
.quad LExit_cache_getImp
.endif
.endmacro
+.macro JumpMiss
+.if $0 == NORMAL
+ b __objc_msgSend_uncached_impcache
+.else
+ b LGetImpMiss
+.endif
+.endmacro
+
.macro CacheLookup
// x1 = SEL, x9 = isa
ldp x10, x11, [x9, #CACHE] // x10 = buckets, x11 = occupied|mask
3: // wrap: x12 = first bucket, w11 = mask
add x12, x12, w11, UXTW #4 // x12 = buckets+(mask<<4)
- // clone scanning loop to crash instead of hang when cache is corrupt
+ // Clone scanning loop to miss instead of hang when cache is corrupt.
+ // The slow path may detect any corruption and halt later.
ldp x16, x17, [x12] // {x16, x17} = *bucket
1: cmp x16, x1 // if (bucket->sel != _cmd)
ldp x16, x17, [x12, #-16]! // {x16, x17} = *--bucket
b 1b // loop
-3: // double wrap - busted
- // x0 = receiver
- // x1 = SEL
- mov x2, x9 // x2 = isa
-
-.if $0 == GETIMP
- mov x0, #0
- b _cache_getImp_corrupt_cache_error
-.else
- b _objc_msgSend_corrupt_cache_error
-.endif
-
+3: // double wrap
+ JumpMiss $0
+
.endmacro
// to get the critical regions for which method caches
// cannot be garbage collected.
+.align 2
.private_extern _objc_entryPoints
_objc_entryPoints:
.long __cache_getImp
.macro MethodTableLookup
MESSENGER_END_SLOW
- // stack is already aligned
- pushl %eax // class
- pushl %ecx // selector
- pushl %edx // receiver
+
+ // stack has return address and nothing else
+ subl $$(12+5*16), %esp
+
+ movdqa %xmm3, 4*16(%esp)
+ movdqa %xmm2, 3*16(%esp)
+ movdqa %xmm1, 2*16(%esp)
+ movdqa %xmm0, 1*16(%esp)
+
+ movl %eax, 8(%esp) // class
+ movl %ecx, 4(%esp) // selector
+ movl %edx, 0(%esp) // receiver
call __class_lookupMethodAndLoadCache3
- addl $$12, %esp // pop parameters
+
+ movdqa 4*16(%esp), %xmm3
+ movdqa 3*16(%esp), %xmm2
+ movdqa 2*16(%esp), %xmm1
+ movdqa 1*16(%esp), %xmm0
+
+ addl $$(12+5*16), %esp // pop parameters
.endmacro
LMsgSendNilSelf:
// %eax is already zero
movl $0,%edx
+ xorps %xmm0, %xmm0
LMsgSendDone:
MESSENGER_END_NIL
ret
// to get the critical regions for which method caches
// cannot be garbage collected.
+.align 2
.private_extern _objc_entryPoints
_objc_entryPoints:
.long _cache_getImp
1:
// loop
- cmpl $$0, (%eax)
- je LCacheMiss_f // if (bucket->sel == 0) cache miss
- cmpl 8(%edx), %eax
- je 3f // if (bucket = cache->buckets) wrap
+ cmpl $$1, (%eax)
+ jbe 3f // if (bucket->sel <= 1) wrap or miss
- subl $$8, %eax // bucket--
+ addl $$8, %eax // bucket++
2:
cmpl (%eax), %ecx // if (bucket->sel != sel)
jne 1b // scan more
CacheHit $0 // call or return imp
3:
+ // wrap or miss
+ jb LCacheMiss_f // if (bucket->sel < 1) cache miss
// wrap
- movzwl 12(%edx), %eax // eax = mask
- shll $$3, %eax // eax = offset = mask * 8
- addl 8(%edx), %eax // eax = bucket = cache->buckets+offset
+ movl 4(%eax), %eax // bucket->imp is really first bucket
jmp 2f
- // clone scanning loop to crash instead of hang when cache is corrupt
+ // Clone scanning loop to miss instead of hang when cache is corrupt.
+ // The slow path may detect any corruption and halt later.
1:
// loop
- cmpl $$0, (%eax)
- je LCacheMiss_f // if (bucket->sel == 0) cache miss
- cmpl 8(%edx), %eax
- je 3f // if (bucket = cache->buckets) wrap
+ cmpq $$1, (%eax)
+ jbe 3f // if (bucket->sel <= 1) wrap or miss
- subl $$8, %eax // bucket--
+ addl $$8, %eax // bucket++
2:
cmpl (%eax), %ecx // if (bucket->sel != sel)
jne 1b // scan more
CacheHit $0 // call or return imp
3:
- // double wrap - busted
-
- pushl %ebp
- movl %esp, %ebp
- pushl $$0
- pushl $$0
- pushl $$0 // stack alignment
- pushl %edx // isa
- pushl %ecx // SEL
-.if $0 == STRET || $0 == SUPER_STRET
- movl self_stret+4(%ebp), %ecx
-.elseif $0 == GETIMP
- movl $$0, %ecx
-.else
- movl self+4(%ebp), %ecx
-.endif
- pushl %ecx // receiver
-
-.if $0 == GETIMP
- call _cache_getImp_corrupt_cache_error
-.else
- call _objc_msgSend_corrupt_cache_error
-.endif
+ // double wrap or miss
+ jmp LCacheMiss_f
.endmacro
movl %esp, %ebp
.cfi_def_cfa_register ebp
+
+ subl $$(8+5*16), %esp
+
+ movdqa %xmm3, 4*16(%esp)
+ movdqa %xmm2, 3*16(%esp)
+ movdqa %xmm1, 2*16(%esp)
+ movdqa %xmm0, 1*16(%esp)
- sub $$12, %esp // align stack
-
- pushl %edx // class
- pushl %ecx // selector
- pushl %eax // receiver
+ movl %edx, 8(%esp) // class
+ movl %ecx, 4(%esp) // selector
+ movl %eax, 0(%esp) // receiver
call __class_lookupMethodAndLoadCache3
// imp in eax
-
+
+ movdqa 4*16(%esp), %xmm3
+ movdqa 3*16(%esp), %xmm2
+ movdqa 2*16(%esp), %xmm1
+ movdqa 1*16(%esp), %xmm0
+
leave
.cfi_def_cfa esp, 4
.cfi_same_value ebp
END_ENTRY _method_invoke_stret
-#if !defined(NDEBUG)
+#if DEBUG
STATIC_ENTRY __objc_ignored_method
movl self(%esp), %eax
// to get the critical regions for which method caches
// cannot be garbage collected.
+.align 4
.private_extern _objc_entryPoints
_objc_entryPoints:
.quad _cache_getImp
1:
// loop
- cmpq $$0, (%r10)
- je LCacheMiss_f // if (bucket->sel == 0) cache miss
- cmpq 16(%r11), %r10
- je 3f // if (bucket == cache->buckets) wrap
+ cmpq $$1, (%r10)
+ jbe 3f // if (bucket->sel <= 1) wrap or miss
- subq $$16, %r10 // bucket--
+ addq $$16, %r10 // bucket++
2:
.if $0 != STRET && $0 != SUPER_STRET && $0 != SUPER2_STRET
cmpq (%r10), %a2 // if (bucket->sel != _cmd)
CacheHit $0 // call or return imp
3:
+ // wrap or miss
+ jb LCacheMiss_f // if (bucket->sel < 1) cache miss
// wrap
- movl 24(%r11), %r10d // r10 = mask a.k.a. last bucket index
- shlq $$4, %r10 // r10 = offset = mask<<4
- addq 16(%r11), %r10 // r10 = &cache->buckets[mask]
+ movq 8(%r10), %r10 // bucket->imp is really first bucket
jmp 2f
- // clone scanning loop to crash instead of hang when cache is corrupt
+ // Clone scanning loop to miss instead of hang when cache is corrupt.
+ // The slow path may detect any corruption and halt later.
1:
// loop
- cmpq $$0, (%r10)
- je LCacheMiss_f // if (bucket->sel == 0) cache miss
- cmpq 16(%r11), %r10
- je 3f // if (bucket == cache->buckets) wrap
+ cmpq $$1, (%r10)
+ jbe 3f // if (bucket->sel <= 1) wrap or miss
- subq $$16, %r10 // bucket--
+ addq $$16, %r10 // bucket++
2:
.if $0 != STRET && $0 != SUPER_STRET && $0 != SUPER2_STRET
cmpq (%r10), %a2 // if (bucket->sel != _cmd)
CacheHit $0 // call or return imp
3:
- // double wrap - busted
-.if $0 == STRET || $0 == SUPER_STRET || $0 == SUPER2_STRET
- movq %a2, %a1
- movq %a3, %a2
-.elseif $0 == GETIMP
- movq $$0, %a1
-.endif
- // a1 = receiver
- // a2 = SEL
- movq %r11, %a3 // a3 = isa
-.if $0 == GETIMP
- jmp _cache_getImp_corrupt_cache_error
-.else
- jmp _objc_msgSend_corrupt_cache_error
-.endif
+ // double wrap or miss
+ jmp LCacheMiss_f
.endmacro
// to get the critical regions for which method caches
// cannot be garbage collected.
+.align 4
.private_extern _objc_entryPoints
_objc_entryPoints:
.quad _cache_getImp
1:
// loop
- cmpq $$0, (%r10)
- je LCacheMiss_f // if (bucket->sel == 0) cache miss
- cmpq 16(%r11), %r10
- je 3f // if (bucket == cache->buckets) wrap
+ cmpq $$1, (%r10)
+ jbe 3f // if (bucket->sel <= 1) wrap or miss
- subq $$16, %r10 // bucket--
+ addq $$16, %r10 // bucket++
2:
.if $0 != STRET && $0 != SUPER_STRET && $0 != SUPER2_STRET
cmpq (%r10), %a2 // if (bucket->sel != _cmd)
CacheHit $0 // call or return imp
3:
+ // wrap or miss
+ jb LCacheMiss_f // if (bucket->sel < 1) cache miss
// wrap
- movl 24(%r11), %r10d // r10 = mask a.k.a. last bucket index
- shlq $$4, %r10 // r10 = offset = mask<<4
- addq 16(%r11), %r10 // r10 = &cache->buckets[mask]
+ movq 8(%r10), %r10 // bucket->imp is really first bucket
jmp 2f
- // clone scanning loop to crash instead of hang when cache is corrupt
+ // Clone scanning loop to miss instead of hang when cache is corrupt.
+ // The slow path may detect any corruption and halt later.
1:
// loop
- cmpq $$0, (%r10)
- je LCacheMiss_f // if (bucket->sel == 0) cache miss
- cmpq 16(%r11), %r10
- je 3f // if (bucket == cache->buckets) wrap
+ cmpq $$1, (%r10)
+ jbe 3f // if (bucket->sel <= 1) wrap or miss
- subq $$16, %r10 // bucket--
+ addq $$16, %r10 // bucket++
2:
.if $0 != STRET && $0 != SUPER_STRET && $0 != SUPER2_STRET
cmpq (%r10), %a2 // if (bucket->sel != _cmd)
CacheHit $0 // call or return imp
3:
- // double wrap - busted
-.if $0 == STRET || $0 == SUPER_STRET || $0 == SUPER2_STRET
- movq %a2, %a1
- movq %a3, %a2
-.elseif $0 == GETIMP
- movq $$0, %a1
-.endif
- // a1 = receiver
- // a2 = SEL
- movq %r11, %a3 // a3 = isa
-.if $0 == GETIMP
- jmp _cache_getImp_corrupt_cache_error
-.else
- jmp _objc_msgSend_corrupt_cache_error
-.endif
+ // double wrap or miss
+ jmp LCacheMiss_f
.endmacro
.quad 0
.quad 0
+
+ // Workaround for Skype evil (rdar://19715989)
+
+ .text
+ .align 4
+ .private_extern _map_images
+ .private_extern _map_2_images
+ .private_extern _hax
+_hax:
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+_map_images:
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ jmp _map_2_images
+
#endif
#define NSINTEGER_DEFINED 1
+#ifndef NS_DESIGNATED_INITIALIZER
+#if __has_attribute(objc_designated_initializer)
+#define NS_DESIGNATED_INITIALIZER __attribute__((objc_designated_initializer))
+#else
+#define NS_DESIGNATED_INITIALIZER
+#endif
+#endif
+
#endif
@property (readonly) NSUInteger hash;
@property (readonly) Class superclass;
-- (Class)class;
+- (Class)class OBJC_SWIFT_UNAVAILABLE("use 'anObject.dynamicType' instead");
- (instancetype)self;
- (id)performSelector:(SEL)aSelector;
+ (void)load;
+ (void)initialize;
-- (instancetype)init;
+- (instancetype)init
+#if NS_ENFORCE_NSOBJECT_DESIGNATED_INITIALIZER
+ NS_DESIGNATED_INITIALIZER
+#endif
+ ;
-+ (instancetype)new;
-+ (instancetype)allocWithZone:(struct _NSZone *)zone;
-+ (instancetype)alloc;
-- (void)dealloc;
++ (instancetype)new OBJC_SWIFT_UNAVAILABLE("use object initializers instead");
++ (instancetype)allocWithZone:(struct _NSZone *)zone OBJC_SWIFT_UNAVAILABLE("use object initializers instead");
++ (instancetype)alloc OBJC_SWIFT_UNAVAILABLE("use object initializers instead");
+- (void)dealloc OBJC_SWIFT_UNAVAILABLE("use 'deinit' to define a de-initializer");
- (void)finalize;
- (void)doesNotRecognizeSelector:(SEL)aSelector;
- (id)forwardingTargetForSelector:(SEL)aSelector __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
-- (void)forwardInvocation:(NSInvocation *)anInvocation;
-- (NSMethodSignature *)methodSignatureForSelector:(SEL)aSelector;
+- (void)forwardInvocation:(NSInvocation *)anInvocation OBJC_SWIFT_UNAVAILABLE("");
+- (NSMethodSignature *)methodSignatureForSelector:(SEL)aSelector OBJC_SWIFT_UNAVAILABLE("");
-+ (NSMethodSignature *)instanceMethodSignatureForSelector:(SEL)aSelector;
++ (NSMethodSignature *)instanceMethodSignatureForSelector:(SEL)aSelector OBJC_SWIFT_UNAVAILABLE("");
- (BOOL)allowsWeakReference UNAVAILABLE_ATTRIBUTE;
- (BOOL)retainWeakReference UNAVAILABLE_ATTRIBUTE;
+ (NSUInteger)hash;
+ (Class)superclass;
-+ (Class)class;
++ (Class)class OBJC_SWIFT_UNAVAILABLE("use 'aClass.self' instead");
+ (NSString *)description;
+ (NSString *)debugDescription;
SYMBOL_ELSEWHERE_IN(.objc_class_name_NSObject, vers)
#endif
-#if TARGET_OS_IPHONE
+#if TARGET_OS_IOS
NSOBJECT_ELSEWHERE_IN(5.1);
NSOBJECT_ELSEWHERE_IN(5.0);
NSOBJECT_ELSEWHERE_IN(4.3);
NSOBJECT_ELSEWHERE_IN(2.2);
NSOBJECT_ELSEWHERE_IN(2.1);
NSOBJECT_ELSEWHERE_IN(2.0);
-#else
+#elif TARGET_OS_MAC && !TARGET_OS_IPHONE
NSOBJECT_ELSEWHERE_IN(10.7);
NSOBJECT_ELSEWHERE_IN(10.6);
NSOBJECT_ELSEWHERE_IN(10.5);
NSOBJECT_ELSEWHERE_IN(10.2);
NSOBJECT_ELSEWHERE_IN(10.1);
NSOBJECT_ELSEWHERE_IN(10.0);
+#else
+ // NSObject has always been in libobjc on these platforms.
#endif
// TARGET_OS_MAC
namespace {
-#if TARGET_OS_EMBEDDED
-# define SIDE_TABLE_STRIPE 8
-#else
-# define SIDE_TABLE_STRIPE 64
-#endif
-
-// should be a multiple of cache line size (64)
-#define SIDE_TABLE_SIZE 128
-
// The order of these bits is important.
#define SIDE_TABLE_WEAKLY_REFERENCED (1UL<<0)
#define SIDE_TABLE_DEALLOCATING (1UL<<1) // MSB-ward of weak bit
// don't want the table to act as a root for `leaks`.
typedef objc::DenseMap<DisguisedPtr<objc_object>,size_t,true> RefcountMap;
-class SideTable {
-private:
- static uint8_t table_buf[SIDE_TABLE_STRIPE * SIDE_TABLE_SIZE];
-
-public:
+struct SideTable {
spinlock_t slock;
RefcountMap refcnts;
weak_table_t weak_table;
- SideTable() : slock(SPINLOCK_INITIALIZER)
- {
+ SideTable() {
memset(&weak_table, 0, sizeof(weak_table));
}
-
- ~SideTable()
- {
- // never delete side_table in case other threads retain during exit
- assert(0);
- }
- static SideTable *tableForPointer(const void *p)
- {
-# if SIDE_TABLE_STRIPE == 1
- return (SideTable *)table_buf;
-# else
- uintptr_t a = (uintptr_t)p;
- int index = ((a >> 4) ^ (a >> 9)) & (SIDE_TABLE_STRIPE - 1);
- return (SideTable *)&table_buf[index * SIDE_TABLE_SIZE];
-# endif
- }
-
- static void init() {
- // use placement new instead of static ctor to avoid dtor at exit
- for (int i = 0; i < SIDE_TABLE_STRIPE; i++) {
- new (&table_buf[i * SIDE_TABLE_SIZE]) SideTable;
- }
+ ~SideTable() {
+ _objc_fatal("Do not delete SideTable.");
}
+
+ void lock() { slock.lock(); }
+ void unlock() { slock.unlock(); }
+ bool trylock() { return slock.trylock(); }
+
+ // Address-ordered lock discipline for a pair of side tables.
+
+ template<bool HaveOld, bool HaveNew>
+ static void lockTwo(SideTable *lock1, SideTable *lock2);
+ template<bool HaveOld, bool HaveNew>
+ static void unlockTwo(SideTable *lock1, SideTable *lock2);
};
-STATIC_ASSERT(sizeof(SideTable) <= SIDE_TABLE_SIZE);
-__attribute__((aligned(SIDE_TABLE_SIZE))) uint8_t
-SideTable::table_buf[SIDE_TABLE_STRIPE * SIDE_TABLE_SIZE];
+
+template<>
+void SideTable::lockTwo<true, true>(SideTable *lock1, SideTable *lock2) {
+ spinlock_t::lockTwo(&lock1->slock, &lock2->slock);
+}
+
+template<>
+void SideTable::lockTwo<true, false>(SideTable *lock1, SideTable *) {
+ lock1->lock();
+}
+
+template<>
+void SideTable::lockTwo<false, true>(SideTable *, SideTable *lock2) {
+ lock2->lock();
+}
+
+template<>
+void SideTable::unlockTwo<true, true>(SideTable *lock1, SideTable *lock2) {
+ spinlock_t::unlockTwo(&lock1->slock, &lock2->slock);
+}
+
+template<>
+void SideTable::unlockTwo<true, false>(SideTable *lock1, SideTable *) {
+ lock1->unlock();
+}
+
+template<>
+void SideTable::unlockTwo<false, true>(SideTable *, SideTable *lock2) {
+ lock2->unlock();
+}
+
+
+
+// We cannot use a C++ static initializer to initialize SideTables because
+// libc calls us before our C++ initializers run. We also don't want a global
+// pointer to this struct because of the extra indirection.
+// Do it the hard way.
+alignas(StripedMap<SideTable>) static uint8_t
+ SideTableBuf[sizeof(StripedMap<SideTable>)];
+
+static void SideTableInit() {
+ new (SideTableBuf) StripedMap<SideTable>();
+}
+
+static StripedMap<SideTable>& SideTables() {
+ return *reinterpret_cast<StripedMap<SideTable>*>(SideTableBuf);
+}
// anonymous namespace
};
}
-/**
- * This function stores a new value into a __weak variable. It would
- * be used anywhere a __weak variable is the target of an assignment.
- *
- * @param location The address of the weak pointer itself
- * @param newObj The new object this weak ptr should now point to
- *
- * @return \e newObj
- */
-id
-objc_storeWeak(id *location, id newObj)
+// Update a weak variable.
+// If HaveOld is true, the variable has an existing value
+// that needs to be cleaned up. This value might be nil.
+// If HaveNew is true, there is a new value that needs to be
+// assigned into the variable. This value might be nil.
+// If CrashIfDeallocating is true, the process is halted if newObj is
+// deallocating or newObj's class does not support weak references.
+// If CrashIfDeallocating is false, nil is stored instead.
+template <bool HaveOld, bool HaveNew, bool CrashIfDeallocating>
+static id
+storeWeak(id *location, objc_object *newObj)
{
+ assert(HaveOld || HaveNew);
+ if (!HaveNew) assert(newObj == nil);
+
+ Class previouslyInitializedClass = nil;
id oldObj;
SideTable *oldTable;
SideTable *newTable;
- spinlock_t *lock1;
-#if SIDE_TABLE_STRIPE > 1
- spinlock_t *lock2;
-#endif
// Acquire locks for old and new values.
// Order by lock address to prevent lock ordering problems.
// Retry if the old value changes underneath us.
retry:
- oldObj = *location;
-
- oldTable = SideTable::tableForPointer(oldObj);
- newTable = SideTable::tableForPointer(newObj);
-
- lock1 = &newTable->slock;
-#if SIDE_TABLE_STRIPE > 1
- lock2 = &oldTable->slock;
- if (lock1 > lock2) {
- spinlock_t *temp = lock1;
- lock1 = lock2;
- lock2 = temp;
- }
- if (lock1 != lock2) spinlock_lock(lock2);
-#endif
- spinlock_lock(lock1);
+ if (HaveOld) {
+ oldObj = *location;
+ oldTable = &SideTables()[oldObj];
+ } else {
+ oldTable = nil;
+ }
+ if (HaveNew) {
+ newTable = &SideTables()[newObj];
+ } else {
+ newTable = nil;
+ }
- if (*location != oldObj) {
- spinlock_unlock(lock1);
-#if SIDE_TABLE_STRIPE > 1
- if (lock1 != lock2) spinlock_unlock(lock2);
-#endif
+ SideTable::lockTwo<HaveOld, HaveNew>(oldTable, newTable);
+
+ if (HaveOld && *location != oldObj) {
+ SideTable::unlockTwo<HaveOld, HaveNew>(oldTable, newTable);
goto retry;
}
- weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
- newObj = weak_register_no_lock(&newTable->weak_table, newObj, location);
- // weak_register_no_lock returns nil if weak store should be rejected
+ // Prevent a deadlock between the weak reference machinery
+ // and the +initialize machinery by ensuring that no
+ // weakly-referenced object has an un-+initialized isa.
+ if (HaveNew && newObj) {
+ Class cls = newObj->getIsa();
+ if (cls != previouslyInitializedClass &&
+ !((objc_class *)cls)->isInitialized())
+ {
+ SideTable::unlockTwo<HaveOld, HaveNew>(oldTable, newTable);
+ _class_initialize(_class_getNonMetaClass(cls, (id)newObj));
+
+ // If this class is finished with +initialize then we're good.
+ // If this class is still running +initialize on this thread
+ // (i.e. +initialize called storeWeak on an instance of itself)
+ // then we may proceed but it will appear initializing and
+ // not yet initialized to the check above.
+ // Instead set previouslyInitializedClass to recognize it on retry.
+ previouslyInitializedClass = cls;
+
+ goto retry;
+ }
+ }
- // Set is-weakly-referenced bit in refcount table.
- if (newObj && !newObj->isTaggedPointer()) {
- newObj->setWeaklyReferenced_nolock();
+ // Clean up old value, if any.
+ if (HaveOld) {
+ weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
}
- // Do not set *location anywhere else. That would introduce a race.
- *location = newObj;
+ // Assign new value, if any.
+ if (HaveNew) {
+ newObj = (objc_object *)weak_register_no_lock(&newTable->weak_table,
+ (id)newObj, location,
+ CrashIfDeallocating);
+ // weak_register_no_lock returns nil if weak store should be rejected
+
+ // Set is-weakly-referenced bit in refcount table.
+ if (newObj && !newObj->isTaggedPointer()) {
+ newObj->setWeaklyReferenced_nolock();
+ }
+
+ // Do not set *location anywhere else. That would introduce a race.
+ *location = (id)newObj;
+ }
+ else {
+ // No new value. The storage is not changed.
+ }
- spinlock_unlock(lock1);
-#if SIDE_TABLE_STRIPE > 1
- if (lock1 != lock2) spinlock_unlock(lock2);
-#endif
+ SideTable::unlockTwo<HaveOld, HaveNew>(oldTable, newTable);
- return newObj;
+ return (id)newObj;
}
+
+/**
+ * This function stores a new value into a __weak variable. It would
+ * be used anywhere a __weak variable is the target of an assignment.
+ *
+ * @param location The address of the weak pointer itself
+ * @param newObj The new object this weak ptr should now point to
+ *
+ * @return \e newObj
+ */
id
-objc_loadWeakRetained(id *location)
+objc_storeWeak(id *location, id newObj)
{
- id result;
-
- SideTable *table;
- spinlock_t *lock;
-
- retry:
- result = *location;
- if (!result) return nil;
-
- table = SideTable::tableForPointer(result);
- lock = &table->slock;
-
- spinlock_lock(lock);
- if (*location != result) {
- spinlock_unlock(lock);
- goto retry;
- }
-
- result = weak_read_no_lock(&table->weak_table, location);
-
- spinlock_unlock(lock);
- return result;
+ return storeWeak<true/*old*/, true/*new*/, true/*crash*/>
+ (location, (objc_object *)newObj);
}
+
/**
- * This loads the object referenced by a weak pointer and returns it, after
- * retaining and autoreleasing the object to ensure that it stays alive
- * long enough for the caller to use it. This function would be used
- * anywhere a __weak variable is used in an expression.
+ * This function stores a new value into a __weak variable.
+ * If the new object is deallocating or the new object's class
+ * does not support weak references, stores nil instead.
*
- * @param location The weak pointer address
+ * @param location The address of the weak pointer itself
+ * @param newObj The new object this weak ptr should now point to
*
- * @return The object pointed to by \e location, or \c nil if \e location is \c nil.
+ * @return The value stored (either the new object or nil)
*/
id
-objc_loadWeak(id *location)
+objc_storeWeakOrNil(id *location, id newObj)
{
- if (!*location) return nil;
- return objc_autorelease(objc_loadWeakRetained(location));
+ return storeWeak<true/*old*/, true/*new*/, false/*crash*/>
+ (location, (objc_object *)newObj);
}
+
/**
* Initialize a fresh weak pointer to some object location.
* It would be used for code like:
* NSObject *o = ...;
* __weak id weakPtr = o;
*
- * @param addr Address of __weak ptr.
- * @param val Object ptr.
+ * This function IS NOT thread-safe with respect to concurrent
+ * modifications to the weak variable. (Concurrent weak clear is safe.)
+ *
+ * @param location Address of __weak ptr.
+ * @param newObj Object ptr.
*/
id
-objc_initWeak(id *addr, id val)
+objc_initWeak(id *location, id newObj)
+{
+ if (!newObj) {
+ *location = nil;
+ return nil;
+ }
+
+ return storeWeak<false/*old*/, true/*new*/, true/*crash*/>
+ (location, (objc_object*)newObj);
+}
+
+id
+objc_initWeakOrNil(id *location, id newObj)
+{
+ if (!newObj) {
+ *location = nil;
+ return nil;
+ }
+
+ return storeWeak<false/*old*/, true/*new*/, false/*crash*/>
+ (location, (objc_object*)newObj);
+}
+
+
+/**
+ * Destroys the relationship between a weak pointer
+ * and the object it is referencing in the internal weak
+ * table. If the weak pointer is not referencing anything,
+ * there is no need to edit the weak table.
+ *
+ * This function IS NOT thread-safe with respect to concurrent
+ * modifications to the weak variable. (Concurrent weak clear is safe.)
+ *
+ * @param location The weak pointer address.
+ */
+void
+objc_destroyWeak(id *location)
{
- *addr = 0;
- if (!val) return nil;
- return objc_storeWeak(addr, val);
+ (void)storeWeak<true/*old*/, false/*new*/, false/*crash*/>
+ (location, nil);
}
-__attribute__((noinline, used)) void
-objc_destroyWeak_slow(id *addr)
+
+id
+objc_loadWeakRetained(id *location)
{
- SideTable *oldTable;
- spinlock_t *lock;
- id oldObj;
+ id result;
- // No need to see weak refs, we are destroying
+ SideTable *table;
- // Acquire lock for old value only
- // retry if the old value changes underneath us
- retry:
- oldObj = *addr;
- oldTable = SideTable::tableForPointer(oldObj);
+ retry:
+ result = *location;
+ if (!result) return nil;
- lock = &oldTable->slock;
- spinlock_lock(lock);
+ table = &SideTables()[result];
- if (*addr != oldObj) {
- spinlock_unlock(lock);
+ table->lock();
+ if (*location != result) {
+ table->unlock();
goto retry;
}
- weak_unregister_no_lock(&oldTable->weak_table, oldObj, addr);
-
- spinlock_unlock(lock);
+ result = weak_read_no_lock(&table->weak_table, location);
+
+ table->unlock();
+ return result;
}
/**
- * Destroys the relationship between a weak pointer
- * and the object it is referencing in the internal weak
- * table. If the weak pointer is not referencing anything,
- * there is no need to edit the weak table.
+ * This loads the object referenced by a weak pointer and returns it, after
+ * retaining and autoreleasing the object to ensure that it stays alive
+ * long enough for the caller to use it. This function would be used
+ * anywhere a __weak variable is used in an expression.
*
- * @param addr The weak pointer address.
+ * @param location The weak pointer address
+ *
+ * @return The object pointed to by \e location, or \c nil if \e location is \c nil.
*/
-void
-objc_destroyWeak(id *addr)
+id
+objc_loadWeak(id *location)
{
- if (!*addr) return;
- return objc_destroyWeak_slow(addr);
+ if (!*location) return nil;
+ return objc_autorelease(objc_loadWeakRetained(location));
}
+
/**
* This function copies a weak pointer from one location to another,
* when the destination doesn't already contain a weak pointer. It
* would be used for code like:
*
- * __weak id weakPtr1 = ...;
- * __weak id weakPtr2 = weakPtr1;
+ * __weak id src = ...;
+ * __weak id dst = src;
*
- * @param to weakPtr2 in this ex
- * @param from weakPtr1
+ * This function IS NOT thread-safe with respect to concurrent
+ * modifications to the destination variable. (Concurrent weak clear is safe.)
+ *
+ * @param dst The destination variable.
+ * @param src The source variable.
*/
void
-objc_copyWeak(id *to, id *from)
+objc_copyWeak(id *dst, id *src)
{
- id val = objc_loadWeakRetained(from);
- objc_initWeak(to, val);
- objc_release(val);
+ id obj = objc_loadWeakRetained(src);
+ objc_initWeak(dst, obj);
+ objc_release(obj);
}
/**
* Move a weak pointer from one location to another.
* Before the move, the destination must be uninitialized.
* After the move, the source is nil.
+ *
+ * This function IS NOT thread-safe with respect to concurrent
+ * modifications to either weak variable. (Concurrent weak clear is safe.)
+ *
*/
void
-objc_moveWeak(id *to, id *from)
+objc_moveWeak(id *dst, id *src)
{
- objc_copyWeak(to, from);
- objc_storeWeak(from, 0);
+ objc_copyWeak(dst, src);
+ objc_destroyWeak(src);
+ *src = nil;
}
}
bool fastcheck() const {
-#ifdef NDEBUG
- return (m[0] == M0);
-#else
+#if DEBUG
return check();
+#else
+ return (m[0] == M0);
#endif
}
setHotPage(this);
-#ifndef NDEBUG
+#if DEBUG
// we expect any children to be completely empty
for (AutoreleasePoolPage *page = child; page; page = page->child) {
assert(page->empty());
{
// reinstate TLS value while we work
setHotPage((AutoreleasePoolPage *)p);
- pop(0);
+
+ if (AutoreleasePoolPage *page = coldPage()) {
+ if (!page->empty()) pop(page->begin()); // pop all of the pools
+ if (DebugMissingPools || DebugPoolAllocation) {
+ // pop() killed the pages already
+ } else {
+ page->kill(); // free all of the pages
+ }
+ }
+
+ // clear TLS value so TLS destruction doesn't loop
setHotPage(nil);
}
// The hot page is full.
// Step to the next non-full page, adding a new page if necessary.
// Then add the object to that page.
- assert(page == hotPage() && page->full());
+ assert(page == hotPage());
+ assert(page->full() || DebugPoolAllocation);
do {
if (page->child) page = page->child;
return page->add(obj);
}
+
+ static __attribute__((noinline))
+ id *autoreleaseNewPage(id obj)
+ {
+ AutoreleasePoolPage *page = hotPage();
+ if (page) return autoreleaseFullPage(obj, page);
+ else return autoreleaseNoPage(obj);
+ }
+
public:
static inline id autorelease(id obj)
{
static inline void *push()
{
- id *dest = autoreleaseFast(POOL_SENTINEL);
+ id *dest;
+ if (DebugPoolAllocation) {
+ // Each autorelease pool starts on a new pool page.
+ dest = autoreleaseNewPage(POOL_SENTINEL);
+ } else {
+ dest = autoreleaseFast(POOL_SENTINEL);
+ }
assert(*dest == POOL_SENTINEL);
return dest;
}
AutoreleasePoolPage *page;
id *stop;
- if (token) {
- page = pageForPointer(token);
- stop = (id *)token;
- assert(*stop == POOL_SENTINEL);
- } else {
- // Token 0 is top-level pool
- page = coldPage();
- assert(page);
- stop = page->begin();
+ page = pageForPointer(token);
+ stop = (id *)token;
+ if (DebugPoolAllocation && *stop != POOL_SENTINEL) {
+ // This check is not valid with DebugPoolAllocation off
+ // after an autorelease with a pool page but no pool in place.
+ _objc_fatal("invalid or prematurely-freed autorelease pool %p; ",
+ token);
}
if (PrintPoolHiwat) printHiwat();
page->releaseUntil(stop);
// memory: delete empty children
- // hysteresis: keep one empty child if this page is more than half full
- // special case: delete everything for pop(0)
- // special case: delete everything for pop(top) with DebugMissingPools
- if (!token ||
- (DebugMissingPools && page->empty() && !page->parent))
- {
+ if (DebugPoolAllocation && page->empty()) {
+ // special case: delete everything during page-per-pool debugging
+ AutoreleasePoolPage *parent = page->parent;
+ page->kill();
+ setHotPage(parent);
+ } else if (DebugMissingPools && page->empty() && !page->parent) {
+ // special case: delete everything for pop(top)
+ // when debugging missing autorelease pools
page->kill();
setHotPage(nil);
- } else if (page->child) {
+ }
+ else if (page->child) {
+ // hysteresis: keep one empty child if page is more than half full
if (page->lessThanHalfFull()) {
page->child->kill();
}
// Slow path of clearDeallocating()
-// for weakly-referenced objects with indexed isa
+// for objects with indexed isa
+// that were ever weakly referenced
+// or whose retain count ever overflowed to the side table.
NEVER_INLINE void
-objc_object::clearDeallocating_weak()
+objc_object::clearDeallocating_slow()
{
- assert(isa.indexed && isa.weakly_referenced);
+ assert(isa.indexed && (isa.weakly_referenced || isa.has_sidetable_rc));
- SideTable *table = SideTable::tableForPointer(this);
- spinlock_lock(&table->slock);
- weak_clear_no_lock(&table->weak_table, (id)this);
- spinlock_unlock(&table->slock);
+ SideTable& table = SideTables()[this];
+ table.lock();
+ if (isa.weakly_referenced) {
+ weak_clear_no_lock(&table.weak_table, (id)this);
+ }
+ if (isa.has_sidetable_rc) {
+ table.refcnts.erase(this);
+ }
+ table.unlock();
}
#endif
**********************************************************************/
-#if !NDEBUG
+#if DEBUG
// Used to assert that an object is not present in the side table.
bool
objc_object::sidetable_present()
{
bool result = false;
- SideTable *table = SideTable::tableForPointer(this);
+ SideTable& table = SideTables()[this];
- spinlock_lock(&table->slock);
+ table.lock();
- RefcountMap::iterator it = table->refcnts.find(this);
- if (it != table->refcnts.end()) result = true;
+ RefcountMap::iterator it = table.refcnts.find(this);
+ if (it != table.refcnts.end()) result = true;
- if (weak_is_registered_no_lock(&table->weak_table, (id)this)) result = true;
+ if (weak_is_registered_no_lock(&table.weak_table, (id)this)) result = true;
- spinlock_unlock(&table->slock);
+ table.unlock();
return result;
}
void
objc_object::sidetable_lock()
{
- SideTable *table = SideTable::tableForPointer(this);
- spinlock_lock(&table->slock);
+ SideTable& table = SideTables()[this];
+ table.lock();
}
void
objc_object::sidetable_unlock()
{
- SideTable *table = SideTable::tableForPointer(this);
- spinlock_unlock(&table->slock);
+ SideTable& table = SideTables()[this];
+ table.unlock();
}
bool weaklyReferenced)
{
assert(!isa.indexed); // should already be changed to not-indexed
- SideTable *table = SideTable::tableForPointer(this);
+ SideTable& table = SideTables()[this];
- size_t& refcntStorage = table->refcnts[this];
+ size_t& refcntStorage = table.refcnts[this];
size_t oldRefcnt = refcntStorage;
// not deallocating - that was in the isa
assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
uintptr_t carry;
- size_t refcnt = addc(oldRefcnt, extra_rc<<SIDE_TABLE_RC_SHIFT, 0, &carry);
+ size_t refcnt = addc(oldRefcnt, extra_rc << SIDE_TABLE_RC_SHIFT, 0, &carry);
if (carry) refcnt = SIDE_TABLE_RC_PINNED;
if (isDeallocating) refcnt |= SIDE_TABLE_DEALLOCATING;
if (weaklyReferenced) refcnt |= SIDE_TABLE_WEAKLY_REFERENCED;
objc_object::sidetable_addExtraRC_nolock(size_t delta_rc)
{
assert(isa.indexed);
- SideTable *table = SideTable::tableForPointer(this);
+ SideTable& table = SideTables()[this];
- size_t& refcntStorage = table->refcnts[this];
+ size_t& refcntStorage = table.refcnts[this];
size_t oldRefcnt = refcntStorage;
- // not deallocating - that is in the isa
- assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
- assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
+ // isa-side bits should not be set here
+ assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
+ assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
if (oldRefcnt & SIDE_TABLE_RC_PINNED) return true;
// Move some retain counts from the side table to the isa field.
-// Returns true if the sidetable retain count is now 0.
-bool
+// Returns the actual count subtracted, which may be less than the request.
+size_t
objc_object::sidetable_subExtraRC_nolock(size_t delta_rc)
{
assert(isa.indexed);
- SideTable *table = SideTable::tableForPointer(this);
+ SideTable& table = SideTables()[this];
- size_t& refcntStorage = table->refcnts[this];
- size_t oldRefcnt = refcntStorage;
- // not deallocating - that is in the isa
- assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
- assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
-
- if (oldRefcnt < delta_rc) {
- _objc_inform_now_and_on_crash("refcount underflow error for object %p",
- this);
- _objc_fatal("refcount underflow error for %s %p",
- object_getClassName((id)this), this);
+ RefcountMap::iterator it = table.refcnts.find(this);
+ if (it == table.refcnts.end() || it->second == 0) {
+ // Side table retain count is zero. Can't borrow.
+ return 0;
}
+ size_t oldRefcnt = it->second;
+
+ // isa-side bits should not be set here
+ assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0);
+ assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0);
size_t newRefcnt = oldRefcnt - (delta_rc << SIDE_TABLE_RC_SHIFT);
- if (newRefcnt == 0) {
- table->refcnts.erase(this);
- return true;
- }
- else {
- refcntStorage = newRefcnt;
- return false;
- }
+ assert(oldRefcnt > newRefcnt); // shouldn't underflow
+ it->second = newRefcnt;
+ return delta_rc;
}
objc_object::sidetable_getExtraRC_nolock()
{
assert(isa.indexed);
- SideTable *table = SideTable::tableForPointer(this);
- RefcountMap::iterator it = table->refcnts.find(this);
- assert(it != table->refcnts.end());
- return it->second >> SIDE_TABLE_RC_SHIFT;
+ SideTable& table = SideTables()[this];
+ RefcountMap::iterator it = table.refcnts.find(this);
+ if (it == table.refcnts.end()) return 0;
+ else return it->second >> SIDE_TABLE_RC_SHIFT;
}
__attribute__((used,noinline,nothrow))
id
-objc_object::sidetable_retain_slow(SideTable *table)
+objc_object::sidetable_retain_slow(SideTable& table)
{
#if SUPPORT_NONPOINTER_ISA
assert(!isa.indexed);
#endif
- spinlock_lock(&table->slock);
- size_t& refcntStorage = table->refcnts[this];
+ table.lock();
+ size_t& refcntStorage = table.refcnts[this];
if (! (refcntStorage & SIDE_TABLE_RC_PINNED)) {
refcntStorage += SIDE_TABLE_RC_ONE;
}
- spinlock_unlock(&table->slock);
+ table.unlock();
return (id)this;
}
#if SUPPORT_NONPOINTER_ISA
assert(!isa.indexed);
#endif
- SideTable *table = SideTable::tableForPointer(this);
+ SideTable& table = SideTables()[this];
- if (spinlock_trylock(&table->slock)) {
- size_t& refcntStorage = table->refcnts[this];
+ if (table.trylock()) {
+ size_t& refcntStorage = table.refcnts[this];
if (! (refcntStorage & SIDE_TABLE_RC_PINNED)) {
refcntStorage += SIDE_TABLE_RC_ONE;
}
- spinlock_unlock(&table->slock);
+ table.unlock();
return (id)this;
}
return sidetable_retain_slow(table);
#if SUPPORT_NONPOINTER_ISA
assert(!isa.indexed);
#endif
- SideTable *table = SideTable::tableForPointer(this);
+ SideTable& table = SideTables()[this];
// NO SPINLOCK HERE
// _objc_rootTryRetain() is called exclusively by _objc_loadWeak(),
// which already acquired the lock on our behalf.
// fixme can't do this efficiently with os_lock_handoff_s
- // if (table->slock == 0) {
+ // if (table.slock == 0) {
// _objc_fatal("Do not call -_tryRetain.");
// }
bool result = true;
- RefcountMap::iterator it = table->refcnts.find(this);
- if (it == table->refcnts.end()) {
- table->refcnts[this] = SIDE_TABLE_RC_ONE;
+ RefcountMap::iterator it = table.refcnts.find(this);
+ if (it == table.refcnts.end()) {
+ table.refcnts[this] = SIDE_TABLE_RC_ONE;
} else if (it->second & SIDE_TABLE_DEALLOCATING) {
result = false;
} else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
uintptr_t
objc_object::sidetable_retainCount()
{
- SideTable *table = SideTable::tableForPointer(this);
+ SideTable& table = SideTables()[this];
size_t refcnt_result = 1;
- spinlock_lock(&table->slock);
- RefcountMap::iterator it = table->refcnts.find(this);
- if (it != table->refcnts.end()) {
+ table.lock();
+ RefcountMap::iterator it = table.refcnts.find(this);
+ if (it != table.refcnts.end()) {
// this is valid for SIDE_TABLE_RC_PINNED too
refcnt_result += it->second >> SIDE_TABLE_RC_SHIFT;
}
- spinlock_unlock(&table->slock);
+ table.unlock();
return refcnt_result;
}
bool
objc_object::sidetable_isDeallocating()
{
- SideTable *table = SideTable::tableForPointer(this);
+ SideTable& table = SideTables()[this];
// NO SPINLOCK HERE
// _objc_rootIsDeallocating() is called exclusively by _objc_storeWeak(),
// fixme can't do this efficiently with os_lock_handoff_s
- // if (table->slock == 0) {
+ // if (table.slock == 0) {
// _objc_fatal("Do not call -_isDeallocating.");
// }
- RefcountMap::iterator it = table->refcnts.find(this);
- return (it != table->refcnts.end()) && (it->second & SIDE_TABLE_DEALLOCATING);
+ RefcountMap::iterator it = table.refcnts.find(this);
+ return (it != table.refcnts.end()) && (it->second & SIDE_TABLE_DEALLOCATING);
}
{
bool result = false;
- SideTable *table = SideTable::tableForPointer(this);
- spinlock_lock(&table->slock);
+ SideTable& table = SideTables()[this];
+ table.lock();
- RefcountMap::iterator it = table->refcnts.find(this);
- if (it != table->refcnts.end()) {
+ RefcountMap::iterator it = table.refcnts.find(this);
+ if (it != table.refcnts.end()) {
result = it->second & SIDE_TABLE_WEAKLY_REFERENCED;
}
- spinlock_unlock(&table->slock);
+ table.unlock();
return result;
}
assert(!isa.indexed);
#endif
- SideTable *table = SideTable::tableForPointer(this);
+ SideTable& table = SideTables()[this];
- table->refcnts[this] |= SIDE_TABLE_WEAKLY_REFERENCED;
+ table.refcnts[this] |= SIDE_TABLE_WEAKLY_REFERENCED;
}
+// rdar://20206767
+// return uintptr_t instead of bool so that the various raw-isa
+// -release paths all return zero in eax
__attribute__((used,noinline,nothrow))
-bool
-objc_object::sidetable_release_slow(SideTable *table, bool performDealloc)
+uintptr_t
+objc_object::sidetable_release_slow(SideTable& table, bool performDealloc)
{
#if SUPPORT_NONPOINTER_ISA
assert(!isa.indexed);
#endif
bool do_dealloc = false;
- spinlock_lock(&table->slock);
- RefcountMap::iterator it = table->refcnts.find(this);
- if (it == table->refcnts.end()) {
+ table.lock();
+ RefcountMap::iterator it = table.refcnts.find(this);
+ if (it == table.refcnts.end()) {
do_dealloc = true;
- table->refcnts[this] = SIDE_TABLE_DEALLOCATING;
+ table.refcnts[this] = SIDE_TABLE_DEALLOCATING;
} else if (it->second < SIDE_TABLE_DEALLOCATING) {
// SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it.
do_dealloc = true;
} else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
it->second -= SIDE_TABLE_RC_ONE;
}
- spinlock_unlock(&table->slock);
+ table.unlock();
if (do_dealloc && performDealloc) {
((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc);
}
}
-bool
+// rdar://20206767
+// return uintptr_t instead of bool so that the various raw-isa
+// -release paths all return zero in eax
+uintptr_t
objc_object::sidetable_release(bool performDealloc)
{
#if SUPPORT_NONPOINTER_ISA
assert(!isa.indexed);
#endif
- SideTable *table = SideTable::tableForPointer(this);
+ SideTable& table = SideTables()[this];
bool do_dealloc = false;
- if (spinlock_trylock(&table->slock)) {
- RefcountMap::iterator it = table->refcnts.find(this);
- if (it == table->refcnts.end()) {
+ if (table.trylock()) {
+ RefcountMap::iterator it = table.refcnts.find(this);
+ if (it == table.refcnts.end()) {
do_dealloc = true;
- table->refcnts[this] = SIDE_TABLE_DEALLOCATING;
+ table.refcnts[this] = SIDE_TABLE_DEALLOCATING;
} else if (it->second < SIDE_TABLE_DEALLOCATING) {
// SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it.
do_dealloc = true;
} else if (! (it->second & SIDE_TABLE_RC_PINNED)) {
it->second -= SIDE_TABLE_RC_ONE;
}
- spinlock_unlock(&table->slock);
+ table.unlock();
if (do_dealloc && performDealloc) {
((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc);
}
void
objc_object::sidetable_clearDeallocating()
{
- SideTable *table = SideTable::tableForPointer(this);
+ SideTable& table = SideTables()[this];
// clear any weak table items
// clear extra retain count and deallocating bit
// (fixme warn or abort if extra retain count == 0 ?)
- spinlock_lock(&table->slock);
- RefcountMap::iterator it = table->refcnts.find(this);
- if (it != table->refcnts.end()) {
+ table.lock();
+ RefcountMap::iterator it = table.refcnts.find(this);
+ if (it != table.refcnts.end()) {
if (it->second & SIDE_TABLE_WEAKLY_REFERENCED) {
- weak_clear_no_lock(&table->weak_table, (id)this);
+ weak_clear_no_lock(&table.weak_table, (id)this);
}
- table->refcnts.erase(it);
+ table.refcnts.erase(it);
}
- spinlock_unlock(&table->slock);
+ table.unlock();
}
objc_autoreleasePoolPop(void *ctxt)
{
if (UseGC) return;
-
- // fixme rdar://9167170
- if (!ctxt) return;
-
AutoreleasePoolPage::pop(ctxt);
}
AutoreleasePoolPage::printAll();
}
+
+// Same as objc_release but suitable for tail-calling
+// if you need the value back and don't want to push a frame before this point.
+__attribute__((noinline))
+static id
+objc_releaseAndReturn(id obj)
+{
+ objc_release(obj);
+ return obj;
+}
+
+// Same as objc_retainAutorelease but suitable for tail-calling
+// if you don't want to push a frame before this point.
+__attribute__((noinline))
+static id
+objc_retainAutoreleaseAndReturn(id obj)
+{
+ return objc_retainAutorelease(obj);
+}
+
+
+// Prepare a value at +1 for return through a +0 autoreleasing convention.
id
objc_autoreleaseReturnValue(id obj)
{
- if (fastAutoreleaseForReturn(obj)) return obj;
+ if (prepareOptimizedReturn(ReturnAtPlus1)) return obj;
return objc_autorelease(obj);
}
+// Prepare a value at +0 for return through a +0 autoreleasing convention.
id
objc_retainAutoreleaseReturnValue(id obj)
{
- return objc_autoreleaseReturnValue(objc_retain(obj));
+ if (prepareOptimizedReturn(ReturnAtPlus0)) return obj;
+
+ // not objc_autoreleaseReturnValue(objc_retain(obj))
+ // because we don't need another optimization attempt
+ return objc_retainAutoreleaseAndReturn(obj);
}
+// Accept a value returned through a +0 autoreleasing convention for use at +1.
id
objc_retainAutoreleasedReturnValue(id obj)
{
- if (fastRetainFromReturn(obj)) return obj;
+ if (acceptOptimizedReturn() == ReturnAtPlus1) return obj;
return objc_retain(obj);
}
+// Accept a value returned through a +0 autoreleasing convention for use at +0.
+id
+objc_unsafeClaimAutoreleasedReturnValue(id obj)
+{
+ if (acceptOptimizedReturn() == ReturnAtPlus0) return obj;
+
+ return objc_releaseAndReturn(obj);
+}
+
id
objc_retainAutorelease(id obj)
{
void arr_init(void)
{
AutoreleasePoolPage::init();
- SideTable::init();
+ SideTableInit();
}
@implementation NSObject
#ifndef _OBJC_LIST_H_
#define _OBJC_LIST_H_
-#if __OBJC__ && !__OBJC2__
+#if __OBJC__ && !__OBJC2__ && !__cplusplus
#include <objc/Object.h>
#include <Availability.h>
*
*************************************************************************/
-static unsigned log2u (unsigned x) { return (x<2) ? 0 : log2u (x>>1)+1; };
-
#define PTRSIZE sizeof(void *)
#if !SUPPORT_ZONES
#else
/* iff necessary this modulo can be optimized since the nbBuckets is of the form 2**n-1 */
# define BUCKETOF(table, data) (((HashBucket *)table->buckets)+((*table->prototype->hash)(table->info, data) % table->nbBuckets))
- static unsigned exp2m1 (unsigned x) { return (1 << x) - 1; };
-# define GOOD_CAPACITY(c) (exp2m1 (log2u (c)+1))
+# define GOOD_CAPACITY(c) (exp2m1u (log2u (c)+1))
# define MORE_CAPACITY(b) (b*2+1)
#endif
static char *z = NULL;
static size_t zSize = 0;
-static mutex_t lock = MUTEX_INITIALIZER;
+static mutex_t uniquerLock;
static const char *CopyIntoReadOnly (const char *str) {
size_t len = strlen (str) + 1;
return result;
}
- mutex_lock (&lock);
+ mutex_locker_t lock(uniquerLock);
if (zSize < len) {
zSize = CHUNK_SIZE *((len + CHUNK_SIZE - 1) / CHUNK_SIZE);
/* not enough room, we try to allocate. If no room left, too bad */
bcopy (str, result, len);
z += len;
zSize -= len;
- mutex_unlock (&lock);
return result;
};
const void *value;
} MapPair;
-static unsigned log2u(unsigned x) { return (x<2) ? 0 : log2u(x>>1)+1; };
-
-static INLINE unsigned exp2u(unsigned x) { return (1 << x); };
-
static INLINE unsigned xorHash(unsigned hash) {
unsigned xored = (hash & 0xffff) ^ (hash >> 16);
return ((xored * 65521) + hash);
// key DOES exist in table - use table's key for insertion
} else {
// key DOES NOT exist in table - copy the new key before insertion
- realKey = (void *)_strdup_internal((char *)key);
+ realKey = (void *)strdup((char *)key);
}
return NXMapInsert(table, realKey, value);
}
if ((realKey = NXMapMember(table, key, &realValue)) != NX_MAPNOTAKEY) {
// key DOES exist in table - remove pair and free key
realValue = NXMapRemove(table, realKey);
- _free_internal(realKey); // the key from the table, not necessarily the one given
+ free(realKey); // the key from the table, not necessarily the one given
return realValue;
} else {
// key DOES NOT exist in table - nothing to do
#if __OBJC2__
// objc_msgSendSuper2() takes the current search class, not its superclass.
OBJC_EXPORT id objc_msgSendSuper2(struct objc_super *super, SEL op, ...)
- __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+ __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_2_0);
OBJC_EXPORT void objc_msgSendSuper2_stret(struct objc_super *super, SEL op,...)
- __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0)
+ __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_2_0)
OBJC_ARM64_UNAVAILABLE;
// objc_msgSend_noarg() may be faster for methods with no additional arguments.
--- /dev/null
+/*
+ * Copyright (c) 2006-2007 Apple Inc. All Rights Reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#ifndef _OBJC_ACCESSORS_H_
+#define _OBJC_ACCESSORS_H_
+
+#include <objc/objc.h>
+#include <stddef.h>
+
+__BEGIN_DECLS
+
+#if SUPPORT_GC
+
+extern void objc_setProperty_non_gc(id self, SEL _cmd, ptrdiff_t offset, id newValue, BOOL atomic, signed char shouldCopy);
+extern id objc_getProperty_non_gc(id self, SEL _cmd, ptrdiff_t offset, BOOL atomic);
+
+extern void objc_setProperty_gc(id self, SEL _cmd, ptrdiff_t offset, id newValue, BOOL atomic, signed char shouldCopy);
+extern id objc_getProperty_gc(id self, SEL _cmd, ptrdiff_t offset, BOOL atomic);
+
+#endif
+
+__END_DECLS
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2006-2008 Apple Inc. All Rights Reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <string.h>
+#include <stddef.h>
+
+#include <libkern/OSAtomic.h>
+
+#include "objc-private.h"
+#include "objc-auto.h"
+#include "runtime.h"
+#include "objc-accessors.h"
+
+// stub interface declarations to make compiler happy.
+
+@interface __NSCopyable
+- (id)copyWithZone:(void *)zone;
+@end
+
+@interface __NSMutableCopyable
+- (id)mutableCopyWithZone:(void *)zone;
+@end
+
+static StripedMap<spinlock_t> PropertyLocks;
+
+#define MUTABLE_COPY 2
+
+id objc_getProperty_non_gc(id self, SEL _cmd, ptrdiff_t offset, BOOL atomic) {
+ if (offset == 0) {
+ return object_getClass(self);
+ }
+
+ // Retain release world
+ id *slot = (id*) ((char*)self + offset);
+ if (!atomic) return *slot;
+
+ // Atomic retain release world
+ spinlock_t& slotlock = PropertyLocks[slot];
+ slotlock.lock();
+ id value = objc_retain(*slot);
+ slotlock.unlock();
+
+ // for performance, we (safely) issue the autorelease OUTSIDE of the spinlock.
+ return objc_autoreleaseReturnValue(value);
+}
+
+
+static inline void reallySetProperty(id self, SEL _cmd, id newValue, ptrdiff_t offset, bool atomic, bool copy, bool mutableCopy) __attribute__((always_inline));
+
+static inline void reallySetProperty(id self, SEL _cmd, id newValue, ptrdiff_t offset, bool atomic, bool copy, bool mutableCopy)
+{
+ if (offset == 0) {
+ object_setClass(self, newValue);
+ return;
+ }
+
+ id oldValue;
+ id *slot = (id*) ((char*)self + offset);
+
+ if (copy) {
+ newValue = [newValue copyWithZone:nil];
+ } else if (mutableCopy) {
+ newValue = [newValue mutableCopyWithZone:nil];
+ } else {
+ if (*slot == newValue) return;
+ newValue = objc_retain(newValue);
+ }
+
+ if (!atomic) {
+ oldValue = *slot;
+ *slot = newValue;
+ } else {
+ spinlock_t& slotlock = PropertyLocks[slot];
+ slotlock.lock();
+ oldValue = *slot;
+ *slot = newValue;
+ slotlock.unlock();
+ }
+
+ objc_release(oldValue);
+}
+
+void objc_setProperty_non_gc(id self, SEL _cmd, ptrdiff_t offset, id newValue, BOOL atomic, signed char shouldCopy)
+{
+ bool copy = (shouldCopy && shouldCopy != MUTABLE_COPY);
+ bool mutableCopy = (shouldCopy == MUTABLE_COPY);
+ reallySetProperty(self, _cmd, newValue, offset, atomic, copy, mutableCopy);
+}
+
+void objc_setProperty_atomic(id self, SEL _cmd, id newValue, ptrdiff_t offset)
+{
+ reallySetProperty(self, _cmd, newValue, offset, true, false, false);
+}
+
+void objc_setProperty_nonatomic(id self, SEL _cmd, id newValue, ptrdiff_t offset)
+{
+ reallySetProperty(self, _cmd, newValue, offset, false, false, false);
+}
+
+
+void objc_setProperty_atomic_copy(id self, SEL _cmd, id newValue, ptrdiff_t offset)
+{
+ reallySetProperty(self, _cmd, newValue, offset, true, true, false);
+}
+
+void objc_setProperty_nonatomic_copy(id self, SEL _cmd, id newValue, ptrdiff_t offset)
+{
+ reallySetProperty(self, _cmd, newValue, offset, false, true, false);
+}
+
+
+#if SUPPORT_GC
+
+id objc_getProperty_gc(id self, SEL _cmd, ptrdiff_t offset, BOOL atomic) {
+ return *(id*) ((char*)self + offset);
+}
+
+void objc_setProperty_gc(id self, SEL _cmd, ptrdiff_t offset, id newValue, BOOL atomic, signed char shouldCopy) {
+ if (shouldCopy) {
+ newValue = (shouldCopy == MUTABLE_COPY ? [newValue mutableCopyWithZone:nil] : [newValue copyWithZone:nil]);
+ }
+ objc_assign_ivar(newValue, self, offset);
+}
+
+// objc_getProperty and objc_setProperty are resolver functions in objc-auto.mm
+
+#else
+
+id
+objc_getProperty(id self, SEL _cmd, ptrdiff_t offset, BOOL atomic)
+{
+ return objc_getProperty_non_gc(self, _cmd, offset, atomic);
+}
+
+void
+objc_setProperty(id self, SEL _cmd, ptrdiff_t offset, id newValue,
+ BOOL atomic, signed char shouldCopy)
+{
+ objc_setProperty_non_gc(self, _cmd, offset, newValue, atomic, shouldCopy);
+}
+
+#endif
+
+
+// This entry point was designed wrong. When used as a getter, src needs to be
+// locked so that a simultaneous setter on src is serialized against it.
+// So we need two locks - one of which will be contended.
+void objc_copyStruct(void *dest, const void *src, ptrdiff_t size, BOOL atomic, BOOL hasStrong) {
+ static StripedMap<spinlock_t> StructLocks;
+ spinlock_t *srcLock = nil;
+ spinlock_t *dstLock = nil;
+ if (atomic) {
+ srcLock = &StructLocks[src];
+ dstLock = &StructLocks[dest];
+ spinlock_t::lockTwo(srcLock, dstLock);
+ }
+#if SUPPORT_GC
+ if (UseGC && hasStrong) {
+ auto_zone_write_barrier_memmove(gc_zone, dest, src, size);
+ } else
+#endif
+ {
+ memmove(dest, src, size);
+ }
+ if (atomic) {
+ spinlock_t::unlockTwo(srcLock, dstLock);
+ }
+}
+
+void objc_copyCppObjectAtomic(void *dest, const void *src, void (*copyHelper) (void *dest, const void *source)) {
+ static StripedMap<spinlock_t> CppObjectLocks;
+ spinlock_t *srcLock = &CppObjectLocks[src];
+ spinlock_t *dstLock = &CppObjectLocks[dest];
+ spinlock_t::lockTwo(srcLock, dstLock);
+
+ // let C++ code perform the actual copy.
+ copyHelper(dest, src);
+
+ spinlock_t::unlockTwo(srcLock, dstLock);
+}
#endif
+/*
+ * OBJC_NO_GC 1: GC is not supported
+ * OBJC_NO_GC undef: GC is supported
+ *
+ * OBJC_NO_GC_API undef: Libraries must export any symbols that
+ *                     dual-mode code may link to.
+ * OBJC_NO_GC_API 1: Libraries need not export GC-related symbols.
+ */
+#if TARGET_OS_EMBEDDED || TARGET_OS_IPHONE || TARGET_OS_WIN32
+    /* GC is unsupported. GC API symbols are not exported. */
+#   define OBJC_NO_GC 1
+#   define OBJC_NO_GC_API 1
+#elif TARGET_OS_MAC && __x86_64h__
+    /* GC is unsupported. GC API symbols are exported. */
+#   define OBJC_NO_GC 1
+#   undef OBJC_NO_GC_API
+#else
+    /* GC is supported. */
+#   undef OBJC_NO_GC
+    /* was "#undef OBJC_GC_API": typo — the symbol used everywhere else
+     * (and documented above) is OBJC_NO_GC_API. */
+#   undef OBJC_NO_GC_API
+#endif
+
+
+/* NS_ENFORCE_NSOBJECT_DESIGNATED_INITIALIZER == 1
+ * marks -[NSObject init] as a designated initializer. */
+#if !defined(NS_ENFORCE_NSOBJECT_DESIGNATED_INITIALIZER)
+# define NS_ENFORCE_NSOBJECT_DESIGNATED_INITIALIZER 1
+#endif
+
+
/* OBJC_OLD_DISPATCH_PROTOTYPES == 0 enforces the rule that the dispatch
* functions must be cast to an appropriate function pointer type. */
#if !defined(OBJC_OLD_DISPATCH_PROTOTYPES)
# endif
#endif
-#if !defined(OBJC_HIDE_64)
+/* OBJC_SWIFT_UNAVAILABLE: unavailable in Swift */
+#if !defined(OBJC_SWIFT_UNAVAILABLE)
+# if __has_feature(attribute_availability_swift)
+# define OBJC_SWIFT_UNAVAILABLE(_msg) __attribute__((availability(swift, unavailable, message=_msg)))
+# else
+# define OBJC_SWIFT_UNAVAILABLE(_msg)
+# endif
+#endif
+
/* OBJC_ARM64_UNAVAILABLE: unavailable on arm64 (i.e. stret dispatch) */
#if !defined(OBJC_ARM64_UNAVAILABLE)
# if defined(__arm64__)
# define OBJC_ARM64_UNAVAILABLE
# endif
#endif
-#endif
/* OBJC_GC_UNAVAILABLE: unavailable with -fobjc-gc or -fobjc-gc-only */
#if !defined(OBJC_GC_UNAVAILABLE)
# define OBJC_INLINE __inline
#endif
+// Declares an enum type or option bits type as appropriate for each language.
+#if (__cplusplus && __cplusplus >= 201103L && (__has_extension(cxx_strong_enums) || __has_feature(objc_fixed_enum))) || (!__cplusplus && __has_feature(objc_fixed_enum))
+#define OBJC_ENUM(_type, _name) enum _name : _type _name; enum _name : _type
+#if (__cplusplus)
+#define OBJC_OPTIONS(_type, _name) _type _name; enum : _type
+#else
+#define OBJC_OPTIONS(_type, _name) enum _name : _type _name; enum _name : _type
+#endif
+#else
+#define OBJC_ENUM(_type, _name) _type _name; enum
+#define OBJC_OPTIONS(_type, _name) _type _name; enum
+#endif
+
#endif
} pointer_set_t;
static pointer_set_t *new_pointer_set() {
- pointer_set_t *result = (pointer_set_t *)malloc_zone_malloc(_objc_internal_zone(), sizeof(pointer_set_t));
+ pointer_set_t *result = (pointer_set_t *)malloc(sizeof(pointer_set_t));
result->items = (long *)calloc(64, sizeof(long));
result->count = 0;
result->capacity = 63; // last valid ptr, also mask
long i;
set->count = 0;
set->capacity = 2*(oldCapacity+1)-1;
- set->items = (long *)malloc_zone_calloc(_objc_internal_zone(), 2*(oldCapacity+1), sizeof(long));
+ set->items = (long *)calloc(2*(oldCapacity+1), sizeof(long));
for (i = 0; i < oldCapacity; ++i)
if (oldItems[i]) pointer_set_add(set, oldItems[i]);
free(oldItems);
/*
Quickly dump heap to a named file in a pretty raw format.
*/
-BOOL _objc_dumpHeap(auto_zone_t *zone, const char *filename) {
+bool _objc_dumpHeap(auto_zone_t *zone, const char *filename) {
// just write interesting info to disk
int fd = secure_open(filename, O_WRONLY|O_CREAT, geteuid());
if (fd < 0) return NO;
#endif
-/* GC is unsupported on some architectures. */
-
-#if TARGET_OS_EMBEDDED || TARGET_OS_IPHONE || TARGET_OS_WIN32
-# define OBJC_NO_GC 1
-#endif
-
-
/* objc_collect() options */
enum {
// choose one
static OBJC_INLINE void objc_collect(unsigned long options __unused) { }
static OBJC_INLINE BOOL objc_collectingEnabled(void) { return NO; }
+#if TARGET_OS_MAC && !TARGET_OS_EMBEDDED && !TARGET_IPHONE_SIMULATOR
+static OBJC_INLINE malloc_zone_t *objc_collectableZone(void) { return nil; }
+#endif
static OBJC_INLINE void objc_setCollectionThreshold(size_t threshold __unused) { }
static OBJC_INLINE void objc_setCollectionRatio(size_t ratio __unused) { }
static OBJC_INLINE void objc_startCollectorThread(void) { }
static OBJC_INLINE id objc_assign_global(id val, id *dest)
{ return (*dest = val); }
+static OBJC_INLINE id objc_assign_threadlocal(id val, id *dest)
+ { return (*dest = val); }
+
static OBJC_INLINE id objc_assign_ivar(id val, id dest, ptrdiff_t offset)
{ return (*(id*)((char *)dest+offset) = val); }
#include "objc-private.h"
-#include "objc-config.h"
-#include "objc-auto.h"
-#include "objc-accessors.h"
-#ifndef OBJC_NO_GC
+#if OBJC_NO_GC && OBJC_NO_GC_API
+
+// No GC and no GC symbols needed. We're done here.
+
+#elif OBJC_NO_GC && !OBJC_NO_GC_API
+
+// No GC but we do need to export GC symbols.
+// These are mostly the same as the OBJC_NO_GC inline versions in objc-auto.h.
+
+OBJC_EXPORT void objc_collect(unsigned long options __unused) { }
+OBJC_EXPORT BOOL objc_collectingEnabled(void) { return NO; }
+OBJC_EXPORT void objc_setCollectionThreshold(size_t threshold __unused) { }
+OBJC_EXPORT void objc_setCollectionRatio(size_t ratio __unused) { }
+OBJC_EXPORT void objc_startCollectorThread(void) { }
+
+#if TARGET_OS_WIN32
+OBJC_EXPORT BOOL objc_atomicCompareAndSwapPtr(id predicate, id replacement, volatile id *objectLocation)
+ { void *original = InterlockedCompareExchangePointer((void * volatile *)objectLocation, (void *)replacement, (void *)predicate); return (original == predicate); }
+
+OBJC_EXPORT BOOL objc_atomicCompareAndSwapPtrBarrier(id predicate, id replacement, volatile id *objectLocation)
+ { void *original = InterlockedCompareExchangePointer((void * volatile *)objectLocation, (void *)replacement, (void *)predicate); return (original == predicate); }
+#else
+OBJC_EXPORT BOOL objc_atomicCompareAndSwapPtr(id predicate, id replacement, volatile id *objectLocation)
+ { return OSAtomicCompareAndSwapPtr((void *)predicate, (void *)replacement, (void * volatile *)objectLocation); }
+
+OBJC_EXPORT BOOL objc_atomicCompareAndSwapPtrBarrier(id predicate, id replacement, volatile id *objectLocation)
+ { return OSAtomicCompareAndSwapPtrBarrier((void *)predicate, (void *)replacement, (void * volatile *)objectLocation); }
+#endif
+
+OBJC_EXPORT BOOL objc_atomicCompareAndSwapGlobal(id predicate, id replacement, volatile id *objectLocation)
+ { return objc_atomicCompareAndSwapPtr(predicate, replacement, objectLocation); }
+
+OBJC_EXPORT BOOL objc_atomicCompareAndSwapGlobalBarrier(id predicate, id replacement, volatile id *objectLocation)
+ { return objc_atomicCompareAndSwapPtrBarrier(predicate, replacement, objectLocation); }
+
+OBJC_EXPORT BOOL objc_atomicCompareAndSwapInstanceVariable(id predicate, id replacement, volatile id *objectLocation)
+ { return objc_atomicCompareAndSwapPtr(predicate, replacement, objectLocation); }
+
+OBJC_EXPORT BOOL objc_atomicCompareAndSwapInstanceVariableBarrier(id predicate, id replacement, volatile id *objectLocation)
+ { return objc_atomicCompareAndSwapPtrBarrier(predicate, replacement, objectLocation); }
+
+
+OBJC_EXPORT id objc_assign_strongCast(id val, id *dest)
+ { return (*dest = val); }
+
+OBJC_EXPORT id objc_assign_global(id val, id *dest)
+ { return (*dest = val); }
+
+OBJC_EXPORT id objc_assign_threadlocal(id val, id *dest)
+ { return (*dest = val); }
+
+OBJC_EXPORT id objc_assign_ivar(id val, id dest, ptrdiff_t offset)
+ { return (*(id*)((char *)dest+offset) = val); }
+
+OBJC_EXPORT id objc_read_weak(id *location)
+ { return *location; }
+
+OBJC_EXPORT id objc_assign_weak(id value, id *location)
+ { return (*location = value); }
+
+OBJC_EXPORT void *objc_memmove_collectable(void *dst, const void *src, size_t size)
+ { return memmove(dst, src, size); }
+
+OBJC_EXPORT void objc_finalizeOnMainThread(Class cls __unused) { }
+OBJC_EXPORT BOOL objc_is_finalized(void *ptr __unused) { return NO; }
+OBJC_EXPORT void objc_clear_stack(unsigned long options __unused) { }
+
+OBJC_EXPORT BOOL objc_collecting_enabled(void) { return NO; }
+OBJC_EXPORT void objc_set_collection_threshold(size_t threshold __unused) { }
+OBJC_EXPORT void objc_set_collection_ratio(size_t ratio __unused) { }
+OBJC_EXPORT void objc_start_collector_thread(void) { }
+
+OBJC_EXPORT id objc_allocate_object(Class cls, int extra)
+ { return class_createInstance(cls, extra); }
+
+OBJC_EXPORT void objc_registerThreadWithCollector() { }
+OBJC_EXPORT void objc_unregisterThreadWithCollector() { }
+OBJC_EXPORT void objc_assertRegisteredThreadWithCollector() { }
+
+OBJC_EXPORT malloc_zone_t* objc_collect_init(int(*callback)() __unused) { return nil; }
+OBJC_EXPORT void* objc_collectableZone() { return nil; }
+
+OBJC_EXPORT BOOL objc_isAuto(id object __unused) { return NO; }
+OBJC_EXPORT BOOL objc_dumpHeap(char *filename __unused, unsigned long length __unused)
+ { return NO; }
+
+// OBJC_NO_GC && !OBJC_NO_GC_API
+#else
+// !OBJC_NO_GC
+
+// Garbage collection.
#include <stdint.h>
#include <stdbool.h>
#include <dispatch/private.h>
#include "objc-private.h"
+#include "objc-config.h"
+#include "objc-accessors.h"
+#include "objc-auto.h"
#include "objc-references.h"
#include "maptable.h"
#include "message.h"
#include "objc-gdb.h"
-#if !defined(NDEBUG) && !__OBJC2__
+#if DEBUG && !__OBJC2__
#include "objc-exception.h"
#endif
static auto_zone_t *gc_zone_init(void);
static void gc_block_init(void);
static void registeredClassTableInit(void);
-static BOOL objc_isRegisteredClass(Class candidate);
+static bool objc_isRegisteredClass(Class candidate);
int8_t UseGC = -1;
-static BOOL WantsMainThreadFinalization = NO;
+static bool WantsMainThreadFinalization = NO;
auto_zone_t *gc_zone = nil;
auto_zone_foreach_object_t foreach;
auto_zone_cursor_t cursor;
size_t cursor_size;
- volatile BOOL finished;
- volatile BOOL started;
+ volatile bool finished;
+ volatile bool started;
struct BatchFinalizeBlock *next;
} BatchFinalizeBlock_t;
void objc_collect(unsigned long options) {
if (!UseGC) return;
- BOOL onMainThread = pthread_main_np() ? YES : NO;
+ bool onMainThread = pthread_main_np();
// while we're here, sneak off and do some finalization work (if any)
if (onMainThread) batchFinalizeOnMainThread();
amode |= AUTO_ZONE_COLLECT_LOCAL_COLLECTION;
}
if (options & OBJC_WAIT_UNTIL_DONE) {
- __block BOOL done = NO;
+ __block bool done = NO;
// If executing on the main thread, use the main thread work queue condition to block,
// so main thread finalization can complete. Otherwise, use a thread-local condition.
pthread_mutex_t localMutex = PTHREAD_MUTEX_INITIALIZER, *mutex = &localMutex;
size_t cursor_size,
void (*finalizeAnObject)(void *, void*))
{
-#if !defined(NDEBUG) && !__OBJC2__
+#if DEBUG && !__OBJC2__
// debug: don't call try/catch before exception handlers are installed
objc_exception_functions_t table = {};
objc_exception_get_functions(&table);
_objc_rootFinalize(self);
}
-static BOOL _NSResurrectedObject_resolveInstanceMethod(id self, SEL _cmd, SEL name) {
+static bool _NSResurrectedObject_resolveInstanceMethod(id self, SEL _cmd, SEL name) {
class_addMethod((Class)self, name, (IMP)_NSResurrectedObject_instanceMethod, "@@:");
return YES;
}
-static BOOL _NSResurrectedObject_resolveClassMethod(id self, SEL _cmd, SEL name) {
+static bool _NSResurrectedObject_resolveClassMethod(id self, SEL _cmd, SEL name) {
class_addMethod(self->ISA(), name, (IMP)_NSResurrectedObject_classMethod, "@@:");
return YES;
}
* Collection support
**********************************************************************/
-static BOOL objc_isRegisteredClass(Class candidate);
+static bool objc_isRegisteredClass(Class candidate);
static const unsigned char *objc_layout_for_address(auto_zone_t *zone, void *address) {
id object = (id)address;
}
// Always called by _objcInit, even if GC is off.
-void gc_init(BOOL wantsGC)
+void gc_init(bool wantsGC)
{
assert(UseGC == -1);
UseGC = wantsGC;
// Verify that a particular pointer is to a class.
// Safe from any thread anytime
-static BOOL objc_isRegisteredClass(Class candidate) {
+static bool objc_isRegisteredClass(Class candidate) {
assert(UseGC);
// nil is never a valid ISA.
if (candidate == nil) return NO;
}
-
-
-
+// not OBJC_NO_GC
#endif
// symbols defined in assembly files
// Don't use the symbols directly; they're thumb-biased on some ARM archs.
#define TRAMP(tramp) \
- static inline uintptr_t tramp(void) { \
+ static inline __unused uintptr_t tramp(void) { \
extern void *_##tramp; \
return ((uintptr_t)&_##tramp) & ~1UL; \
}
static inline void _lock() {
#if __OBJC2__
- rwlock_write(&runtimeLock);
+ runtimeLock.write();
#else
- mutex_lock(&classLock);
+ classLock.lock();
#endif
}
static inline void _unlock() {
#if __OBJC2__
- rwlock_unlock_write(&runtimeLock);
+ runtimeLock.unlockWrite();
#else
- mutex_unlock(&classLock);
+ classLock.unlock();
#endif
}
static inline void _assert_locked() {
#if __OBJC2__
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
#else
- mutex_assert_locked(&classLock);
+ classLock.assertLocked();
#endif
}
extern Method _cache_getMethod(Class cls, SEL sel, IMP objc_msgForward_internal_imp);
extern void flush_cache(Class cls);
-extern BOOL _cache_fill(Class cls, Method meth, SEL sel);
+extern bool _cache_fill(Class cls, Method meth, SEL sel);
extern void _cache_addForwardEntry(Class cls, SEL sel);
extern IMP _cache_addIgnoredEntry(Class cls, SEL sel);
extern void _cache_free(Cache cache);
/* Local prototypes */
-static BOOL _cache_isEmpty(Cache cache);
+static bool _cache_isEmpty(Cache cache);
static Cache _cache_malloc(uintptr_t slotCount);
static Cache _cache_create(Class cls);
static Cache _cache_expand(Class cls);
static void _cache_collect_free(void *data, size_t size);
#if defined(CACHE_ALLOCATOR)
-static BOOL cache_allocator_is_block(void *block);
+static bool cache_allocator_is_block(void *block);
static Cache cache_allocator_calloc(size_t size);
static void cache_allocator_free(void *block);
#endif
* Returns YES if the given cache is some empty cache.
* Empty caches should never be allocated on the heap.
**********************************************************************/
-static BOOL _cache_isEmpty(Cache cache)
+static bool _cache_isEmpty(Cache cache)
{
return (cache == NULL || cache == (Cache)&_objc_empty_cache || cache->mask == 0);
}
Cache new_cache;
size_t size;
- mutex_assert_locked(&cacheUpdateLock);
+ cacheUpdateLock.assertLocked();
// Allocate table (why not check for failure?)
size = sizeof(struct objc_cache) + TABLE_SIZE(slotCount);
#if defined(OBJC_INSTRUMENTED)
// Custom cache allocator can't handle instrumentation.
size += sizeof(CacheInstrumentation);
- new_cache = _calloc_internal(size, 1);
+ new_cache = calloc(size, 1);
new_cache->mask = slotCount - 1;
#elif !defined(CACHE_ALLOCATOR)
// fixme cache allocator implementation isn't 64-bit clean
- new_cache = _calloc_internal(size, 1);
+ new_cache = calloc(size, 1);
new_cache->mask = (unsigned int)(slotCount - 1);
#else
- if (size < CACHE_ALLOCATOR_MIN || UseInternalZone) {
- new_cache = (Cache)_calloc_internal(size, 1);
+ if (size < CACHE_ALLOCATOR_MIN) {
+ new_cache = (Cache)calloc(size, 1);
new_cache->mask = slotCount - 1;
// occupied and buckets and instrumentation are all zero
} else {
static inline int isPowerOf2(unsigned long l) { return 1 == __builtin_popcountl(l); }
static void _cache_free_block(void *block)
{
- mutex_assert_locked(&cacheUpdateLock);
+ cacheUpdateLock.assertLocked();
#if !TARGET_OS_WIN32
if (PrintCaches) {
{
unsigned int i;
- mutex_lock(&cacheUpdateLock);
+ mutex_locker_t lock(cacheUpdateLock);
for (i = 0; i < cache->mask + 1; i++) {
cache_entry *entry = (cache_entry *)cache->buckets[i];
}
_cache_free_block(cache);
-
- mutex_unlock(&cacheUpdateLock);
}
{
Cache new_cache;
- mutex_assert_locked(&cacheUpdateLock);
+ cacheUpdateLock.assertLocked();
// Allocate new cache block
new_cache = _cache_malloc(INIT_CACHE_SIZE);
uintptr_t slotCount;
uintptr_t index;
- mutex_assert_locked(&cacheUpdateLock);
+ cacheUpdateLock.assertLocked();
// First growth goes from empty cache to a real one
old_cache = cls->cache;
*
* Cache locks: cacheUpdateLock must not be held.
**********************************************************************/
-BOOL _cache_fill(Class cls, Method smt, SEL sel)
+bool _cache_fill(Class cls, Method smt, SEL sel)
{
uintptr_t newOccupied;
uintptr_t index;
cache_entry *entry;
Cache cache;
- mutex_assert_unlocked(&cacheUpdateLock);
+ cacheUpdateLock.assertUnlocked();
// Never cache before +initialize is done
if (!cls->isInitialized()) {
// Keep tally of cache additions
totalCacheFills += 1;
- mutex_lock(&cacheUpdateLock);
+ mutex_locker_t lock(cacheUpdateLock);
entry = (cache_entry *)smt;
// Don't use _cache_getMethod() because _cache_getMethod() doesn't
// return forward:: entries.
if (_cache_getImp(cls, sel)) {
- mutex_unlock(&cacheUpdateLock);
return NO; // entry is already cached, didn't add new one
}
}
buckets[index] = entry;
- mutex_unlock(&cacheUpdateLock);
-
return YES; // successfully added new cache entry
}
{
cache_entry *smt;
- smt = (cache_entry *)_malloc_internal(sizeof(cache_entry));
+ smt = (cache_entry *)malloc(sizeof(cache_entry));
smt->name = sel;
smt->imp = _objc_msgForward_impcache;
if (! _cache_fill(cls, (Method)smt, sel)) { // fixme hack
// Entry not added to cache. Don't leak the method struct.
- _free_internal(smt);
+ free(smt);
}
}
#if SUPPORT_GC && !SUPPORT_IGNORED_SELECTOR_CONSTANT
static cache_entry *alloc_ignored_entries(void)
{
- cache_entry *e = (cache_entry *)_malloc_internal(5 * sizeof(cache_entry));
+ cache_entry *e = (cache_entry *)malloc(5 * sizeof(cache_entry));
e[0] = (cache_entry){ @selector(retain), 0,(IMP)&_objc_ignored_method};
e[1] = (cache_entry){ @selector(release), 0,(IMP)&_objc_ignored_method};
e[2] = (cache_entry){ @selector(autorelease),0,(IMP)&_objc_ignored_method};
Cache cache;
unsigned int index;
- mutex_assert_locked(&cacheUpdateLock);
+ cacheUpdateLock.assertLocked();
// Locate cache. Ignore unused cache.
cache = cls->cache;
void flush_cache(Class cls)
{
if (cls) {
- mutex_lock(&cacheUpdateLock);
+ mutex_locker_t lock(cacheUpdateLock);
_cache_flush(cls);
- mutex_unlock(&cacheUpdateLock);
}
}
{
first = 0;
garbage_refs = (void**)
- _malloc_internal(INIT_GARBAGE_COUNT * sizeof(void *));
+ malloc(INIT_GARBAGE_COUNT * sizeof(void *));
garbage_max = INIT_GARBAGE_COUNT;
}
else if (garbage_count == garbage_max)
{
garbage_refs = (void**)
- _realloc_internal(garbage_refs, garbage_max * 2 * sizeof(void *));
+ realloc(garbage_refs, garbage_max * 2 * sizeof(void *));
garbage_max *= 2;
}
}
**********************************************************************/
static void _cache_collect_free(void *data, size_t size)
{
- mutex_assert_locked(&cacheUpdateLock);
+ cacheUpdateLock.assertLocked();
_garbage_make_room ();
garbage_byte_size += size;
**********************************************************************/
void _cache_collect(bool collectALot)
{
- mutex_assert_locked(&cacheUpdateLock);
+ cacheUpdateLock.assertLocked();
// Done if the garbage is not full
if (garbage_byte_size < garbage_threshold && !collectALot) {
cache_allocator_block *b;
cache_allocator_region **rgnP;
cache_allocator_region *newRegion = (cache_allocator_region *)
- _calloc_internal(1, sizeof(cache_allocator_region));
+ calloc(1, sizeof(cache_allocator_region));
// Round size up to quantum boundary, and apply the minimum size.
size += CACHE_QUANTUM - (size % CACHE_QUANTUM);
{
cache_allocator_region *rgn;
- mutex_assert_locked(&cacheUpdateLock);
+ cacheUpdateLock.assertLocked();
for (rgn = cacheRegion; rgn != NULL; rgn = rgn->next) {
void *p = cache_region_calloc(rgn, size);
* If ptr is a dead block from the cache allocator, result is undefined.
* Cache locks: cacheUpdateLock must be held by the caller
**********************************************************************/
-static BOOL cache_allocator_is_block(void *ptr)
+static bool cache_allocator_is_block(void *ptr)
{
- mutex_assert_locked(&cacheUpdateLock);
+ cacheUpdateLock.assertLocked();
return (cache_allocator_region_for_block((cache_allocator_block *)ptr) != NULL);
}
cache_allocator_block *cur;
cache_allocator_region *rgn;
- mutex_assert_locked(&cacheUpdateLock);
+ cacheUpdateLock.assertLocked();
if (! (rgn = cache_allocator_region_for_block(dead))) {
// free of non-pointer
/***********************************************************************
* _class_printDuplicateCacheEntries.
**********************************************************************/
-void _class_printDuplicateCacheEntries(BOOL detail)
+void _class_printDuplicateCacheEntries(bool detail)
{
NXHashState state;
Class cls;
extern IMP cache_getImp(Class cls, SEL sel);
-extern void cache_fill(Class cls, SEL sel, IMP imp);
+extern void cache_fill(Class cls, SEL sel, IMP imp, id receiver);
-extern void cache_eraseMethods(Class cls, method_list_t *mlist);
+extern void cache_erase_nolock(Class cls);
-extern void cache_eraseImp(Class cls, SEL sel, IMP imp);
-
-extern void cache_eraseImp_nolock(Class cls, SEL sel, IMP imp);
-
-extern void cache_erase_nolock(cache_t *cache);
+extern void cache_delete(Class cls);
extern void cache_collect(bool collectALot);
INIT_CACHE_SIZE = (1 << INIT_CACHE_SIZE_LOG2)
};
-static size_t log2u(size_t x)
-{
- unsigned int log;
-
- log = 0;
- while (x >>= 1)
- log += 1;
-
- return log;
-}
-
-static void cache_collect_free(struct bucket_t *data, size_t size);
+static void cache_collect_free(struct bucket_t *data, mask_t capacity);
static int _collecting_in_critical(void);
static void _garbage_make_room(void);
static size_t cache_allocations;
static size_t cache_collections;
+static void recordNewCache(mask_t capacity)
+{
+ size_t bucket = log2u(capacity);
+ if (bucket < countof(cache_counts)) {
+ cache_counts[bucket]++;
+ }
+ cache_allocations++;
+}
+
+static void recordDeadCache(mask_t capacity)
+{
+ size_t bucket = log2u(capacity);
+ if (bucket < countof(cache_counts)) {
+ cache_counts[bucket]--;
+ }
+}
/***********************************************************************
* Pointers used by compiled class objects
* These use asm to avoid conflicts with the compiler's internal declarations
**********************************************************************/
+// EMPTY_BYTES includes space for a cache end marker bucket.
+// This end marker doesn't actually have the wrap-around pointer
+// because cache scans always find an empty bucket before they might wrap.
+// 1024 buckets is fairly common.
+#if DEBUG
+ // Use a smaller size to exercise heap-allocated empty caches.
+# define EMPTY_BYTES ((8+1)*16)
+#else
+# define EMPTY_BYTES ((1024+1)*16)
+#endif
+
+#define stringize(x) #x
+#define stringize2(x) stringize(x)
+
// "cache" is cache->buckets; "vtable" is cache->mask/occupied
// hack to avoid conflicts with compiler's internal declaration
asm("\n .section __TEXT,__const"
- "\n .globl __objc_empty_cache"
-#if __LP64__
- "\n .align 3"
- "\n __objc_empty_cache: .quad 0"
-#else
- "\n .align 2"
- "\n __objc_empty_cache: .long 0"
-#endif
"\n .globl __objc_empty_vtable"
"\n .set __objc_empty_vtable, 0"
+ "\n .globl __objc_empty_cache"
+ "\n .align 3"
+ "\n __objc_empty_cache: .space " stringize2(EMPTY_BYTES)
);
-#if __arm__
+#if __arm__ || __x86_64__ || __i386__
// objc_msgSend has few registers available.
// Cache scan increments and wraps at special end-marking bucket.
#define CACHE_END_MARKER 1
return (i+1) & mask;
}
-#elif __i386__ || __x86_64__ || __arm64__
-// objc_msgSend has lots of registers and/or memory operands available.
+#elif __arm64__
+// objc_msgSend has lots of registers available.
// Cache scan decrements. No end marker needed.
#define CACHE_END_MARKER 0
static inline mask_t cache_next(mask_t i, mask_t mask) {
#endif
-// cannot mix sel-side caches with ignored selector constant
-// ignored selector constant also not implemented for class-side caches here
#if SUPPORT_IGNORED_SELECTOR_CONSTANT
-#error sorry
+#error sorry not implemented
#endif
: "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory" \
); } while(0)
-#elif __arm__
+#elif __arm__ || __arm64__
#define mega_barrier() \
__asm__ __volatile__( \
"dsb ish" \
: : : "memory")
-#elif __arm64__
-// Use atomic double-word updates instead.
+#else
+#error unknown architecture
+#endif
+
+#if __arm64__
+
+// Use atomic double-word instructions to update cache entries.
// This requires cache buckets not cross cache line boundaries.
-#undef mega_barrier
#define stp(onep, twop, destp) \
__asm__ ("stp %[one], %[two], [%[dest]]" \
: "=m" (((uint64_t *)(destp))[0]), \
#define ldp(onep, twop, srcp) \
__asm__ ("ldp %[one], %[two], [%[src]]" \
: [one] "=r" (onep), \
- [two] "=r" (twop), \
+ [two] "=r" (twop) \
: "m" (((uint64_t *)(srcp))[0]), \
- "m" (((uint64_t *)(srcp))[1]) \
+ "m" (((uint64_t *)(srcp))[1]), \
[src] "r" (srcp) \
: /* no clobbers */ \
)
-#else
-#error unknown architecture
#endif
return (mask_t)(key & mask);
}
-cache_t *getCache(Class cls, SEL sel __unused)
+cache_t *getCache(Class cls)
{
assert(cls);
return &cls->cache;
}
-cache_key_t getKey(Class cls __unused, SEL sel)
+cache_key_t getKey(SEL sel)
{
assert(sel);
return (cache_key_t)sel;
}
-
-
#if __arm64__
void bucket_t::set(cache_key_t newKey, IMP newImp)
stp(newKey, newImp, this);
}
-void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask)
-{
- // ensure other threads see buckets contents before buckets pointer
- // see Barrier Litmus Tests and Cookbook,
- // "Address Dependency with object construction"
- __sync_synchronize();
-
- // LDP/STP guarantees that all observers get
- // old mask/buckets or new mask/buckets
-
- mask_t newOccupied = 0;
- uint64_t mask_and_occupied =
- (uint64_t)newMask | ((uint64_t)newOccupied << 32);
- stp(newBuckets, mask_and_occupied, this);
-}
-
-// arm64
#else
-// not arm64
void bucket_t::set(cache_key_t newKey, IMP newImp)
{
}
}
+#endif
+
void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask)
{
// objc_msgSend uses mask and buckets with no locks.
_occupied = 0;
}
-// not arm64
-#endif
struct bucket_t *cache_t::buckets()
{
_occupied++;
}
-void cache_t::setEmpty()
+void cache_t::initializeToEmpty()
{
bzero(this, sizeof(*this));
_buckets = (bucket_t *)&_objc_empty_cache;
size_t cache_t::bytesForCapacity(uint32_t cap)
{
// fixme put end marker inline when capacity+1 malloc is inefficient
- return sizeof(cache_t) * (cap + 1);
+ return sizeof(bucket_t) * (cap + 1);
}
bucket_t *cache_t::endMarker(struct bucket_t *b, uint32_t cap)
// This can't overflow mask_t because newCapacity is a power of 2.
// fixme instead put the end mark inline when +1 is malloc-inefficient
bucket_t *newBuckets = (bucket_t *)
- _calloc_internal(cache_t::bytesForCapacity(newCapacity), 1);
+ calloc(cache_t::bytesForCapacity(newCapacity), 1);
bucket_t *end = cache_t::endMarker(newBuckets, newCapacity);
end->setKey((cache_key_t)(uintptr_t)1);
end->setImp((IMP)(newBuckets - 1));
#else
-# error unknown architecture
+ // End marker's key is 1 and imp points to the first bucket.
+ end->setKey((cache_key_t)(uintptr_t)1);
+ end->setImp((IMP)newBuckets);
#endif
+ if (PrintCaches) recordNewCache(newCapacity);
+
return newBuckets;
}
#else
+size_t cache_t::bytesForCapacity(uint32_t cap)
+{
+ return sizeof(bucket_t) * cap;
+}
+
bucket_t *allocateBuckets(mask_t newCapacity)
{
- return (bucket_t *)_calloc_internal(newCapacity, sizeof(bucket_t));
+ if (PrintCaches) recordNewCache(newCapacity);
+
+ return (bucket_t *)calloc(cache_t::bytesForCapacity(newCapacity), 1);
}
#endif
-bool cache_t::canBeFreed()
+bucket_t *emptyBucketsForCapacity(mask_t capacity, bool allocate = true)
{
- return buckets() != (bucket_t *)&_objc_empty_cache;
-}
+ cacheUpdateLock.assertLocked();
+ size_t bytes = cache_t::bytesForCapacity(capacity);
-void cache_t::reallocate(mask_t oldCapacity, mask_t newCapacity)
-{
- if (PrintCaches) {
- size_t bucket = log2u(newCapacity);
- if (bucket < sizeof(cache_counts) / sizeof(cache_counts[0])) {
- cache_counts[bucket]++;
+ // Use _objc_empty_cache if the bucket array is small enough.
+ if (bytes <= EMPTY_BYTES) {
+ return (bucket_t *)&_objc_empty_cache;
+ }
+
+ // Use shared empty buckets allocated on the heap.
+ static bucket_t **emptyBucketsList = nil;
+ static mask_t emptyBucketsListCount = 0;
+
+ mask_t index = log2u(capacity);
+
+ if (index >= emptyBucketsListCount) {
+ if (!allocate) return nil;
+
+ mask_t newListCount = index + 1;
+ bucket_t *newBuckets = (bucket_t *)calloc(bytes, 1);
+ emptyBucketsList = (bucket_t**)
+ realloc(emptyBucketsList, newListCount * sizeof(bucket_t *));
+ // Share newBuckets for every unallocated size smaller than index.
+ // The array is therefore always fully populated.
+ for (mask_t i = emptyBucketsListCount; i < newListCount; i++) {
+ emptyBucketsList[i] = newBuckets;
}
- cache_allocations++;
-
- if (oldCapacity) {
- bucket = log2u(oldCapacity);
- if (bucket < sizeof(cache_counts) / sizeof(cache_counts[0])) {
- cache_counts[bucket]--;
- }
+ emptyBucketsListCount = newListCount;
+
+ if (PrintCaches) {
+ _objc_inform("CACHES: new empty buckets at %p (capacity %zu)",
+ newBuckets, (size_t)capacity);
}
}
+ return emptyBucketsList[index];
+}
+
+
+bool cache_t::isConstantEmptyCache()
+{
+ return
+ occupied() == 0 &&
+ buckets() == emptyBucketsForCapacity(capacity(), false);
+}
+
+bool cache_t::canBeFreed()
+{
+ return !isConstantEmptyCache();
+}
+
+
+void cache_t::reallocate(mask_t oldCapacity, mask_t newCapacity)
+{
bool freeOld = canBeFreed();
bucket_t *oldBuckets = buckets();
setBucketsAndMask(newBuckets, newCapacity - 1);
if (freeOld) {
- cache_collect_free(oldBuckets, oldCapacity * sizeof(bucket_t));
+ cache_collect_free(oldBuckets, oldCapacity);
cache_collect(false);
}
}
-// called by objc_msgSend
-extern "C"
-void objc_msgSend_corrupt_cache_error(id receiver, SEL sel, Class isa)
-{
- cache_t::bad_cache(receiver, sel, isa);
-}
-
-extern "C"
-void cache_getImp_corrupt_cache_error(id receiver, SEL sel, Class isa)
-{
- cache_t::bad_cache(receiver, sel, isa);
-}
-
void cache_t::bad_cache(id receiver, SEL sel, Class isa)
{
// Log in separate steps in case the logging itself causes a crash.
}
-bucket_t * cache_t::find(cache_key_t k)
+bucket_t * cache_t::find(cache_key_t k, id receiver)
{
assert(k != 0);
// hack
Class cls = (Class)((uintptr_t)this - offsetof(objc_class, cache));
- cache_t::bad_cache(nil, (SEL)k, cls);
+ cache_t::bad_cache(receiver, (SEL)k, cls);
}
void cache_t::expand()
{
- mutex_assert_locked(&cacheUpdateLock);
+ cacheUpdateLock.assertLocked();
uint32_t oldCapacity = capacity();
uint32_t newCapacity = oldCapacity ? oldCapacity*2 : INIT_CACHE_SIZE;
}
-static void cache_fill_nolock(Class cls, SEL sel, IMP imp)
+static void cache_fill_nolock(Class cls, SEL sel, IMP imp, id receiver)
{
- mutex_assert_locked(&cacheUpdateLock);
+ cacheUpdateLock.assertLocked();
// Never cache before +initialize is done
if (!cls->isInitialized()) return;
// before we grabbed the cacheUpdateLock.
if (cache_getImp(cls, sel)) return;
- cache_t *cache = getCache(cls, sel);
- cache_key_t key = getKey(cls, sel);
+ cache_t *cache = getCache(cls);
+ cache_key_t key = getKey(sel);
// Use the cache as-is if it is less than 3/4 full
mask_t newOccupied = cache->occupied() + 1;
- if ((newOccupied * 4) <= (cache->mask() + 1) * 3) {
- // Cache is less than 3/4 full.
- } else {
+ mask_t capacity = cache->capacity();
+ if (cache->isConstantEmptyCache()) {
+ // Cache is read-only. Replace it.
+ cache->reallocate(capacity, capacity ?: INIT_CACHE_SIZE);
+ }
+ else if (newOccupied <= capacity / 4 * 3) {
+ // Cache is less than 3/4 full. Use it as-is.
+ }
+ else {
// Cache is too full. Expand it.
cache->expand();
}
- // Scan for the first unused slot (or used for this class) and insert there
+ // Scan for the first unused slot and insert there.
// There is guaranteed to be an empty slot because the
// minimum size is 4 and we resized at 3/4 full.
- bucket_t *bucket = cache->find(key);
+ bucket_t *bucket = cache->find(key, receiver);
if (bucket->key() == 0) cache->incrementOccupied();
bucket->set(key, imp);
}
-void cache_fill(Class cls, SEL sel, IMP imp)
+void cache_fill(Class cls, SEL sel, IMP imp, id receiver)
{
#if !DEBUG_TASK_THREADS
- mutex_lock(&cacheUpdateLock);
- cache_fill_nolock(cls, sel, imp);
- mutex_unlock(&cacheUpdateLock);
+ mutex_locker_t lock(cacheUpdateLock);
+ cache_fill_nolock(cls, sel, imp, receiver);
#else
_collecting_in_critical();
return;
}
-// Reset any entry for cls/sel to the uncached lookup
-static void cache_eraseMethod_nolock(Class cls, SEL sel)
-{
- mutex_assert_locked(&cacheUpdateLock);
-
- cache_t *cache = getCache(cls, sel);
- cache_key_t key = getKey(cls, sel);
-
- bucket_t *bucket = cache->find(key);
- if (bucket->key() == key) {
- bucket->setImp(_objc_msgSend_uncached_impcache);
- }
-}
-
-
-// Resets cache entries for all methods in mlist for cls and its subclasses.
-void cache_eraseMethods(Class cls, method_list_t *mlist)
+// Reset this entire cache to the uncached lookup by reallocating it.
+// This must not shrink the cache - that breaks the lock-free scheme.
+void cache_erase_nolock(Class cls)
{
- rwlock_assert_writing(&runtimeLock);
- mutex_lock(&cacheUpdateLock);
-
- foreach_realized_class_and_subclass(cls, ^(Class c){
- for (uint32_t m = 0; m < mlist->count; m++) {
- SEL sel = mlist->get(m).name;
- cache_eraseMethod_nolock(c, sel);
- }
- });
+ cacheUpdateLock.assertLocked();
- mutex_unlock(&cacheUpdateLock);
-}
-
-
-// Reset any copies of imp in this cache to the uncached lookup
-void cache_eraseImp_nolock(Class cls, SEL sel, IMP imp)
-{
- mutex_assert_locked(&cacheUpdateLock);
+ cache_t *cache = getCache(cls);
- cache_t *cache = getCache(cls, sel);
+ mask_t capacity = cache->capacity();
+ if (capacity > 0 && cache->occupied() > 0) {
+ auto oldBuckets = cache->buckets();
+ auto buckets = emptyBucketsForCapacity(capacity);
+ cache->setBucketsAndMask(buckets, capacity - 1); // also clears occupied
- bucket_t *b = cache->buckets();
- mask_t count = cache->capacity();
- for (mask_t i = 0; i < count; i++) {
- if (b[i].imp() == imp) {
- b[i].setImp(_objc_msgSend_uncached_impcache);
- }
+ cache_collect_free(oldBuckets, capacity);
+ cache_collect(false);
}
}
-void cache_eraseImp(Class cls, SEL sel, IMP imp)
-{
- mutex_lock(&cacheUpdateLock);
- cache_eraseImp_nolock(cls, sel, imp);
- mutex_unlock(&cacheUpdateLock);
-}
-
-
-// Reset this entire cache to the uncached lookup by reallocating it.
-// This must not shrink the cache - that breaks the lock-free scheme.
-void cache_erase_nolock(cache_t *cache)
+void cache_delete(Class cls)
{
- mutex_assert_locked(&cacheUpdateLock);
-
- mask_t capacity = cache->capacity();
- if (capacity > 0 && cache->occupied() > 0) {
- cache->reallocate(capacity, capacity);
+ mutex_locker_t lock(cacheUpdateLock);
+ if (cls->cache.canBeFreed()) {
+ if (PrintCaches) recordDeadCache(cls->cache.capacity());
+ free(cls->cache.buckets());
}
}
{
first = 0;
garbage_refs = (bucket_t**)
- _malloc_internal(INIT_GARBAGE_COUNT * sizeof(void *));
+ malloc(INIT_GARBAGE_COUNT * sizeof(void *));
garbage_max = INIT_GARBAGE_COUNT;
}
else if (garbage_count == garbage_max)
{
garbage_refs = (bucket_t**)
- _realloc_internal(garbage_refs, garbage_max * 2 * sizeof(void *));
+ realloc(garbage_refs, garbage_max * 2 * sizeof(void *));
garbage_max *= 2;
}
}
* precisely the block's size.
* Cache locks: cacheUpdateLock must be held by the caller.
**********************************************************************/
-static void cache_collect_free(bucket_t *data, size_t size)
+static void cache_collect_free(bucket_t *data, mask_t capacity)
{
- mutex_assert_locked(&cacheUpdateLock);
+ cacheUpdateLock.assertLocked();
+
+ if (PrintCaches) recordDeadCache(capacity);
_garbage_make_room ();
- garbage_byte_size += size;
+ garbage_byte_size += cache_t::bytesForCapacity(capacity);
garbage_refs[garbage_count++] = data;
}
**********************************************************************/
void cache_collect(bool collectALot)
{
- mutex_assert_locked(&cacheUpdateLock);
+ cacheUpdateLock.assertLocked();
// Done if the garbage is not full
if (garbage_byte_size < garbage_threshold && !collectALot) {
size_t total_count = 0;
size_t total_size = 0;
- for (i = 0; i < sizeof(cache_counts) / sizeof(cache_counts[0]); i++) {
+ for (i = 0; i < countof(cache_counts); i++) {
int count = cache_counts[i];
int slots = 1 << i;
size_t size = count * slots * sizeof(bucket_t);
}
if (!cls->ext) {
uint32_t size = (uint32_t)sizeof(old_class_ext);
- cls->ext = (old_class_ext *)_calloc_internal(size, 1);
+ cls->ext = (old_class_ext *)calloc(size, 1);
cls->ext->size = size;
}
}
if ( mlist->obsolete == fixed_up_method_list ) {
// method list OK
} else {
- BOOL isBundle = (cls->info & CLS_FROM_BUNDLE) ? YES : NO;
+ bool isBundle = cls->info & CLS_FROM_BUNDLE;
if (!isBundle) {
old_mlist = mlist;
size = sizeof(old_method_list) - sizeof(old_method) + old_mlist->method_count * sizeof(old_method);
- mlist = (old_method_list *)_malloc_internal(size);
+ mlist = (old_method_list *)malloc(size);
memmove(mlist, old_mlist, size);
} else {
// Mach-O bundles are fixed up in place.
*
* void *iterator = nil;
* old_method_list *mlist;
-* mutex_lock(&methodListLock);
+* mutex_locker_t lock(methodListLock);
* while ((mlist = nextMethodList(cls, &iterator))) {
* // do something with mlist
* }
-* mutex_unlock(&methodListLock);
**********************************************************************/
static old_method_list *nextMethodList(Class cls,
void **it)
Method meth;
bool triedResolver = NO;
- mutex_assert_unlocked(&methodListLock);
+ methodListLock.assertUnlocked();
// Optimistic cache lookup
if (cache) {
// be added but ignored indefinitely because the cache was re-filled
// with the old value after the cache flush on behalf of the category.
retry:
- mutex_lock(&methodListLock);
+ methodListLock.lock();
// Ignore GC selectors
if (ignoreSelector(sel)) {
// No implementation found. Try method resolver once.
if (resolver && !triedResolver) {
- mutex_unlock(&methodListLock);
+ methodListLock.unlock();
_class_resolveMethod(cls, sel, inst);
triedResolver = YES;
goto retry;
methodPC = _objc_msgForward_impcache;
done:
- mutex_unlock(&methodListLock);
+ methodListLock.unlock();
// paranoia: look for ignored selectors with non-ignored implementations
assert(!(ignoreSelector(sel) && methodPC != (IMP)&_objc_ignored_method));
{
old_property_list *result = nil;
- mutex_assert_locked(&classLock);
+ classLock.assertLocked();
if (! ((cls->info & CLS_EXT) && cls->ext)) {
// No class ext
result = nil;
}
// fixme leak
- cls->ivar_layout = _ustrdup_internal(layout);
+ cls->ivar_layout = ustrdupMaybeNil(layout);
}
// SPI: Instance-specific object layout.
{
if (!cls) return;
- mutex_lock(&classLock);
+ mutex_locker_t lock(classLock);
allocateExt(cls);
// fixme leak
- cls->ext->weak_ivar_layout = _ustrdup_internal(layout);
-
- mutex_unlock(&classLock);
+ cls->ext->weak_ivar_layout = ustrdupMaybeNil(layout);
}
* Return the ordinary class for this class or metaclass.
* Used by +initialize.
**********************************************************************/
-Class _class_getNonMetaClass(Class cls, id obj __unused)
+Class _class_getNonMetaClass(Class cls, id obj)
{
// fixme ick
if (cls->isMetaClass()) {
- if (strncmp(cls->name, "_%", 2) == 0) {
+ if (cls->info & CLS_CONSTRUCTING) {
+ // Class is under construction and isn't in the class_hash,
+ // so objc_getClass doesn't work.
+ cls = obj; // fixme this may be nil in some paths
+ }
+ else if (strncmp(cls->name, "_%", 2) == 0) {
// Posee's meta's name is smashed and isn't in the class_hash,
// so objc_getClass doesn't work.
const char *baseName = strchr(cls->name, '%'); // get posee's real name
cls = objc_getClass(baseName);
- } else {
+ }
+ else {
cls = objc_getClass(cls->name);
}
assert(cls);
**********************************************************************/
OBJC_EXPORT struct objc_method_list *class_nextMethodList(Class cls, void **it)
{
- old_method_list *result;
-
OBJC_WARN_DEPRECATED;
- mutex_lock(&methodListLock);
- result = nextMethodList(cls, it);
- mutex_unlock(&methodListLock);
- return (struct objc_method_list *)result;
+ mutex_locker_t lock(methodListLock);
+ return (struct objc_method_list *) nextMethodList(cls, it);
}
OBJC_WARN_DEPRECATED;
// Add the methods.
- mutex_lock(&methodListLock);
- _objc_insertMethods(cls, (old_method_list *)meths, nil);
- mutex_unlock(&methodListLock);
+ {
+ mutex_locker_t lock(methodListLock);
+ _objc_insertMethods(cls, (old_method_list *)meths, nil);
+ }
// Must flush when dynamically adding methods. No need to flush
// all the class method caches. If cls is a meta class, though,
OBJC_WARN_DEPRECATED;
// Remove the methods
- mutex_lock(&methodListLock);
- _objc_removeMethods(cls, (old_method_list *)meths);
- mutex_unlock(&methodListLock);
+ {
+ mutex_locker_t lock(methodListLock);
+ _objc_removeMethods(cls, (old_method_list *)meths);
+ }
// Must flush when dynamically removing methods. No need to flush
// all the class method caches. If cls is a meta class, though,
static Method _class_getMethod(Class cls, SEL sel)
{
- Method result;
-
- mutex_lock(&methodListLock);
- result = (Method)_getMethod(cls, sel);
- mutex_unlock(&methodListLock);
-
- return result;
+ mutex_locker_t lock(methodListLock);
+ return (Method)_getMethod(cls, sel);
}
static Method _class_getMethodNoSuper(Class cls, SEL sel)
{
- Method result;
-
- mutex_lock(&methodListLock);
- result = (Method)_findMethodInClass(cls, sel);
- mutex_unlock(&methodListLock);
-
- return result;
+ mutex_locker_t lock(methodListLock);
+ return (Method)_findMethodInClass(cls, sel);
}
static Method _class_getMethodNoSuper_nolock(Class cls, SEL sel)
{
- mutex_assert_locked(&methodListLock);
+ methodListLock.assertLocked();
return (Method)_findMethodInClass(cls, sel);
}
extern "C"
Class _objc_getOrigClass(const char *name)
{
- Class ret;
-
// Look for class among the posers
- ret = Nil;
- mutex_lock(&classLock);
- if (posed_class_hash)
- ret = (Class) NXMapGet (posed_class_hash, name);
- mutex_unlock(&classLock);
- if (ret)
- return ret;
+ {
+ mutex_locker_t lock(classLock);
+ if (posed_class_hash) {
+ Class cls = (Class) NXMapGet (posed_class_hash, name);
+ if (cls) return cls;
+ }
+ }
// Not a poser. Do a normal lookup.
- ret = objc_getClass (name);
- if (!ret)
- _objc_inform ("class `%s' not linked into application", name);
+ Class cls = objc_getClass (name);
+ if (cls) return cls;
- return ret;
+ _objc_inform ("class `%s' not linked into application", name);
+ return nil;
}
Class objc_getOrigClass(const char *name)
**********************************************************************/
static void _objc_addOrigClass (Class origClass)
{
- mutex_lock(&classLock);
+ mutex_locker_t lock(classLock);
// Create the poser's hash table on first use
if (!posed_class_hash)
{
- posed_class_hash = NXCreateMapTableFromZone (NXStrValueMapPrototype,
- 8,
- _objc_internal_zone ());
+ posed_class_hash = NXCreateMapTable(NXStrValueMapPrototype, 8);
}
// Add the named class iff it is not already there (or collides?)
if (NXMapGet (posed_class_hash, origClass->name) == 0)
NXMapInsert (posed_class_hash, origClass->name, origClass);
-
- mutex_unlock(&classLock);
}
void change_class_references(Class imposter,
Class original,
Class copy,
- BOOL changeSuperRefs)
+ bool changeSuperRefs)
{
header_info *hInfo;
Class clsObject;
// Build a string to use to replace the name of the original class.
#if TARGET_OS_WIN32
# define imposterNamePrefix "_%"
- imposterNamePtr = _malloc_internal(strlen(original->name) + strlen(imposterNamePrefix) + 1);
+ imposterNamePtr = malloc(strlen(original->name) + strlen(imposterNamePrefix) + 1);
strcpy(imposterNamePtr, imposterNamePrefix);
strcat(imposterNamePtr, original->name);
# undef imposterNamePrefix
// its normal life in addition to changing the behavior of
// the original. As a hack we don't bother to copy the metaclass.
// For some reason we modify the original rather than the copy.
- copy = (Class)_malloc_internal(sizeof(objc_class));
+ copy = (Class)malloc(sizeof(objc_class));
memmove(copy, imposter, sizeof(objc_class));
- mutex_lock(&classLock);
+ mutex_locker_t lock(classLock);
// Remove both the imposter and the original class.
NXHashRemove (class_hash, imposter);
NXHashInsert (class_hash, imposter);
NXHashInsert (class_hash, original);
- mutex_unlock(&classLock);
-
return imposter;
}
unsigned int subclassCount;
#endif
- mutex_lock(&classLock);
- mutex_lock(&cacheUpdateLock);
+ mutex_locker_t lock(classLock);
+ mutex_locker_t lock2(cacheUpdateLock);
// Leaf classes are fastest because there are no subclass caches to flush.
// fixme instrument
if (target->ISA() && (target->ISA()->info & CLS_LEAF)) {
_cache_flush (target->ISA());
- mutex_unlock(&cacheUpdateLock);
- mutex_unlock(&classLock);
return; // done
} else {
// Reset target and handle it by one of the methods below.
if (collectALot) {
_cache_collect(true);
}
-
- mutex_unlock(&cacheUpdateLock);
- mutex_unlock(&classLock);
}
Class supercls;
NXHashState state;
- mutex_lock(&classLock);
- mutex_lock(&cacheUpdateLock);
+ mutex_locker_t lock(classLock);
+ mutex_locker_t lock2(cacheUpdateLock);
state = NXInitHashState(class_hash);
while (NXNextHashState(class_hash, &state, (void**)&cls)) {
cls->ISA()->clearInfo(CLS_FLUSH_CACHE);
}
}
-
- mutex_unlock(&cacheUpdateLock);
- mutex_unlock(&classLock);
}
}
-BOOL _class_hasLoadMethod(Class cls)
+bool _class_hasLoadMethod(Class cls)
{
if (cls->ISA()->info & CLS_HAS_LOAD_METHOD) return YES;
- return (_class_getLoadMethod_nocheck(cls) ? YES : NO);
+ return _class_getLoadMethod_nocheck(cls);
}
}
-static spinlock_t impLock = SPINLOCK_INITIALIZER;
+static spinlock_t impLock;
IMP method_setImplementation(Method m_gen, IMP imp)
{
return m->method_imp;
}
- spinlock_lock(&impLock);
+ impLock.lock();
old = m->method_imp;
m->method_imp = imp;
- spinlock_unlock(&impLock);
+ impLock.unlock();
return old;
}
return;
}
- spinlock_lock(&impLock);
+ impLock.lock();
m1_imp = m1->method_imp;
m1->method_imp = m2->method_imp;
m2->method_imp = m1_imp;
- spinlock_unlock(&impLock);
+ impLock.unlock();
}
return nil;
}
- objc_property_attribute_t *result;
- mutex_lock(&classLock);
- result = copyPropertyAttributeList(oldproperty(prop)->attributes,outCount);
- mutex_unlock(&classLock);
- return result;
+ mutex_locker_t lock(classLock);
+ return copyPropertyAttributeList(oldproperty(prop)->attributes,outCount);
}
char * property_copyAttributeValue(objc_property_t prop, const char *name)
{
if (!prop || !name || *name == '\0') return nil;
- char *result;
- mutex_lock(&classLock);
- result = copyPropertyAttributeValue(oldproperty(prop)->attributes, name);
- mutex_unlock(&classLock);
- return result;
+ mutex_locker_t lock(classLock);
+ return copyPropertyAttributeValue(oldproperty(prop)->attributes, name);
}
* class_addMethod
**********************************************************************/
static IMP _class_addMethod(Class cls, SEL name, IMP imp,
- const char *types, BOOL replace)
+ const char *types, bool replace)
{
old_method *m;
IMP result = nil;
if (!types) types = "";
- mutex_lock(&methodListLock);
+ mutex_locker_t lock(methodListLock);
if ((m = _findMethodInClass(cls, name))) {
// already exists
} else {
// fixme could be faster
old_method_list *mlist =
- (old_method_list *)_calloc_internal(sizeof(old_method_list), 1);
+ (old_method_list *)calloc(sizeof(old_method_list), 1);
mlist->obsolete = fixed_up_method_list;
mlist->method_count = 1;
mlist->method_list[0].method_name = name;
- mlist->method_list[0].method_types = _strdup_internal(types);
+ mlist->method_list[0].method_types = strdup(types);
if (!ignoreSelector(name)) {
mlist->method_list[0].method_imp = imp;
} else {
result = nil;
}
- mutex_unlock(&methodListLock);
-
return result;
}
if (!cls) return NO;
old = _class_addMethod(cls, name, imp, types, NO);
- return old ? NO : YES;
+ return !old;
}
BOOL class_addIvar(Class cls, const char *name, size_t size,
uint8_t alignment, const char *type)
{
- BOOL result = YES;
+ bool result = YES;
if (!cls) return NO;
if (ISMETA(cls)) return NO;
if (!type) type = "";
if (name && 0 == strcmp(name, "")) name = nil;
- mutex_lock(&classLock);
+ mutex_locker_t lock(classLock);
// Check for existing ivar with this name
// fixme check superclasses?
// allocate new ivar list
cls->ivars = (old_ivar_list *)
- _calloc_internal(oldSize+sizeof(old_ivar), 1);
+ calloc(oldSize+sizeof(old_ivar), 1);
if (old) memcpy(cls->ivars, old, oldSize);
if (old && malloc_size(old)) free(old);
cls->ivars->ivar_count = newCount;
ivar = &cls->ivars->ivar_list[newCount-1];
// set ivar name and type
- ivar->ivar_name = _strdup_internal(name);
- ivar->ivar_type = _strdup_internal(type);
+ ivar->ivar_name = strdup(name);
+ ivar->ivar_type = strdup(type);
// align if necessary
alignBytes = 1 << alignment;
cls->instance_size += (long)size;
}
- mutex_unlock(&classLock);
-
return result;
}
if (!cls) return NO;
if (class_conformsToProtocol(cls, protocol_gen)) return NO;
- mutex_lock(&classLock);
+ mutex_locker_t lock(classLock);
// fixme optimize - protocol list doesn't escape?
- plist = (old_protocol_list*)_calloc_internal(sizeof(old_protocol_list), 1);
+ plist = (old_protocol_list*)calloc(sizeof(old_protocol_list), 1);
plist->count = 1;
plist->list[0] = protocol;
plist->next = cls->protocols;
// fixme metaclass?
- mutex_unlock(&classLock);
-
return YES;
}
* Used by category attachment and class_addProperty()
* Locking: acquires classLock
**********************************************************************/
-BOOL
+bool
_class_addProperties(Class cls,
old_property_list *additions)
{
if (!(cls->info & CLS_EXT)) return NO;
newlist = (old_property_list *)
- _memdup_internal(additions, sizeof(*newlist) - sizeof(newlist->first)
+ memdup(additions, sizeof(*newlist) - sizeof(newlist->first)
+ (additions->entsize * additions->count));
- mutex_lock(&classLock);
+ mutex_locker_t lock(classLock);
allocateExt(cls);
if (!cls->ext->propertyLists) {
else if (cls->info & CLS_NO_PROPERTY_ARRAY) {
// cls has one property list - make a new array
old_property_list **newarray = (old_property_list **)
- _malloc_internal(3 * sizeof(*newarray));
+ malloc(3 * sizeof(*newarray));
newarray[0] = newlist;
newarray[1] = (old_property_list *)cls->ext->propertyLists;
newarray[2] = nil;
int count = 0;
while (cls->ext->propertyLists[count]) count++;
newarray = (old_property_list **)
- _malloc_internal((count+2) * sizeof(*newarray));
+ malloc((count+2) * sizeof(*newarray));
newarray[0] = newlist;
memcpy(&newarray[1], &cls->ext->propertyLists[0],
count * sizeof(*newarray));
cls->ext->propertyLists = newarray;
}
- mutex_unlock(&classLock);
-
return YES;
}
* Adds a property to a class. Returns NO if the proeprty already exists.
* Locking: acquires classLock
**********************************************************************/
-static BOOL
+static bool
_class_addProperty(Class cls, const char *name,
const objc_property_attribute_t *attrs, unsigned int count,
- BOOL replace)
+ bool replace)
{
if (!cls) return NO;
if (!name) return NO;
}
else if (prop) {
// replace existing
- mutex_lock(&classLock);
+ mutex_locker_t lock(classLock);
try_free(prop->attributes);
prop->attributes = copyPropertyAttributeString(attrs, count);
- mutex_unlock(&classLock);
return YES;
}
else {
old_property_list proplist;
proplist.entsize = sizeof(old_property);
proplist.count = 1;
- proplist.first.name = _strdup_internal(name);
+ proplist.first.name = strdup(name);
proplist.first.attributes = copyPropertyAttributeString(attrs, count);
return _class_addProperties(cls, &proplist);
return nil;
}
- mutex_lock(&classLock);
+ mutex_locker_t lock(classLock);
for (plist = cls->protocols; plist != nil; plist = plist->next) {
count += (int)plist->count;
result[p] = nil;
}
- mutex_unlock(&classLock);
-
if (outCount) *outCount = count;
return result;
}
**********************************************************************/
objc_property_t class_getProperty(Class cls, const char *name)
{
- old_property *result;
if (!cls || !name) return nil;
- mutex_lock(&classLock);
+ mutex_locker_t lock(classLock);
- for (result = nil; cls && !result; cls = cls->superclass) {
+ for (; cls; cls = cls->superclass) {
uintptr_t iterator = 0;
old_property_list *plist;
while ((plist = nextPropertyList(cls, &iterator))) {
for (i = 0; i < plist->count; i++) {
old_property *p = property_list_nth(plist, i);
if (0 == strcmp(name, p->name)) {
- result = p;
- goto done;
+ return (objc_property_t)p;
}
}
}
}
- done:
- mutex_unlock(&classLock);
-
- return (objc_property_t)result;
+ return nil;
}
return nil;
}
- mutex_lock(&classLock);
+ mutex_locker_t lock(classLock);
iterator = 0;
while ((plist = nextPropertyList(cls, &iterator))) {
result[p] = nil;
}
- mutex_unlock(&classLock);
-
if (outCount) *outCount = count;
return (objc_property_t *)result;
}
return nil;
}
- mutex_lock(&methodListLock);
+ mutex_locker_t lock(methodListLock);
iterator = nil;
while ((mlist = nextMethodList(cls, &iterator))) {
result[m] = nil;
}
- mutex_unlock(&methodListLock);
-
if (outCount) *outCount = count;
return result;
}
* objc_allocateClass.
**********************************************************************/
-void set_superclass(Class cls, Class supercls, BOOL cls_is_new)
+void set_superclass(Class cls, Class supercls, bool cls_is_new)
{
Class meta = cls->ISA();
set_superclass(cls, supercls, YES);
// Set basic info
- cls->name = _strdup_internal(name);
- meta->name = _strdup_internal(name);
+ cls->name = strdup(name);
+ meta->name = strdup(name);
cls->version = 0;
meta->version = 7;
cls->info = CLS_CLASS | CLS_CONSTRUCTING | CLS_EXT | CLS_LEAF;
return;
}
- mutex_lock(&classLock);
+ mutex_locker_t lock(classLock);
// Build ivar layouts
if (UseGC) {
else if (cls->ivars == nil) {
// No local ivars. Use superclass's layout.
cls->ivar_layout =
- _ustrdup_internal(cls->superclass->ivar_layout);
+ ustrdupMaybeNil(cls->superclass->ivar_layout);
}
else {
// Has local ivars. Build layout based on superclass.
// No local ivars. Use superclass's layout.
const uint8_t *weak =
class_getWeakIvarLayout(cls->superclass);
- if (weak) {
- cls->ext->weak_ivar_layout = _ustrdup_internal(weak);
- } else {
- cls->ext->weak_ivar_layout = nil;
- }
+ cls->ext->weak_ivar_layout = ustrdupMaybeNil(weak);
}
else {
// Has local ivars. Build layout based on superclass.
// No way to add weak ivars yet.
const uint8_t *weak =
class_getWeakIvarLayout(cls->superclass);
- if (weak) {
- cls->ext->weak_ivar_layout = _ustrdup_internal(weak);
- } else {
- cls->ext->weak_ivar_layout = nil;
- }
+ cls->ext->weak_ivar_layout = ustrdupMaybeNil(weak);
}
}
NXHashInsertIfAbsent(class_hash, cls);
objc_addRegisteredClass(cls);
//objc_addRegisteredClass(cls->ISA()); if we ever allocate classes from GC
-
- mutex_unlock(&classLock);
}
duplicate->info |= original->info & (CLS_EXT|CLS_NO_PROPERTY_ARRAY);
duplicate->ivar_layout = original->ivar_layout;
if (original->ext) {
- duplicate->ext = (old_class_ext *)_malloc_internal(original->ext->size);
+ duplicate->ext = (old_class_ext *)malloc(original->ext->size);
memcpy(duplicate->ext, original->ext, original->ext->size);
} else {
duplicate->ext = nil;
free(originalMethods);
}
- mutex_lock(&classLock);
+ mutex_locker_t lock(classLock);
NXHashInsert(class_hash, duplicate);
objc_addRegisteredClass(duplicate);
- mutex_unlock(&classLock);
return duplicate;
}
return;
}
- mutex_lock(&classLock);
+ mutex_locker_t lock(classLock);
NXHashRemove(class_hash, cls);
objc_removeRegisteredClass(cls);
unload_class(cls->ISA());
unload_class(cls);
- mutex_unlock(&classLock);
}
**********************************************************************/
Class object_setClass(id obj, Class cls)
{
- if (obj) return obj->changeIsa(cls);
- else return Nil;
+ if (!obj) return nil;
+
+ // Prevent a deadlock between the weak reference machinery
+ // and the +initialize machinery by ensuring that no
+ // weakly-referenced object has an un-+initialized isa.
+ // Unresolved future classes are not so protected.
+ if (!cls->isFuture() && !cls->isInitialized()) {
+ _class_initialize(_class_getNonMetaClass(cls, nil));
+ }
+
+ return obj->changeIsa(cls);
}
return nil;
}
-static BOOL is_scanned_offset(ptrdiff_t ivar_offset, const uint8_t *layout) {
+static bool is_scanned_offset(ptrdiff_t ivar_offset, const uint8_t *layout) {
ptrdiff_t index = 0, ivar_index = ivar_offset / sizeof(void*);
uint8_t byte;
while ((byte = *layout++)) {
}
BOOL (*msg)(Class, SEL, SEL) = (typeof(msg))objc_msgSend;
- BOOL resolved = msg(_class_getNonMetaClass(cls, inst),
+ bool resolved = msg(_class_getNonMetaClass(cls, inst),
SEL_resolveClassMethod, sel);
// Cache the result (good or bad) so the resolver doesn't fire next time.
}
BOOL (*msg)(Class, SEL, SEL) = (typeof(msg))objc_msgSend;
- BOOL resolved = msg(cls, SEL_resolveInstanceMethod, sel);
+ bool resolved = msg(cls, SEL_resolveInstanceMethod, sel);
// Cache the result (good or bad) so the resolver doesn't fire next time.
// +resolveInstanceMethod adds to self a.k.a. cls
// inst is an instance of cls or a subclass thereof, or nil if none is known.
// Non-nil inst is faster in some cases. See lookUpImpOrForward() for details.
-BOOL class_respondsToSelector_inst(Class cls, SEL sel, id inst)
+bool class_respondsToSelector_inst(Class cls, SEL sel, id inst)
{
IMP imp;
// We're not returning a callable IMP anyway.
imp = lookUpImpOrNil(cls, sel, inst,
NO/*initialize*/, YES/*cache*/, YES/*resolver*/);
- return imp ? YES : NO;
+ return bool(imp);
}
implementingClass,
sel_getName(selector));
- static spinlock_t lock = SPINLOCK_INITIALIZER;
- spinlock_lock(&lock);
+ static spinlock_t lock;
+ lock.lock();
write (objcMsgLogFD, buf, strlen(buf));
- spinlock_unlock(&lock);
+ lock.unlock();
// Tell caller to not cache the method
return false;
#endif
-/***********************************************************************
-* _malloc_internal
-* _calloc_internal
-* _realloc_internal
-* _strdup_internal
-* _strdupcat_internal
-* _memdup_internal
-* _free_internal
-* Convenience functions for the internal malloc zone.
-**********************************************************************/
-void *_malloc_internal(size_t size)
-{
- return malloc_zone_malloc(_objc_internal_zone(), size);
-}
-
-void *_calloc_internal(size_t count, size_t size)
-{
- return malloc_zone_calloc(_objc_internal_zone(), count, size);
-}
-
-void *_realloc_internal(void *ptr, size_t size)
-{
- return malloc_zone_realloc(_objc_internal_zone(), ptr, size);
-}
-
-char *_strdup_internal(const char *str)
-{
- size_t len;
- char *dup;
- if (!str) return nil;
- len = strlen(str);
- dup = (char *)malloc_zone_malloc(_objc_internal_zone(), len + 1);
- memcpy(dup, str, len + 1);
- return dup;
-}
-
-uint8_t *_ustrdup_internal(const uint8_t *str)
-{
- return (uint8_t *)_strdup_internal((char *)str);
-}
-
-// allocate a new string that concatenates s1+s2.
-char *_strdupcat_internal(const char *s1, const char *s2)
-{
- size_t len1 = strlen(s1);
- size_t len2 = strlen(s2);
- char *dup = (char *)
- malloc_zone_malloc(_objc_internal_zone(), len1 + len2 + 1);
- memcpy(dup, s1, len1);
- memcpy(dup + len1, s2, len2 + 1);
- return dup;
-}
-
-void *_memdup_internal(const void *mem, size_t len)
-{
- void *dup = malloc_zone_malloc(_objc_internal_zone(), len);
- memcpy(dup, mem, len);
- return dup;
-}
-
-void _free_internal(void *ptr)
-{
- malloc_zone_free(_objc_internal_zone(), ptr);
-}
-
-size_t _malloc_size_internal(void *ptr)
-{
- malloc_zone_t *zone = _objc_internal_zone();
- return zone->size(zone, ptr);
-}
-
Class _calloc_class(size_t size)
{
#if SUPPORT_GC
if (UseGC) return (Class) malloc_zone_calloc(gc_zone, 1, size);
#endif
- return (Class) _calloc_internal(1, size);
+ return (Class) calloc(1, size);
}
Class class_getSuperclass(Class cls)
unsigned int i;
if (count == 0) return strdup("");
-#ifndef NDEBUG
+#if DEBUG
// debug build: sanitize input
for (i = 0; i < count; i++) {
assert(attrs[i].name);
*/
static unsigned int
iteratePropertyAttributes(const char *attrs,
- BOOL (*fn)(unsigned int index,
+ bool (*fn)(unsigned int index,
void *ctx1, void *ctx2,
const char *name, size_t nlen,
const char *value, size_t vlen),
{
if (!attrs) return 0;
-#ifndef NDEBUG
+#if DEBUG
const char *attrsend = attrs + strlen(attrs);
#endif
unsigned int attrcount = 0;
valueStart = start;
valueEnd = end;
- BOOL more = (*fn)(attrcount, ctx1, ctx2,
+ bool more = (*fn)(attrcount, ctx1, ctx2,
nameStart, nameEnd-nameStart,
valueStart, valueEnd-valueStart);
attrcount++;
}
-static BOOL
+static bool
copyOneAttribute(unsigned int index, void *ctxa, void *ctxs,
const char *name, size_t nlen, const char *value, size_t vlen)
{
}
-static BOOL
+static bool
findOneAttribute(unsigned int index, void *ctxa, void *ctxs,
const char *name, size_t nlen, const char *value, size_t vlen)
{
#include <TargetConditionals.h>
+// Avoid the !NDEBUG double negative.
+#if !NDEBUG
+# define DEBUG 1
+#else
+# define DEBUG 0
+#endif
+
// Define SUPPORT_GC=1 to enable garbage collection.
-// Be sure to edit OBJC_NO_GC in objc-auto.h as well.
-#if TARGET_OS_EMBEDDED || TARGET_OS_IPHONE || TARGET_OS_WIN32
+// Be sure to edit OBJC_NO_GC and OBJC_NO_GC_API in objc-api.h as well.
+#if TARGET_OS_EMBEDDED || TARGET_OS_IPHONE || TARGET_OS_WIN32 || (TARGET_OS_MAC && __x86_64h__)
# define SUPPORT_GC 0
#else
# define SUPPORT_GC 1
#endif
// Define SUPPORT_NONPOINTER_ISA=1 to enable extra data in the isa field.
-#if !__LP64__ || TARGET_OS_WIN32 || TARGET_IPHONE_SIMULATOR || __x86_64__
+#if !__LP64__ || TARGET_OS_WIN32 || TARGET_IPHONE_SIMULATOR
# define SUPPORT_NONPOINTER_ISA 0
#else
# define SUPPORT_NONPOINTER_ISA 1
// Define SUPPORT_ZEROCOST_EXCEPTIONS to use "zero-cost" exceptions for OBJC2.
// Be sure to edit objc-exception.h as well (objc_add/removeExceptionHandler)
-#if !__OBJC2__ || defined(__arm__)
+#if !__OBJC2__ || (defined(__arm__) && __USING_SJLJ_EXCEPTIONS__)
# define SUPPORT_ZEROCOST_EXCEPTIONS 0
#else
# define SUPPORT_ZEROCOST_EXCEPTIONS 1
// OPTION(var, env, help)
OPTION( PrintImages, OBJC_PRINT_IMAGES, "log image and library names as they are loaded")
+OPTION( PrintImageTimes, OBJC_PRINT_IMAGE_TIMES, "measure duration of image loading steps")
OPTION( PrintLoading, OBJC_PRINT_LOAD_METHODS, "log calls to class and category +load methods")
OPTION( PrintInitializing, OBJC_PRINT_INITIALIZE_METHODS, "log calls to class +initialize methods")
OPTION( PrintResolving, OBJC_PRINT_RESOLVED_METHODS, "log methods created by +resolveClassMethod: and +resolveInstanceMethod:")
OPTION( DebugNonFragileIvars, OBJC_DEBUG_NONFRAGILE_IVARS, "capriciously rearrange non-fragile ivars")
OPTION( DebugAltHandlers, OBJC_DEBUG_ALT_HANDLERS, "record more info about bad alt handler use")
OPTION( DebugMissingPools, OBJC_DEBUG_MISSING_POOLS, "warn about autorelease with no pool in place, which may be a leak")
+OPTION( DebugPoolAllocation, OBJC_DEBUG_POOL_ALLOCATION, "halt when autorelease pools are popped out of order, and allow heap debuggers to track autorelease pools")
OPTION( DebugDuplicateClasses, OBJC_DEBUG_DUPLICATE_CLASSES, "halt when multiple classes with the same name are present")
-OPTION( UseInternalZone, OBJC_USE_INTERNAL_ZONE, "allocate runtime data in a dedicated malloc zone")
-
OPTION( DisableGC, OBJC_DISABLE_GC, "force GC OFF, even if the executable wants it on")
OPTION( DisableVtables, OBJC_DISABLE_VTABLES, "disable vtable dispatch")
OPTION( DisablePreopt, OBJC_DISABLE_PREOPTIMIZATION, "disable preoptimization courtesy of dyld shared cache")
static void _objc_trap(void) __attribute__((noreturn));
+// Return true if c is a UTF8 continuation byte
+static bool isUTF8Continuation(char c)
+{
+ return (c & 0xc0) == 0x80; // continuation byte is 0b10xxxxxx
+}
+
// Add "message" to any forthcoming crash log.
static void _objc_crashlog(const char *message)
{
}
#endif
- static mutex_t crashlog_lock = MUTEX_INITIALIZER;
- mutex_lock(&crashlog_lock);
+ static mutex_t crashlog_lock;
+ mutex_locker_t lock(crashlog_lock);
char *oldmsg = (char *)CRGetCrashLogMessage();
+ size_t oldlen;
+ const size_t limit = 8000;
if (!oldmsg) {
newmsg = strdup(message);
+ } else if ((oldlen = strlen(oldmsg)) > limit) {
+ // limit total length by dropping old contents
+ char *truncmsg = oldmsg + oldlen - limit;
+ // advance past partial UTF-8 bytes
+ while (isUTF8Continuation(*truncmsg)) truncmsg++;
+ asprintf(&newmsg, "... %s\n%s", truncmsg, message);
} else {
asprintf(&newmsg, "%s\n%s", oldmsg, message);
}
if (oldmsg) free(oldmsg);
CRSetCrashLogMessage(newmsg);
}
-
- mutex_unlock(&crashlog_lock);
}
// Returns true if logs should be sent to stderr as well as syslog.
int ret = fstat(STDERR_FILENO, &st);
if (ret < 0) return false;
mode_t m = st.st_mode & S_IFMT;
- if (m == S_IFREG || m == S_IFSOCK) return true;
- if (!(m == S_IFIFO || m == S_IFCHR)) return false;
-
- // if it could be a pipe back to launchd, fail
- int64_t val = 0;
- vproc_swap_integer(NULL, VPROC_GSK_IS_MANAGED, NULL, &val);
- if (val) return false;
-
- return true;
+ if (m == S_IFREG || m == S_IFSOCK || m == S_IFIFO || m == S_IFCHR) {
+ return true;
+ }
+ return false;
}
// Print "message" to the console.
struct _Unwind_Context;
typedef int _Unwind_Action;
-static const _Unwind_Action _UA_SEARCH_PHASE = 1;
-static const _Unwind_Action _UA_CLEANUP_PHASE = 2;
-static const _Unwind_Action _UA_HANDLER_FRAME = 4;
-static const _Unwind_Action _UA_FORCE_UNWIND = 8;
+enum : _Unwind_Action {
+ _UA_SEARCH_PHASE = 1,
+ _UA_CLEANUP_PHASE = 2,
+ _UA_HANDLER_FRAME = 4,
+ _UA_FORCE_UNWIND = 8
+};
typedef int _Unwind_Reason_Code;
-static const _Unwind_Reason_Code _URC_NO_REASON = 0;
-static const _Unwind_Reason_Code _URC_FOREIGN_EXCEPTION_CAUGHT = 1;
-static const _Unwind_Reason_Code _URC_FATAL_PHASE2_ERROR = 2;
-static const _Unwind_Reason_Code _URC_FATAL_PHASE1_ERROR = 3;
-static const _Unwind_Reason_Code _URC_NORMAL_STOP = 4;
-static const _Unwind_Reason_Code _URC_END_OF_STACK = 5;
-static const _Unwind_Reason_Code _URC_HANDLER_FOUND = 6;
-static const _Unwind_Reason_Code _URC_INSTALL_CONTEXT = 7;
-static const _Unwind_Reason_Code _URC_CONTINUE_UNWIND = 8;
+enum : _Unwind_Reason_Code {
+ _URC_NO_REASON = 0,
+ _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
+ _URC_FATAL_PHASE2_ERROR = 2,
+ _URC_FATAL_PHASE1_ERROR = 3,
+ _URC_NORMAL_STOP = 4,
+ _URC_END_OF_STACK = 5,
+ _URC_HANDLER_FOUND = 6,
+ _URC_INSTALL_CONTEXT = 7,
+ _URC_CONTINUE_UNWIND = 8
+};
struct dwarf_eh_bases
{
struct _Unwind_Exception *exceptionObject,
struct _Unwind_Context *context)
{
- BOOL unwinding = ((actions & _UA_CLEANUP_PHASE) ||
+ bool unwinding = ((actions & _UA_CLEANUP_PHASE) ||
(actions & _UA_FORCE_UNWIND));
if (PrintExceptions) {
else {
// Record all ranges with the same landing pad as our match.
frame->ips = (frame_ips *)
- _malloc_internal((range_count + 1) * sizeof(frame->ips[0]));
+ malloc((range_count + 1) * sizeof(frame->ips[0]));
unsigned int r = 0;
p = call_site_table;
while (p < call_site_table_end) {
struct alt_handler_list *next_DEBUGONLY;
};
-static pthread_mutex_t DebugLock = PTHREAD_MUTEX_INITIALIZER;
+static mutex_t DebugLock;
static struct alt_handler_list *DebugLists;
static uintptr_t DebugCounter;
void alt_handler_error(uintptr_t token) __attribute__((noinline));
static struct alt_handler_list *
-fetch_handler_list(BOOL create)
+fetch_handler_list(bool create)
{
_objc_pthread_data *data = _objc_fetch_pthread_data(create);
if (!data) return nil;
struct alt_handler_list *list = data->handlerList;
if (!list) {
if (!create) return nil;
- list = (struct alt_handler_list *)_calloc_internal(1, sizeof(*list));
+ list = (struct alt_handler_list *)calloc(1, sizeof(*list));
data->handlerList = list;
if (DebugAltHandlers) {
// Save this list so the debug code can find it from other threads
- pthread_mutex_lock(&DebugLock);
+ mutex_locker_t lock(DebugLock);
list->next_DEBUGONLY = DebugLists;
DebugLists = list;
- pthread_mutex_unlock(&DebugLock);
}
}
if (list) {
if (DebugAltHandlers) {
// Detach from the list-of-lists.
- pthread_mutex_lock(&DebugLock);
+ mutex_locker_t lock(DebugLock);
struct alt_handler_list **listp = &DebugLists;
while (*listp && *listp != list) listp = &(*listp)->next_DEBUGONLY;
if (*listp) *listp = (*listp)->next_DEBUGONLY;
- pthread_mutex_unlock(&DebugLock);
}
if (list->handlers) {
for (unsigned int i = 0; i < list->allocated; i++) {
if (list->handlers[i].frame.ips) {
- _free_internal(list->handlers[i].frame.ips);
+ free(list->handlers[i].frame.ips);
}
}
- _free_internal(list->handlers);
+ free(list->handlers);
}
- _free_internal(list);
+ free(list);
}
}
if (list->used == list->allocated) {
list->allocated = list->allocated*2 ?: 4;
list->handlers = (struct alt_handler_data *)
- _realloc_internal(list->handlers,
+ realloc(list->handlers,
list->allocated * sizeof(list->handlers[0]));
bzero(&list->handlers[list->used], (list->allocated - list->used) * sizeof(list->handlers[0]));
i = list->used;
if (DebugAltHandlers) {
// Record backtrace in case this handler is misused later.
- pthread_mutex_lock(&DebugLock);
+ mutex_locker_t lock(DebugLock);
token = DebugCounter++;
if (token == 0) token = DebugCounter++;
if (!data->debug) {
data->debug = (struct alt_handler_debug *)
- _calloc_internal(sizeof(*data->debug), 1);
+ calloc(sizeof(*data->debug), 1);
} else {
bzero(data->debug, sizeof(*data->debug));
}
data->debug->backtraceSize =
backtrace(data->debug->backtrace, BACKTRACE_COUNT);
data->debug->token = token;
-
- pthread_mutex_unlock(&DebugLock);
}
if (PrintAltHandlers) {
(void *)data->frame.ip_end, (void *)data->frame.cfa);
}
- if (data->debug) _free_internal(data->debug);
- if (data->frame.ips) _free_internal(data->frame.ips);
+ if (data->debug) free(data->debug);
+ if (data->frame.ips) free(data->frame.ips);
bzero(data, sizeof(*data));
list->used--;
}
objc_alt_handler_error();
}
- pthread_mutex_lock(&DebugLock);
+ DebugLock.lock();
// Search other threads' alt handler lists for this handler.
struct alt_handler_list *list;
for (i = 0; i < data->debug->backtraceSize; i++){
len += 4 + strlen(symbols[i]) + 1;
}
- symbolString = (char *)_calloc_internal(len, 1);
+ symbolString = (char *)calloc(len, 1);
for (i = 0; i < data->debug->backtraceSize; i++){
strcat(symbolString, " ");
strcat(symbolString, symbols[i]);
"Thread '%s': Dispatch queue: '%s': \n%s",
data->debug->thread, data->debug->queue, symbolString);
- pthread_mutex_unlock(&DebugLock);
- _free_internal(symbolString);
+ DebugLock.unlock();
+ free(symbolString);
objc_alt_handler_error();
}
}
}
- pthread_mutex_unlock(&DebugLock);
+ DebugLock.unlock();
// not found
_objc_inform_now_and_on_crash
(void *)copy.frame.cfa);
}
if (copy.fn) (*copy.fn)(nil, copy.context);
- if (copy.frame.ips) _free_internal(copy.frame.ips);
+ if (copy.frame.ips) free(copy.frame.ips);
}
}
}
return NULL;
}
-BOOL
+bool
_hasObjcContents(const header_info *hi)
{
// Look for an __OBJC,* section other than __OBJC,__image_info
#include "objc-runtime-new.h"
-
-__BEGIN_DECLS
-
// classref_t is not fixed up at launch; use remapClass() to convert
extern SEL *_getObjc2SelectorRefs(const header_info *hi, size_t *count);
extern category_t **_getObjc2NonlazyCategoryList(const header_info *hi, size_t *count);
extern protocol_t **_getObjc2ProtocolList(const header_info *hi, size_t *count);
extern protocol_t **_getObjc2ProtocolRefs(const header_info *hi, size_t *count);
+using Initializer = void(*)(void);
+extern Initializer* getLibobjcInitializers(const header_info *hi, size_t *count);
-__END_DECLS
+extern classref_t *_getObjc2NonlazyClassList(const headerType *mhdr, size_t *count);
+extern category_t **_getObjc2NonlazyCategoryList(const headerType *mhdr, size_t *count);
+extern Initializer* getLibobjcInitializers(const headerType *mhdr, size_t *count);
#endif
#include "objc-private.h"
#include "objc-file.h"
+// Segment and section names are 16 bytes and may be un-terminated.
+bool segnameEquals(const char *lhs, const char *rhs) {
+ return 0 == strncmp(lhs, rhs, 16);
+}
+
+bool segnameStartsWith(const char *segname, const char *prefix) {
+ return 0 == strncmp(segname, prefix, strlen(prefix));
+}
+
+bool sectnameEquals(const char *lhs, const char *rhs) {
+ return segnameEquals(lhs, rhs);
+}
+
+bool sectnameStartsWith(const char *sectname, const char *prefix) {
+ return segnameStartsWith(sectname, prefix);
+}
+
+
+// Look for a __DATA or __DATA_CONST or __DATA_DIRTY section
+// with the given name that stores an array of T.
+template <typename T>
+T* getDataSection(const headerType *mhdr, const char *sectname,
+ size_t *outBytes, size_t *outCount)
+{
+ unsigned long byteCount = 0;
+ T* data = (T*)getsectiondata(mhdr, "__DATA", sectname, &byteCount);
+ if (!data) {
+ data = (T*)getsectiondata(mhdr, "__DATA_CONST", sectname, &byteCount);
+ }
+ if (!data) {
+ data = (T*)getsectiondata(mhdr, "__DATA_DIRTY", sectname, &byteCount);
+ }
+ if (outBytes) *outBytes = byteCount;
+ if (outCount) *outCount = byteCount / sizeof(T);
+ return data;
+}
+
#define GETSECT(name, type, sectname) \
- type *name(const header_info *hi, size_t *outCount) \
- { \
- unsigned long byteCount = 0; \
- type *data = (type *) \
- getsectiondata(hi->mhdr, SEG_DATA, sectname, &byteCount); \
- *outCount = byteCount / sizeof(type); \
- return data; \
+ type *name(const headerType *mhdr, size_t *outCount) { \
+ return getDataSection<type>(mhdr, sectname, nil, outCount); \
+ } \
+ type *name(const header_info *hi, size_t *outCount) { \
+ return getDataSection<type>(hi->mhdr, sectname, nil, outCount); \
}
// function name content type section name
GETSECT(_getObjc2SelectorRefs, SEL, "__objc_selrefs");
GETSECT(_getObjc2MessageRefs, message_ref_t, "__objc_msgrefs");
-GETSECT(_getObjc2ClassRefs, Class, "__objc_classrefs");
-GETSECT(_getObjc2SuperRefs, Class, "__objc_superrefs");
-GETSECT(_getObjc2ClassList, classref_t, "__objc_classlist");
-GETSECT(_getObjc2NonlazyClassList, classref_t, "__objc_nlclslist");
+GETSECT(_getObjc2ClassRefs, Class, "__objc_classrefs");
+GETSECT(_getObjc2SuperRefs, Class, "__objc_superrefs");
+GETSECT(_getObjc2ClassList, classref_t, "__objc_classlist");
+GETSECT(_getObjc2NonlazyClassList, classref_t, "__objc_nlclslist");
GETSECT(_getObjc2CategoryList, category_t *, "__objc_catlist");
GETSECT(_getObjc2NonlazyCategoryList, category_t *, "__objc_nlcatlist");
GETSECT(_getObjc2ProtocolList, protocol_t *, "__objc_protolist");
GETSECT(_getObjc2ProtocolRefs, protocol_t *, "__objc_protorefs");
+GETSECT(getLibobjcInitializers, Initializer, "__objc_init_func");
objc_image_info *
_getObjcImageInfo(const headerType *mhdr, size_t *outBytes)
{
- unsigned long byteCount = 0;
- objc_image_info *data = (objc_image_info *)
- getsectiondata(mhdr, SEG_DATA, "__objc_imageinfo", &byteCount);
- *outBytes = byteCount;
- return data;
+ return getDataSection<objc_image_info>(mhdr, "__objc_imageinfo",
+ outBytes, nil);
}
static const segmentType *
-getsegbynamefromheader(const headerType *head, const char *segname)
+getsegbynamefromheader(const headerType *mhdr, const char *segname)
{
- const segmentType *sgp;
- unsigned long i;
-
- sgp = (const segmentType *) (head + 1);
- for (i = 0; i < head->ncmds; i++){
- if (sgp->cmd == SEGMENT_CMD) {
- if (strncmp(sgp->segname, segname, sizeof(sgp->segname)) == 0) {
- return sgp;
- }
+ const segmentType *seg = (const segmentType *) (mhdr + 1);
+ for (unsigned long i = 0; i < mhdr->ncmds; i++){
+ if (seg->cmd == SEGMENT_CMD && segnameEquals(seg->segname, segname)) {
+ return seg;
}
- sgp = (const segmentType *)((char *)sgp + sgp->cmdsize);
+ seg = (const segmentType *)((char *)seg + seg->cmdsize);
}
- return NULL;
+ return nil;
}
-BOOL
-_hasObjcContents(const header_info *hi)
+// Look for an __objc* section other than __objc_imageinfo
+static bool segmentHasObjcContents(const segmentType *seg)
{
- // Look for a __DATA,__objc* section other than __DATA,__objc_imageinfo
- const segmentType *seg = getsegbynamefromheader(hi->mhdr, "__DATA");
- if (!seg) return NO;
-
- const sectionType *sect;
- uint32_t i;
- for (i = 0; i < seg->nsects; i++) {
- sect = ((const sectionType *)(seg+1))+i;
- if (0 == strncmp(sect->sectname, "__objc_", 7) &&
- 0 != strncmp(sect->sectname, "__objc_imageinfo", 16))
- {
- return YES;
+ if (seg) {
+ for (uint32_t i = 0; i < seg->nsects; i++) {
+ const sectionType *sect = ((const sectionType *)(seg+1))+i;
+ if (sectnameStartsWith(sect->sectname, "__objc_") &&
+ !sectnameEquals(sect->sectname, "__objc_imageinfo"))
+ {
+ return true;
+ }
}
}
- return NO;
+ return false;
+}
+
+// Look for an __objc* section other than __objc_imageinfo
+// in any of the three DATA segments. ObjC metadata may be placed in
+// __DATA, __DATA_CONST, or __DATA_DIRTY; all three must be checked.
+bool
+_hasObjcContents(const header_info *hi)
+{
+    const segmentType *data =
+        getsegbynamefromheader(hi->mhdr, "__DATA");
+    const segmentType *data_const =
+        getsegbynamefromheader(hi->mhdr, "__DATA_CONST");
+    const segmentType *data_dirty =
+        getsegbynamefromheader(hi->mhdr, "__DATA_DIRTY");
+
+    return segmentHasObjcContents(data)
+        || segmentHasObjcContents(data_const)
+        || segmentHasObjcContents(data_dirty);
+}
#endif
/* classInitLock protects CLS_INITIALIZED and CLS_INITIALIZING, and
* is signalled when any class is done initializing.
* Threads that are waiting for a class to finish initializing wait on this. */
-static monitor_t classInitLock = MONITOR_INITIALIZER;
+static monitor_t classInitLock;
/***********************************************************************
return nil;
} else {
list = (_objc_initializing_classes *)
- _calloc_internal(1, sizeof(_objc_initializing_classes));
+ calloc(1, sizeof(_objc_initializing_classes));
data->initializingClasses = list;
}
}
// Allow 4 simultaneous class inits on this thread before realloc.
list->classesAllocated = 4;
classes = (Class *)
- _calloc_internal(list->classesAllocated, sizeof(Class));
+ calloc(list->classesAllocated, sizeof(Class));
list->metaclasses = classes;
}
return list;
{
if (list != nil) {
if (list->metaclasses != nil) {
- _free_internal(list->metaclasses);
+ free(list->metaclasses);
}
- _free_internal(list);
+ free(list);
}
}
// class list is full - reallocate
list->classesAllocated = list->classesAllocated * 2 + 1;
list->metaclasses = (Class *)
- _realloc_internal(list->metaclasses,
+ realloc(list->metaclasses,
list->classesAllocated * sizeof(Class));
// zero out the new entries
list->metaclasses[i++] = cls;
{
PendingInitialize *pending;
- monitor_assert_locked(&classInitLock);
+ classInitLock.assertLocked();
assert(!supercls || supercls->isInitialized());
if (PrintInitializing) {
// mark this class as fully +initialized
cls->setInitialized();
- monitor_notifyAll(&classInitLock);
+ classInitLock.notifyAll();
_setThisThreadIsNotInitializingClass(cls);
// mark any subclasses that were merely waiting for this class
while (pending) {
PendingInitialize *next = pending->next;
if (pending->subclass) _finishInitializing(pending->subclass, cls);
- _free_internal(pending);
+ free(pending);
pending = next;
}
}
{
PendingInitialize *pending;
- monitor_assert_locked(&classInitLock);
+ classInitLock.assertLocked();
if (PrintInitializing) {
_objc_inform("INITIALIZE: %s waiting for superclass +[%s initialize]",
if (!pendingInitializeMap) {
pendingInitializeMap =
- NXCreateMapTableFromZone(NXPtrValueMapPrototype,
- 10, _objc_internal_zone());
+ NXCreateMapTable(NXPtrValueMapPrototype, 10);
// fixme pre-size this table for CF/NSObject +initialize
}
- pending = (PendingInitialize *)_malloc_internal(sizeof(*pending));
+ pending = (PendingInitialize *)malloc(sizeof(*pending));
pending->subclass = cls;
pending->next = (PendingInitialize *)
NXMapGet(pendingInitializeMap, supercls);
/***********************************************************************
* class_initialize. Send the '+initialize' message on demand to any
* uninitialized class. Force initialization of superclasses first.
-*
-* Called only from _class_lookupMethodAndLoadCache (or itself).
**********************************************************************/
void _class_initialize(Class cls)
{
}
// Try to atomically set CLS_INITIALIZING.
- monitor_enter(&classInitLock);
- if (!cls->isInitialized() && !cls->isInitializing()) {
- cls->setInitializing();
- reallyInitialize = YES;
+ {
+ monitor_locker_t lock(classInitLock);
+ if (!cls->isInitialized() && !cls->isInitializing()) {
+ cls->setInitializing();
+ reallyInitialize = YES;
+ }
}
- monitor_exit(&classInitLock);
if (reallyInitialize) {
// We successfully set the CLS_INITIALIZING bit. Initialize the class.
// the info bits and notify waiting threads.
// If not, update them later. (This can happen if this +initialize
// was itself triggered from inside a superclass +initialize.)
-
- monitor_enter(&classInitLock);
+ monitor_locker_t lock(classInitLock);
if (!supercls || supercls->isInitialized()) {
_finishInitializing(cls, supercls);
} else {
_finishInitializingAfter(cls, supercls);
}
- monitor_exit(&classInitLock);
return;
}
if (_thisThreadIsInitializingClass(cls)) {
return;
} else {
- monitor_enter(&classInitLock);
+ monitor_locker_t lock(classInitLock);
while (!cls->isInitialized()) {
- monitor_wait(&classInitLock);
+ classInitLock.wait();
}
- monitor_exit(&classInitLock);
return;
}
}
__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0);
#endif
+#ifndef OBJC_NO_GC
// GC startup callback from Foundation
OBJC_EXPORT malloc_zone_t *objc_collect_init(int (*callback)(void))
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_NA);
+#endif
// Plainly-implemented GC barriers. Rosetta used to use these.
OBJC_EXPORT id objc_assign_strongCast_generic(id value, id *dest)
__asm__("_objc_autorelease")
__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_5_0);
-// wraps objc_autorelease(obj) in a useful way when used with return values
+// Prepare a value at +1 for return through a +0 autoreleasing convention.
OBJC_EXPORT
id
objc_autoreleaseReturnValue(id obj)
__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_5_0);
-// wraps objc_autorelease(objc_retain(obj)) in a useful way when used with return values
+// Prepare a value at +0 for return through a +0 autoreleasing convention.
OBJC_EXPORT
id
objc_retainAutoreleaseReturnValue(id obj)
__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_5_0);
-// called ONLY by ARR by callers to undo the autorelease (if possible), otherwise objc_retain
+// Accept a value returned through a +0 autoreleasing convention for use at +1.
OBJC_EXPORT
id
objc_retainAutoreleasedReturnValue(id obj)
__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_5_0);
+// Accept a value returned through a +0 autoreleasing convention for use at +0.
+OBJC_EXPORT
+id
+objc_unsafeClaimAutoreleasedReturnValue(id obj)
+ __OSX_AVAILABLE_STARTING(__MAC_10_11, __IPHONE_9_0);
+
OBJC_EXPORT
void
objc_storeStrong(id *location, id obj)
OBJC_EXPORT
id
-objc_initWeak(id *addr, id val)
+objc_initWeak(id *location, id val)
__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_5_0);
+// Like objc_storeWeak, but stores nil if the new object is deallocating
+// or the new object's class does not support weak references.
+// Returns the value stored (either the new object or nil).
+OBJC_EXPORT
+id
+objc_storeWeakOrNil(id *location, id obj)
+ __OSX_AVAILABLE_STARTING(__MAC_10_11, __IPHONE_9_0);
+
+// Like objc_initWeak, but stores nil if the new object is deallocating
+// or the new object's class does not support weak references.
+// Returns the value stored (either the new object or nil).
+OBJC_EXPORT
+id
+objc_initWeakOrNil(id *location, id val)
+ __OSX_AVAILABLE_STARTING(__MAC_10_11, __IPHONE_9_0);
+
OBJC_EXPORT
void
-objc_destroyWeak(id *addr)
+objc_destroyWeak(id *location)
__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_5_0);
OBJC_EXPORT
* Allocates and returns a compressed string matching the given layout bitmap.
**********************************************************************/
static unsigned char *
-compress_layout(const uint8_t *bits, size_t bitmap_bits, BOOL weak)
+compress_layout(const uint8_t *bits, size_t bitmap_bits, bool weak)
{
- BOOL all_set = YES;
- BOOL none_set = YES;
+ bool all_set = YES;
+ bool none_set = YES;
unsigned char *result;
// overallocate a lot; reallocate at correct size later
unsigned char * const layout = (unsigned char *)
- _calloc_internal(bitmap_bits + 1, 1);
+ calloc(bitmap_bits + 1, 1);
unsigned char *l = layout;
size_t i = 0;
} else if (all_set && !weak) {
result = NULL; // NULL ivar layout means all-scanned
} else {
- result = (unsigned char *)_strdup_internal((char *)layout);
+ result = (unsigned char *)strdup((char *)layout);
}
- _free_internal(layout);
+ free(layout);
return result;
}
layout_bitmap
layout_bitmap_create(const unsigned char *layout_string,
size_t layoutStringInstanceSize,
- size_t instanceSize, BOOL weak)
+ size_t instanceSize, bool weak)
{
layout_bitmap result;
size_t words = instanceSize / sizeof(id);
result.weak = weak;
result.bitCount = words;
result.bitsAllocated = words;
- result.bits = (uint8_t *)_calloc_internal((words+7)/8, 1);
+ result.bits = (uint8_t *)calloc((words+7)/8, 1);
if (!layout_string) {
if (!weak) {
* The returned bitmap must be freed with layout_bitmap_free().
**********************************************************************/
layout_bitmap
-layout_bitmap_create_empty(size_t instanceSize, BOOL weak)
+layout_bitmap_create_empty(size_t instanceSize, bool weak)
{
layout_bitmap result;
size_t words = instanceSize / sizeof(id);
result.weak = weak;
result.bitCount = words;
result.bitsAllocated = words;
- result.bits = (uint8_t *)_calloc_internal((words+7)/8, 1);
+ result.bits = (uint8_t *)calloc((words+7)/8, 1);
return result;
}
void
layout_bitmap_free(layout_bitmap bits)
{
- if (bits.bits) _free_internal(bits.bits);
+ if (bits.bits) free(bits.bits);
}
const unsigned char *
const unsigned char *result =
compress_layout(bits.bits, bits.bitCount, bits.weak);
-#ifndef NDEBUG
+#if DEBUG
// paranoia: cycle to bitmap and back to string again, and compare
layout_bitmap check = layout_bitmap_create(result, bits.bitCount*sizeof(id),
bits.bitCount*sizeof(id), bits.weak);
size_t newAllocated = bits->bitsAllocated * 2;
if (newAllocated < newCount) newAllocated = newCount;
bits->bits = (uint8_t *)
- _realloc_internal(bits->bits, (newAllocated+7) / 8);
+ realloc(bits->bits, (newAllocated+7) / 8);
bits->bitsAllocated = newAllocated;
}
assert(bits->bitsAllocated >= bits->bitCount);
* dst must be at least as long as src.
* Returns YES if any of dst's bits were changed.
**********************************************************************/
-BOOL
+bool
layout_bitmap_splat(layout_bitmap dst, layout_bitmap src,
size_t oldSrcInstanceSize)
{
- BOOL changed;
+ bool changed;
size_t oldSrcBitCount;
size_t bit;
* dst must be at least as long as src.
* Returns YES if any of dst's bits were changed.
**********************************************************************/
-BOOL
+bool
layout_bitmap_or(layout_bitmap dst, layout_bitmap src, const char *msg)
{
- BOOL changed = NO;
+ bool changed = NO;
size_t bit;
if (dst.bitCount < src.bitCount) {
* dst must be at least as long as src.
* Returns YES if any of dst's bits were changed.
**********************************************************************/
-BOOL
+bool
layout_bitmap_clear(layout_bitmap dst, layout_bitmap src, const char *msg)
{
- BOOL changed = NO;
+ bool changed = NO;
size_t bit;
if (dst.bitCount < src.bitCount) {
*
**********************************************************************/
static char *scan_ivar_type_for_layout(char *type, long offset, long bits_size, unsigned char *bits, long *next_offset);
-static char *scan_basic_ivar_type(char *type, long *size, long *alignment, BOOL *is_reference) {
+static char *scan_basic_ivar_type(char *type, long *size, long *alignment, bool *is_reference) {
// assume it is a non-reference type
*is_reference = NO;
static char *scan_ivar_type_for_layout(char *type, long offset, long bits_size, unsigned char *bits, long *next_offset) {
long size; // size of a basic type
long alignment; // alignment of the basic type
- BOOL is_reference; // true if the type indicates a reference to a garbage collected object
+ bool is_reference; // true if the type indicates a reference to a garbage collected object
// get the first character
char ch = *type;
{
IMP method;
- recursive_mutex_assert_locked(&loadMethodLock);
+ loadMethodLock.assertLocked();
method = cls->getLoadMethod();
if (!method) return; // Don't bother if cls has no +load method
if (loadable_classes_used == loadable_classes_allocated) {
loadable_classes_allocated = loadable_classes_allocated*2 + 16;
loadable_classes = (struct loadable_class *)
- _realloc_internal(loadable_classes,
+ realloc(loadable_classes,
loadable_classes_allocated *
sizeof(struct loadable_class));
}
{
IMP method;
- recursive_mutex_assert_locked(&loadMethodLock);
+ loadMethodLock.assertLocked();
method = _category_getLoadMethod(cat);
if (loadable_categories_used == loadable_categories_allocated) {
loadable_categories_allocated = loadable_categories_allocated*2 + 16;
loadable_categories = (struct loadable_category *)
- _realloc_internal(loadable_categories,
+ realloc(loadable_categories,
loadable_categories_allocated *
sizeof(struct loadable_category));
}
**********************************************************************/
void remove_class_from_loadable_list(Class cls)
{
- recursive_mutex_assert_locked(&loadMethodLock);
+ loadMethodLock.assertLocked();
if (loadable_classes) {
int i;
**********************************************************************/
void remove_category_from_loadable_list(Category cat)
{
- recursive_mutex_assert_locked(&loadMethodLock);
+ loadMethodLock.assertLocked();
if (loadable_categories) {
int i;
}
// Destroy the detached list.
- if (classes) _free_internal(classes);
+ if (classes) free(classes);
}
*
* Called only by call_load_methods().
**********************************************************************/
-static BOOL call_category_loads(void)
+static bool call_category_loads(void)
{
int i, shift;
- BOOL new_categories_added = NO;
+ bool new_categories_added = NO;
// Detach current loadable list.
struct loadable_category *cats = loadable_categories;
if (used == allocated) {
allocated = allocated*2 + 16;
cats = (struct loadable_category *)
- _realloc_internal(cats, allocated *
+ realloc(cats, allocated *
sizeof(struct loadable_category));
}
cats[used++] = loadable_categories[i];
}
// Destroy the new list.
- if (loadable_categories) _free_internal(loadable_categories);
+ if (loadable_categories) free(loadable_categories);
// Reattach the (now augmented) detached list.
// But if there's nothing left to load, destroy the list.
loadable_categories_used = used;
loadable_categories_allocated = allocated;
} else {
- if (cats) _free_internal(cats);
+ if (cats) free(cats);
loadable_categories = nil;
loadable_categories_used = 0;
loadable_categories_allocated = 0;
**********************************************************************/
void call_load_methods(void)
{
- static BOOL loading = NO;
- BOOL more_categories;
+ static bool loading = NO;
+ bool more_categories;
- recursive_mutex_assert_locked(&loadMethodLock);
+ loadMethodLock.assertLocked();
// Re-entrant calls do nothing; the outermost call will finish the job.
if (loading) return;
--- /dev/null
+/*
+ * Copyright (c) 2015 Apple Inc. All Rights Reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+extern void lockdebug_mutex_lock(mutex_tt<true> *lock);
+extern void lockdebug_mutex_try_lock_success(mutex_tt<true> *lock);
+extern void lockdebug_mutex_unlock(mutex_tt<true> *lock);
+extern void lockdebug_mutex_assert_locked(mutex_tt<true> *lock);
+extern void lockdebug_mutex_assert_unlocked(mutex_tt<true> *lock);
+
+static inline void lockdebug_mutex_lock(mutex_tt<false> *lock) { }
+static inline void lockdebug_mutex_try_lock_success(mutex_tt<false> *lock) { }
+static inline void lockdebug_mutex_unlock(mutex_tt<false> *lock) { }
+static inline void lockdebug_mutex_assert_locked(mutex_tt<false> *lock) { }
+static inline void lockdebug_mutex_assert_unlocked(mutex_tt<false> *lock) { }
+
+
+extern void lockdebug_monitor_enter(monitor_tt<true> *lock);
+extern void lockdebug_monitor_leave(monitor_tt<true> *lock);
+extern void lockdebug_monitor_wait(monitor_tt<true> *lock);
+extern void lockdebug_monitor_assert_locked(monitor_tt<true> *lock);
+extern void lockdebug_monitor_assert_unlocked(monitor_tt<true> *lock);
+
+static inline void lockdebug_monitor_enter(monitor_tt<false> *lock) { }
+static inline void lockdebug_monitor_leave(monitor_tt<false> *lock) { }
+static inline void lockdebug_monitor_wait(monitor_tt<false> *lock) { }
+static inline void lockdebug_monitor_assert_locked(monitor_tt<false> *lock) { }
+static inline void lockdebug_monitor_assert_unlocked(monitor_tt<false> *lock) {}
+
+
+extern void
+lockdebug_recursive_mutex_lock(recursive_mutex_tt<true> *lock);
+extern void
+lockdebug_recursive_mutex_unlock(recursive_mutex_tt<true> *lock);
+extern void
+lockdebug_recursive_mutex_assert_locked(recursive_mutex_tt<true> *lock);
+extern void
+lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_tt<true> *lock);
+
+static inline void
+lockdebug_recursive_mutex_lock(recursive_mutex_tt<false> *lock) { }
+static inline void
+lockdebug_recursive_mutex_unlock(recursive_mutex_tt<false> *lock) { }
+static inline void
+lockdebug_recursive_mutex_assert_locked(recursive_mutex_tt<false> *lock) { }
+static inline void
+lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_tt<false> *lock) { }
+
+
+extern void lockdebug_rwlock_read(rwlock_tt<true> *lock);
+extern void lockdebug_rwlock_try_read_success(rwlock_tt<true> *lock);
+extern void lockdebug_rwlock_unlock_read(rwlock_tt<true> *lock);
+extern void lockdebug_rwlock_write(rwlock_tt<true> *lock);
+extern void lockdebug_rwlock_try_write_success(rwlock_tt<true> *lock);
+extern void lockdebug_rwlock_unlock_write(rwlock_tt<true> *lock);
+extern void lockdebug_rwlock_assert_reading(rwlock_tt<true> *lock);
+extern void lockdebug_rwlock_assert_writing(rwlock_tt<true> *lock);
+extern void lockdebug_rwlock_assert_locked(rwlock_tt<true> *lock);
+extern void lockdebug_rwlock_assert_unlocked(rwlock_tt<true> *lock);
+
+static inline void lockdebug_rwlock_read(rwlock_tt<false> *) { }
+static inline void lockdebug_rwlock_try_read_success(rwlock_tt<false> *) { }
+static inline void lockdebug_rwlock_unlock_read(rwlock_tt<false> *) { }
+static inline void lockdebug_rwlock_write(rwlock_tt<false> *) { }
+static inline void lockdebug_rwlock_try_write_success(rwlock_tt<false> *) { }
+static inline void lockdebug_rwlock_unlock_write(rwlock_tt<false> *) { }
+static inline void lockdebug_rwlock_assert_reading(rwlock_tt<false> *) { }
+static inline void lockdebug_rwlock_assert_writing(rwlock_tt<false> *) { }
+static inline void lockdebug_rwlock_assert_locked(rwlock_tt<false> *) { }
+static inline void lockdebug_rwlock_assert_unlocked(rwlock_tt<false> *) { }
#include "objc-private.h"
-#if !defined(NDEBUG) && !TARGET_OS_WIN32
+#if DEBUG && !TARGET_OS_WIN32
/***********************************************************************
* Recording - per-thread list of mutexes and monitors held
{
_objc_lock_list *locks = (_objc_lock_list *)value;
// fixme complain about any still-held locks?
- if (locks) _free_internal(locks);
+ if (locks) free(locks);
}
static struct _objc_lock_list *
if (!create) {
return NULL;
} else {
- locks = (_objc_lock_list *)_calloc_internal(1, sizeof(_objc_lock_list) + sizeof(lockcount) * 16);
+ locks = (_objc_lock_list *)calloc(1, sizeof(_objc_lock_list) + sizeof(lockcount) * 16);
locks->allocated = 16;
locks->used = 0;
tls_set(lock_tls, locks);
return locks;
} else {
_objc_lock_list *oldlocks = locks;
- locks = (_objc_lock_list *)_calloc_internal(1, sizeof(_objc_lock_list) + 2 * oldlocks->used * sizeof(lockcount));
+ locks = (_objc_lock_list *)calloc(1, sizeof(_objc_lock_list) + 2 * oldlocks->used * sizeof(lockcount));
locks->used = oldlocks->used;
locks->allocated = oldlocks->used * 2;
memcpy(locks->list, oldlocks->list, locks->used * sizeof(lockcount));
tls_set(lock_tls, locks);
- _free_internal(oldlocks);
+ free(oldlocks);
}
}
* Mutex checking
**********************************************************************/
-int
-_mutex_lock_debug(mutex_t *lock, const char *name)
+void
+lockdebug_mutex_lock(mutex_t *lock)
{
_objc_lock_list *locks = getLocks(YES);
if (hasLock(locks, lock, MUTEX)) {
- _objc_fatal("deadlock: relocking mutex %s\n", name+1);
+ _objc_fatal("deadlock: relocking mutex");
}
setLock(locks, lock, MUTEX);
-
- return _mutex_lock_nodebug(lock);
}
-int
-_mutex_try_lock_debug(mutex_t *lock, const char *name)
+// try-lock success is the only case with lockdebug effects.
+// try-lock when already locked is OK (will fail)
+// try-lock failure does nothing.
+void
+lockdebug_mutex_try_lock_success(mutex_t *lock)
{
_objc_lock_list *locks = getLocks(YES);
-
- // attempting to relock in try_lock is OK
- int result = _mutex_try_lock_nodebug(lock);
-
- if (result) {
- setLock(locks, lock, MUTEX);
- }
-
- return result;
+ setLock(locks, lock, MUTEX);
}
-int
-_mutex_unlock_debug(mutex_t *lock, const char *name)
+void
+lockdebug_mutex_unlock(mutex_t *lock)
{
_objc_lock_list *locks = getLocks(NO);
if (!hasLock(locks, lock, MUTEX)) {
- _objc_fatal("unlocking unowned mutex %s\n", name+1);
+ _objc_fatal("unlocking unowned mutex");
}
clearLock(locks, lock, MUTEX);
-
- return _mutex_unlock_nodebug(lock);
}
+
void
-_mutex_assert_locked_debug(mutex_t *lock, const char *name)
+lockdebug_mutex_assert_locked(mutex_t *lock)
{
_objc_lock_list *locks = getLocks(NO);
if (!hasLock(locks, lock, MUTEX)) {
- _objc_fatal("mutex %s incorrectly not held\n",name+1);
+ _objc_fatal("mutex incorrectly not locked");
}
}
-
void
-_mutex_assert_unlocked_debug(mutex_t *lock, const char *name)
+lockdebug_mutex_assert_unlocked(mutex_t *lock)
{
_objc_lock_list *locks = getLocks(NO);
if (hasLock(locks, lock, MUTEX)) {
- _objc_fatal("mutex %s incorrectly held\n", name+1);
+ _objc_fatal("mutex incorrectly locked");
}
}
* Recursive mutex checking
**********************************************************************/
-int
-_recursive_mutex_lock_debug(recursive_mutex_t *lock, const char *name)
+void
+lockdebug_recursive_mutex_lock(recursive_mutex_tt<true> *lock)
{
_objc_lock_list *locks = getLocks(YES);
-
setLock(locks, lock, RECURSIVE);
-
- return _recursive_mutex_lock_nodebug(lock);
}
-int
-_recursive_mutex_try_lock_debug(recursive_mutex_t *lock, const char *name)
-{
- _objc_lock_list *locks = getLocks(YES);
-
- int result = _recursive_mutex_try_lock_nodebug(lock);
-
- if (result) {
- setLock(locks, lock, RECURSIVE);
- }
-
- return result;
-}
-
-int
-_recursive_mutex_unlock_debug(recursive_mutex_t *lock, const char *name)
+void
+lockdebug_recursive_mutex_unlock(recursive_mutex_tt<true> *lock)
{
_objc_lock_list *locks = getLocks(NO);
if (!hasLock(locks, lock, RECURSIVE)) {
- _objc_fatal("unlocking unowned recursive mutex %s\n", name+1);
+ _objc_fatal("unlocking unowned recursive mutex");
}
clearLock(locks, lock, RECURSIVE);
-
- return _recursive_mutex_unlock_nodebug(lock);
}
+
void
-_recursive_mutex_assert_locked_debug(recursive_mutex_t *lock, const char *name)
+lockdebug_recursive_mutex_assert_locked(recursive_mutex_tt<true> *lock)
{
_objc_lock_list *locks = getLocks(NO);
if (!hasLock(locks, lock, RECURSIVE)) {
- _objc_fatal("recursive mutex %s incorrectly not held\n",name+1);
+ _objc_fatal("recursive mutex incorrectly not locked");
}
}
-
void
-_recursive_mutex_assert_unlocked_debug(recursive_mutex_t *lock, const char *name)
+lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_tt<true> *lock)
{
_objc_lock_list *locks = getLocks(NO);
if (hasLock(locks, lock, RECURSIVE)) {
- _objc_fatal("recursive mutex %s incorrectly held\n", name+1);
+ _objc_fatal("recursive mutex incorrectly locked");
}
}
* Monitor checking
**********************************************************************/
-int
-_monitor_enter_debug(monitor_t *lock, const char *name)
+void
+lockdebug_monitor_enter(monitor_t *lock)
{
_objc_lock_list *locks = getLocks(YES);
if (hasLock(locks, lock, MONITOR)) {
- _objc_fatal("deadlock: relocking monitor %s\n", name+1);
+ _objc_fatal("deadlock: relocking monitor");
}
setLock(locks, lock, MONITOR);
-
- return _monitor_enter_nodebug(lock);
}
-int
-_monitor_exit_debug(monitor_t *lock, const char *name)
+void
+lockdebug_monitor_leave(monitor_t *lock)
{
_objc_lock_list *locks = getLocks(NO);
if (!hasLock(locks, lock, MONITOR)) {
- _objc_fatal("unlocking unowned monitor%s\n", name+1);
+ _objc_fatal("unlocking unowned monitor");
}
clearLock(locks, lock, MONITOR);
-
- return _monitor_exit_nodebug(lock);
}
-int
-_monitor_wait_debug(monitor_t *lock, const char *name)
+void
+lockdebug_monitor_wait(monitor_t *lock)
{
_objc_lock_list *locks = getLocks(NO);
if (!hasLock(locks, lock, MONITOR)) {
- _objc_fatal("waiting in unowned monitor%s\n", name+1);
+ _objc_fatal("waiting in unowned monitor");
}
-
- return _monitor_wait_nodebug(lock);
}
+
void
-_monitor_assert_locked_debug(monitor_t *lock, const char *name)
+lockdebug_monitor_assert_locked(monitor_t *lock)
{
_objc_lock_list *locks = getLocks(NO);
if (!hasLock(locks, lock, MONITOR)) {
- _objc_fatal("monitor %s incorrectly not held\n",name+1);
+ _objc_fatal("monitor incorrectly not locked");
}
}
void
-_monitor_assert_unlocked_debug(monitor_t *lock, const char *name)
+lockdebug_monitor_assert_unlocked(monitor_t *lock)
{
_objc_lock_list *locks = getLocks(NO);
if (hasLock(locks, lock, MONITOR)) {
- _objc_fatal("monitor %s incorrectly held\n", name+1);
+ _objc_fatal("monitor incorrectly held");
}
}
* rwlock checking
**********************************************************************/
-void
-_rwlock_read_debug(rwlock_t *lock, const char *name)
+void
+lockdebug_rwlock_read(rwlock_tt<true> *lock)
{
_objc_lock_list *locks = getLocks(YES);
if (hasLock(locks, lock, RDLOCK)) {
// Recursive rwlock read is bad (may deadlock vs pending writer)
- _objc_fatal("recursive rwlock read %s\n", name+1);
+ _objc_fatal("recursive rwlock read");
}
if (hasLock(locks, lock, WRLOCK)) {
- _objc_fatal("deadlock: read after write for rwlock %s\n", name+1);
+ _objc_fatal("deadlock: read after write for rwlock");
}
setLock(locks, lock, RDLOCK);
-
- _rwlock_read_nodebug(lock);
}
-int
-_rwlock_try_read_debug(rwlock_t *lock, const char *name)
+// try-read success is the only case with lockdebug effects.
+// try-read when already reading is OK (won't deadlock)
+// try-read when already writing is OK (will fail)
+// try-read failure does nothing.
+void
+lockdebug_rwlock_try_read_success(rwlock_tt<true> *lock)
{
_objc_lock_list *locks = getLocks(YES);
-
- // try-read when already reading is OK (won't deadlock against writer)
- // try-read when already writing is OK (will fail)
- int result = _rwlock_try_read_nodebug(lock);
-
- if (result) {
- setLock(locks, lock, RDLOCK);
- }
-
- return result;
+ setLock(locks, lock, RDLOCK);
}
void
-_rwlock_unlock_read_debug(rwlock_t *lock, const char *name)
+lockdebug_rwlock_unlock_read(rwlock_tt<true> *lock)
{
_objc_lock_list *locks = getLocks(NO);
if (!hasLock(locks, lock, RDLOCK)) {
- _objc_fatal("un-reading unowned rwlock %s\n", name+1);
+ _objc_fatal("un-reading unowned rwlock");
}
clearLock(locks, lock, RDLOCK);
-
- _rwlock_unlock_read_nodebug(lock);
}
-void
-_rwlock_write_debug(rwlock_t *lock, const char *name)
+
+void
+lockdebug_rwlock_write(rwlock_tt<true> *lock)
{
_objc_lock_list *locks = getLocks(YES);
if (hasLock(locks, lock, RDLOCK)) {
// Lock promotion not allowed (may deadlock)
- _objc_fatal("deadlock: write after read for rwlock %s\n", name+1);
+ _objc_fatal("deadlock: write after read for rwlock");
}
if (hasLock(locks, lock, WRLOCK)) {
- _objc_fatal("recursive rwlock write %s\n", name+1);
+ _objc_fatal("recursive rwlock write");
}
setLock(locks, lock, WRLOCK);
-
- _rwlock_write_nodebug(lock);
}
-
-int
-_rwlock_try_write_debug(rwlock_t *lock, const char *name)
+// try-write success is the only case with lockdebug effects.
+// try-write when already reading is OK (will fail)
+// try-write when already writing is OK (will fail)
+// try-write failure does nothing.
+void
+lockdebug_rwlock_try_write_success(rwlock_tt<true> *lock)
{
_objc_lock_list *locks = getLocks(YES);
-
- // try-write when already reading is OK (will fail)
- // try-write when already writing is OK (will fail)
- int result = _rwlock_try_write_nodebug(lock);
-
- if (result) {
- setLock(locks, lock, WRLOCK);
- }
-
- return result;
+ setLock(locks, lock, WRLOCK);
}
void
-_rwlock_unlock_write_debug(rwlock_t *lock, const char *name)
+lockdebug_rwlock_unlock_write(rwlock_tt<true> *lock)
{
_objc_lock_list *locks = getLocks(NO);
if (!hasLock(locks, lock, WRLOCK)) {
- _objc_fatal("un-writing unowned rwlock %s\n", name+1);
+ _objc_fatal("un-writing unowned rwlock");
}
clearLock(locks, lock, WRLOCK);
-
- _rwlock_unlock_write_nodebug(lock);
}
void
-_rwlock_assert_reading_debug(rwlock_t *lock, const char *name)
+lockdebug_rwlock_assert_reading(rwlock_tt<true> *lock)
{
_objc_lock_list *locks = getLocks(NO);
if (!hasLock(locks, lock, RDLOCK)) {
- _objc_fatal("rwlock %s incorrectly not reading\n", name+1);
+ _objc_fatal("rwlock incorrectly not reading");
}
}
void
-_rwlock_assert_writing_debug(rwlock_t *lock, const char *name)
+lockdebug_rwlock_assert_writing(rwlock_tt<true> *lock)
{
_objc_lock_list *locks = getLocks(NO);
if (!hasLock(locks, lock, WRLOCK)) {
- _objc_fatal("rwlock %s incorrectly not writing\n", name+1);
+ _objc_fatal("rwlock incorrectly not writing");
}
}
void
-_rwlock_assert_locked_debug(rwlock_t *lock, const char *name)
+lockdebug_rwlock_assert_locked(rwlock_tt<true> *lock)
{
_objc_lock_list *locks = getLocks(NO);
if (!hasLock(locks, lock, RDLOCK) && !hasLock(locks, lock, WRLOCK)) {
- _objc_fatal("rwlock %s incorrectly neither reading nor writing\n",
- name+1);
+ _objc_fatal("rwlock incorrectly neither reading nor writing");
}
}
void
-_rwlock_assert_unlocked_debug(rwlock_t *lock, const char *name)
+lockdebug_rwlock_assert_unlocked(rwlock_tt<true> *lock)
{
_objc_lock_list *locks = getLocks(NO);
if (hasLock(locks, lock, RDLOCK) || hasLock(locks, lock, WRLOCK)) {
- _objc_fatal("rwlock %s incorrectly not unlocked\n", name+1);
+ _objc_fatal("rwlock incorrectly not unlocked");
}
}
#include "objc-private.h"
-static ALWAYS_INLINE bool fastAutoreleaseForReturn(id obj);
-static ALWAYS_INLINE bool fastRetainFromReturn(id obj);
+
+enum ReturnDisposition : bool {
+ ReturnAtPlus0 = false, ReturnAtPlus1 = true
+};
+
+static ALWAYS_INLINE
+bool prepareOptimizedReturn(ReturnDisposition disposition);
#if SUPPORT_TAGGED_POINTERS
inline Class
objc_object::changeIsa(Class newCls)
{
+    // This is almost always true but there are
+    // enough edge cases that we can't assert it.
+ // assert(newCls->isFuture() ||
+ // newCls->isInitializing() || newCls->isInitialized());
+
assert(!isTaggedPointer());
isa_t oldisa;
transcribeToSideTable = false;
oldisa = LoadExclusive(&isa.bits);
if ((oldisa.bits == 0 || oldisa.indexed) &&
- newCls->canAllocIndexed())
+ !newCls->isFuture() && newCls->canAllocIndexed())
{
// 0 -> indexed
// indexed -> indexed
objc_object::clearDeallocating()
{
if (!isa.indexed) {
+ // Slow path for raw pointer isa.
sidetable_clearDeallocating();
}
- else if (isa.weakly_referenced) {
- clearDeallocating_weak();
+ else if (isa.weakly_referenced || isa.has_sidetable_rc) {
+ // Slow path for non-pointer isa with weak refs and/or side table data.
+ clearDeallocating_slow();
}
assert(!sidetable_present());
if (isa.indexed &&
!isa.weakly_referenced &&
!isa.has_assoc &&
- !isa.has_cxx_dtor)
+ !isa.has_cxx_dtor &&
+ !isa.has_sidetable_rc)
{
assert(!sidetable_present());
free(this);
if (!handleUnderflow) {
return rootRelease_underflow(performDealloc);
}
- // Add some retain counts inline and prepare
- // to remove them from the side table.
- if (!sideTableLocked) sidetable_lock();
- sideTableLocked = true;
- newisa.extra_rc = RC_HALF - 1; // redo the decrement
- if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry;
-
- // Remove the retain counts from the side table.
- bool zeroed = sidetable_subExtraRC_nolock(RC_HALF);
- if (zeroed) {
- // Side table count is now zero. Clear the marker bit.
- do {
- oldisa = LoadExclusive(&isa.bits);
- newisa.has_sidetable_rc = false;
- } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits));
+
+ // Transfer retain count from side table to inline storage.
+
+ if (!sideTableLocked) {
+ sidetable_lock();
+ sideTableLocked = true;
+ if (!isa.indexed) {
+ // Lost a race vs the indexed -> not indexed transition
+ // before we got the side table lock. Stop now to avoid
+ // breaking the safety checks in the sidetable ExtraRC code.
+ goto unindexed;
+ }
}
- // Decrement successful after borrowing from side table.
- // This decrement cannot be the deallocating decrement - the side
- // table lock and has_sidetable_rc bit ensure that if everyone
- // else tried to -release while we worked, the last one would block.
- sidetable_unlock();
- return false;
+ // Try to remove some retain counts from the side table.
+ size_t borrowed = sidetable_subExtraRC_nolock(RC_HALF);
+
+ // To avoid races, has_sidetable_rc must remain set
+ // even if the side table count is now zero.
+
+ if (borrowed > 0) {
+ // Side table retain count decreased.
+ // Try to add them to the inline count.
+ newisa.extra_rc = borrowed - 1; // redo the original decrement too
+ bool stored = StoreExclusive(&isa.bits, oldisa.bits, newisa.bits);
+ if (!stored) {
+ // Inline update failed.
+ // Try it again right now. This prevents livelock on LL/SC
+ // architectures where the side table access itself may have
+ // dropped the reservation.
+ isa_t oldisa2 = LoadExclusive(&isa.bits);
+ isa_t newisa2 = oldisa2;
+ if (newisa2.indexed) {
+ uintptr_t overflow;
+ newisa2.bits =
+ addc(newisa2.bits, RC_ONE * (borrowed-1), 0, &overflow);
+ if (!overflow) {
+ stored = StoreReleaseExclusive(&isa.bits, oldisa2.bits,
+ newisa2.bits);
+ }
+ }
+ }
+
+ if (!stored) {
+ // Inline update failed.
+ // Put the retains back in the side table.
+ sidetable_addExtraRC_nolock(borrowed);
+ goto retry;
+ }
+
+ // Decrement successful after borrowing from side table.
+ // This decrement cannot be the deallocating decrement - the side
+ // table lock and has_sidetable_rc bit ensure that if everyone
+ // else tried to -release while we worked, the last one would block.
+ sidetable_unlock();
+ return false;
+ }
+ else {
+ // Side table is empty after all. Fall-through to the dealloc path.
+ }
}
// Really deallocate.
assert(!UseGC);
if (isTaggedPointer()) return (id)this;
- if (fastAutoreleaseForReturn((id)this)) return (id)this;
+ if (prepareOptimizedReturn(ReturnAtPlus1)) return (id)this;
return rootAutorelease2();
}
inline Class
objc_object::changeIsa(Class cls)
{
+ // This is almost always true but there are
+ // enough edge cases that we can't assert it.
+ // assert(cls->isFuture() ||
+ // cls->isInitializing() || cls->isInitialized());
+
assert(!isTaggedPointer());
isa_t oldisa, newisa;
assert(!UseGC);
if (isTaggedPointer()) return (id)this;
- if (fastAutoreleaseForReturn((id)this)) return (id)this;
+ if (prepareOptimizedReturn(ReturnAtPlus1)) return (id)this;
return rootAutorelease2();
}
#if SUPPORT_RETURN_AUTORELEASE
/***********************************************************************
- Fast handling of returned autoreleased values.
+ Fast handling of return through Cocoa's +0 autoreleasing convention.
The caller and callee cooperate to keep the returned object
- out of the autorelease pool.
-
- Caller:
- ret = callee();
- objc_retainAutoreleasedReturnValue(ret);
- // use ret here
-
- Callee:
- // compute ret
- [ret retain];
- return objc_autoreleaseReturnValue(ret);
-
- objc_autoreleaseReturnValue() examines the caller's instructions following
- the return. If the caller's instructions immediately call
- objc_autoreleaseReturnValue, then the callee omits the -autorelease and saves
- the result in thread-local storage. If the caller does not look like it
- cooperates, then the callee calls -autorelease as usual.
-
- objc_autoreleaseReturnValue checks if the returned value is the same as the
- one in thread-local storage. If it is, the value is used directly. If not,
- the value is assumed to be truly autoreleased and is retained again. In
- either case, the caller now has a retained reference to the value.
-
- Tagged pointer objects do participate in the fast autorelease scheme,
+ out of the autorelease pool and eliminate redundant retain/release pairs.
+
+ An optimized callee looks at the caller's instructions following the
+ return. If the caller's instructions are also optimized then the callee
+ skips all retain count operations: no autorelease, no retain/autorelease.
+ Instead it saves the result's current retain count (+0 or +1) in
+ thread-local storage. If the caller does not look optimized then
+ the callee performs autorelease or retain/autorelease as usual.
+
+ An optimized caller looks at the thread-local storage. If the result
+ is set then it performs any retain or release needed to change the
+ result from the retain count left by the callee to the retain count
+ desired by the caller. Otherwise the caller assumes the result is
+ currently at +0 from an unoptimized callee and performs any retain
+ needed for that case.
+
+ There are two optimized callees:
+ objc_autoreleaseReturnValue
+ result is currently +1. The unoptimized path autoreleases it.
+ objc_retainAutoreleaseReturnValue
+ result is currently +0. The unoptimized path retains and autoreleases it.
+
+ There are two optimized callers:
+ objc_retainAutoreleasedReturnValue
+ caller wants the value at +1. The unoptimized path retains it.
+ objc_unsafeClaimAutoreleasedReturnValue
+ caller wants the value at +0 unsafely. The unoptimized path does nothing.
+
+ Example:
+
+ Callee:
+ // compute ret at +1
+ return objc_autoreleaseReturnValue(ret);
+
+ Caller:
+ ret = callee();
+ ret = objc_retainAutoreleasedReturnValue(ret);
+ // use ret at +1 here
+
+ Callee sees the optimized caller, sets TLS, and leaves the result at +1.
+ Caller sees the TLS, clears it, and accepts the result at +1 as-is.
+
+ The callee's recognition of the optimized caller is architecture-dependent.
+ i386 and x86_64: Callee looks for `mov rax, rdi` followed by a call or
+ jump instruction to objc_retainAutoreleasedReturnValue or
+ objc_unsafeClaimAutoreleasedReturnValue.
+ armv7: Callee looks for a magic nop `mov r7, r7` (frame pointer register).
+ arm64: Callee looks for a magic nop `mov x29, x29` (frame pointer register).
+
+ Tagged pointer objects do participate in the optimized return scheme,
because it saves message sends. They are not entered in the autorelease
- pool in the slow case.
+ pool in the unoptimized case.
**********************************************************************/
# if __x86_64__
static ALWAYS_INLINE bool
-callerAcceptsFastAutorelease(const void * const ra0)
+callerAcceptsOptimizedReturn(const void * const ra0)
{
const uint8_t *ra1 = (const uint8_t *)ra0;
const uint16_t *ra2;
#endif
ra1 += 6l + (long)*(const int32_t *)(ra1 + 2);
sym = (const void **)ra1;
- if (*sym != objc_retainAutoreleasedReturnValue)
+ if (*sym != objc_retainAutoreleasedReturnValue &&
+ *sym != objc_unsafeClaimAutoreleasedReturnValue)
{
return false;
}
# elif __arm__
static ALWAYS_INLINE bool
-callerAcceptsFastAutorelease(const void *ra)
+callerAcceptsOptimizedReturn(const void *ra)
{
// if the low bit is set, we're returning to thumb mode
if ((uintptr_t)ra & 1) {
# elif __arm64__
static ALWAYS_INLINE bool
-callerAcceptsFastAutorelease(const void *ra)
+callerAcceptsOptimizedReturn(const void *ra)
{
// fd 03 1d aa mov fp, fp
if (*(uint32_t *)ra == 0xaa1d03fd) {
# elif __i386__ && TARGET_IPHONE_SIMULATOR
static inline bool
-callerAcceptsFastAutorelease(const void *ra)
+callerAcceptsOptimizedReturn(const void *ra)
{
return false;
}
#warning unknown architecture
static ALWAYS_INLINE bool
-callerAcceptsFastAutorelease(const void *ra)
+callerAcceptsOptimizedReturn(const void *ra)
{
return false;
}
# endif
-static ALWAYS_INLINE
-bool fastAutoreleaseForReturn(id obj)
+static ALWAYS_INLINE ReturnDisposition
+getReturnDisposition()
{
- assert(tls_get_direct(AUTORELEASE_POOL_RECLAIM_KEY) == nil);
+ return (ReturnDisposition)(uintptr_t)tls_get_direct(RETURN_DISPOSITION_KEY);
+}
- if (callerAcceptsFastAutorelease(__builtin_return_address(0))) {
- tls_set_direct(AUTORELEASE_POOL_RECLAIM_KEY, obj);
- return true;
- }
- return false;
+static ALWAYS_INLINE void
+setReturnDisposition(ReturnDisposition disposition)
+{
+ tls_set_direct(RETURN_DISPOSITION_KEY, (void*)(uintptr_t)disposition);
}
-static ALWAYS_INLINE
-bool fastRetainFromReturn(id obj)
+// Try to prepare for optimized return with the given disposition (+0 or +1).
+// Returns true if the optimized path is successful.
+// Otherwise the return value must be retained and/or autoreleased as usual.
+static ALWAYS_INLINE bool
+prepareOptimizedReturn(ReturnDisposition disposition)
{
- if (obj == tls_get_direct(AUTORELEASE_POOL_RECLAIM_KEY)) {
- tls_set_direct(AUTORELEASE_POOL_RECLAIM_KEY, 0);
+ assert(getReturnDisposition() == ReturnAtPlus0);
+
+ if (callerAcceptsOptimizedReturn(__builtin_return_address(0))) {
+ if (disposition) setReturnDisposition(disposition);
return true;
}
}
+// Try to accept an optimized return.
+// Returns the disposition of the returned object (+0 or +1).
+// An un-optimized return is +0.
+static ALWAYS_INLINE ReturnDisposition
+acceptOptimizedReturn()
+{
+ ReturnDisposition disposition = getReturnDisposition();
+ setReturnDisposition(ReturnAtPlus0); // reset to the unoptimized state
+ return disposition;
+}
+
+
// SUPPORT_RETURN_AUTORELEASE
#else
// not SUPPORT_RETURN_AUTORELEASE
-static ALWAYS_INLINE
-bool fastAutoreleaseForReturn(id obj)
+static ALWAYS_INLINE bool
+prepareOptimizedReturn(ReturnDisposition disposition __unused)
{
return false;
}
-static ALWAYS_INLINE
-bool fastRetainFromReturn(id obj)
+static ALWAYS_INLINE ReturnDisposition
+acceptOptimizedReturn()
{
- return false;
+ return ReturnAtPlus0;
}
return false;
}
+bool header_info::isPreoptimized() const
+{
+ return false;
+}
+
objc_selopt_t *preoptimizedSelectors(void)
{
return nil;
}
+Protocol *getPreoptimizedProtocol(const char *name)
+{
+ return nil;
+}
+
Class getPreoptimizedClass(const char *name)
{
return nil;
#include <objc-shared-cache.h>
+using objc_opt::objc_stringhash_offset_t;
+using objc_opt::objc_protocolopt_t;
using objc_opt::objc_clsopt_t;
using objc_opt::objc_headeropt_t;
using objc_opt::objc_opt_t;
return preoptimized;
}
+
+/***********************************************************************
+* Return YES if this image's dyld shared cache optimizations are valid.
+**********************************************************************/
+bool header_info::isPreoptimized() const
+{
+ // preoptimization disabled for some reason
+ if (!preoptimized) return NO;
+
+ // image not from shared cache, or not fixed inside shared cache
+ if (!_objcHeaderOptimizedByDyld(this)) return NO;
+
+ return YES;
+}
+
+
objc_selopt_t *preoptimizedSelectors(void)
{
return opt ? opt->selopt() : nil;
}
+
+Protocol *getPreoptimizedProtocol(const char *name)
+{
+ objc_protocolopt_t *protocols = opt ? opt->protocolopt() : nil;
+ if (!protocols) return nil;
+
+ return (Protocol *)protocols->getProtocol(name);
+}
+
+
Class getPreoptimizedClass(const char *name)
{
objc_clsopt_t *classes = opt ? opt->clsopt() : nil;
void *cls;
void *hi;
uint32_t count = classes->getClassAndHeader(name, cls, hi);
- if (count == 1 && ((header_info *)hi)->loaded) {
+ if (count == 1 && ((header_info *)hi)->isLoaded()) {
// exactly one matching class, and its image is loaded
return (Class)cls;
}
void *hilist[count];
classes->getClassesAndHeaders(name, clslist, hilist);
for (uint32_t i = 0; i < count; i++) {
- if (((header_info *)hilist[i])->loaded) {
+ if (((header_info *)hilist[i])->isLoaded()) {
return (Class)clslist[i];
}
}
uint32_t count = classes->getClassAndHeader(name, cls, hi);
if (count == 0) return nil;
- Class *result = (Class *)_calloc_internal(count, sizeof(Class));
- if (count == 1 && ((header_info *)hi)->loaded) {
+ Class *result = (Class *)calloc(count, sizeof(Class));
+ if (count == 1 && ((header_info *)hi)->isLoaded()) {
// exactly one matching class, and its image is loaded
result[(*outCount)++] = (Class)cls;
return result;
void *hilist[count];
classes->getClassesAndHeaders(name, clslist, hilist);
for (uint32_t i = 0; i < count; i++) {
- if (((header_info *)hilist[i])->loaded) {
+ if (((header_info *)hilist[i])->isLoaded()) {
result[(*outCount)++] = (Class)clslist[i];
}
}
else start = i+1;
}
-#if !NDEBUG
+#if DEBUG
for (uint32_t i = 0; i < count; i++) {
header_info *hi = headers+i;
if (mhdr == hi->mhdr) {
return (x + WORD_MASK) & ~WORD_MASK;
}
+
+// Mix-in for classes that must not be copied.
+class nocopy_t {
+ private:
+ nocopy_t(const nocopy_t&) = delete;
+ const nocopy_t& operator=(const nocopy_t&) = delete;
+ protected:
+ nocopy_t() { }
+ ~nocopy_t() { }
+};
+
+
#if TARGET_OS_MAC
# ifndef __STDC_LIMIT_MACROS
# include <unistd.h>
# include <pthread.h>
# include <crt_externs.h>
-# include <AssertMacros.h>
# undef check
# include <Availability.h>
# include <TargetConditionals.h>
# include <sys/param.h>
# include <mach/mach.h>
# include <mach/vm_param.h>
+# include <mach/mach_time.h>
# include <mach-o/dyld.h>
# include <mach-o/ldsyms.h>
# include <mach-o/loader.h>
#endif
-#define spinlock_t os_lock_handoff_s
-#define spinlock_trylock(l) os_lock_trylock(l)
-#define spinlock_lock(l) os_lock_lock(l)
-#define spinlock_unlock(l) os_lock_unlock(l)
-#define SPINLOCK_INITIALIZER OS_LOCK_HANDOFF_INIT
+class spinlock_t {
+ os_lock_handoff_s mLock;
+ public:
+ spinlock_t() : mLock(OS_LOCK_HANDOFF_INIT) { }
+
+ void lock() { os_lock_lock(&mLock); }
+ void unlock() { os_lock_unlock(&mLock); }
+ bool trylock() { return os_lock_trylock(&mLock); }
+
+
+ // Address-ordered lock discipline for a pair of locks.
+
+ static void lockTwo(spinlock_t *lock1, spinlock_t *lock2) {
+ if (lock1 > lock2) {
+ lock1->lock();
+ lock2->lock();
+ } else {
+ lock2->lock();
+ if (lock2 != lock1) lock1->lock();
+ }
+ }
+
+ static void unlockTwo(spinlock_t *lock1, spinlock_t *lock2) {
+ lock1->unlock();
+ if (lock2 != lock1) lock2->unlock();
+ }
+};
#if !TARGET_OS_IPHONE
#include <objc/objc.h>
#include <objc/objc-api.h>
-__BEGIN_DECLS
-
extern void _objc_fatal(const char *fmt, ...) __attribute__((noreturn, format (printf, 1, 2)));
#define INIT_ONCE_PTR(var, create, delete) \
# define SYNC_COUNT_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY2)
# define AUTORELEASE_POOL_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY3)
# if SUPPORT_RETURN_AUTORELEASE
-# define AUTORELEASE_POOL_RECLAIM_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY4)
+# define RETURN_DISPOSITION_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY4)
# endif
# if SUPPORT_QOS_HACK
# define QOS_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY5)
static __inline size_t malloc_size(const void *p) { return _msize((void*)p); /* fixme invalid pointer check? */ }
-// AssertMacros
-
-#define require_action_string(cond, dest, act, msg) do { if (!(cond)) { { act; } goto dest; } } while (0)
-#define require_noerr_string(err, dest, msg) do { if (err) goto dest; } while (0)
-#define require_string(cond, dest, msg) do { if (!(cond)) goto dest; } while (0)
-
-
// OSAtomic
static __inline BOOL OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
}
return WaitForSingleObject(c->mutex, INFINITE);
}
-static inline int _monitor_exit_nodebug(monitor_t *c) {
+static inline int _monitor_leave_nodebug(monitor_t *c) {
if (!ReleaseMutex(c->mutex)) return MONITOR_NOT_ENTERED;
else return 0;
}
// fixme no rwlock yet
-#define rwlock_t mutex_t
-#define rwlock_init(r) mutex_init(r)
-#define _rwlock_read_nodebug(m) _mutex_lock_nodebug(m)
-#define _rwlock_write_nodebug(m) _mutex_lock_nodebug(m)
-#define _rwlock_try_read_nodebug(m) _mutex_try_lock_nodebug(m)
-#define _rwlock_try_write_nodebug(m) _mutex_try_lock_nodebug(m)
-#define _rwlock_unlock_read_nodebug(m) _mutex_unlock_nodebug(m)
-#define _rwlock_unlock_write_nodebug(m) _mutex_unlock_nodebug(m)
-
typedef IMAGE_DOS_HEADER headerType;
// fixme YES bundle? NO bundle? sometimes?
// OS compatibility
+static inline uint64_t nanoseconds() {
+ return mach_absolute_time();
+}
+
// Internal data types
typedef pthread_t objc_thread_t;
#if SUPPORT_DIRECT_THREAD_KEYS
-#if !NDEBUG
+#if DEBUG
static bool is_valid_direct_key(tls_key_t k) {
return ( k == SYNC_DATA_DIRECT_KEY
|| k == SYNC_COUNT_DIRECT_KEY
|| k == AUTORELEASE_POOL_KEY
# if SUPPORT_RETURN_AUTORELEASE
- || k == AUTORELEASE_POOL_RECLAIM_KEY
+ || k == RETURN_DISPOSITION_KEY
# endif
# if SUPPORT_QOS_HACK
|| k == QOS_KEY
#endif
-typedef pthread_mutex_t mutex_t;
-#define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER;
+template <bool Debug> class mutex_tt;
+template <bool Debug> class monitor_tt;
+template <bool Debug> class rwlock_tt;
+template <bool Debug> class recursive_mutex_tt;
-static inline int _mutex_lock_nodebug(mutex_t *m) {
- return pthread_mutex_lock(m);
-}
-static inline bool _mutex_try_lock_nodebug(mutex_t *m) {
- return !pthread_mutex_trylock(m);
-}
-static inline int _mutex_unlock_nodebug(mutex_t *m) {
- return pthread_mutex_unlock(m);
-}
+#include "objc-lockdebug.h"
+template <bool Debug>
+class mutex_tt : nocopy_t {
+ pthread_mutex_t mLock;
-typedef struct {
- pthread_mutex_t *mutex;
-} recursive_mutex_t;
-#define RECURSIVE_MUTEX_INITIALIZER {0};
-#define RECURSIVE_MUTEX_NOT_LOCKED EPERM
-extern void recursive_mutex_init(recursive_mutex_t *m);
+ public:
+ mutex_tt() : mLock(PTHREAD_MUTEX_INITIALIZER) { }
-static inline int _recursive_mutex_lock_nodebug(recursive_mutex_t *m) {
- assert(m->mutex);
- return pthread_mutex_lock(m->mutex);
-}
-static inline bool _recursive_mutex_try_lock_nodebug(recursive_mutex_t *m) {
- assert(m->mutex);
- return !pthread_mutex_trylock(m->mutex);
-}
-static inline int _recursive_mutex_unlock_nodebug(recursive_mutex_t *m) {
- assert(m->mutex);
- return pthread_mutex_unlock(m->mutex);
-}
+ void lock()
+ {
+ lockdebug_mutex_lock(this);
+ int err = pthread_mutex_lock(&mLock);
+ if (err) _objc_fatal("pthread_mutex_lock failed (%d)", err);
+ }
-typedef struct {
+ bool tryLock()
+ {
+ int err = pthread_mutex_trylock(&mLock);
+ if (err == 0) {
+ lockdebug_mutex_try_lock_success(this);
+ return true;
+ } else if (err == EBUSY) {
+ return false;
+ } else {
+ _objc_fatal("pthread_mutex_trylock failed (%d)", err);
+ }
+ }
+
+ void unlock()
+ {
+ lockdebug_mutex_unlock(this);
+
+ int err = pthread_mutex_unlock(&mLock);
+ if (err) _objc_fatal("pthread_mutex_unlock failed (%d)", err);
+ }
+
+
+ void assertLocked() {
+ lockdebug_mutex_assert_locked(this);
+ }
+
+ void assertUnlocked() {
+ lockdebug_mutex_assert_unlocked(this);
+ }
+};
+
+using mutex_t = mutex_tt<DEBUG>;
+
+
+template <bool Debug>
+class recursive_mutex_tt : nocopy_t {
+ pthread_mutex_t mLock;
+
+ public:
+ recursive_mutex_tt() : mLock(PTHREAD_RECURSIVE_MUTEX_INITIALIZER) { }
+
+ void lock()
+ {
+ lockdebug_recursive_mutex_lock(this);
+
+ int err = pthread_mutex_lock(&mLock);
+ if (err) _objc_fatal("pthread_mutex_lock failed (%d)", err);
+ }
+
+ bool tryLock()
+ {
+ int err = pthread_mutex_trylock(&mLock);
+ if (err == 0) {
+ lockdebug_recursive_mutex_lock(this);
+ return true;
+ } else if (err == EBUSY) {
+ return false;
+ } else {
+ _objc_fatal("pthread_mutex_trylock failed (%d)", err);
+ }
+ }
+
+
+ void unlock()
+ {
+ lockdebug_recursive_mutex_unlock(this);
+
+ int err = pthread_mutex_unlock(&mLock);
+ if (err) _objc_fatal("pthread_mutex_unlock failed (%d)", err);
+ }
+
+ bool tryUnlock()
+ {
+ int err = pthread_mutex_unlock(&mLock);
+ if (err == 0) {
+ lockdebug_recursive_mutex_unlock(this);
+ return true;
+ } else if (err == EPERM) {
+ return false;
+ } else {
+ _objc_fatal("pthread_mutex_unlock failed (%d)", err);
+ }
+ }
+
+
+ void assertLocked() {
+ lockdebug_recursive_mutex_assert_locked(this);
+ }
+
+ void assertUnlocked() {
+ lockdebug_recursive_mutex_assert_unlocked(this);
+ }
+};
+
+using recursive_mutex_t = recursive_mutex_tt<DEBUG>;
+
+
+template <bool Debug>
+class monitor_tt {
pthread_mutex_t mutex;
pthread_cond_t cond;
-} monitor_t;
-#define MONITOR_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER }
-#define MONITOR_NOT_ENTERED EPERM
-static inline int monitor_init(monitor_t *c) {
- int err = pthread_mutex_init(&c->mutex, NULL);
- if (err) return err;
- err = pthread_cond_init(&c->cond, NULL);
- if (err) {
- pthread_mutex_destroy(&c->mutex);
- return err;
+ public:
+ monitor_tt()
+ : mutex(PTHREAD_MUTEX_INITIALIZER), cond(PTHREAD_COND_INITIALIZER) { }
+
+ void enter()
+ {
+ lockdebug_monitor_enter(this);
+
+ int err = pthread_mutex_lock(&mutex);
+ if (err) _objc_fatal("pthread_mutex_lock failed (%d)", err);
}
- return 0;
-}
-static inline int _monitor_enter_nodebug(monitor_t *c) {
- return pthread_mutex_lock(&c->mutex);
-}
-static inline int _monitor_exit_nodebug(monitor_t *c) {
- return pthread_mutex_unlock(&c->mutex);
-}
-static inline int _monitor_wait_nodebug(monitor_t *c) {
- return pthread_cond_wait(&c->cond, &c->mutex);
-}
-static inline int monitor_notify(monitor_t *c) {
- return pthread_cond_signal(&c->cond);
-}
-static inline int monitor_notifyAll(monitor_t *c) {
- return pthread_cond_broadcast(&c->cond);
-}
+
+ void leave()
+ {
+ lockdebug_monitor_leave(this);
+
+ int err = pthread_mutex_unlock(&mutex);
+ if (err) _objc_fatal("pthread_mutex_unlock failed (%d)", err);
+ }
+
+ void wait()
+ {
+ lockdebug_monitor_wait(this);
+
+ int err = pthread_cond_wait(&cond, &mutex);
+ if (err) _objc_fatal("pthread_cond_wait failed (%d)", err);
+ }
+
+ void notify()
+ {
+ int err = pthread_cond_signal(&cond);
+ if (err) _objc_fatal("pthread_cond_signal failed (%d)", err);
+ }
+
+ void notifyAll()
+ {
+ int err = pthread_cond_broadcast(&cond);
+ if (err) _objc_fatal("pthread_cond_broadcast failed (%d)", err);
+ }
+
+ void assertLocked()
+ {
+ lockdebug_monitor_assert_locked(this);
+ }
+
+ void assertUnlocked()
+ {
+ lockdebug_monitor_assert_unlocked(this);
+ }
+};
+
+using monitor_t = monitor_tt<DEBUG>;
// semaphore_create formatted for INIT_ONCE use
else {
pthread_priority_t currentPriority = pthread_self_priority_direct();
// Check if override is needed. Only override if we are background qos
- if (currentPriority <= BackgroundPriority) {
+ if (currentPriority != 0 && currentPriority <= BackgroundPriority) {
int res __unused = _pthread_override_qos_class_start_direct(mach_thread_self_direct(), MainPriority);
assert(res == 0);
// Once we override, we set the reference count in the tsd
// not SUPPORT_QOS_HACK
#endif
-/* Custom read-write lock
- - reader is atomic add/subtract
- - writer is pthread mutex plus atomic add/subtract
- - fairness: new readers wait if a writer wants in
- - fairness: when writer completes, readers (probably) precede new writer
- state: xxxxxxxx xxxxxxxx yyyyyyyy yyyyyyyz
- x: blocked reader count
- y: active reader count
- z: readers allowed flag
-*/
-typedef struct {
- pthread_rwlock_t rwl;
-} rwlock_t;
-
-static inline void rwlock_init(rwlock_t *l)
-{
- int err __unused = pthread_rwlock_init(&l->rwl, NULL);
- assert(err == 0);
-}
+template <bool Debug>
+class rwlock_tt : nocopy_t {
+ pthread_rwlock_t mLock;
-static inline void _rwlock_read_nodebug(rwlock_t *l)
-{
- qosStartOverride();
- int err __unused = pthread_rwlock_rdlock(&l->rwl);
- assert(err == 0);
-}
+ public:
+ rwlock_tt() : mLock(PTHREAD_RWLOCK_INITIALIZER) { }
+
+ void read()
+ {
+ lockdebug_rwlock_read(this);
-static inline void _rwlock_unlock_read_nodebug(rwlock_t *l)
-{
- int err __unused = pthread_rwlock_unlock(&l->rwl);
- assert(err == 0);
- qosEndOverride();
-}
+ qosStartOverride();
+ int err = pthread_rwlock_rdlock(&mLock);
+ if (err) _objc_fatal("pthread_rwlock_rdlock failed (%d)", err);
+ }
+ void unlockRead()
+ {
+ lockdebug_rwlock_unlock_read(this);
-static inline bool _rwlock_try_read_nodebug(rwlock_t *l)
-{
- qosStartOverride();
- int err = pthread_rwlock_tryrdlock(&l->rwl);
- assert(err == 0 || err == EBUSY || err == EAGAIN);
- if (err == 0) {
- return true;
- } else {
+ int err = pthread_rwlock_unlock(&mLock);
+ if (err) _objc_fatal("pthread_rwlock_unlock failed (%d)", err);
qosEndOverride();
- return false;
}
-}
+ bool tryRead()
+ {
+ qosStartOverride();
+ int err = pthread_rwlock_tryrdlock(&mLock);
+ if (err == 0) {
+ lockdebug_rwlock_try_read_success(this);
+ return true;
+ } else if (err == EBUSY) {
+ qosEndOverride();
+ return false;
+ } else {
+ _objc_fatal("pthread_rwlock_tryrdlock failed (%d)", err);
+ }
+ }
-static inline void _rwlock_write_nodebug(rwlock_t *l)
-{
- qosStartOverride();
- int err __unused = pthread_rwlock_wrlock(&l->rwl);
- assert(err == 0);
-}
+ void write()
+ {
+ lockdebug_rwlock_write(this);
-static inline void _rwlock_unlock_write_nodebug(rwlock_t *l)
-{
- int err __unused = pthread_rwlock_unlock(&l->rwl);
- assert(err == 0);
- qosEndOverride();
-}
+ qosStartOverride();
+ int err = pthread_rwlock_wrlock(&mLock);
+ if (err) _objc_fatal("pthread_rwlock_wrlock failed (%d)", err);
+ }
-static inline bool _rwlock_try_write_nodebug(rwlock_t *l)
-{
- qosStartOverride();
- int err = pthread_rwlock_trywrlock(&l->rwl);
- assert(err == 0 || err == EBUSY);
- if (err == 0) {
- return true;
- } else {
+ void unlockWrite()
+ {
+ lockdebug_rwlock_unlock_write(this);
+
+ int err = pthread_rwlock_unlock(&mLock);
+ if (err) _objc_fatal("pthread_rwlock_unlock failed (%d)", err);
qosEndOverride();
- return false;
}
-}
+
+ bool tryWrite()
+ {
+ qosStartOverride();
+ int err = pthread_rwlock_trywrlock(&mLock);
+ if (err == 0) {
+ lockdebug_rwlock_try_write_success(this);
+ return true;
+ } else if (err == EBUSY) {
+ qosEndOverride();
+ return false;
+ } else {
+ _objc_fatal("pthread_rwlock_trywrlock failed (%d)", err);
+ }
+ }
+
+
+ void assertReading() {
+ lockdebug_rwlock_assert_reading(this);
+ }
+
+ void assertWriting() {
+ lockdebug_rwlock_assert_writing(this);
+ }
+
+ void assertLocked() {
+ lockdebug_rwlock_assert_locked(this);
+ }
+
+ void assertUnlocked() {
+ lockdebug_rwlock_assert_unlocked(this);
+ }
+};
+
+using rwlock_t = rwlock_tt<DEBUG>;
#ifndef __LP64__
#endif
-__END_DECLS
+
+static inline void *
+memdup(const void *mem, size_t len)
+{
+ void *dup = malloc(len);
+ memcpy(dup, mem, len);
+ return dup;
+}
+
+// unsigned strdup
+static inline uint8_t *
+ustrdup(const uint8_t *str)
+{
+ return (uint8_t *)strdup((char *)str);
+}
+
+// nil-checking strdup
+static inline uint8_t *
+strdupMaybeNil(const uint8_t *str)
+{
+ if (!str) return nil;
+ return (uint8_t *)strdup((char *)str);
+}
+
+// nil-checking unsigned strdup
+static inline uint8_t *
+ustrdupMaybeNil(const uint8_t *str)
+{
+ if (!str) return nil;
+ return (uint8_t *)strdup((char *)str);
+}
#endif
#include "objc-runtime-old.h"
#include "objcrt.h"
-malloc_zone_t *_objc_internal_zone(void)
-{
- return NULL;
-}
-
int monitor_init(monitor_t *c)
{
// fixme error checking
OBJC_EXPORT void *_objc_init_image(HMODULE image, const objc_sections *sects)
{
- header_info *hi = _malloc_internal(sizeof(header_info));
+ header_info *hi = malloc(sizeof(header_info));
size_t count, i;
hi->mhdr = (const headerType *)image;
#include "objc-file-old.h"
#include "objc-file.h"
-void mutex_init(mutex_t *m)
-{
- pthread_mutex_init(m, NULL);
-}
-
-
-void recursive_mutex_init(recursive_mutex_t *m)
-{
- // fixme error checking
- pthread_mutex_t *newmutex;
-
- // Build recursive mutex attributes, if needed
- static pthread_mutexattr_t *attr;
- if (!attr) {
- pthread_mutexattr_t *newattr = (pthread_mutexattr_t *)
- _malloc_internal(sizeof(pthread_mutexattr_t));
- pthread_mutexattr_init(newattr);
- pthread_mutexattr_settype(newattr, PTHREAD_MUTEX_RECURSIVE);
- while (!attr) {
- if (OSAtomicCompareAndSwapPtrBarrier(0, newattr, (void**)&attr)) {
- // we win
- goto attr_done;
- }
- }
- // someone else built the attr first
- _free_internal(newattr);
- }
- attr_done:
-
- // Build the mutex itself
- newmutex = (pthread_mutex_t *)_malloc_internal(sizeof(pthread_mutex_t));
- pthread_mutex_init(newmutex, attr);
- while (!m->mutex) {
- if (OSAtomicCompareAndSwapPtrBarrier(0, newmutex, (void**)&m->mutex)) {
- // we win
- return;
- }
- }
-
- // someone else installed their mutex first
- pthread_mutex_destroy(newmutex);
-}
-
/***********************************************************************
* bad_magic.
* Return YES if the header has invalid Mach-o magic.
**********************************************************************/
-BOOL bad_magic(const headerType *mhdr)
+bool bad_magic(const headerType *mhdr)
{
return (mhdr->magic != MH_MAGIC && mhdr->magic != MH_MAGIC_64 &&
mhdr->magic != MH_CIGAM && mhdr->magic != MH_CIGAM_64);
_objc_inform("PREOPTIMIZATION: honoring preoptimized header info at %p for %s", hi, hi->fname);
}
-# if !NDEBUG
+# if DEBUG
// Verify image_info
size_t info_size = 0;
const objc_image_info *image_info = _getObjcImageInfo(mhdr,&info_size);
if (!objc_segment && !image_info) return NULL;
// Allocate a header_info entry.
- hi = (header_info *)_calloc_internal(sizeof(header_info), 1);
+ hi = (header_info *)calloc(sizeof(header_info), 1);
// Set up the new header_info entry.
hi->mhdr = mhdr;
* all already-loaded libraries support the executable's GC mode.
* Returns TRUE if the executable wants GC on.
**********************************************************************/
-static void check_wants_gc(BOOL *appWantsGC)
+static void check_wants_gc(bool *appWantsGC)
{
const header_info *hi;
*appWantsGC = NO;
for (hi = FirstHeader; hi != NULL; hi = hi->next) {
if (hi->mhdr->filetype == MH_EXECUTE) {
- *appWantsGC = _objcHeaderSupportsGC(hi) ? YES : NO;
+ *appWantsGC = _objcHeaderSupportsGC(hi);
if (PrintGC) {
_objc_inform("GC: executable '%s' %s",
* if we want gc, verify that every header describes files compiled
* and presumably ready for gc.
************************************************************************/
-static void verify_gc_readiness(BOOL wantsGC,
+static void verify_gc_readiness(bool wantsGC,
header_info **hList, uint32_t hCount)
{
- BOOL busted = NO;
+ bool busted = NO;
uint32_t i;
// Find the libraries and check their GC bits against the app's request
* Images linked to the executable are always permitted; they are
* enforced inside map_images() itself.
**********************************************************************/
-static BOOL InitialDyldRegistration = NO;
+static bool InitialDyldRegistration = NO;
static const char *gc_enforcer(enum dyld_image_states state,
uint32_t infoCount,
const struct dyld_image_info info[])
#endif
-
-/***********************************************************************
-* getSDKVersion
-* Look up the build-time SDK version for an image.
-* Version X.Y.Z is encoded as 0xXXXXYYZZ.
-* Images without the load command are assumed to be old (version 0.0.0).
-**********************************************************************/
-#if TARGET_OS_IPHONE
- // Simulator binaries encode an iOS version
-# define LC_VERSION_MIN LC_VERSION_MIN_IPHONEOS
-#elif TARGET_OS_MAC
-# define LC_VERSION_MIN LC_VERSION_MIN_MACOSX
-#else
-# error unknown OS
-#endif
-
-static uint32_t
-getSDKVersion(const header_info *hi)
-{
- const struct version_min_command *cmd;
- unsigned long i;
-
- cmd = (const struct version_min_command *) (hi->mhdr + 1);
- for (i = 0; i < hi->mhdr->ncmds; i++){
- if (cmd->cmd == LC_VERSION_MIN && cmd->cmdsize >= 16) {
- return cmd->sdk;
- }
- cmd = (const struct version_min_command *)((char *)cmd + cmd->cmdsize);
- }
-
- // Lack of version load command is assumed to be old.
- return 0;
-}
-
-
/***********************************************************************
* map_images_nolock
* Process the given images which are being mapped in by dyld.
map_images_nolock(enum dyld_image_states state, uint32_t infoCount,
const struct dyld_image_info infoList[])
{
- static BOOL firstTime = YES;
- static BOOL wantsGC = NO;
+ static bool firstTime = YES;
+ static bool wantsGC = NO;
uint32_t i;
header_info *hi;
header_info *hList[infoCount];
// no objc data in this entry
continue;
}
- if (mhdr->filetype == MH_EXECUTE) {
- // Record main executable's build SDK version
- AppSDKVersion = getSDKVersion(hi);
+ if (mhdr->filetype == MH_EXECUTE) {
// Size some data structures based on main executable's size
#if __OBJC2__
size_t count;
seg = getsegmentdata(hi->mhdr, "__DATA", &seg_size);
if (seg) gc_register_datasegment((uintptr_t)seg, seg_size);
+ seg = getsegmentdata(hi->mhdr, "__DATA_CONST", &seg_size);
+ if (seg) gc_register_datasegment((uintptr_t)seg, seg_size);
+
+ seg = getsegmentdata(hi->mhdr, "__DATA_DIRTY", &seg_size);
+ if (seg) gc_register_datasegment((uintptr_t)seg, seg_size);
+
seg = getsegmentdata(hi->mhdr, "__OBJC", &seg_size);
if (seg) gc_register_datasegment((uintptr_t)seg, seg_size);
// __OBJC contains no GC data, but pointers to it are
*
* Locking: loadMethodLock(both) and runtimeLock(new) acquired by load_images
**********************************************************************/
-BOOL
+bool
load_images_nolock(enum dyld_image_states state,uint32_t infoCount,
const struct dyld_image_info infoList[])
{
- BOOL found = NO;
+ bool found = NO;
uint32_t i;
i = infoCount;
while (i--) {
- header_info *hi;
- for (hi = FirstHeader; hi != NULL; hi = hi->next) {
- const headerType *mhdr = (headerType*)infoList[i].imageLoadAddress;
- if (hi->mhdr == mhdr) {
- prepare_load_methods(hi);
- found = YES;
- }
- }
+ const headerType *mhdr = (headerType*)infoList[i].imageLoadAddress;
+ if (!hasLoadMethods(mhdr)) continue;
+
+ prepare_load_methods(mhdr);
+ found = YES;
}
return found;
seg = getsegmentdata(hi->mhdr, "__DATA", &seg_size);
if (seg) gc_unregister_datasegment((uintptr_t)seg, seg_size);
+ seg = getsegmentdata(hi->mhdr, "__DATA_CONST", &seg_size);
+ if (seg) gc_unregister_datasegment((uintptr_t)seg, seg_size);
+
+ seg = getsegmentdata(hi->mhdr, "__DATA_DIRTY", &seg_size);
+ if (seg) gc_unregister_datasegment((uintptr_t)seg, seg_size);
+
seg = getsegmentdata(hi->mhdr, "__OBJC", &seg_size);
if (seg) gc_unregister_datasegment((uintptr_t)seg, seg_size);
}
// Remove header_info from header list
removeHeader(hi);
- _free_internal(hi);
+ free(hi);
+}
+
+
+/***********************************************************************
+* static_init
+* Run C++ static constructor functions.
+* libc calls _objc_init() before dyld would call our static constructors,
+* so we have to do it ourselves.
+**********************************************************************/
+static void static_init()
+{
+#if __OBJC2__
+ size_t count;
+ Initializer *inits = getLibobjcInitializers(&_mh_dylib_header, &count);
+ for (size_t i = 0; i < count; i++) {
+ inits[i]();
+ }
+#endif
}
* Old ABI: called by dyld as a library initializer
* New ABI: called by libSystem BEFORE library initialization time
**********************************************************************/
+
#if !__OBJC2__
static __attribute__((constructor))
#endif
// fixme defer initialization until an objc-using image is found?
environ_init();
tls_init();
+ static_init();
lock_init();
exception_init();
// Register for unmap first, in case some +load unmaps something
_dyld_register_func_for_remove_image(&unmap_image);
dyld_register_image_state_change_handler(dyld_image_state_bound,
- 1/*batch*/, &map_images);
+ 1/*batch*/, &map_2_images);
dyld_register_image_state_change_handler(dyld_image_state_dependents_initialized, 0/*not batch*/, &load_images);
}
static const header_info *_headerForAddress(void *addr)
{
#if __OBJC2__
- const char *segname = "__DATA";
+ const char *segnames[] = { "__DATA", "__DATA_CONST", "__DATA_DIRTY" };
#else
- const char *segname = "__OBJC";
+ const char *segnames[] = { "__OBJC" };
#endif
header_info *hi;
- // Check all headers in the vector
- for (hi = FirstHeader; hi != NULL; hi = hi->next)
- {
- uint8_t *seg;
- unsigned long seg_size;
-
- seg = getsegmentdata(hi->mhdr, segname, &seg_size);
- if (!seg) continue;
-
- // Is the class in this header?
- if ((uint8_t *)addr >= seg && (uint8_t *)addr < seg + seg_size)
- return hi;
+ for (hi = FirstHeader; hi != NULL; hi = hi->next) {
+ for (size_t i = 0; i < sizeof(segnames)/sizeof(segnames[0]); i++) {
+ unsigned long seg_size;
+ uint8_t *seg = getsegmentdata(hi->mhdr, segnames[i], &seg_size);
+ if (!seg) continue;
+
+ // Is the class in this header?
+ if ((uint8_t *)addr >= seg && (uint8_t *)addr < seg + seg_size) {
+ return hi;
+ }
+ }
}
// Not found
{
struct stat fs, ls;
int fd = -1;
- BOOL truncate = NO;
- BOOL create = NO;
+ bool truncate = NO;
+ bool create = NO;
if (flags & O_TRUNC) {
// Don't truncate the file until after it is open and verified.
}
-/***********************************************************************
-* _objc_internal_zone.
-* Malloc zone for internal runtime data.
-* By default this is the default malloc zone, but a dedicated zone is
-* used if environment variable OBJC_USE_INTERNAL_ZONE is set.
-**********************************************************************/
-malloc_zone_t *_objc_internal_zone(void)
-{
- static malloc_zone_t *z = (malloc_zone_t *)-1;
- if (z == (malloc_zone_t *)-1) {
- if (UseInternalZone) {
- z = malloc_create_zone(vm_page_size, 0);
- malloc_set_zone_name(z, "ObjC_Internal");
- } else {
- z = malloc_default_zone();
- }
- }
- return z;
-}
-
-
bool crashlog_header_name(header_info *hi)
{
return crashlog_header_name_string(hi ? hi->fname : NULL);
typedef struct objc_object *id;
namespace {
- class SideTable;
+ struct SideTable;
};
// uintptr_t extraBytes : 1; // allocated with extra bytes
# if __arm64__
-# define ISA_MASK 0x00000001fffffff8ULL
-# define ISA_MAGIC_MASK 0x000003fe00000001ULL
-# define ISA_MAGIC_VALUE 0x000001a400000001ULL
+# define ISA_MASK 0x0000000ffffffff8ULL
+# define ISA_MAGIC_MASK 0x000003f000000001ULL
+# define ISA_MAGIC_VALUE 0x000001a000000001ULL
struct {
uintptr_t indexed : 1;
uintptr_t has_assoc : 1;
uintptr_t has_cxx_dtor : 1;
- uintptr_t shiftcls : 30; // MACH_VM_MAX_ADDRESS 0x1a0000000
- uintptr_t magic : 9;
+ uintptr_t shiftcls : 33; // MACH_VM_MAX_ADDRESS 0x1000000000
+ uintptr_t magic : 6;
uintptr_t weakly_referenced : 1;
uintptr_t deallocating : 1;
uintptr_t has_sidetable_rc : 1;
# elif __x86_64__
# define ISA_MASK 0x00007ffffffffff8ULL
-# define ISA_MAGIC_MASK 0x0000000000000001ULL
-# define ISA_MAGIC_VALUE 0x0000000000000001ULL
+# define ISA_MAGIC_MASK 0x001f800000000001ULL
+# define ISA_MAGIC_VALUE 0x001d800000000001ULL
struct {
uintptr_t indexed : 1;
uintptr_t has_assoc : 1;
uintptr_t has_cxx_dtor : 1;
uintptr_t shiftcls : 44; // MACH_VM_MAX_ADDRESS 0x7fffffe00000
+ uintptr_t magic : 6;
uintptr_t weakly_referenced : 1;
uintptr_t deallocating : 1;
uintptr_t has_sidetable_rc : 1;
- uintptr_t extra_rc : 14;
-# define RC_ONE (1ULL<<50)
-# define RC_HALF (1ULL<<13)
+ uintptr_t extra_rc : 8;
+# define RC_ONE (1ULL<<56)
+# define RC_HALF (1ULL<<7)
};
# else
id rootRetain_overflow(bool tryRetain);
bool rootRelease_underflow(bool performDealloc);
- void clearDeallocating_weak();
+ void clearDeallocating_slow();
// Side table retain count overflow for nonpointer isa
void sidetable_lock();
void sidetable_moveExtraRC_nolock(size_t extra_rc, bool isDeallocating, bool weaklyReferenced);
bool sidetable_addExtraRC_nolock(size_t delta_rc);
- bool sidetable_subExtraRC_nolock(size_t delta_rc);
+ size_t sidetable_subExtraRC_nolock(size_t delta_rc);
size_t sidetable_getExtraRC_nolock();
#endif
void sidetable_setWeaklyReferenced_nolock();
id sidetable_retain();
- id sidetable_retain_slow(SideTable *table);
+ id sidetable_retain_slow(SideTable& table);
- bool sidetable_release(bool performDealloc = true);
- bool sidetable_release_slow(SideTable *table, bool performDealloc = true);
+ uintptr_t sidetable_release(bool performDealloc = true);
+ uintptr_t sidetable_release_slow(SideTable& table, bool performDealloc = true);
bool sidetable_tryRetain();
uintptr_t sidetable_retainCount();
-#if !NDEBUG
+#if DEBUG
bool sidetable_present();
#endif
};
#include "objc-os.h"
#include "objc-abi.h"
#include "objc-api.h"
-#include "objc-auto.h"
#include "objc-config.h"
#include "objc-internal.h"
#include "maptable.h"
#include "hashtable2.h"
+#if SUPPORT_GC
+#include "objc-auto.h"
+#endif
+
/* Do not include message.h here. */
/* #include "message.h" */
#include "objc-loadmethod.h"
+#if SUPPORT_PREOPT && __cplusplus
+#include <objc-shared-cache.h>
+using objc_selopt_t = const objc_opt::objc_selopt_t;
+#else
+struct objc_selopt_t;
+#endif
+
+
__BEGIN_DECLS
*/
-typedef struct _header_info {
- struct _header_info *next;
+typedef struct header_info {
+ struct header_info *next;
const headerType *mhdr;
const objc_image_info *info;
const char *fname; // same as Dl_info.dli_fname
// Do not add fields without editing ObjCModernAbstraction.hpp
+ bool isLoaded() {
+ return loaded;
+ }
+
+ bool isBundle() {
+ return mhdr->filetype == MH_BUNDLE;
+ }
+
+ bool isPreoptimized() const;
+
#if !__OBJC2__
struct old_protocol **proto_refs;
struct objc_module *mod_ptr;
extern header_info *LastHeader;
extern int HeaderCount;
-extern uint32_t AppSDKVersion; // X.Y.Z is 0xXXXXYYZZ
-
extern void appendHeader(header_info *hi);
extern void removeHeader(header_info *hi);
extern objc_image_info *_getObjcImageInfo(const headerType *head, size_t *size);
-extern BOOL _hasObjcContents(const header_info *hi);
+extern bool _hasObjcContents(const header_info *hi);
/* selectors */
-extern void sel_init(BOOL gc, size_t selrefCount);
-extern SEL sel_registerNameNoLock(const char *str, BOOL copy);
+extern void sel_init(bool gc, size_t selrefCount);
+extern SEL sel_registerNameNoLock(const char *str, bool copy);
extern void sel_lock(void);
extern void sel_unlock(void);
-extern BOOL sel_preoptimizationValid(const header_info *hi);
extern SEL SEL_load;
extern SEL SEL_initialize;
extern bool isPreoptimized(void);
extern header_info *preoptimizedHinfoForHeader(const headerType *mhdr);
-#if SUPPORT_PREOPT && __cplusplus
-#include <objc-shared-cache.h>
-using objc_selopt_t = const objc_opt::objc_selopt_t;
-#else
-struct objc_selopt_t;
-#endif
-
extern objc_selopt_t *preoptimizedSelectors(void);
-extern Class getPreoptimizedClass(const char *name);
-extern Class* copyPreoptimizedClasses(const char *name, int *outCount);
+extern Protocol *getPreoptimizedProtocol(const char *name);
-/* optional malloc zone for runtime data */
-extern malloc_zone_t *_objc_internal_zone(void);
-extern void *_malloc_internal(size_t size);
-extern void *_calloc_internal(size_t count, size_t size);
-extern void *_realloc_internal(void *ptr, size_t size);
-extern char *_strdup_internal(const char *str);
-extern char *_strdupcat_internal(const char *s1, const char *s2);
-extern uint8_t *_ustrdup_internal(const uint8_t *str);
-extern void *_memdup_internal(const void *mem, size_t size);
-extern void _free_internal(void *ptr);
-extern size_t _malloc_size_internal(void *ptr);
+extern Class getPreoptimizedClass(const char *name);
+extern Class* copyPreoptimizedClasses(const char *name, int *outCount);
extern Class _calloc_class(size_t size);
extern IMP lookUpImpOrForward(Class, SEL, id obj, bool initialize, bool cache, bool resolver);
extern IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel);
-extern BOOL class_respondsToSelector_inst(Class cls, SEL sel, id inst);
+extern bool class_respondsToSelector_inst(Class cls, SEL sel, id inst);
extern bool objcMsgLogEnabled;
extern bool logMessageSend(bool isClassMethod,
extern mutex_t methodListLock;
#endif
-/* Lock debugging */
-#if defined(NDEBUG) || TARGET_OS_WIN32
-
-#define mutex_lock(m) _mutex_lock_nodebug(m)
-#define mutex_try_lock(m) _mutex_try_lock_nodebug(m)
-#define mutex_unlock(m) _mutex_unlock_nodebug(m)
-#define mutex_assert_locked(m) do { } while (0)
-#define mutex_assert_unlocked(m) do { } while (0)
-
-#define recursive_mutex_lock(m) _recursive_mutex_lock_nodebug(m)
-#define recursive_mutex_try_lock(m) _recursive_mutex_try_lock_nodebug(m)
-#define recursive_mutex_unlock(m) _recursive_mutex_unlock_nodebug(m)
-#define recursive_mutex_assert_locked(m) do { } while (0)
-#define recursive_mutex_assert_unlocked(m) do { } while (0)
-
-#define monitor_enter(m) _monitor_enter_nodebug(m)
-#define monitor_exit(m) _monitor_exit_nodebug(m)
-#define monitor_wait(m) _monitor_wait_nodebug(m)
-#define monitor_assert_locked(m) do { } while (0)
-#define monitor_assert_unlocked(m) do { } while (0)
-
-#define rwlock_read(m) _rwlock_read_nodebug(m)
-#define rwlock_write(m) _rwlock_write_nodebug(m)
-#define rwlock_try_read(m) _rwlock_try_read_nodebug(m)
-#define rwlock_try_write(m) _rwlock_try_write_nodebug(m)
-#define rwlock_unlock_read(m) _rwlock_unlock_read_nodebug(m)
-#define rwlock_unlock_write(m) _rwlock_unlock_write_nodebug(m)
-#define rwlock_assert_reading(m) do { } while (0)
-#define rwlock_assert_writing(m) do { } while (0)
-#define rwlock_assert_locked(m) do { } while (0)
-#define rwlock_assert_unlocked(m) do { } while (0)
-
-#else
+class monitor_locker_t : nocopy_t {
+ monitor_t& lock;
+ public:
+ monitor_locker_t(monitor_t& newLock) : lock(newLock) { lock.enter(); }
+ ~monitor_locker_t() { lock.leave(); }
+};
-extern int _mutex_lock_debug(mutex_t *lock, const char *name);
-extern int _mutex_try_lock_debug(mutex_t *lock, const char *name);
-extern int _mutex_unlock_debug(mutex_t *lock, const char *name);
-extern void _mutex_assert_locked_debug(mutex_t *lock, const char *name);
-extern void _mutex_assert_unlocked_debug(mutex_t *lock, const char *name);
-
-extern int _recursive_mutex_lock_debug(recursive_mutex_t *lock, const char *name);
-extern int _recursive_mutex_try_lock_debug(recursive_mutex_t *lock, const char *name);
-extern int _recursive_mutex_unlock_debug(recursive_mutex_t *lock, const char *name);
-extern void _recursive_mutex_assert_locked_debug(recursive_mutex_t *lock, const char *name);
-extern void _recursive_mutex_assert_unlocked_debug(recursive_mutex_t *lock, const char *name);
-
-extern int _monitor_enter_debug(monitor_t *lock, const char *name);
-extern int _monitor_exit_debug(monitor_t *lock, const char *name);
-extern int _monitor_wait_debug(monitor_t *lock, const char *name);
-extern void _monitor_assert_locked_debug(monitor_t *lock, const char *name);
-extern void _monitor_assert_unlocked_debug(monitor_t *lock, const char *name);
-
-extern void _rwlock_read_debug(rwlock_t *l, const char *name);
-extern void _rwlock_write_debug(rwlock_t *l, const char *name);
-extern int _rwlock_try_read_debug(rwlock_t *l, const char *name);
-extern int _rwlock_try_write_debug(rwlock_t *l, const char *name);
-extern void _rwlock_unlock_read_debug(rwlock_t *l, const char *name);
-extern void _rwlock_unlock_write_debug(rwlock_t *l, const char *name);
-extern void _rwlock_assert_reading_debug(rwlock_t *l, const char *name);
-extern void _rwlock_assert_writing_debug(rwlock_t *l, const char *name);
-extern void _rwlock_assert_locked_debug(rwlock_t *l, const char *name);
-extern void _rwlock_assert_unlocked_debug(rwlock_t *l, const char *name);
-
-#define mutex_lock(m) _mutex_lock_debug (m, #m)
-#define mutex_try_lock(m) _mutex_try_lock_debug (m, #m)
-#define mutex_unlock(m) _mutex_unlock_debug (m, #m)
-#define mutex_assert_locked(m) _mutex_assert_locked_debug (m, #m)
-#define mutex_assert_unlocked(m) _mutex_assert_unlocked_debug (m, #m)
-
-#define recursive_mutex_lock(m) _recursive_mutex_lock_debug (m, #m)
-#define recursive_mutex_try_lock(m) _recursive_mutex_try_lock_debug (m, #m)
-#define recursive_mutex_unlock(m) _recursive_mutex_unlock_debug (m, #m)
-#define recursive_mutex_assert_locked(m) _recursive_mutex_assert_locked_debug (m, #m)
-#define recursive_mutex_assert_unlocked(m) _recursive_mutex_assert_unlocked_debug (m, #m)
-
-#define monitor_enter(m) _monitor_enter_debug(m, #m)
-#define monitor_exit(m) _monitor_exit_debug(m, #m)
-#define monitor_wait(m) _monitor_wait_debug(m, #m)
-#define monitor_assert_locked(m) _monitor_assert_locked_debug(m, #m)
-#define monitor_assert_unlocked(m) _monitor_assert_unlocked_debug(m, #m)
-
-#define rwlock_read(m) _rwlock_read_debug(m, #m)
-#define rwlock_write(m) _rwlock_write_debug(m, #m)
-#define rwlock_try_read(m) _rwlock_try_read_debug(m, #m)
-#define rwlock_try_write(m) _rwlock_try_write_debug(m, #m)
-#define rwlock_unlock_read(m) _rwlock_unlock_read_debug(m, #m)
-#define rwlock_unlock_write(m) _rwlock_unlock_write_debug(m, #m)
-#define rwlock_assert_reading(m) _rwlock_assert_reading_debug(m, #m)
-#define rwlock_assert_writing(m) _rwlock_assert_writing_debug(m, #m)
-#define rwlock_assert_locked(m) _rwlock_assert_locked_debug(m, #m)
-#define rwlock_assert_unlocked(m) _rwlock_assert_unlocked_debug(m, #m)
+class mutex_locker_t : nocopy_t {
+ mutex_t& lock;
+ public:
+ mutex_locker_t(mutex_t& newLock)
+ : lock(newLock) { lock.lock(); }
+ ~mutex_locker_t() { lock.unlock(); }
+};
-#endif
+class recursive_mutex_locker_t : nocopy_t {
+ recursive_mutex_t& lock;
+ public:
+ recursive_mutex_locker_t(recursive_mutex_t& newLock)
+ : lock(newLock) { lock.lock(); }
+ ~recursive_mutex_locker_t() { lock.unlock(); }
+};
-#define rwlock_unlock(m, s) \
- do { \
- if ((s) == RDONLY) rwlock_unlock_read(m); \
- else if ((s) == RDWR) rwlock_unlock_write(m); \
- } while (0)
+class rwlock_reader_t : nocopy_t {
+ rwlock_t& lock;
+ public:
+ rwlock_reader_t(rwlock_t& newLock) : lock(newLock) { lock.read(); }
+ ~rwlock_reader_t() { lock.unlockRead(); }
+};
+class rwlock_writer_t : nocopy_t {
+ rwlock_t& lock;
+ public:
+ rwlock_writer_t(rwlock_t& newLock) : lock(newLock) { lock.write(); }
+ ~rwlock_writer_t() { lock.unlockWrite(); }
+};
/* ignored selector support */
}
/* GC startup */
-extern void gc_init(BOOL wantsGC);
+extern void gc_init(bool wantsGC);
extern void gc_init2(void);
/* Exceptions */
extern void gc_unregister_datasegment(uintptr_t base, size_t size);
/* objc_dumpHeap implementation */
-extern BOOL _objc_dumpHeap(auto_zone_t *zone, const char *filename);
+extern bool _objc_dumpHeap(auto_zone_t *zone, const char *filename);
#endif
extern void environ_init(void);
-extern void logReplacedMethod(const char *className, SEL s, BOOL isMeta, const char *catName, IMP oldImp, IMP newImp);
-
-static __inline uint32_t _objc_strhash(const char *s) {
- uint32_t hash = 0;
- for (;;) {
- int a = *s++;
- if (0 == a) break;
- hash += (hash << 8) + a;
- }
- return hash;
-}
+extern void logReplacedMethod(const char *className, SEL s, bool isMeta, const char *catName, IMP oldImp, IMP newImp);
// objc per-thread storage
} _objc_pthread_data;
-extern _objc_pthread_data *_objc_fetch_pthread_data(BOOL create);
+extern _objc_pthread_data *_objc_fetch_pthread_data(bool create);
extern void tls_init(void);
// encoding.h
uint8_t *bits;
size_t bitCount;
size_t bitsAllocated;
- BOOL weak;
+ bool weak;
} layout_bitmap;
-extern layout_bitmap layout_bitmap_create(const unsigned char *layout_string, size_t layoutStringInstanceSize, size_t instanceSize, BOOL weak);
-extern layout_bitmap layout_bitmap_create_empty(size_t instanceSize, BOOL weak);
+extern layout_bitmap layout_bitmap_create(const unsigned char *layout_string, size_t layoutStringInstanceSize, size_t instanceSize, bool weak);
+extern layout_bitmap layout_bitmap_create_empty(size_t instanceSize, bool weak);
extern void layout_bitmap_free(layout_bitmap bits);
extern const unsigned char *layout_string_create(layout_bitmap bits);
extern void layout_bitmap_set_ivar(layout_bitmap bits, const char *type, size_t offset);
extern void layout_bitmap_grow(layout_bitmap *bits, size_t newCount);
extern void layout_bitmap_slide(layout_bitmap *bits, size_t oldPos, size_t newPos);
extern void layout_bitmap_slide_anywhere(layout_bitmap *bits, size_t oldPos, size_t newPos);
-extern BOOL layout_bitmap_splat(layout_bitmap dst, layout_bitmap src,
+extern bool layout_bitmap_splat(layout_bitmap dst, layout_bitmap src,
size_t oldSrcInstanceSize);
-extern BOOL layout_bitmap_or(layout_bitmap dst, layout_bitmap src, const char *msg);
-extern BOOL layout_bitmap_clear(layout_bitmap dst, layout_bitmap src, const char *msg);
+extern bool layout_bitmap_or(layout_bitmap dst, layout_bitmap src, const char *msg);
+extern bool layout_bitmap_clear(layout_bitmap dst, layout_bitmap src, const char *msg);
extern void layout_bitmap_print(layout_bitmap bits);
// fixme runtime
-extern Class look_up_class(const char *aClassName, BOOL includeUnconnected, BOOL includeClassHandler);
-extern const char *map_images(enum dyld_image_states state, uint32_t infoCount, const struct dyld_image_info infoList[]);
+extern Class look_up_class(const char *aClassName, bool includeUnconnected, bool includeClassHandler);
+extern "C" const char *map_2_images(enum dyld_image_states state, uint32_t infoCount, const struct dyld_image_info infoList[]);
extern const char *map_images_nolock(enum dyld_image_states state, uint32_t infoCount, const struct dyld_image_info infoList[]);
extern const char * load_images(enum dyld_image_states state, uint32_t infoCount, const struct dyld_image_info infoList[]);
-extern BOOL load_images_nolock(enum dyld_image_states state, uint32_t infoCount, const struct dyld_image_info infoList[]);
+extern bool load_images_nolock(enum dyld_image_states state, uint32_t infoCount, const struct dyld_image_info infoList[]);
extern void unmap_image(const struct mach_header *mh, intptr_t vmaddr_slide);
extern void unmap_image_nolock(const struct mach_header *mh);
extern void _read_images(header_info **hList, uint32_t hCount);
-extern void prepare_load_methods(header_info *hi);
+extern void prepare_load_methods(const headerType *mhdr);
+extern bool hasLoadMethods(const headerType *mhdr);
extern void _unload_image(header_info *hi);
extern const char ** _objc_copyClassNamesForImage(header_info *hi, unsigned int *outCount);
extern Class _class_remap(Class cls);
extern Class _class_getNonMetaClass(Class cls, id obj);
extern Ivar _class_getVariable(Class cls, const char *name, Class *memberOf);
-extern BOOL _class_usesAutomaticRetainRelease(Class cls);
extern uint32_t _class_getInstanceStart(Class cls);
extern unsigned _class_createInstancesFromZone(Class cls, size_t extraBytes, void *zone, id *results, unsigned num_requested);
#define countof(arr) (sizeof(arr) / sizeof((arr)[0]))
+static __inline uint32_t _objc_strhash(const char *s) {
+ uint32_t hash = 0;
+ for (;;) {
+ int a = *s++;
+ if (0 == a) break;
+ hash += (hash << 8) + a;
+ }
+ return hash;
+}
+
+#if __cplusplus
+
+template <typename T>
+static inline T log2u(T x) {
+ return (x<2) ? 0 : log2u(x>>1)+1;
+}
+
+template <typename T>
+static inline T exp2u(T x) {
+ return (1 << x);
+}
+
+template <typename T>
+static T exp2m1u(T x) {
+ return (1 << x) - 1;
+}
+
+#endif
+
+
// Global operator new and delete. We must not use any app overrides.
// This ALSO REQUIRES each of these be in libobjc's unexported symbol list.
#if __cplusplus
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Winline-new-delete"
#include <new>
-inline void* operator new(std::size_t size) throw (std::bad_alloc) { return _malloc_internal(size); }
-inline void* operator new[](std::size_t size) throw (std::bad_alloc) { return _malloc_internal(size); }
-inline void* operator new(std::size_t size, const std::nothrow_t&) throw() { return _malloc_internal(size); }
-inline void* operator new[](std::size_t size, const std::nothrow_t&) throw() { return _malloc_internal(size); }
-inline void operator delete(void* p) throw() { _free_internal(p); }
-inline void operator delete[](void* p) throw() { _free_internal(p); }
-inline void operator delete(void* p, const std::nothrow_t&) throw() { _free_internal(p); }
-inline void operator delete[](void* p, const std::nothrow_t&) throw() { _free_internal(p); }
+inline void* operator new(std::size_t size) throw (std::bad_alloc) { return malloc(size); }
+inline void* operator new[](std::size_t size) throw (std::bad_alloc) { return malloc(size); }
+inline void* operator new(std::size_t size, const std::nothrow_t&) throw() { return malloc(size); }
+inline void* operator new[](std::size_t size, const std::nothrow_t&) throw() { return malloc(size); }
+inline void operator delete(void* p) throw() { free(p); }
+inline void operator delete[](void* p) throw() { free(p); }
+inline void operator delete(void* p, const std::nothrow_t&) throw() { free(p); }
+inline void operator delete[](void* p, const std::nothrow_t&) throw() { free(p); }
#pragma clang diagnostic pop
#endif
+class TimeLogger {
+ uint64_t mStart;
+ bool mRecord;
+ public:
+ TimeLogger(bool record = true)
+ : mStart(nanoseconds())
+ , mRecord(record)
+ { }
+
+ void log(const char *msg) {
+ if (mRecord) {
+ uint64_t end = nanoseconds();
+ _objc_inform("%.2f ms: %s", (end - mStart) / 1000000.0, msg);
+ mStart = nanoseconds();
+ }
+ }
+};
+
+
+// StripedMap<T> is a map of void* -> T, sized appropriately
+// for cache-friendly lock striping.
+// For example, this may be used as StripedMap<spinlock_t>
+// or as StripedMap<SomeStruct> where SomeStruct stores a spin lock.
+template<typename T>
+class StripedMap {
+
+ enum { CacheLineSize = 64 };
+
+#if TARGET_OS_EMBEDDED
+ enum { StripeCount = 8 };
+#else
+ enum { StripeCount = 64 };
+#endif
+
+ struct PaddedT {
+ T value alignas(CacheLineSize);
+ };
+
+ PaddedT array[StripeCount];
+
+ static unsigned int indexForPointer(const void *p) {
+ uintptr_t addr = reinterpret_cast<uintptr_t>(p);
+ return ((addr >> 4) ^ (addr >> 9)) % StripeCount;
+ }
+
+ public:
+ T& operator[] (const void *p) {
+ return array[indexForPointer(p)].value;
+ }
+ const T& operator[] (const void *p) const {
+ return const_cast<StripedMap<T>>(this)[p];
+ }
+
+#if DEBUG
+ StripedMap() {
+ // Verify alignment expectations.
+ uintptr_t base = (uintptr_t)&array[0].value;
+ uintptr_t delta = (uintptr_t)&array[1].value - base;
+ assert(delta % CacheLineSize == 0);
+ assert(base % CacheLineSize == 0);
+ }
+#endif
+};
+
+
// DisguisedPtr<T> acts like pointer type T*, except the
// stored value is disguised to hide it from tools like `leaks`.
// nil is disguised as itself so zero-filled memory works as expected,
// because we don't currently use them anywhere
};
+// fixme type id is weird and not identical to objc_object*
+static inline bool operator == (DisguisedPtr<objc_object> lhs, id rhs) {
+ return lhs == (objc_object *)rhs;
+}
+static inline bool operator != (DisguisedPtr<objc_object> lhs, id rhs) {
+ return lhs != (objc_object *)rhs;
+}
+
// Pointer hash function.
// This is not a terrific hash, but it is fast
}
pointer allocate(size_type n, const_pointer = 0) {
- return static_cast<pointer>(::_malloc_internal(n * sizeof(T)));
+ return static_cast<pointer>(::malloc(n * sizeof(T)));
}
- void deallocate(pointer p, size_type) { ::_free_internal(p); }
+ void deallocate(pointer p, size_type) { ::free(p); }
size_type max_size() const {
return static_cast<size_type>(-1) / sizeof(T);
typedef ObjcAllocator<std::pair<void * const, ObjcAssociation> > ObjectAssociationMapAllocator;
class ObjectAssociationMap : public std::map<void *, ObjcAssociation, ObjectPointerLess, ObjectAssociationMapAllocator> {
public:
- void *operator new(size_t n) { return ::_malloc_internal(n); }
- void operator delete(void *ptr) { ::_free_internal(ptr); }
+ void *operator new(size_t n) { return ::malloc(n); }
+ void operator delete(void *ptr) { ::free(ptr); }
};
typedef ObjcAllocator<std::pair<const disguised_ptr_t, ObjectAssociationMap*> > AssociationsHashMapAllocator;
class AssociationsHashMap : public unordered_map<disguised_ptr_t, ObjectAssociationMap *, DisguisedPointerHash, DisguisedPointerEqual, AssociationsHashMapAllocator> {
public:
- void *operator new(size_t n) { return ::_malloc_internal(n); }
- void operator delete(void *ptr) { ::_free_internal(ptr); }
+ void *operator new(size_t n) { return ::malloc(n); }
+ void operator delete(void *ptr) { ::free(ptr); }
};
#endif
}
static spinlock_t _lock;
static AssociationsHashMap *_map; // associative references: object pointer -> PtrPtrHashMap.
public:
- AssociationsManager() { spinlock_lock(&_lock); }
- ~AssociationsManager() { spinlock_unlock(&_lock); }
+ AssociationsManager() { _lock.lock(); }
+ ~AssociationsManager() { _lock.unlock(); }
AssociationsHashMap &associations() {
if (_map == NULL)
}
};
-spinlock_t AssociationsManager::_lock = SPINLOCK_INITIALIZER;
+spinlock_t AssociationsManager::_lock;
AssociationsHashMap *AssociationsManager::_map = NULL;
// expanded policy bits.
#ifndef _OBJC_RUNTIME_NEW_H
#define _OBJC_RUNTIME_NEW_H
-__BEGIN_DECLS
-
#if __LP64__
typedef uint32_t mask_t; // x86_64 & arm64 asm are less efficient with 16-bits
#else
mask_t occupied();
void incrementOccupied();
void setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask);
- void setEmpty();
+ void initializeToEmpty();
mask_t capacity();
+ bool isConstantEmptyCache();
bool canBeFreed();
static size_t bytesForCapacity(uint32_t cap);
void expand();
void reallocate(mask_t oldCapacity, mask_t newCapacity);
- struct bucket_t * find(cache_key_t key);
+ struct bucket_t * find(cache_key_t key, id receiver);
static void bad_cache(id receiver, SEL sel, Class isa) __attribute__((noreturn));
};
// classref_t is unremapped class_t*
typedef struct classref * classref_t;
-struct method_t {
- SEL name;
- const char *types;
- IMP imp;
-
- struct SortBySELAddress :
- public std::binary_function<const method_t&,
- const method_t&, bool>
- {
- bool operator() (const method_t& lhs,
- const method_t& rhs)
- { return lhs.name < rhs.name; }
- };
-};
-
-struct method_list_t {
- uint32_t entsize_NEVER_USE; // high bits used for fixup markers
+/***********************************************************************
+* entsize_list_tt<Element, List, FlagMask>
+* Generic implementation of an array of non-fragile structs.
+*
+* Element is the struct type (e.g. method_t)
+* List is the specialization of entsize_list_tt (e.g. method_list_t)
+* FlagMask is used to stash extra bits in the entsize field
+* (e.g. method list fixup markers)
+**********************************************************************/
+template <typename Element, typename List, uint32_t FlagMask>
+struct entsize_list_tt {
+ uint32_t entsizeAndFlags;
uint32_t count;
- method_t first;
+ Element first;
- uint32_t getEntsize() const {
- return entsize_NEVER_USE & ~(uint32_t)3;
+ uint32_t entsize() const {
+ return entsizeAndFlags & ~FlagMask;
}
- uint32_t getCount() const {
- return count;
+ uint32_t flags() const {
+ return entsizeAndFlags & FlagMask;
}
- method_t& getOrEnd(uint32_t i) const {
+
+ Element& getOrEnd(uint32_t i) const {
assert(i <= count);
- return *(method_t *)((uint8_t *)&first + i*getEntsize());
+ return *(Element *)((uint8_t *)&first + i*entsize());
}
- method_t& get(uint32_t i) const {
+ Element& get(uint32_t i) const {
assert(i < count);
return getOrEnd(i);
}
- // iterate methods, taking entsize into account
- // fixme need a proper const_iterator
- struct method_iterator {
+ size_t byteSize() const {
+ return sizeof(*this) + (count-1)*entsize();
+ }
+
+ List *duplicate() const {
+ return (List *)memdup(this, this->byteSize());
+ }
+
+ struct iterator;
+ const iterator begin() const {
+ return iterator(*static_cast<const List*>(this), 0);
+ }
+ iterator begin() {
+ return iterator(*static_cast<const List*>(this), 0);
+ }
+ const iterator end() const {
+ return iterator(*static_cast<const List*>(this), count);
+ }
+ iterator end() {
+ return iterator(*static_cast<const List*>(this), count);
+ }
+
+ struct iterator {
uint32_t entsize;
uint32_t index; // keeping track of this saves a divide in operator-
- method_t* method;
+ Element* element;
typedef std::random_access_iterator_tag iterator_category;
- typedef method_t value_type;
+ typedef Element value_type;
typedef ptrdiff_t difference_type;
- typedef method_t* pointer;
- typedef method_t& reference;
+ typedef Element* pointer;
+ typedef Element& reference;
- method_iterator() { }
+ iterator() { }
- method_iterator(const method_list_t& mlist, uint32_t start = 0)
- : entsize(mlist.getEntsize())
+ iterator(const List& list, uint32_t start = 0)
+ : entsize(list.entsize())
, index(start)
- , method(&mlist.getOrEnd(start))
+ , element(&list.getOrEnd(start))
{ }
- const method_iterator& operator += (ptrdiff_t delta) {
- method = (method_t*)((uint8_t *)method + delta*entsize);
+ const iterator& operator += (ptrdiff_t delta) {
+ element = (Element*)((uint8_t *)element + delta*entsize);
index += (int32_t)delta;
return *this;
}
- const method_iterator& operator -= (ptrdiff_t delta) {
- method = (method_t*)((uint8_t *)method - delta*entsize);
+ const iterator& operator -= (ptrdiff_t delta) {
+ element = (Element*)((uint8_t *)element - delta*entsize);
index -= (int32_t)delta;
return *this;
}
- const method_iterator operator + (ptrdiff_t delta) const {
- return method_iterator(*this) += delta;
+ const iterator operator + (ptrdiff_t delta) const {
+ return iterator(*this) += delta;
}
- const method_iterator operator - (ptrdiff_t delta) const {
- return method_iterator(*this) -= delta;
+ const iterator operator - (ptrdiff_t delta) const {
+ return iterator(*this) -= delta;
}
- method_iterator& operator ++ () { *this += 1; return *this; }
- method_iterator& operator -- () { *this -= 1; return *this; }
- method_iterator operator ++ (int) {
- method_iterator result(*this); *this += 1; return result;
+ iterator& operator ++ () { *this += 1; return *this; }
+ iterator& operator -- () { *this -= 1; return *this; }
+ iterator operator ++ (int) {
+ iterator result(*this); *this += 1; return result;
}
- method_iterator operator -- (int) {
- method_iterator result(*this); *this -= 1; return result;
+ iterator operator -- (int) {
+ iterator result(*this); *this -= 1; return result;
}
- ptrdiff_t operator - (const method_iterator& rhs) const {
+ ptrdiff_t operator - (const iterator& rhs) const {
return (ptrdiff_t)this->index - (ptrdiff_t)rhs.index;
}
- method_t& operator * () const { return *method; }
- method_t* operator -> () const { return method; }
+ Element& operator * () const { return *element; }
+ Element* operator -> () const { return element; }
- operator method_t& () const { return *method; }
+ operator Element& () const { return *element; }
- bool operator == (const method_iterator& rhs) {
- return this->method == rhs.method;
+ bool operator == (const iterator& rhs) const {
+ return this->element == rhs.element;
}
- bool operator != (const method_iterator& rhs) {
- return this->method != rhs.method;
+ bool operator != (const iterator& rhs) const {
+ return this->element != rhs.element;
}
- bool operator < (const method_iterator& rhs) {
- return this->method < rhs.method;
+ bool operator < (const iterator& rhs) const {
+ return this->element < rhs.element;
}
- bool operator > (const method_iterator& rhs) {
- return this->method > rhs.method;
+ bool operator > (const iterator& rhs) const {
+ return this->element > rhs.element;
}
};
+};
- method_iterator begin() const { return method_iterator(*this, 0); }
- method_iterator end() const { return method_iterator(*this, getCount()); }
+struct method_t {
+ SEL name;
+ const char *types;
+ IMP imp;
+
+ struct SortBySELAddress :
+ public std::binary_function<const method_t&,
+ const method_t&, bool>
+ {
+ bool operator() (const method_t& lhs,
+ const method_t& rhs)
+ { return lhs.name < rhs.name; }
+ };
};
struct ivar_t {
uint32_t alignment_raw;
uint32_t size;
- uint32_t alignment() {
+ uint32_t alignment() const {
if (alignment_raw == ~(uint32_t)0) return 1U << WORD_SHIFT;
return 1 << alignment_raw;
}
};
-struct ivar_list_t {
- uint32_t entsize;
- uint32_t count;
- ivar_t first;
-};
-
struct property_t {
const char *name;
const char *attributes;
};
-struct property_list_t {
- uint32_t entsize;
- uint32_t count;
- property_t first;
+// Two bits of entsize are used for fixup markers.
+struct method_list_t : entsize_list_tt<method_t, method_list_t, 0x3> {
+ bool isFixedUp() const;
+ void setFixedUp();
+
+ uint32_t indexOfMethod(const method_t *meth) const {
+ uint32_t i =
+ (uint32_t)(((uintptr_t)meth - (uintptr_t)this) / entsize());
+ assert(i < count);
+ return i;
+ }
+};
+
+struct ivar_list_t : entsize_list_tt<ivar_t, ivar_list_t, 0> {
};
+struct property_list_t : entsize_list_tt<property_t, property_list_t, 0> {
+};
+
+
typedef uintptr_t protocol_ref_t; // protocol_t *, but unremapped
-#define PROTOCOL_FIXED_UP (1<<31) // must never be set by compiler
+// Values for protocol_t->flags
+#define PROTOCOL_FIXED_UP_2 (1<<31) // must never be set by compiler
+#define PROTOCOL_FIXED_UP_1 (1<<30) // must never be set by compiler
+
+#define PROTOCOL_FIXED_UP_MASK (PROTOCOL_FIXED_UP_1 | PROTOCOL_FIXED_UP_2)
struct protocol_t : objc_object {
const char *mangledName;
property_list_t *instanceProperties;
uint32_t size; // sizeof(protocol_t)
uint32_t flags;
+ // Fields below this point are not always present on disk.
const char **extendedMethodTypes;
-
- // Fields below this point are allocated at runtime
- // and are not present on disk.
const char *_demangledName;
const char *demangledName();
return demangledName();
}
- bool isFixedUp() const {
- return flags & PROTOCOL_FIXED_UP;
- }
+ bool isFixedUp() const;
+ void setFixedUp();
bool hasExtendedMethodTypesField() const {
return size >= (offsetof(protocol_t, extendedMethodTypes)
// count is 64-bit by accident.
uintptr_t count;
protocol_ref_t list[0]; // variable-size
-};
-
-struct class_ro_t {
- uint32_t flags;
- uint32_t instanceStart;
- uint32_t instanceSize;
-#ifdef __LP64__
- uint32_t reserved;
-#endif
- const uint8_t * ivarLayout;
-
- const char * name;
- const method_list_t * baseMethods;
- const protocol_list_t * baseProtocols;
- const ivar_list_t * ivars;
-
- const uint8_t * weakIvarLayout;
- const property_list_t *baseProperties;
-};
-
-struct class_rw_t {
- uint32_t flags;
- uint32_t version;
-
- const class_ro_t *ro;
-
- union {
- method_list_t **method_lists; // RW_METHOD_ARRAY == 1
- method_list_t *method_list; // RW_METHOD_ARRAY == 0
- };
- struct chained_property_list *properties;
- const protocol_list_t ** protocols;
+ size_t byteSize() const {
+ return sizeof(*this) + count*sizeof(list[0]);
+ }
- Class firstSubclass;
- Class nextSiblingClass;
+ protocol_list_t *duplicate() const {
+ return (protocol_list_t *)memdup(this, this->byteSize());
+ }
- char *demangledName;
+ typedef protocol_ref_t* iterator;
+ typedef const protocol_ref_t* const_iterator;
- void setFlags(uint32_t set)
- {
- OSAtomicOr32Barrier(set, &flags);
+ const_iterator begin() const {
+ return list;
}
-
- void clearFlags(uint32_t clear)
- {
- OSAtomicXor32Barrier(clear, &flags);
+ iterator begin() {
+ return list;
+ }
+ const_iterator end() const {
+ return list + count;
}
+ iterator end() {
+ return list + count;
+ }
+};
- // set and clear must not overlap
- void changeFlags(uint32_t set, uint32_t clear)
- {
- assert((set & clear) == 0);
+struct locstamped_category_t {
+ category_t *cat;
+ struct header_info *hi;
+};
- uint32_t oldf, newf;
- do {
- oldf = flags;
- newf = (oldf | set) & ~clear;
- } while (!OSAtomicCompareAndSwap32Barrier(oldf, newf, (volatile int32_t *)&flags));
- }
+struct locstamped_category_list_t {
+ uint32_t count;
+#if __LP64__
+ uint32_t reserved;
+#endif
+ locstamped_category_t list[0];
};
#endif
// class has instance-specific GC layout
#define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1 << 21)
-// class's method list is an array of method lists
-#define RW_METHOD_ARRAY (1<<20)
+// available for use
+// #define RW_20 (1<<20)
// class has started realizing but not yet completed it
#define RW_REALIZING (1<<19)
#endif
+struct class_ro_t {
+ uint32_t flags;
+ uint32_t instanceStart;
+ uint32_t instanceSize;
+#ifdef __LP64__
+ uint32_t reserved;
+#endif
+
+ const uint8_t * ivarLayout;
+
+ const char * name;
+ method_list_t * baseMethodList;
+ protocol_list_t * baseProtocols;
+ const ivar_list_t * ivars;
+
+ const uint8_t * weakIvarLayout;
+ property_list_t *baseProperties;
+
+ method_list_t *baseMethods() const {
+ return baseMethodList;
+ }
+};
+
+
+/***********************************************************************
+* list_array_tt<Element, List>
+* Generic implementation for metadata that can be augmented by categories.
+*
+* Element is the underlying metadata type (e.g. method_t)
+* List is the metadata's list type (e.g. method_list_t)
+*
+* A list_array_tt has one of three values:
+* - empty
+* - a pointer to a single list
+* - an array of pointers to lists
+*
+* countLists/beginLists/endLists iterate the metadata lists
+* count/begin/end iterate the underlying metadata elements
+**********************************************************************/
+template <typename Element, typename List>
+class list_array_tt {
+ struct array_t {
+ uint32_t count;
+ List* lists[0];
+
+ static size_t byteSize(uint32_t count) {
+ return sizeof(array_t) + count*sizeof(lists[0]);
+ }
+ size_t byteSize() {
+ return byteSize(count);
+ }
+ };
+
+ protected:
+ class iterator {
+ List **lists;
+ List **listsEnd;
+ typename List::iterator m, mEnd;
+
+ public:
+ iterator(List **begin, List **end)
+ : lists(begin), listsEnd(end)
+ {
+ if (begin != end) {
+ m = (*begin)->begin();
+ mEnd = (*begin)->end();
+ }
+ }
+
+ const Element& operator * () const {
+ return *m;
+ }
+ Element& operator * () {
+ return *m;
+ }
+
+ bool operator != (const iterator& rhs) const {
+ if (lists != rhs.lists) return true;
+ if (lists == listsEnd) return false; // m is undefined
+ if (m != rhs.m) return true;
+ return false;
+ }
+
+ const iterator& operator ++ () {
+ assert(m != mEnd);
+ m++;
+ if (m == mEnd) {
+ assert(lists != listsEnd);
+ lists++;
+ if (lists != listsEnd) {
+ m = (*lists)->begin();
+ mEnd = (*lists)->end();
+ }
+ }
+ return *this;
+ }
+ };
+
+ private:
+ union {
+ List* list;
+ uintptr_t arrayAndFlag;
+ };
+
+ bool hasArray() const {
+ return arrayAndFlag & 1;
+ }
+
+ array_t *array() {
+ return (array_t *)(arrayAndFlag & ~1);
+ }
+
+ void setArray(array_t *array) {
+ arrayAndFlag = (uintptr_t)array | 1;
+ }
+
+ public:
+
+ uint32_t count() {
+ uint32_t result = 0;
+ for (auto lists = beginLists(), end = endLists();
+ lists != end;
+ ++lists)
+ {
+ result += (*lists)->count;
+ }
+ return result;
+ }
+
+ iterator begin() {
+ return iterator(beginLists(), endLists());
+ }
+
+ iterator end() {
+ List **e = endLists();
+ return iterator(e, e);
+ }
+
+
+ uint32_t countLists() {
+ if (hasArray()) {
+ return array()->count;
+ } else if (list) {
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+
+ List** beginLists() {
+ if (hasArray()) {
+ return array()->lists;
+ } else {
+ return &list;
+ }
+ }
+
+ List** endLists() {
+ if (hasArray()) {
+ return array()->lists + array()->count;
+ } else if (list) {
+ return &list + 1;
+ } else {
+ return &list;
+ }
+ }
+
+ void attachLists(List* const * addedLists, uint32_t addedCount) {
+ if (addedCount == 0) return;
+
+ if (hasArray()) {
+ // many lists -> many lists
+ uint32_t oldCount = array()->count;
+ uint32_t newCount = oldCount + addedCount;
+ setArray((array_t *)realloc(array(), array_t::byteSize(newCount)));
+ array()->count = newCount;
+ memmove(array()->lists + addedCount, array()->lists,
+ oldCount * sizeof(array()->lists[0]));
+ memcpy(array()->lists, addedLists,
+ addedCount * sizeof(array()->lists[0]));
+ }
+ else if (!list && addedCount == 1) {
+ // 0 lists -> 1 list
+ list = addedLists[0];
+ }
+ else {
+ // 1 list -> many lists
+ List* oldList = list;
+ uint32_t oldCount = oldList ? 1 : 0;
+ uint32_t newCount = oldCount + addedCount;
+ setArray((array_t *)malloc(array_t::byteSize(newCount)));
+ array()->count = newCount;
+ if (oldList) array()->lists[addedCount] = oldList;
+ memcpy(array()->lists, addedLists,
+ addedCount * sizeof(array()->lists[0]));
+ }
+ }
+
+ void tryFree() {
+ if (hasArray()) {
+ for (uint32_t i = 0; i < array()->count; i++) {
+ try_free(array()->lists[i]);
+ }
+ try_free(array());
+ }
+ else if (list) {
+ try_free(list);
+ }
+ }
+
+ template<typename Result>
+ Result duplicate() {
+ Result result;
+
+ if (hasArray()) {
+ array_t *a = array();
+ result.setArray((array_t *)memdup(a, a->byteSize()));
+ for (uint32_t i = 0; i < a->count; i++) {
+ result.array()->lists[i] = a->lists[i]->duplicate();
+ }
+ } else if (list) {
+ result.list = list->duplicate();
+ } else {
+ result.list = nil;
+ }
+
+ return result;
+ }
+};
+
+
+class method_array_t :
+ public list_array_tt<method_t, method_list_t>
+{
+ typedef list_array_tt<method_t, method_list_t> Super;
+
+ public:
+ method_list_t **beginCategoryMethodLists() {
+ return beginLists();
+ }
+
+ method_list_t **endCategoryMethodLists(Class cls);
+
+ method_array_t duplicate() {
+ return Super::duplicate<method_array_t>();
+ }
+};
+
+
+class property_array_t :
+ public list_array_tt<property_t, property_list_t>
+{
+ typedef list_array_tt<property_t, property_list_t> Super;
+
+ public:
+ property_array_t duplicate() {
+ return Super::duplicate<property_array_t>();
+ }
+};
+
+
+class protocol_array_t :
+ public list_array_tt<protocol_ref_t, protocol_list_t>
+{
+ typedef list_array_tt<protocol_ref_t, protocol_list_t> Super;
+
+ public:
+ protocol_array_t duplicate() {
+ return Super::duplicate<protocol_array_t>();
+ }
+};
+
+
+struct class_rw_t {
+ uint32_t flags;
+ uint32_t version;
+
+ const class_ro_t *ro;
+
+ method_array_t methods;
+ property_array_t properties;
+ protocol_array_t protocols;
+
+ Class firstSubclass;
+ Class nextSiblingClass;
+
+ char *demangledName;
+
+ void setFlags(uint32_t set)
+ {
+ OSAtomicOr32Barrier(set, &flags);
+ }
+
+ void clearFlags(uint32_t clear)
+ {
+ OSAtomicXor32Barrier(clear, &flags);
+ }
+
+ // set and clear must not overlap
+ void changeFlags(uint32_t set, uint32_t clear)
+ {
+ assert((set & clear) == 0);
+
+ uint32_t oldf, newf;
+ do {
+ oldf = flags;
+ newf = (oldf | set) & ~clear;
+ } while (!OSAtomicCompareAndSwap32Barrier(oldf, newf, (volatile int32_t *)&flags));
+ }
+};
+
+
struct class_data_bits_t {
// Values are the FAST_ flags above.
void printRequiresRawIsa(bool inherited);
bool canAllocIndexed() {
+ assert(!isFuture());
return !requiresRawIsa();
}
bool canAllocFast() {
+ assert(!isFuture());
return bits.canAllocFast();
}
struct method_list_t *classMethods;
struct protocol_list_t *protocols;
struct property_list_t *instanceProperties;
+
+ method_list_t *methodsForMeta(bool isMeta) {
+ if (isMeta) return classMethods;
+ else return instanceMethods;
+ }
+
+ property_list_t *propertiesForMeta(bool isMeta) {
+ if (isMeta) return nil; // classProperties;
+ else return instanceProperties;
+ }
};
struct objc_super2 {
static inline void
foreach_realized_class_and_subclass_2(Class top, bool (^code)(Class))
{
- // rwlock_assert_writing(&runtimeLock);
+ // runtimeLock.assertWriting();
assert(top);
Class cls = top;
while (1) {
});
}
-__END_DECLS
-
#endif
#define newprotocol(p) ((protocol_t *)p)
static void disableTaggedPointers();
-static void detach_class(Class cls, BOOL isMeta);
+static void detach_class(Class cls, bool isMeta);
static void free_class(Class cls);
static Class setSuperclass(Class cls, Class newSuper);
static Class realizeClass(Class cls);
static method_t *getMethodNoSuper_nolock(Class cls, SEL sel);
static method_t *getMethod_nolock(Class cls, SEL sel);
-static IMP _method_getImplementation(method_t *m);
-static IMP addMethod(Class cls, SEL name, IMP imp, const char *types, BOOL replace);
+static IMP addMethod(Class cls, SEL name, IMP imp, const char *types, bool replace);
static NXHashTable *realizedClasses(void);
static bool isRRSelector(SEL sel);
static bool isAWZSelector(SEL sel);
static bool methodListImplementsAWZ(const method_list_t *mlist);
static void updateCustomRR_AWZ(Class cls, method_t *meth);
static method_t *search_method_list(const method_list_t *mlist, SEL sel);
+static void flushCaches(Class cls);
#if SUPPORT_FIXUP
static void fixupMessageRef(message_ref_t *msg);
#endif
static bool MetaclassNSObjectAWZSwizzled;
static bool ClassNSObjectRRSwizzled;
+#define SDK_FORMAT "%hu.%hhu.%hhu"
+#define FORMAT_SDK(v) \
+ (unsigned short)(((uint32_t)(v))>>16), \
+ (unsigned char)(((uint32_t)(v))>>8), \
+ (unsigned char)(((uint32_t)(v))>>0)
+
id objc_noop_imp(id self, SEL _cmd __unused) {
return self;
**********************************************************************/
rwlock_t runtimeLock;
rwlock_t selLock;
-mutex_t cacheUpdateLock = MUTEX_INITIALIZER;
-recursive_mutex_t loadMethodLock = RECURSIVE_MUTEX_INITIALIZER;
+mutex_t cacheUpdateLock;
+recursive_mutex_t loadMethodLock;
#if SUPPORT_QOS_HACK
pthread_priority_t BackgroundPriority = 0;
pthread_priority_t MainPriority = 0;
-# if !NDEBUG
+# if DEBUG
static __unused void destroyQOSKey(void *arg) {
_objc_fatal("QoS override level at thread exit is %zu instead of zero",
(size_t)(uintptr_t)arg);
void lock_init(void)
{
- rwlock_init(&selLock);
- rwlock_init(&runtimeLock);
- recursive_mutex_init(&loadMethodLock);
-
#if SUPPORT_QOS_HACK
BackgroundPriority = _pthread_qos_class_encode(QOS_CLASS_BACKGROUND, 0, 0);
MainPriority = _pthread_qos_class_encode(qos_class_main(), 0, 0);
-# if !NDEBUG
+# if DEBUG
pthread_key_init_np(QOS_KEY, &destroyQOSKey);
# endif
#endif
STATIC_ASSERT((~ISA_MAGIC_MASK & ISA_MAGIC_VALUE) == 0);
// die if virtual address space bound goes up
-STATIC_ASSERT((~ISA_MASK & MACH_VM_MAX_ADDRESS) == 0);
+STATIC_ASSERT((~ISA_MASK & MACH_VM_MAX_ADDRESS) == 0 ||
+ ISA_MASK + sizeof(void*) == MACH_VM_MAX_ADDRESS);
#else
#endif
-typedef struct {
- category_t *cat;
- BOOL fromBundle;
-} category_pair_t;
-
-typedef struct {
- uint32_t count;
- category_pair_t list[0]; // variable-size
-} category_list;
-
-#define FOREACH_METHOD_LIST(_mlist, _cls, code) \
- do { \
- class_rw_t *_data = _cls->data(); \
- const method_list_t *_mlist; \
- if (_data->method_lists) { \
- if (_data->flags & RW_METHOD_ARRAY) { \
- method_list_t **_mlistp; \
- for (_mlistp=_data->method_lists; _mlistp[0]; _mlistp++){ \
- _mlist = _mlistp[0]; \
- code \
- } \
- } else { \
- _mlist = _data->method_list; \
- code \
- } \
- } \
- } while (0)
-
-
-// As above, but skips the class's base method list.
-#define FOREACH_CATEGORY_METHOD_LIST(_mlist, _cls, code) \
- do { \
- class_rw_t *_data = _cls->data(); \
- const method_list_t *_mlist; \
- if (_data->method_lists) { \
- if (_data->flags & RW_METHOD_ARRAY) { \
- if (_data->ro->baseMethods) { \
- /* has base methods: use all mlists except the last */ \
- method_list_t **_mlistp; \
- for (_mlistp=_data->method_lists; _mlistp[0] && _mlistp[1]; _mlistp++){ \
- _mlist = _mlistp[0]; \
- code \
- } \
- } else { \
- /* no base methods: use all mlists including the last */ \
- method_list_t **_mlistp; \
- for (_mlistp=_data->method_lists; _mlistp[0]; _mlistp++){ \
- _mlist = _mlistp[0]; \
- code \
- } \
- } \
- } else if (!_data->ro->baseMethods) { \
- /* no base methods: use all mlists including the last */ \
- _mlist = _data->method_list; \
- code \
- } \
- } \
- } while (0)
+typedef locstamped_category_list_t category_list;
/*
Shared cache's sorting and uniquing are not trusted, but do affect the
location of the selector name string.
Runtime fixed-up method lists get 2.
+
+ High two bits of protocol->flags is used as the fixed-up marker.
+ PREOPTIMIZED VERSION:
+ Protocols from shared cache are 1<<30.
+ Runtime fixed-up protocols get 1<<30.
+ UN-PREOPTIMIZED VERSION:
+ Protocols from shared cache are 1<<30.
+ Shared cache's fixups are not trusted.
+ Runtime fixed-up protocols get 3<<30.
*/
static uint32_t fixed_up_method_list = 3;
+static uint32_t fixed_up_protocol = PROTOCOL_FIXED_UP_1;
void
disableSharedCacheOptimizations(void)
{
fixed_up_method_list = 2;
+ fixed_up_protocol = PROTOCOL_FIXED_UP_1 | PROTOCOL_FIXED_UP_2;
}
-static bool
-isMethodListFixedUp(const method_list_t *mlist)
-{
- return (mlist->entsize_NEVER_USE & 3) == fixed_up_method_list;
-}
-
-
-static const char *sel_cname(SEL sel)
-{
- return (const char *)(void *)sel;
-}
-
-
-static void
-setMethodListFixedUp(method_list_t *mlist)
-{
- rwlock_assert_writing(&runtimeLock);
- assert(!isMethodListFixedUp(mlist));
- mlist->entsize_NEVER_USE =
- (mlist->entsize_NEVER_USE & ~3) | fixed_up_method_list;
-}
-
-/*
-static size_t chained_property_list_size(const chained_property_list *plist)
-{
- return sizeof(chained_property_list) +
- plist->count * sizeof(property_t);
-}
-*/
-
-static size_t protocol_list_size(const protocol_list_t *plist)
-{
- return sizeof(protocol_list_t) + plist->count * sizeof(protocol_t *);
-}
-
-
-// low bit used by dyld shared cache
-static uint32_t method_list_entsize(const method_list_t *mlist)
-{
- return mlist->entsize_NEVER_USE & ~3;
-}
-
-static size_t method_list_size(const method_list_t *mlist)
-{
- return sizeof(method_list_t) + (mlist->count-1)*method_list_entsize(mlist);
-}
-
-static method_t *method_list_nth(const method_list_t *mlist, uint32_t i)
-{
- return &mlist->get(i);
-}
-
-static uint32_t method_list_count(const method_list_t *mlist)
-{
- return mlist ? mlist->count : 0;
-}
-
-static void method_list_swap(method_list_t *mlist, uint32_t i, uint32_t j)
-{
- size_t entsize = method_list_entsize(mlist);
- char temp[entsize];
- memcpy(temp, method_list_nth(mlist, i), entsize);
- memcpy(method_list_nth(mlist, i), method_list_nth(mlist, j), entsize);
- memcpy(method_list_nth(mlist, j), temp, entsize);
+bool method_list_t::isFixedUp() const {
+ return flags() == fixed_up_method_list;
}
-static uint32_t method_list_index(const method_list_t *mlist,const method_t *m)
-{
- uint32_t i = (uint32_t)(((uintptr_t)m - (uintptr_t)mlist) / method_list_entsize(mlist));
- assert(i < mlist->count);
- return i;
+void method_list_t::setFixedUp() {
+ runtimeLock.assertWriting();
+ assert(!isFixedUp());
+ entsizeAndFlags = entsize() | fixed_up_method_list;
}
-
-static size_t ivar_list_size(const ivar_list_t *ilist)
-{
- return sizeof(ivar_list_t) + (ilist->count-1) * ilist->entsize;
+bool protocol_t::isFixedUp() const {
+ return (flags & PROTOCOL_FIXED_UP_MASK) == fixed_up_protocol;
}
-static ivar_t *ivar_list_nth(const ivar_list_t *ilist, uint32_t i)
-{
- return (ivar_t *)(i*ilist->entsize + (char *)&ilist->first);
+void protocol_t::setFixedUp() {
+ runtimeLock.assertWriting();
+ assert(!isFixedUp());
+ flags = (flags & ~PROTOCOL_FIXED_UP_MASK) | fixed_up_protocol;
}
-static method_list_t *cat_method_list(const category_t *cat, BOOL isMeta)
-{
- if (!cat) return nil;
-
- if (isMeta) return cat->classMethods;
- else return cat->instanceMethods;
-}
-
-static uint32_t cat_method_count(const category_t *cat, BOOL isMeta)
+method_list_t **method_array_t::endCategoryMethodLists(Class cls)
{
- method_list_t *cmlist = cat_method_list(cat, isMeta);
- return cmlist ? cmlist->count : 0;
+ method_list_t **mlists = beginLists();
+ method_list_t **mlistsEnd = endLists();
+
+ if (mlists == mlistsEnd || !cls->data()->ro->baseMethods())
+ {
+ // No methods, or no base methods.
+ // Everything here is a category method.
+ return mlistsEnd;
+ }
+
+ // Have base methods. Category methods are
+ // everything except the last method list.
+ return mlistsEnd - 1;
}
-static method_t *cat_method_nth(const category_t *cat, BOOL isMeta, uint32_t i)
+static const char *sel_cname(SEL sel)
{
- method_list_t *cmlist = cat_method_list(cat, isMeta);
- if (!cmlist) return nil;
-
- return method_list_nth(cmlist, i);
+ return (const char *)(void *)sel;
}
-static property_t *
-property_list_nth(const property_list_t *plist, uint32_t i)
+static size_t protocol_list_size(const protocol_list_t *plist)
{
- return (property_t *)(i*plist->entsize + (char *)&plist->first);
+ return sizeof(protocol_list_t) + plist->count * sizeof(protocol_t *);
}
-// fixme don't chain property lists
-typedef struct chained_property_list {
- struct chained_property_list *next;
- uint32_t count;
- property_t list[0]; // variable-size
-} chained_property_list;
-
static void try_free(const void *p)
{
swift_class_t *swiftSupercls = (swift_class_t *)supercls;
size_t superSize = swiftSupercls->classSize;
void *superBits = swiftSupercls->baseAddress();
- void *bits = _malloc_internal(superSize + extraBytes);
+ void *bits = malloc(superSize + extraBytes);
// Copy all of the superclass's data to the new class.
memcpy(bits, superBits, superSize);
**********************************************************************/
static class_ro_t *make_ro_writeable(class_rw_t *rw)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
if (rw->flags & RW_COPIED_RO) {
// already writeable, do nothing
} else {
class_ro_t *ro = (class_ro_t *)
- _memdup_internal(rw->ro, sizeof(*rw->ro));
+ memdup(rw->ro, sizeof(*rw->ro));
rw->ro = ro;
rw->flags |= RW_COPIED_RO;
}
**********************************************************************/
static NXMapTable *unattachedCategories(void)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
static NXMapTable *category_map = nil;
if (category_map) return category_map;
// fixme initial map size
- category_map = NXCreateMapTableFromZone(NXPtrValueMapPrototype, 16,
- _objc_internal_zone());
+ category_map = NXCreateMapTable(NXPtrValueMapPrototype, 16);
return category_map;
}
static void addUnattachedCategoryForClass(category_t *cat, Class cls,
header_info *catHeader)
{
- rwlock_assert_writing(&runtimeLock);
-
- BOOL catFromBundle = (catHeader->mhdr->filetype == MH_BUNDLE) ? YES: NO;
+ runtimeLock.assertWriting();
// DO NOT use cat->cls! cls may be cat->cls->isa instead
NXMapTable *cats = unattachedCategories();
list = (category_list *)NXMapGet(cats, cls);
if (!list) {
list = (category_list *)
- _calloc_internal(sizeof(*list) + sizeof(list->list[0]), 1);
+ calloc(sizeof(*list) + sizeof(list->list[0]), 1);
} else {
list = (category_list *)
- _realloc_internal(list, sizeof(*list) + sizeof(list->list[0]) * (list->count + 1));
+ realloc(list, sizeof(*list) + sizeof(list->list[0]) * (list->count + 1));
}
- list->list[list->count++] = (category_pair_t){cat, catFromBundle};
+ list->list[list->count++] = (locstamped_category_t){cat, catHeader};
NXMapInsert(cats, cls, list);
}
**********************************************************************/
static void removeUnattachedCategoryForClass(category_t *cat, Class cls)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
// DO NOT use cat->cls! cls may be cat->cls->isa instead
NXMapTable *cats = unattachedCategories();
* The result must be freed by the caller.
* Locking: runtimeLock must be held by the caller.
**********************************************************************/
-static category_list *unattachedCategoriesForClass(Class cls)
+static category_list *
+unattachedCategoriesForClass(Class cls, bool realizing)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
return (category_list *)NXMapRemove(unattachedCategories(), cls);
}
+/***********************************************************************
+* removeAllUnattachedCategoriesForClass
+* Deletes all unattached categories (loaded or not) for a class.
+* Locking: runtimeLock must be held by the caller.
+**********************************************************************/
+static void removeAllUnattachedCategoriesForClass(Class cls)
+{
+ runtimeLock.assertWriting();
+
+ void *list = NXMapRemove(unattachedCategories(), cls);
+ if (list) free(list);
+}
+
+
/***********************************************************************
* classNSObject
* Returns class NSObject.
static void printReplacements(Class cls, category_list *cats)
{
uint32_t c;
- BOOL isMeta = cls->isMetaClass();
+ bool isMeta = cls->isMetaClass();
if (!cats) return;
// Later categories override earlier ones.
for (c = 0; c < cats->count; c++) {
category_t *cat = cats->list[c].cat;
- uint32_t cmCount = cat_method_count(cat, isMeta);
- uint32_t m;
- for (m = 0; m < cmCount; m++) {
- uint32_t c2, m2;
- method_t *meth2 = nil;
- method_t *meth = cat_method_nth(cat, isMeta, m);
- SEL s = sel_registerName(sel_cname(meth->name));
+
+ method_list_t *mlist = cat->methodsForMeta(isMeta);
+ if (!mlist) continue;
+
+ for (const auto& meth : *mlist) {
+ SEL s = sel_registerName(sel_cname(meth.name));
// Don't warn about GC-ignored selectors
if (ignoreSelector(s)) continue;
-
+
+ // Search for replaced methods in method lookup order.
+ // Complain about the first duplicate only.
+
// Look for method in earlier categories
- for (c2 = 0; c2 < c; c2++) {
+ for (uint32_t c2 = 0; c2 < c; c2++) {
category_t *cat2 = cats->list[c2].cat;
- uint32_t cm2Count = cat_method_count(cat2, isMeta);
- for (m2 = 0; m2 < cm2Count; m2++) {
- meth2 = cat_method_nth(cat2, isMeta, m2);
- SEL s2 = sel_registerName(sel_cname(meth2->name));
- if (s == s2) goto whine;
+
+ const method_list_t *mlist2 = cat2->methodsForMeta(isMeta);
+ if (!mlist2) continue;
+
+ for (const auto& meth2 : *mlist2) {
+ SEL s2 = sel_registerName(sel_cname(meth2.name));
+ if (s == s2) {
+ logReplacedMethod(cls->nameForLogging(), s,
+ cls->isMetaClass(), cat->name,
+ meth2.imp, meth.imp);
+ goto complained;
+ }
}
}
// Look for method in cls
- FOREACH_METHOD_LIST(mlist, cls, {
- for (m2 = 0; m2 < mlist->count; m2++) {
- meth2 = method_list_nth(mlist, m2);
- SEL s2 = sel_registerName(sel_cname(meth2->name));
- if (s == s2) goto whine;
+ for (const auto& meth2 : cls->data()->methods) {
+ SEL s2 = sel_registerName(sel_cname(meth2.name));
+ if (s == s2) {
+ logReplacedMethod(cls->nameForLogging(), s,
+ cls->isMetaClass(), cat->name,
+ meth2.imp, meth.imp);
+ goto complained;
}
- });
-
- // Didn't find any override.
- continue;
+ }
- whine:
- // Found an override.
- logReplacedMethod(cls->nameForLogging(), s,
- cls->isMetaClass(), cat->name,
- _method_getImplementation(meth2),
- _method_getImplementation(meth));
+ complained:
+ ;
}
}
}
-static BOOL isBundleClass(Class cls)
+static bool isBundleClass(Class cls)
{
- return (cls->data()->ro->flags & RO_FROM_BUNDLE) ? YES : NO;
+ return cls->data()->ro->flags & RO_FROM_BUNDLE;
}
-static method_list_t *
+static void
fixupMethodList(method_list_t *mlist, bool bundleCopy, bool sort)
{
- rwlock_assert_writing(&runtimeLock);
- assert(!isMethodListFixedUp(mlist));
-
- mlist = (method_list_t *)
- _memdup_internal(mlist, method_list_size(mlist));
+ runtimeLock.assertWriting();
+ assert(!mlist->isFixedUp());
// fixme lock less in attachMethodLists ?
sel_lock();
// Unique selectors in list.
- uint32_t m;
- for (m = 0; m < mlist->count; m++) {
- method_t *meth = method_list_nth(mlist, m);
-
- const char *name = sel_cname(meth->name);
+ for (auto& meth : *mlist) {
+ const char *name = sel_cname(meth.name);
SEL sel = sel_registerNameNoLock(name, bundleCopy);
- meth->name = sel;
+ meth.name = sel;
if (ignoreSelector(sel)) {
- meth->imp = (IMP)&_objc_ignored_method;
+ meth.imp = (IMP)&_objc_ignored_method;
}
}
}
// Mark method list as uniqued and sorted
- setMethodListFixedUp(mlist);
-
- return mlist;
+ mlist->setFixedUp();
}
static void
-attachMethodLists(Class cls, method_list_t **addedLists, int addedCount,
- bool baseMethods, bool methodsFromBundle,
- bool flushCaches)
+prepareMethodLists(Class cls, method_list_t **addedLists, int addedCount,
+ bool baseMethods, bool methodsFromBundle)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
+
+ if (addedCount == 0) return;
// Don't scan redundantly
bool scanForCustomRR = !UseGC && !cls->hasCustomRR();
assert(!scanForCustomRR && !scanForCustomAWZ);
}
- // Method list array is nil-terminated.
- // Some elements of lists are nil; we must filter them out.
-
- method_list_t *oldBuf[2];
- method_list_t **oldLists;
- int oldCount = 0;
- if (cls->data()->flags & RW_METHOD_ARRAY) {
- oldLists = cls->data()->method_lists;
- } else {
- oldBuf[0] = cls->data()->method_list;
- oldBuf[1] = nil;
- oldLists = oldBuf;
- }
- if (oldLists) {
- while (oldLists[oldCount]) oldCount++;
- }
-
- int newCount = oldCount;
- for (int i = 0; i < addedCount; i++) {
- if (addedLists[i]) newCount++; // only non-nil entries get added
- }
-
- method_list_t *newBuf[2];
- method_list_t **newLists;
- if (newCount > 1) {
- newLists = (method_list_t **)
- _malloc_internal((1 + newCount) * sizeof(*newLists));
- } else {
- newLists = newBuf;
- }
-
// Add method lists to array.
// Reallocate un-fixed method lists.
// The new methods are PREPENDED to the method list array.
- newCount = 0;
- int i;
- for (i = 0; i < addedCount; i++) {
+ for (int i = 0; i < addedCount; i++) {
method_list_t *mlist = addedLists[i];
- if (!mlist) continue;
+ assert(mlist);
// Fixup selectors if necessary
- if (!isMethodListFixedUp(mlist)) {
- mlist = fixupMethodList(mlist, methodsFromBundle, true/*sort*/);
+ if (!mlist->isFixedUp()) {
+ fixupMethodList(mlist, methodsFromBundle, true/*sort*/);
}
// Scan for method implementations tracked by the class's flags
cls->setHasCustomAWZ();
scanForCustomAWZ = false;
}
-
- // Update method caches
- if (flushCaches) {
- cache_eraseMethods(cls, mlist);
- }
-
- // Fill method list array
- newLists[newCount++] = mlist;
- }
-
- // Copy old methods to the method list array
- for (i = 0; i < oldCount; i++) {
- newLists[newCount++] = oldLists[i];
- }
- if (oldLists && oldLists != oldBuf) free(oldLists);
-
- // nil-terminate
- newLists[newCount] = nil;
-
- if (newCount > 1) {
- assert(newLists != newBuf);
- cls->data()->method_lists = newLists;
- cls->setInfo(RW_METHOD_ARRAY);
- } else {
- assert(newLists == newBuf);
- cls->data()->method_list = newLists[0];
- assert(!(cls->data()->flags & RW_METHOD_ARRAY));
}
}
+
+// Attach method lists and properties and protocols from categories to a class.
+// Assumes the categories in cats are all loaded and sorted by load order,
+// oldest categories first.
static void
-attachCategoryMethods(Class cls, category_list *cats, bool flushCaches)
+attachCategories(Class cls, category_list *cats, bool flush_caches)
{
if (!cats) return;
if (PrintReplacedMethods) printReplacements(cls, cats);
bool isMeta = cls->isMetaClass();
+
+ // fixme rearrange to remove these intermediate allocations
method_list_t **mlists = (method_list_t **)
- _malloc_internal(cats->count * sizeof(*mlists));
+ malloc(cats->count * sizeof(*mlists));
+ property_list_t **proplists = (property_list_t **)
+ malloc(cats->count * sizeof(*proplists));
+ protocol_list_t **protolists = (protocol_list_t **)
+ malloc(cats->count * sizeof(*protolists));
// Count backwards through cats to get newest categories first
int mcount = 0;
+ int propcount = 0;
+ int protocount = 0;
int i = cats->count;
- BOOL fromBundle = NO;
+ bool fromBundle = NO;
while (i--) {
- method_list_t *mlist = cat_method_list(cats->list[i].cat, isMeta);
+ auto& entry = cats->list[i];
+
+ method_list_t *mlist = entry.cat->methodsForMeta(isMeta);
if (mlist) {
mlists[mcount++] = mlist;
- fromBundle |= cats->list[i].fromBundle;
+ fromBundle |= entry.hi->isBundle();
}
- }
-
- attachMethodLists(cls, mlists, mcount, NO, fromBundle, flushCaches);
-
- _free_internal(mlists);
-}
-
-static chained_property_list *
-buildPropertyList(const property_list_t *plist, category_list *cats, BOOL isMeta)
-{
- chained_property_list *newlist;
- uint32_t count = 0;
- uint32_t p, c;
-
- // Count properties in all lists.
- if (plist) count = plist->count;
- if (cats) {
- for (c = 0; c < cats->count; c++) {
- category_t *cat = cats->list[c].cat;
- /*
- if (isMeta && cat->classProperties) {
- count += cat->classProperties->count;
- }
- else*/
- if (!isMeta && cat->instanceProperties) {
- count += cat->instanceProperties->count;
- }
- }
- }
-
- if (count == 0) return nil;
-
- // Allocate new list.
- newlist = (chained_property_list *)
- _malloc_internal(sizeof(*newlist) + count * sizeof(property_t));
- newlist->count = 0;
- newlist->next = nil;
-
- // Copy properties; newest categories first, then ordinary properties
- if (cats) {
- c = cats->count;
- while (c--) {
- property_list_t *cplist;
- category_t *cat = cats->list[c].cat;
- /*
- if (isMeta) {
- cplist = cat->classProperties;
- } else */
- {
- cplist = cat->instanceProperties;
- }
- if (cplist) {
- for (p = 0; p < cplist->count; p++) {
- newlist->list[newlist->count++] =
- *property_list_nth(cplist, p);
- }
- }
+ property_list_t *proplist = entry.cat->propertiesForMeta(isMeta);
+ if (proplist) {
+ proplists[propcount++] = proplist;
}
- }
- if (plist) {
- for (p = 0; p < plist->count; p++) {
- newlist->list[newlist->count++] = *property_list_nth(plist, p);
- }
- }
- assert(newlist->count == count);
-
- return newlist;
-}
-
-
-static const protocol_list_t **
-buildProtocolList(category_list *cats, const protocol_list_t *base,
- const protocol_list_t **protos)
-{
- const protocol_list_t **p, **newp;
- const protocol_list_t **newprotos;
- unsigned int count = 0;
- unsigned int i;
-
- // count protocol list in base
- if (base) count++;
-
- // count protocol lists in cats
- if (cats) for (i = 0; i < cats->count; i++) {
- category_t *cat = cats->list[i].cat;
- if (cat->protocols) count++;
- }
-
- // no base or category protocols? return existing protocols unchanged
- if (count == 0) return protos;
-
- // count protocol lists in protos
- for (p = protos; p && *p; p++) {
- count++;
+ protocol_list_t *protolist = entry.cat->protocols;
+ if (protolist) {
+ protolists[protocount++] = protolist;
+ }
}
- if (count == 0) return nil;
-
- newprotos = (const protocol_list_t **)
- _malloc_internal((count+1) * sizeof(protocol_list_t *));
- newp = newprotos;
-
- if (base) {
- *newp++ = base;
- }
+ auto rw = cls->data();
- for (p = protos; p && *p; p++) {
- *newp++ = *p;
- }
-
- if (cats) for (i = 0; i < cats->count; i++) {
- category_t *cat = cats->list[i].cat;
- if (cat->protocols) {
- *newp++ = cat->protocols;
- }
- }
+ prepareMethodLists(cls, mlists, mcount, NO, fromBundle);
+ rw->methods.attachLists(mlists, mcount);
+ free(mlists);
+ if (flush_caches && mcount > 0) flushCaches(cls);
- *newp = nil;
+ rw->properties.attachLists(proplists, propcount);
+ free(proplists);
- return newprotos;
+ rw->protocols.attachLists(protolists, protocount);
+ free(protolists);
}
**********************************************************************/
static void methodizeClass(Class cls)
{
- category_list *cats;
- BOOL isMeta;
-
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
- isMeta = cls->isMetaClass();
+ bool isMeta = cls->isMetaClass();
+ auto rw = cls->data();
+ auto ro = rw->ro;
// Methodizing for the first time
if (PrintConnecting) {
_objc_inform("CLASS: methodizing class '%s' %s",
cls->nameForLogging(), isMeta ? "(meta)" : "");
}
-
- // Build method and protocol and property lists.
- // Include methods and protocols and properties from categories, if any
- attachMethodLists(cls, (method_list_t **)&cls->data()->ro->baseMethods, 1,
- YES, isBundleClass(cls), NO);
+ // Install methods and properties that the class implements itself.
+ method_list_t *list = ro->baseMethods();
+ if (list) {
+ prepareMethodLists(cls, &list, 1, YES, isBundleClass(cls));
+ rw->methods.attachLists(&list, 1);
+ }
+
+ property_list_t *proplist = ro->baseProperties;
+ if (proplist) {
+ rw->properties.attachLists(&proplist, 1);
+ }
+
+ protocol_list_t *protolist = ro->baseProtocols;
+ if (protolist) {
+ rw->protocols.attachLists(&protolist, 1);
+ }
// Root classes get bonus method implementations if they don't have
// them already. These apply before category replacements.
-
if (cls->isRootMetaclass()) {
// root metaclass
addMethod(cls, SEL_initialize, (IMP)&objc_noop_imp, "", NO);
}
- cats = unattachedCategoriesForClass(cls);
- attachCategoryMethods(cls, cats, NO);
-
- if (cats || cls->data()->ro->baseProperties) {
- cls->data()->properties =
- buildPropertyList(cls->data()->ro->baseProperties, cats, isMeta);
- }
-
- if (cats || cls->data()->ro->baseProtocols) {
- cls->data()->protocols =
- buildProtocolList(cats, cls->data()->ro->baseProtocols, nil);
- }
+ // Attach categories.
+ category_list *cats = unattachedCategoriesForClass(cls, true /*realizing*/);
+ attachCategories(cls, cats, false /*don't flush caches*/);
if (PrintConnecting) {
- uint32_t i;
if (cats) {
- for (i = 0; i < cats->count; i++) {
+ for (uint32_t i = 0; i < cats->count; i++) {
_objc_inform("CLASS: attached category %c%s(%s)",
isMeta ? '+' : '-',
cls->nameForLogging(), cats->list[i].cat->name);
}
}
- if (cats) _free_internal(cats);
+ if (cats) free(cats);
-#ifndef NDEBUG
+#if DEBUG
// Debug: sanity-check all SELs; log method list contents
- FOREACH_METHOD_LIST(mlist, cls, {
- method_list_t::method_iterator iter = mlist->begin();
- method_list_t::method_iterator end = mlist->end();
- for ( ; iter != end; ++iter) {
- if (PrintConnecting) {
- _objc_inform("METHOD %c[%s %s]", isMeta ? '+' : '-',
- cls->nameForLogging(), sel_getName(iter->name));
- }
- assert(ignoreSelector(iter->name) || sel_registerName(sel_getName(iter->name))==iter->name);
+ for (const auto& meth : rw->methods) {
+ if (PrintConnecting) {
+ _objc_inform("METHOD %c[%s %s]", isMeta ? '+' : '-',
+ cls->nameForLogging(), sel_getName(meth.name));
}
- });
+ assert(ignoreSelector(meth.name) ||
+ sel_registerName(sel_getName(meth.name)) == meth.name);
+ }
#endif
}
static void remethodizeClass(Class cls)
{
category_list *cats;
- BOOL isMeta;
+ bool isMeta;
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
isMeta = cls->isMetaClass();
// Re-methodizing: check for more categories
- if ((cats = unattachedCategoriesForClass(cls))) {
- chained_property_list *newproperties;
- const protocol_list_t **newprotos;
-
+ if ((cats = unattachedCategoriesForClass(cls, false/*not realizing*/))) {
if (PrintConnecting) {
_objc_inform("CLASS: attaching categories to class '%s' %s",
cls->nameForLogging(), isMeta ? "(meta)" : "");
}
- // Update methods, properties, protocols
-
- attachCategoryMethods(cls, cats, YES);
-
- newproperties = buildPropertyList(nil, cats, isMeta);
- if (newproperties) {
- newproperties->next = cls->data()->properties;
- cls->data()->properties = newproperties;
- }
-
- newprotos = buildProtocolList(cats, nil, cls->data()->protocols);
- if (cls->data()->protocols && cls->data()->protocols != newprotos) {
- _free_internal(cls->data()->protocols);
- }
- cls->data()->protocols = newprotos;
-
- _free_internal(cats);
+ attachCategories(cls, cats, true /*flush caches*/);
+ free(cats);
}
}
static NXMapTable *nonmeta_class_map = nil;
static NXMapTable *nonMetaClasses(void)
{
- rwlock_assert_locked(&runtimeLock);
+ runtimeLock.assertLocked();
if (nonmeta_class_map) return nonmeta_class_map;
// nonmeta_class_map is typically small
INIT_ONCE_PTR(nonmeta_class_map,
- NXCreateMapTableFromZone(NXPtrValueMapPrototype, 32,
- _objc_internal_zone()),
+ NXCreateMapTable(NXPtrValueMapPrototype, 32),
NXFreeMapTable(v));
return nonmeta_class_map;
**********************************************************************/
static void addNonMetaClass(Class cls)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
void *old;
old = NXMapInsert(nonMetaClasses(), cls->ISA(), cls);
static void removeNonMetaClass(Class cls)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
NXMapRemove(nonMetaClasses(), cls->ISA());
}
char *name;
- if (strncmp(prefix, "Swift", prefixLength) == 0) {
+ if (prefixLength == 5 && memcmp(prefix, "Swift", 5) == 0) {
asprintf(&name, "_Tt%cSs%zu%.*s%s",
isProtocol ? 'P' : 'C',
suffixLength, (int)suffixLength, suffix,
static Class getClass_impl(const char *name)
{
- rwlock_assert_locked(&runtimeLock);
+ runtimeLock.assertLocked();
// allocated in _read_images
assert(gdb_objc_realized_classes);
static Class getClass(const char *name)
{
- rwlock_assert_locked(&runtimeLock);
+ runtimeLock.assertLocked();
// Try name as-is
Class result = getClass_impl(name);
* Warns about duplicate class names and keeps the old mapping.
* Locking: runtimeLock must be held by the caller
**********************************************************************/
-static void addNamedClass(Class cls, const char *name)
+static void addNamedClass(Class cls, const char *name, Class replacing = nil)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
Class old;
- if ((old = getClass(name))) {
+ if ((old = getClass(name)) && old != replacing) {
inform_duplicate(name, old, cls);
// getNonMetaClass uses name lookups. Classes not found by name
**********************************************************************/
static void removeNamedClass(Class cls, const char *name)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
assert(!(cls->data()->flags & RO_META));
if (cls == NXMapGet(gdb_objc_realized_classes, name)) {
NXMapRemove(gdb_objc_realized_classes, name);
static NXHashTable *realizedClasses(void)
{
- rwlock_assert_locked(&runtimeLock);
+ runtimeLock.assertLocked();
// allocated in _read_images
assert(realized_class_hash);
static NXHashTable *realized_metaclass_hash = nil;
static NXHashTable *realizedMetaclasses(void)
{
- rwlock_assert_locked(&runtimeLock);
+ runtimeLock.assertLocked();
// allocated in _read_images
assert(realized_metaclass_hash);
**********************************************************************/
static void addRealizedClass(Class cls)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
void *old;
old = NXHashInsert(realizedClasses(), cls);
objc_addRegisteredClass(cls);
**********************************************************************/
static void removeRealizedClass(Class cls)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
if (cls->isRealized()) {
assert(!cls->isMetaClass());
NXHashRemove(realizedClasses(), cls);
**********************************************************************/
static void addRealizedMetaclass(Class cls)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
void *old;
old = NXHashInsert(realizedMetaclasses(), cls);
assert(cls->isMetaClass());
**********************************************************************/
static void removeRealizedMetaclass(Class cls)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
if (cls->isRealized()) {
assert(cls->isMetaClass());
NXHashRemove(realizedMetaclasses(), cls);
* Returns the classname => future class map for unrealized future classes.
* Locking: runtimeLock must be held by the caller
**********************************************************************/
-static NXMapTable *futureNamedClasses(void)
+static NXMapTable *future_named_class_map = nil;
+static NXMapTable *futureNamedClasses()
{
- rwlock_assert_writing(&runtimeLock);
-
- static NXMapTable *future_named_class_map = nil;
+ runtimeLock.assertWriting();
if (future_named_class_map) return future_named_class_map;
// future_named_class_map is big enough for CF's classes and a few others
future_named_class_map =
- NXCreateMapTableFromZone(NXStrValueMapPrototype, 32,
- _objc_internal_zone());
+ NXCreateMapTable(NXStrValueMapPrototype, 32);
return future_named_class_map;
}
{
void *old;
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
if (PrintFuture) {
_objc_inform("FUTURE: reserving %p for %s", (void*)cls, name);
}
- class_rw_t *rw = (class_rw_t *)_calloc_internal(sizeof(class_rw_t), 1);
- class_ro_t *ro = (class_ro_t *)_calloc_internal(sizeof(class_ro_t), 1);
- ro->name = _strdup_internal(name);
+ class_rw_t *rw = (class_rw_t *)calloc(sizeof(class_rw_t), 1);
+ class_ro_t *ro = (class_ro_t *)calloc(sizeof(class_ro_t), 1);
+ ro->name = strdup(name);
rw->ro = ro;
cls->setData(rw);
cls->data()->flags = RO_FUTURE;
/***********************************************************************
-* removeFutureNamedClass
+* popFutureNamedClass
* Removes the named class from the unrealized future class list,
* because it has been realized.
+* Returns nil if the name is not used by a future class.
* Locking: runtimeLock must be held by the caller
**********************************************************************/
-static void removeFutureNamedClass(const char *name)
+static Class popFutureNamedClass(const char *name)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
- NXMapKeyFreeingRemove(futureNamedClasses(), name);
+ Class cls = nil;
+
+ if (future_named_class_map) {
+ cls = (Class)NXMapKeyFreeingRemove(future_named_class_map, name);
+ if (cls && NXCountMapTable(future_named_class_map) == 0) {
+ NXFreeMapTable(future_named_class_map);
+ future_named_class_map = nil;
+ }
+ }
+
+ return cls;
}
* Returns the oldClass => nil map for ignored weak-linked classes.
* Locking: runtimeLock must be read- or write-locked by the caller
**********************************************************************/
-static NXMapTable *remappedClasses(BOOL create)
+static NXMapTable *remappedClasses(bool create)
{
static NXMapTable *remapped_class_map = nil;
- rwlock_assert_locked(&runtimeLock);
+ runtimeLock.assertLocked();
if (remapped_class_map) return remapped_class_map;
if (!create) return nil;
// remapped_class_map is big enough to hold CF's classes and a few others
INIT_ONCE_PTR(remapped_class_map,
- NXCreateMapTableFromZone(NXPtrValueMapPrototype, 32,
- _objc_internal_zone()),
+ NXCreateMapTable(NXPtrValueMapPrototype, 32),
NXFreeMapTable(v));
return remapped_class_map;
* Returns YES if no classes have been remapped
* Locking: runtimeLock must be read- or write-locked by the caller
**********************************************************************/
-static BOOL noClassesRemapped(void)
+static bool noClassesRemapped(void)
{
- rwlock_assert_locked(&runtimeLock);
+ runtimeLock.assertLocked();
- BOOL result = (remappedClasses(NO) == nil);
+ bool result = (remappedClasses(NO) == nil);
+#if DEBUG
+ // Catch construction of an empty table, which defeats optimization.
+ NXMapTable *map = remappedClasses(NO);
+ if (map) assert(NXCountMapTable(map) > 0);
+#endif
return result;
}
**********************************************************************/
static void addRemappedClass(Class oldcls, Class newcls)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
if (PrintFuture) {
_objc_inform("FUTURE: using %p instead of %p for %s",
- (void*)oldcls, (void*)newcls, oldcls->nameForLogging());
+ (void*)newcls, (void*)oldcls, oldcls->nameForLogging());
}
void *old;
**********************************************************************/
static Class remapClass(Class cls)
{
- rwlock_assert_locked(&runtimeLock);
+ runtimeLock.assertLocked();
Class c2;
if (!cls) return nil;
- if (NXMapMember(remappedClasses(YES), cls, (void**)&c2) == NX_MAPNOTAKEY) {
+ NXMapTable *map = remappedClasses(NO);
+ if (!map || NXMapMember(map, cls, (void**)&c2) == NX_MAPNOTAKEY) {
return cls;
} else {
return c2;
Class _class_remap(Class cls)
{
- rwlock_read(&runtimeLock);
- Class result = remapClass(cls);
- rwlock_unlock_read(&runtimeLock);
- return result;
+ rwlock_reader_t lock(runtimeLock);
+ return remapClass(cls);
}
/***********************************************************************
**********************************************************************/
static void remapClassRef(Class *clsref)
{
- rwlock_assert_locked(&runtimeLock);
+ runtimeLock.assertLocked();
Class newcls = remapClass(*clsref);
if (*clsref != newcls) *clsref = newcls;
static Class getNonMetaClass(Class metacls, id inst)
{
static int total, named, secondary, sharedcache;
- rwlock_assert_locked(&runtimeLock);
+ runtimeLock.assertLocked();
realizeClass(metacls);
assert(cls->ISA() == metacls);
return cls;
}
-#if !NDEBUG
+#if DEBUG
_objc_fatal("cls is not an instance of metacls");
#else
// release build: be forgiving and fall through to slow lookups
**********************************************************************/
Class _class_getNonMetaClass(Class cls, id obj)
{
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
cls = getNonMetaClass(cls, obj);
assert(cls->isRealized());
- rwlock_unlock_write(&runtimeLock);
-
return cls;
}
**********************************************************************/
static void addSubclass(Class supercls, Class subcls)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
if (supercls && subcls) {
assert(supercls->isRealized());
**********************************************************************/
static void removeSubclass(Class supercls, Class subcls)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
assert(supercls->isRealized());
assert(subcls->isRealized());
assert(subcls->superclass == supercls);
{
static NXMapTable *protocol_map = nil;
- rwlock_assert_locked(&runtimeLock);
+ runtimeLock.assertLocked();
INIT_ONCE_PTR(protocol_map,
- NXCreateMapTableFromZone(NXStrValueMapPrototype, 16,
- _objc_internal_zone()),
+ NXCreateMapTable(NXStrValueMapPrototype, 16),
NXFreeMapTable(v) );
return protocol_map;
* Looks up a protocol by name. Demangled Swift names are recognized.
* Locking: runtimeLock must be read- or write-locked by the caller.
**********************************************************************/
-static Protocol *getProtocol_impl(const char *name)
-{
- rwlock_assert_locked(&runtimeLock);
-
- return (Protocol *)NXMapGet(protocols(), name);
-}
-
static Protocol *getProtocol(const char *name)
{
- rwlock_assert_locked(&runtimeLock);
+ runtimeLock.assertLocked();
// Try name as-is.
- Protocol *result = getProtocol_impl(name);
+ Protocol *result = (Protocol *)NXMapGet(protocols(), name);
if (result) return result;
// Try Swift-mangled equivalent of the given name.
if (char *swName = copySwiftV1MangledName(name, true/*isProtocol*/)) {
- result = getProtocol_impl(swName);
+ result = (Protocol *)NXMapGet(protocols(), swName);
free(swName);
return result;
}
**********************************************************************/
static protocol_t *remapProtocol(protocol_ref_t proto)
{
- rwlock_assert_locked(&runtimeLock);
+ runtimeLock.assertLocked();
protocol_t *newproto = (protocol_t *)
getProtocol(((protocol_t *)proto)->mangledName);
* Fix up a protocol ref, in case the protocol referenced has been reallocated.
* Locking: runtimeLock must be read- or write-locked by the caller
**********************************************************************/
+static size_t UnfixedProtocolReferences;
static void remapProtocolRef(protocol_t **protoref)
{
- rwlock_assert_locked(&runtimeLock);
+ runtimeLock.assertLocked();
protocol_t *newproto = remapProtocol((protocol_ref_t)*protoref);
- if (*protoref != newproto) *protoref = newproto;
+ if (*protoref != newproto) {
+ *protoref = newproto;
+ UnfixedProtocolReferences++;
+ }
}
static void moveIvars(class_ro_t *ro, uint32_t superSize,
layout_bitmap *ivarBitmap, layout_bitmap *weakBitmap)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
uint32_t diff;
- uint32_t i;
assert(superSize > ro->instanceStart);
diff = superSize - ro->instanceStart;
if (ro->ivars) {
// Find maximum alignment in this class's ivars
uint32_t maxAlignment = 1;
- for (i = 0; i < ro->ivars->count; i++) {
- ivar_t *ivar = ivar_list_nth(ro->ivars, i);
- if (!ivar->offset) continue; // anonymous bitfield
+ for (const auto& ivar : *ro->ivars) {
+ if (!ivar.offset) continue; // anonymous bitfield
- uint32_t alignment = ivar->alignment();
+ uint32_t alignment = ivar.alignment();
if (alignment > maxAlignment) maxAlignment = alignment;
}
if (diff & alignMask) diff = (diff + alignMask) & ~alignMask;
// Slide all of this class's ivars en masse
- for (i = 0; i < ro->ivars->count; i++) {
- ivar_t *ivar = ivar_list_nth(ro->ivars, i);
- if (!ivar->offset) continue; // anonymous bitfield
+ for (const auto& ivar : *ro->ivars) {
+ if (!ivar.offset) continue; // anonymous bitfield
- uint32_t oldOffset = (uint32_t)*ivar->offset;
+ uint32_t oldOffset = (uint32_t)*ivar.offset;
uint32_t newOffset = oldOffset + diff;
- *ivar->offset = newOffset;
+ *ivar.offset = newOffset;
if (PrintIvars) {
- _objc_inform("IVARS: offset %u -> %u for %s (size %u, align %u)",
- oldOffset, newOffset, ivar->name,
- ivar->size, ivar->alignment());
+ _objc_inform("IVARS: offset %u -> %u for %s "
+ "(size %u, align %u)",
+ oldOffset, newOffset, ivar.name,
+ ivar.size, ivar.alignment());
}
}
**********************************************************************/
static ivar_t *getIvar(Class cls, const char *name)
{
- rwlock_assert_locked(&runtimeLock);
+ runtimeLock.assertLocked();
const ivar_list_t *ivars;
assert(cls->isRealized());
if ((ivars = cls->data()->ro->ivars)) {
- uint32_t i;
- for (i = 0; i < ivars->count; i++) {
- ivar_t *ivar = ivar_list_nth(ivars, i);
- if (!ivar->offset) continue; // anonymous bitfield
-
- // ivar->name may be nil for anonymous bitfields etc.
- if (ivar->name && 0 == strcmp(name, ivar->name)) {
- return ivar;
+ for (auto& ivar : *ivars) {
+ if (!ivar.offset) continue; // anonymous bitfield
+
+ // ivar.name may be nil for anonymous bitfields etc.
+ if (ivar.name && 0 == strcmp(name, ivar.name)) {
+ return &ivar;
}
}
}
/* debug: print them all before sliding
if (ro->ivars) {
- uint32_t i;
- for (i = 0; i < ro->ivars->count; i++) {
- ivar_t *ivar = ivar_list_nth(ro->ivars, i);
- if (!ivar->offset) continue; // anonymous bitfield
+ for (const auto& ivar : *ro->ivars) {
+ if (!ivar.offset) continue; // anonymous bitfield
_objc_inform("IVARS: %s.%s (offset %u, size %u, align %u)",
- ro->name, ivar->name,
- *ivar->offset, ivar->size, ivar->alignment());
+ ro->name, ivar.name,
+ *ivar.offset, ivar.size, ivar.alignment());
}
}
*/
// default to word size to simplify ivar update
uint32_t alignment = 1<<WORD_SHIFT;
if (ro->ivars) {
- uint32_t i;
- for (i = 0; i < ro->ivars->count; i++) {
- ivar_t *ivar = ivar_list_nth(ro->ivars, i);
- if (ivar->alignment() > alignment) {
- alignment = ivar->alignment();
+ for (const auto& ivar : *ro->ivars) {
+ if (ivar.alignment() > alignment) {
+ alignment = ivar.alignment();
}
}
}
}
if (ro->ivars) {
- uint32_t i;
- for (i = 0; i < ro->ivars->count; i++) {
- ivar_t *ivar = ivar_list_nth(ro->ivars, i);
- if (!ivar->offset) continue; // anonymous bitfield
- *ivar->offset -= delta;
+ for (const auto& ivar : *ro->ivars) {
+ if (!ivar.offset) continue; // anonymous bitfield
+ *ivar.offset -= delta;
}
}
**********************************************************************/
static Class realizeClass(Class cls)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
const class_ro_t *ro;
class_rw_t *rw;
Class supercls;
Class metacls;
- BOOL isMeta;
+ bool isMeta;
if (!cls) return nil;
if (cls->isRealized()) return cls;
cls->changeInfo(RW_REALIZED|RW_REALIZING, RW_FUTURE);
} else {
// Normal class. Allocate writeable class data.
- rw = (class_rw_t *)_calloc_internal(sizeof(class_rw_t), 1);
+ rw = (class_rw_t *)calloc(sizeof(class_rw_t), 1);
rw->ro = ro;
rw->flags = RW_REALIZED|RW_REALIZING;
cls->setData(rw);
}
- isMeta = (ro->flags & RO_META) ? YES : NO;
+ isMeta = ro->flags & RO_META;
rw->version = isMeta ? 7 : 0; // old runtime went up to 6
* missingWeakSuperclass
* Return YES if some superclass of cls was weak-linked and is missing.
**********************************************************************/
-static BOOL
+static bool
missingWeakSuperclass(Class cls)
{
assert(!cls->isRealized());
**********************************************************************/
static void realizeAllClassesInImage(header_info *hi)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
size_t count, i;
classref_t *classlist;
**********************************************************************/
static void realizeAllClasses(void)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
header_info *hi;
for (hi = FirstHeader; hi; hi = hi->next) {
**********************************************************************/
Class _objc_allocateFutureClass(const char *name)
{
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
Class cls;
- NXMapTable *future_named_class_map = futureNamedClasses();
+ NXMapTable *map = futureNamedClasses();
- if ((cls = (Class)NXMapGet(future_named_class_map, name))) {
+ if ((cls = (Class)NXMapGet(map, name))) {
// Already have a future class for this name.
- rwlock_unlock_write(&runtimeLock);
return cls;
}
cls = _calloc_class(sizeof(objc_class));
addFutureNamedClass(name, cls);
- rwlock_unlock_write(&runtimeLock);
return cls;
}
}
-/***********************************************************************
-*
-**********************************************************************/
-void objc_setFutureClass(Class cls, const char *name)
-{
- // fixme hack do nothing - NSCFString handled specially elsewhere
-}
-
-
BOOL _class_isFutureClass(Class cls)
{
return cls && cls->isFuture();
**********************************************************************/
static void flushCaches(Class cls)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
- mutex_lock(&cacheUpdateLock);
+ mutex_locker_t lock(cacheUpdateLock);
if (cls) {
foreach_realized_class_and_subclass(cls, ^(Class c){
- cache_erase_nolock(&c->cache);
+ cache_erase_nolock(c);
});
if (!cls->superclass) {
// root; metaclasses are subclasses and were flushed above
} else {
foreach_realized_class_and_subclass(cls->ISA(), ^(Class c){
- cache_erase_nolock(&c->cache);
+ cache_erase_nolock(c);
});
}
}
NXHashTable *classes = realizedClasses();
NXHashState state = NXInitHashState(classes);
while (NXNextHashState(classes, &state, (void **)&c)) {
- cache_erase_nolock(&c->cache);
+ cache_erase_nolock(c);
}
classes = realizedMetaclasses();
state = NXInitHashState(classes);
while (NXNextHashState(classes, &state, (void **)&c)) {
- cache_erase_nolock(&c->cache);
+ cache_erase_nolock(c);
}
}
-
- mutex_unlock(&cacheUpdateLock);
-}
-
-
-static void flushImps(Class cls, SEL sel1, IMP imp1, SEL sel2, IMP imp2)
-{
- rwlock_assert_writing(&runtimeLock);
-
- mutex_lock(&cacheUpdateLock);
-
- if (cls) {
- foreach_realized_class_and_subclass(cls, ^(Class c){
- cache_eraseImp_nolock(c, sel1, imp1);
- if (sel2) cache_eraseImp_nolock(c, sel2, imp2);
- });
-
- if (!cls->superclass) {
- // root; metaclasses are subclasses and were flushed above
- } else {
- foreach_realized_class_and_subclass(cls->ISA(), ^(Class c){
- cache_eraseImp_nolock(c, sel1, imp1);
- if (sel2) cache_eraseImp_nolock(c, sel2, imp2);
- });
- }
- }
- else {
- Class c;
- NXHashTable *classes = realizedClasses();
- NXHashState state = NXInitHashState(classes);
- while (NXNextHashState(classes, &state, (void **)&c)) {
- cache_eraseImp_nolock(c, sel1, imp1);
- if (sel2) cache_eraseImp_nolock(c, sel2, imp2);
- }
- classes = realizedMetaclasses();
- state = NXInitHashState(classes);
- while (NXNextHashState(classes, &state, (void **)&c)) {
- cache_eraseImp_nolock(c, sel1, imp1);
- if (sel2) cache_eraseImp_nolock(c, sel2, imp2);
- }
- }
-
- mutex_unlock(&cacheUpdateLock);
}
void _objc_flush_caches(Class cls)
{
- rwlock_write(&runtimeLock);
- flushCaches(cls);
- rwlock_unlock_write(&runtimeLock);
+ {
+ rwlock_writer_t lock(runtimeLock);
+ flushCaches(cls);
+ }
if (!cls) {
// collectALot if cls==nil
- mutex_lock(&cacheUpdateLock);
+ mutex_locker_t lock(cacheUpdateLock);
cache_collect(true);
- mutex_unlock(&cacheUpdateLock);
}
}
* Locking: write-locks runtimeLock
**********************************************************************/
const char *
-map_images(enum dyld_image_states state, uint32_t infoCount,
- const struct dyld_image_info infoList[])
+map_2_images(enum dyld_image_states state, uint32_t infoCount,
+ const struct dyld_image_info infoList[])
{
- const char *err;
-
- rwlock_write(&runtimeLock);
- err = map_images_nolock(state, infoCount, infoList);
- rwlock_unlock_write(&runtimeLock);
- return err;
+ rwlock_writer_t lock(runtimeLock);
+ return map_images_nolock(state, infoCount, infoList);
}
load_images(enum dyld_image_states state, uint32_t infoCount,
const struct dyld_image_info infoList[])
{
- BOOL found;
+ bool found;
+
+ // Return without taking locks if there are no +load methods here.
+ found = false;
+ for (uint32_t i = 0; i < infoCount; i++) {
+ if (hasLoadMethods((const headerType *)infoList[i].imageLoadAddress)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) return nil;
- recursive_mutex_lock(&loadMethodLock);
+ recursive_mutex_locker_t lock(loadMethodLock);
// Discover load methods
- rwlock_write(&runtimeLock);
- found = load_images_nolock(state, infoCount, infoList);
- rwlock_unlock_write(&runtimeLock);
+ {
+ rwlock_writer_t lock2(runtimeLock);
+ found = load_images_nolock(state, infoCount, infoList);
+ }
// Call +load methods (without runtimeLock - re-entrant)
if (found) {
call_load_methods();
}
- recursive_mutex_unlock(&loadMethodLock);
-
return nil;
}
void
unmap_image(const struct mach_header *mh, intptr_t vmaddr_slide)
{
- recursive_mutex_lock(&loadMethodLock);
- rwlock_write(&runtimeLock);
-
+ recursive_mutex_locker_t lock(loadMethodLock);
+ rwlock_writer_t lock2(runtimeLock);
unmap_image_nolock(mh);
-
- rwlock_unlock_write(&runtimeLock);
- recursive_mutex_unlock(&loadMethodLock);
}
*
* Locking: runtimeLock acquired by map_images or objc_readClassPair
**********************************************************************/
-static unsigned int PreoptTotalMethodLists;
-static unsigned int PreoptOptimizedMethodLists;
-static unsigned int PreoptTotalClasses;
-static unsigned int PreoptOptimizedClasses;
-
-Class readClass(Class cls, bool headerIsBundle, bool headerInSharedCache)
+Class readClass(Class cls, bool headerIsBundle, bool headerIsPreoptimized)
{
const char *mangledName = cls->mangledName();
if (cls->ISA()->cache._mask) cls->ISA()->cache._mask = 0;
if (cls->ISA()->cache._occupied) cls->ISA()->cache._occupied = 0;
#endif
-
- NXMapTable *future_named_class_map = futureNamedClasses();
-
- if (NXCountMapTable(future_named_class_map) > 0) {
- Class newCls = nil;
- newCls = (Class)NXMapGet(future_named_class_map, mangledName);
- removeFutureNamedClass(mangledName);
-
- if (newCls) {
- // Copy objc_class to future class's struct.
- // Preserve future's rw data block.
- if (newCls->isSwift()) {
- _objc_fatal("Can't complete future class request for '%s' "
- "because the real class is too big.",
- cls->nameForLogging());
- }
-
- class_rw_t *rw = newCls->data();
- const class_ro_t *old_ro = rw->ro;
- memcpy(newCls, cls, sizeof(objc_class));
- rw->ro = (class_ro_t *)newCls->data();
- newCls->setData(rw);
- _free_internal((void *)old_ro->name);
- _free_internal((void *)old_ro);
-
- addRemappedClass(cls, newCls);
-
- cls = newCls;
+ Class replacing = nil;
+ if (Class newCls = popFutureNamedClass(mangledName)) {
+ // This name was previously allocated as a future class.
+ // Copy objc_class to future class's struct.
+ // Preserve future's rw data block.
+
+ if (newCls->isSwift()) {
+ _objc_fatal("Can't complete future class request for '%s' "
+ "because the real class is too big.",
+ cls->nameForLogging());
}
+
+ class_rw_t *rw = newCls->data();
+ const class_ro_t *old_ro = rw->ro;
+ memcpy(newCls, cls, sizeof(objc_class));
+ rw->ro = (class_ro_t *)newCls->data();
+ newCls->setData(rw);
+ free((void *)old_ro->name);
+ free((void *)old_ro);
+
+ addRemappedClass(cls, newCls);
+
+ replacing = cls;
+ cls = newCls;
}
- PreoptTotalClasses++;
- if (headerInSharedCache && isPreoptimized()) {
+ if (headerIsPreoptimized && !replacing) {
// class list built in shared cache
// fixme strict assert doesn't work because of duplicates
// assert(cls == getClass(name));
assert(getClass(mangledName));
- PreoptOptimizedClasses++;
} else {
- addNamedClass(cls, mangledName);
+ addNamedClass(cls, mangledName, replacing);
}
// for future reference: shared cache never contains MH_BUNDLEs
cls->ISA()->data()->flags |= RO_FROM_BUNDLE;
}
- if (PrintPreopt) {
- const method_list_t *mlist;
- if ((mlist = ((class_ro_t *)cls->data())->baseMethods)) {
- PreoptTotalMethodLists++;
- if (isMethodListFixedUp(mlist)) PreoptOptimizedMethodLists++;
- }
- if ((mlist = ((class_ro_t *)cls->ISA()->data())->baseMethods)) {
- PreoptTotalMethodLists++;
- if (isMethodListFixedUp(mlist)) PreoptOptimizedMethodLists++;
+ return cls;
+}
+
+
+/***********************************************************************
+* readProtocol
+* Read a protocol as written by a compiler.
+**********************************************************************/
+static void
+readProtocol(protocol_t *newproto, Class protocol_class,
+ NXMapTable *protocol_map,
+ bool headerIsPreoptimized, bool headerIsBundle)
+{
+ // This is not enough to make protocols in unloaded bundles safe,
+ // but it does prevent crashes when looking up unrelated protocols.
+ auto insertFn = headerIsBundle ? NXMapKeyCopyingInsert : NXMapInsert;
+
+ protocol_t *oldproto = (protocol_t *)getProtocol(newproto->mangledName);
+
+ if (oldproto) {
+ // Some other definition already won.
+ if (PrintProtocols) {
+ _objc_inform("PROTOCOLS: protocol at %p is %s "
+ "(duplicate of %p)",
+ newproto, oldproto->nameForLogging(), oldproto);
}
}
+ else if (headerIsPreoptimized) {
+ // Shared cache initialized the protocol object itself,
+ // but in order to allow out-of-cache replacement we need
+ // to add it to the protocol table now.
- return cls;
+ protocol_t *cacheproto = (protocol_t *)
+ getPreoptimizedProtocol(newproto->mangledName);
+ protocol_t *installedproto;
+ if (cacheproto && cacheproto != newproto) {
+ // Another definition in the shared cache wins (because
+ // everything in the cache was fixed up to point to it).
+ installedproto = cacheproto;
+ }
+ else {
+ // This definition wins.
+ installedproto = newproto;
+ }
+
+ assert(installedproto->getIsa() == protocol_class);
+ assert(installedproto->size >= sizeof(protocol_t));
+ insertFn(protocol_map, installedproto->mangledName,
+ installedproto);
+
+ if (PrintProtocols) {
+ _objc_inform("PROTOCOLS: protocol at %p is %s",
+ installedproto, installedproto->nameForLogging());
+ if (newproto != installedproto) {
+ _objc_inform("PROTOCOLS: protocol at %p is %s "
+ "(duplicate of %p)",
+ newproto, installedproto->nameForLogging(),
+ installedproto);
+ }
+ }
+ }
+ else if (newproto->size >= sizeof(protocol_t)) {
+ // New protocol from an un-preoptimized image
+ // with sufficient storage. Fix it up in place.
+ // fixme duplicate protocols from unloadable bundle
+ newproto->initIsa(protocol_class); // fixme pinned
+ insertFn(protocol_map, newproto->mangledName, newproto);
+ if (PrintProtocols) {
+ _objc_inform("PROTOCOLS: protocol at %p is %s",
+ newproto, newproto->nameForLogging());
+ }
+ }
+ else {
+ // New protocol from an un-preoptimized image
+ // with insufficient storage. Reallocate it.
+ // fixme duplicate protocols from unloadable bundle
+ size_t size = max(sizeof(protocol_t), (size_t)newproto->size);
+ protocol_t *installedproto = (protocol_t *)calloc(size, 1);
+ memcpy(installedproto, newproto, newproto->size);
+ installedproto->size = (typeof(installedproto->size))size;
+
+ installedproto->initIsa(protocol_class); // fixme pinned
+ insertFn(protocol_map, installedproto->mangledName, installedproto);
+ if (PrintProtocols) {
+ _objc_inform("PROTOCOLS: protocol at %p is %s ",
+ installedproto, installedproto->nameForLogging());
+ _objc_inform("PROTOCOLS: protocol at %p is %s "
+ "(reallocated to %p)",
+ newproto, installedproto->nameForLogging(),
+ installedproto);
+ }
+ }
}
-
/***********************************************************************
* _read_images
* Perform initial processing of the headers in the linked
size_t i;
Class *resolvedFutureClasses = nil;
size_t resolvedFutureClassCount = 0;
- static BOOL doneOnce;
+ static bool doneOnce;
+ TimeLogger ts(PrintImageTimes);
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
#define EACH_HEADER \
hIndex = 0; \
#if SUPPORT_NONPOINTER_ISA
# if TARGET_OS_MAC && !TARGET_OS_IPHONE
- // Disable non-pointer isa if the app is too old.
- if (AppSDKVersion < INSERT VERSION HERE) {
+ // Disable non-pointer isa if the app is too old
+ // (linked before OS X 10.11)
+ if (dyld_get_program_sdk_version() < DYLD_MACOSX_VERSION_10_11) {
DisableIndexedIsa = true;
if (PrintRawIsa) {
_objc_inform("RAW ISA: disabling non-pointer isa because "
- "the app is too old (SDK version %hu.%hhu.%hhu)",
- (unsigned short)(AppSDKVersion>>16),
- (unsigned char)(AppSDKVersion>>8),
- (unsigned char)(AppSDKVersion));
+ "the app is too old (SDK version " SDK_FORMAT ")",
+ FORMAT_SDK(dyld_get_program_sdk_version()));
}
}
+
+ // Disable non-pointer isa if the app has a __DATA,__objc_rawisa section
+ // New apps that load old extensions may need this.
+ for (EACH_HEADER) {
+ if (hi->mhdr->filetype != MH_EXECUTE) continue;
+ unsigned long size;
+ if (getsectiondata(hi->mhdr, "__DATA", "__objc_rawisa", &size)) {
+ DisableIndexedIsa = true;
+ if (PrintRawIsa) {
+ _objc_inform("RAW ISA: disabling non-pointer isa because "
+ "the app has a __DATA,__objc_rawisa section");
+ }
+ }
+ break; // assume only one MH_EXECUTE image
+ }
# endif
// Disable non-pointer isa for all GC apps.
int namedClassesSize =
(isPreoptimized() ? unoptimizedTotal : total) * 4 / 3;
gdb_objc_realized_classes =
- NXCreateMapTableFromZone(NXStrValueMapPrototype, namedClassesSize,
- _objc_internal_zone());
+ NXCreateMapTable(NXStrValueMapPrototype, namedClassesSize);
// realizedClasses and realizedMetaclasses - less than the full total
realized_class_hash =
- NXCreateHashTableFromZone(NXPtrPrototype, total / 8, nil,
- _objc_internal_zone());
+ NXCreateHashTable(NXPtrPrototype, total / 8, nil);
realized_metaclass_hash =
- NXCreateHashTableFromZone(NXPtrPrototype, total / 8, nil,
- _objc_internal_zone());
+ NXCreateHashTable(NXPtrPrototype, total / 8, nil);
+
+ ts.log("IMAGE TIMES: first time tasks");
}
// Discover classes. Fix up unresolved future classes. Mark bundle classes.
for (EACH_HEADER) {
- bool headerIsBundle = (hi->mhdr->filetype == MH_BUNDLE);
- bool headerInSharedCache = hi->inSharedCache;
+ bool headerIsBundle = hi->isBundle();
+ bool headerIsPreoptimized = hi->isPreoptimized();
classref_t *classlist = _getObjc2ClassList(hi, &count);
for (i = 0; i < count; i++) {
Class cls = (Class)classlist[i];
- Class newCls = readClass(cls, headerIsBundle, headerInSharedCache);
+ Class newCls = readClass(cls, headerIsBundle, headerIsPreoptimized);
if (newCls != cls && newCls) {
// Class was moved but not deleted. Currently this occurs
// only when the new class resolved a future class.
// Non-lazily realize the class below.
resolvedFutureClasses = (Class *)
- _realloc_internal(resolvedFutureClasses,
+ realloc(resolvedFutureClasses,
(resolvedFutureClassCount+1)
* sizeof(Class));
resolvedFutureClasses[resolvedFutureClassCount++] = newCls;
}
}
- if (PrintPreopt && PreoptTotalMethodLists) {
- _objc_inform("PREOPTIMIZATION: %u/%u (%.3g%%) method lists pre-sorted",
- PreoptOptimizedMethodLists, PreoptTotalMethodLists,
- 100.0*PreoptOptimizedMethodLists/PreoptTotalMethodLists);
- }
- if (PrintPreopt && PreoptTotalClasses) {
- _objc_inform("PREOPTIMIZATION: %u/%u (%.3g%%) classes pre-registered",
- PreoptOptimizedClasses, PreoptTotalClasses,
- 100.0*PreoptOptimizedClasses/PreoptTotalClasses);
- }
+ ts.log("IMAGE TIMES: discover classes");
// Fix up remapped classes
// Class list and nonlazy class list remain unremapped.
}
}
+ ts.log("IMAGE TIMES: remap classes");
// Fix up @selector references
- sel_lock();
- for (EACH_HEADER) {
- if (PrintPreopt) {
- if (sel_preoptimizationValid(hi)) {
- _objc_inform("PREOPTIMIZATION: honoring preoptimized selectors in %s",
- hi->fname);
- }
- else if (_objcHeaderOptimizedByDyld(hi)) {
- _objc_inform("PREOPTIMIZATION: IGNORING preoptimized selectors in %s",
- hi->fname);
- }
- }
-
- if (sel_preoptimizationValid(hi)) continue;
+ static size_t UnfixedSelectors;
+ sel_lock();
+ for (EACH_HEADER) {
+ if (hi->isPreoptimized()) continue;
- bool isBundle = hi->mhdr->filetype == MH_BUNDLE;
+ bool isBundle = hi->isBundle();
SEL *sels = _getObjc2SelectorRefs(hi, &count);
+ UnfixedSelectors += count;
for (i = 0; i < count; i++) {
const char *name = sel_cname(sels[i]);
sels[i] = sel_registerNameNoLock(name, isBundle);
}
sel_unlock();
+ ts.log("IMAGE TIMES: fix up selector references");
+
#if SUPPORT_FIXUP
// Fix up old objc_msgSend_fixup call sites
for (EACH_HEADER) {
fixupMessageRef(refs+i);
}
}
+
+ ts.log("IMAGE TIMES: fix up objc_msgSend_fixup");
#endif
// Discover protocols. Fix up protocol refs.
extern objc_class OBJC_CLASS_$_Protocol;
Class cls = (Class)&OBJC_CLASS_$_Protocol;
assert(cls);
- protocol_t **protolist = _getObjc2ProtocolList(hi, &count);
NXMapTable *protocol_map = protocols();
- // fixme duplicate protocols from unloadable bundle
+ bool isPreoptimized = hi->isPreoptimized();
+ bool isBundle = hi->isBundle();
+
+ protocol_t **protolist = _getObjc2ProtocolList(hi, &count);
for (i = 0; i < count; i++) {
- protocol_t *oldproto = (protocol_t *)
- getProtocol(protolist[i]->mangledName);
- if (!oldproto) {
- size_t size = max(sizeof(protocol_t),
- (size_t)protolist[i]->size);
- protocol_t *newproto = (protocol_t *)_calloc_internal(size, 1);
- memcpy(newproto, protolist[i], protolist[i]->size);
- newproto->size = (typeof(newproto->size))size;
-
- newproto->initIsa(cls); // fixme pinned
- NXMapKeyCopyingInsert(protocol_map,
- newproto->mangledName, newproto);
- if (PrintProtocols) {
- _objc_inform("PROTOCOLS: protocol at %p is %s",
- newproto, newproto->nameForLogging());
- }
- } else {
- if (PrintProtocols) {
- _objc_inform("PROTOCOLS: protocol at %p is %s (duplicate)",
- protolist[i], oldproto->nameForLogging());
- }
- }
+ readProtocol(protolist[i], cls, protocol_map,
+ isPreoptimized, isBundle);
}
}
+
+ ts.log("IMAGE TIMES: discover protocols");
+
+ // Fix up @protocol references
+ // Preoptimized images may have the right
+ // answer already but we don't know for sure.
for (EACH_HEADER) {
- protocol_t **protolist;
- protolist = _getObjc2ProtocolRefs(hi, &count);
+ protocol_t **protolist = _getObjc2ProtocolRefs(hi, &count);
for (i = 0; i < count; i++) {
remapProtocolRef(&protolist[i]);
}
}
+ ts.log("IMAGE TIMES: fix up @protocol references");
+
// Realize non-lazy classes (for +load methods and static instances)
for (EACH_HEADER) {
classref_t *classlist =
realizeClass(cls);
}
- }
+ }
+
+ ts.log("IMAGE TIMES: realize non-lazy classes");
// Realize newly-resolved future classes, in case CF manipulates them
if (resolvedFutureClasses) {
realizeClass(resolvedFutureClasses[i]);
resolvedFutureClasses[i]->setRequiresRawIsa(false/*inherited*/);
}
- _free_internal(resolvedFutureClasses);
+ free(resolvedFutureClasses);
}
+ ts.log("IMAGE TIMES: realize future classes");
+
// Discover categories.
for (EACH_HEADER) {
category_t **catlist =
// First, register the category with its target class.
// Then, rebuild the class's method lists (etc) if
// the class is realized.
- BOOL classExists = NO;
+ bool classExists = NO;
if (cat->instanceMethods || cat->protocols
|| cat->instanceProperties)
{
}
}
+ ts.log("IMAGE TIMES: discover categories");
+
// Category discovery MUST BE LAST to avoid potential races
// when other threads call the new category code before
// this thread finishes its fixups.
realizeAllClasses();
}
+
+ // Print preoptimization statistics
+ if (PrintPreopt) {
+ static unsigned int PreoptTotalMethodLists;
+ static unsigned int PreoptOptimizedMethodLists;
+ static unsigned int PreoptTotalClasses;
+ static unsigned int PreoptOptimizedClasses;
+
+ for (EACH_HEADER) {
+ if (hi->isPreoptimized()) {
+ _objc_inform("PREOPTIMIZATION: honoring preoptimized selectors "
+ "in %s", hi->fname);
+ }
+ else if (_objcHeaderOptimizedByDyld(hi)) {
+ _objc_inform("PREOPTIMIZATION: IGNORING preoptimized selectors "
+ "in %s", hi->fname);
+ }
+
+ classref_t *classlist = _getObjc2ClassList(hi, &count);
+ for (i = 0; i < count; i++) {
+ Class cls = remapClass(classlist[i]);
+ if (!cls) continue;
+
+ PreoptTotalClasses++;
+ if (hi->isPreoptimized()) {
+ PreoptOptimizedClasses++;
+ }
+
+ const method_list_t *mlist;
+ if ((mlist = ((class_ro_t *)cls->data())->baseMethods())) {
+ PreoptTotalMethodLists++;
+ if (mlist->isFixedUp()) {
+ PreoptOptimizedMethodLists++;
+ }
+ }
+ if ((mlist=((class_ro_t *)cls->ISA()->data())->baseMethods())) {
+ PreoptTotalMethodLists++;
+ if (mlist->isFixedUp()) {
+ PreoptOptimizedMethodLists++;
+ }
+ }
+ }
+ }
+
+ _objc_inform("PREOPTIMIZATION: %zu selector references not "
+ "pre-optimized", UnfixedSelectors);
+ _objc_inform("PREOPTIMIZATION: %u/%u (%.3g%%) method lists pre-sorted",
+ PreoptOptimizedMethodLists, PreoptTotalMethodLists,
+ PreoptTotalMethodLists
+ ? 100.0*PreoptOptimizedMethodLists/PreoptTotalMethodLists
+ : 0.0);
+ _objc_inform("PREOPTIMIZATION: %u/%u (%.3g%%) classes pre-registered",
+ PreoptOptimizedClasses, PreoptTotalClasses,
+ PreoptTotalClasses
+ ? 100.0*PreoptOptimizedClasses/PreoptTotalClasses
+ : 0.0);
+ _objc_inform("PREOPTIMIZATION: %zu protocol references not "
+ "pre-optimized", UnfixedProtocolReferences);
+ }
+
#undef EACH_HEADER
}
cls->setInfo(RW_LOADED);
}
-void prepare_load_methods(header_info *hi)
+// Quick scan for +load methods that doesn't take a lock.
+bool hasLoadMethods(const headerType *mhdr)
+{
+ size_t count;
+ if (_getObjc2NonlazyClassList(mhdr, &count) && count > 0) return true;
+ if (_getObjc2NonlazyCategoryList(mhdr, &count) && count > 0) return true;
+ return false;
+}
+
+void prepare_load_methods(const headerType *mhdr)
{
size_t count, i;
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
classref_t *classlist =
- _getObjc2NonlazyClassList(hi, &count);
+ _getObjc2NonlazyClassList(mhdr, &count);
for (i = 0; i < count; i++) {
schedule_class_load(remapClass(classlist[i]));
}
- category_t **categorylist = _getObjc2NonlazyCategoryList(hi, &count);
+ category_t **categorylist = _getObjc2NonlazyCategoryList(mhdr, &count);
for (i = 0; i < count; i++) {
category_t *cat = categorylist[i];
Class cls = remapClass(cat->cls);
{
size_t count, i;
- recursive_mutex_assert_locked(&loadMethodLock);
- rwlock_assert_writing(&runtimeLock);
+ loadMethodLock.assertLocked();
+ runtimeLock.assertWriting();
// Unload unattached categories and categories waiting for +load.
}
-/***********************************************************************
-* method_getImplementation
-* Returns this method's IMP.
-* Locking: none
-**********************************************************************/
-static IMP
-_method_getImplementation(method_t *m)
-{
- if (!m) return nil;
- return m->imp;
-}
-
IMP
method_getImplementation(Method m)
{
- return _method_getImplementation(m);
+ return m ? m->imp : nil;
}
static IMP
_method_setImplementation(Class cls, method_t *m, IMP imp)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
if (!m) return nil;
if (!imp) return nil;
return m->imp;
}
- IMP old = _method_getImplementation(m);
+ IMP old = m->imp;
m->imp = imp;
- // Class-side cache updates are slow if cls is nil (i.e. unknown)
+ // Cache updates are slow if cls is nil (i.e. unknown)
// RR/AWZ updates are slow if cls is nil (i.e. unknown)
// fixme build list of classes whose Methods are known externally?
- // Scrub the old IMP from the cache.
- // Can't simply overwrite the new IMP because the cached value could be
- // the same IMP from a different Method.
- flushImps(cls, m->name, old, nil, nil);
+ flushCaches(cls);
- // Catch changes to retain/release and allocWithZone implementations
updateCustomRR_AWZ(cls, m);
return old;
{
// Don't know the class - will be slow if RR/AWZ are affected
// fixme build list of classes whose Methods are known externally?
- IMP result;
- rwlock_write(&runtimeLock);
- result = _method_setImplementation(Nil, m, imp);
- rwlock_unlock_write(&runtimeLock);
- return result;
+ rwlock_writer_t lock(runtimeLock);
+ return _method_setImplementation(Nil, m, imp);
}
{
if (!m1 || !m2) return;
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
if (ignoreSelector(m1->name) || ignoreSelector(m2->name)) {
// Ignored methods stay ignored. Now they're both ignored.
m1->imp = (IMP)&_objc_ignored_method;
m2->imp = (IMP)&_objc_ignored_method;
- rwlock_unlock_write(&runtimeLock);
return;
}
// RR/AWZ updates are slow because class is unknown
- // Class-side cache updates are slow because class is unknown
+ // Cache updates are slow because class is unknown
// fixme build list of classes whose Methods are known externally?
- // Scrub the old IMPs from the caches.
- // Can't simply overwrite the new IMP because the cached value could be
- // the same IMP from a different Method.
- flushImps(nil, m1->name,m2->imp, m2->name,m1->imp);
+ flushCaches(nil);
updateCustomRR_AWZ(nil, m1);
updateCustomRR_AWZ(nil, m2);
-
- rwlock_unlock_write(&runtimeLock);
}
return nil;
}
- objc_property_attribute_t *result;
- rwlock_read(&runtimeLock);
- result = copyPropertyAttributeList(prop->attributes,outCount);
- rwlock_unlock_read(&runtimeLock);
- return result;
+ rwlock_reader_t lock(runtimeLock);
+ return copyPropertyAttributeList(prop->attributes,outCount);
}
char * property_copyAttributeValue(objc_property_t prop, const char *name)
{
if (!prop || !name || *name == '\0') return nil;
- char *result;
- rwlock_read(&runtimeLock);
- result = copyPropertyAttributeValue(prop->attributes, name);
- rwlock_unlock_read(&runtimeLock);
- return result;
+ rwlock_reader_t lock(runtimeLock);
+ return copyPropertyAttributeValue(prop->attributes, name);
}
{
a = 0;
- if (isRequiredMethod && isInstanceMethod) {
- b = method_list_index(proto->instanceMethods, m);
- return;
+ if (proto->instanceMethods) {
+ if (isRequiredMethod && isInstanceMethod) {
+ b = proto->instanceMethods->indexOfMethod(m);
+ return;
+ }
+ a += proto->instanceMethods->count;
}
- a += method_list_count(proto->instanceMethods);
- if (isRequiredMethod && !isInstanceMethod) {
- b = method_list_index(proto->classMethods, m);
- return;
+ if (proto->classMethods) {
+ if (isRequiredMethod && !isInstanceMethod) {
+ b = proto->classMethods->indexOfMethod(m);
+ return;
+ }
+ a += proto->classMethods->count;
}
- a += method_list_count(proto->classMethods);
- if (!isRequiredMethod && isInstanceMethod) {
- b = method_list_index(proto->optionalInstanceMethods, m);
- return;
+ if (proto->optionalInstanceMethods) {
+ if (!isRequiredMethod && isInstanceMethod) {
+ b = proto->optionalInstanceMethods->indexOfMethod(m);
+ return;
+ }
+ a += proto->optionalInstanceMethods->count;
}
- a += method_list_count(proto->optionalInstanceMethods);
- if (!isRequiredMethod && !isInstanceMethod) {
- b = method_list_index(proto->optionalClassMethods, m);
- return;
+ if (proto->optionalClassMethods) {
+ if (!isRequiredMethod && !isInstanceMethod) {
+ b = proto->optionalClassMethods->indexOfMethod(m);
+ return;
+ }
+ a += proto->optionalClassMethods->count;
}
- a += method_list_count(proto->optionalClassMethods);
}
* Fixes up a single method list in a protocol.
**********************************************************************/
static void
-fixupProtocolMethodList(protocol_t *proto, method_list_t **mlistp,
+fixupProtocolMethodList(protocol_t *proto, method_list_t *mlist,
bool required, bool instance)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
- if (!*mlistp) return;
- if (isMethodListFixedUp(*mlistp)) return;
+ if (!mlist) return;
+ if (mlist->isFixedUp()) return;
bool hasExtendedMethodTypes = proto->hasExtendedMethodTypes();
- *mlistp = fixupMethodList(*mlistp, true/*always copy for simplicity*/,
- !hasExtendedMethodTypes/*sort if no ext*/);
-
- method_list_t *mlist = *mlistp;
+ fixupMethodList(mlist, true/*always copy for simplicity*/,
+ !hasExtendedMethodTypes/*sort if no ext*/);
if (hasExtendedMethodTypes) {
// Sort method list and extended method types together.
// fixupMethodList() can't do this.
// fixme COW stomp
- uint32_t count = method_list_count(mlist);
+ uint32_t count = mlist->count;
uint32_t prefix;
uint32_t junk;
- getExtendedTypesIndexesForMethod(proto, method_list_nth(mlist, 0),
+ getExtendedTypesIndexesForMethod(proto, &mlist->get(0),
required, instance, prefix, junk);
const char **types = proto->extendedMethodTypes;
for (uint32_t i = 0; i < count; i++) {
for (uint32_t j = i+1; j < count; j++) {
- method_t *mi = method_list_nth(mlist, i);
- method_t *mj = method_list_nth(mlist, j);
- if (mi->name > mj->name) {
- method_list_swap(mlist, i, j);
+ method_t& mi = mlist->get(i);
+ method_t& mj = mlist->get(j);
+ if (mi.name > mj.name) {
+ std::swap(mi, mj);
std::swap(types[prefix+i], types[prefix+j]);
}
}
static void
fixupProtocol(protocol_t *proto)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
if (proto->protocols) {
for (uintptr_t i = 0; i < proto->protocols->count; i++) {
}
}
- fixupProtocolMethodList(proto, &proto->instanceMethods, YES, YES);
- fixupProtocolMethodList(proto, &proto->classMethods, YES, NO);
- fixupProtocolMethodList(proto, &proto->optionalInstanceMethods, NO, YES);
- fixupProtocolMethodList(proto, &proto->optionalClassMethods, NO, NO);
+ fixupProtocolMethodList(proto, proto->instanceMethods, YES, YES);
+ fixupProtocolMethodList(proto, proto->classMethods, YES, NO);
+ fixupProtocolMethodList(proto, proto->optionalInstanceMethods, NO, YES);
+ fixupProtocolMethodList(proto, proto->optionalClassMethods, NO, NO);
// fixme memory barrier so we can check this with no lock
- proto->flags |= PROTOCOL_FIXED_UP;
+ proto->setFixedUp();
}
static void
fixupProtocolIfNeeded(protocol_t *proto)
{
- rwlock_assert_unlocked(&runtimeLock);
+ runtimeLock.assertUnlocked();
assert(proto);
if (!proto->isFixedUp()) {
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
fixupProtocol(proto);
- rwlock_unlock_write(&runtimeLock);
}
}
bool isRequiredMethod, bool isInstanceMethod,
bool recursive)
{
- rwlock_assert_locked(&runtimeLock);
+ runtimeLock.assertLocked();
if (!proto || !sel) return nil;
if (!proto) return nil;
fixupProtocolIfNeeded(proto);
- rwlock_read(&runtimeLock);
- method_t *result = protocol_getMethod_nolock(proto, sel,
- isRequiredMethod,
- isInstanceMethod,
- recursive);
- rwlock_unlock_read(&runtimeLock);
- return result;
+ rwlock_reader_t lock(runtimeLock);
+ return protocol_getMethod_nolock(proto, sel, isRequiredMethod,
+ isInstanceMethod, recursive);
}
bool isRequiredMethod,
bool isInstanceMethod)
{
- rwlock_assert_locked(&runtimeLock);
+ runtimeLock.assertLocked();
if (!proto) return nil;
if (!proto->hasExtendedMethodTypes()) return nil;
if (!proto) return nil;
fixupProtocolIfNeeded(proto);
- const char *enc;
- rwlock_read(&runtimeLock);
- enc = protocol_getMethodTypeEncoding_nolock(proto, sel,
- isRequiredMethod,
- isInstanceMethod);
- rwlock_unlock_read(&runtimeLock);
- return enc;
+ rwlock_reader_t lock(runtimeLock);
+ return protocol_getMethodTypeEncoding_nolock(proto, sel,
+ isRequiredMethod,
+ isInstanceMethod);
}
static bool
protocol_conformsToProtocol_nolock(protocol_t *self, protocol_t *other)
{
- rwlock_assert_locked(&runtimeLock);
+ runtimeLock.assertLocked();
if (!self || !other) {
return NO;
**********************************************************************/
BOOL protocol_conformsToProtocol(Protocol *self, Protocol *other)
{
- BOOL result;
- rwlock_read(&runtimeLock);
- result = protocol_conformsToProtocol_nolock(newprotocol(self),
- newprotocol(other));
- rwlock_unlock_read(&runtimeLock);
- return result;
+ rwlock_reader_t lock(runtimeLock);
+ return protocol_conformsToProtocol_nolock(newprotocol(self),
+ newprotocol(other));
}
fixupProtocolIfNeeded(proto);
- rwlock_read(&runtimeLock);
+ rwlock_reader_t lock(runtimeLock);
method_list_t *mlist =
getProtocolMethodList(proto, isRequiredMethod, isInstanceMethod);
if (mlist) {
- unsigned int i;
- count = mlist->count;
result = (struct objc_method_description *)
- calloc(count + 1, sizeof(struct objc_method_description));
- for (i = 0; i < count; i++) {
- method_t *m = method_list_nth(mlist, i);
- result[i].name = m->name;
- result[i].types = (char *)m->types;
+ calloc(mlist->count + 1, sizeof(struct objc_method_description));
+ for (const auto& meth : *mlist) {
+ result[count].name = meth.name;
+ result[count].types = (char *)meth.types;
+ count++;
}
}
- rwlock_unlock_read(&runtimeLock);
-
if (outCount) *outCount = count;
return result;
}
protocol_getProperty_nolock(protocol_t *proto, const char *name,
bool isRequiredProperty, bool isInstanceProperty)
{
- rwlock_assert_locked(&runtimeLock);
+ runtimeLock.assertLocked();
if (!isRequiredProperty || !isInstanceProperty) {
// Only required instance properties are currently supported
property_list_t *plist;
if ((plist = proto->instanceProperties)) {
- uint32_t i;
- for (i = 0; i < plist->count; i++) {
- property_t *prop = property_list_nth(plist, i);
- if (0 == strcmp(name, prop->name)) {
- return prop;
+ for (auto& prop : *plist) {
+ if (0 == strcmp(name, prop.name)) {
+ return &prop;
}
}
}
objc_property_t protocol_getProperty(Protocol *p, const char *name,
BOOL isRequiredProperty, BOOL isInstanceProperty)
{
- property_t *result;
-
if (!p || !name) return nil;
- rwlock_read(&runtimeLock);
- result = protocol_getProperty_nolock(newprotocol(p), name,
- isRequiredProperty,
- isInstanceProperty);
- rwlock_unlock_read(&runtimeLock);
-
- return (objc_property_t)result;
+ rwlock_reader_t lock(runtimeLock);
+ return (objc_property_t)
+ protocol_getProperty_nolock(newprotocol(p), name,
+ isRequiredProperty, isInstanceProperty);
}
}
if (count > 0) {
- unsigned int i;
result = (property_t **)malloc((count+1) * sizeof(property_t *));
-
- for (i = 0; i < count; i++) {
- result[i] = property_list_nth(plist, i);
+
+ count = 0;
+ for (auto& prop : *plist) {
+ result[count++] = &prop;
}
- result[i] = nil;
+ result[count] = nil;
}
if (outCount) *outCount = count;
objc_property_t *protocol_copyPropertyList(Protocol *proto, unsigned int *outCount)
{
- property_t **result = nil;
-
if (!proto) {
if (outCount) *outCount = 0;
return nil;
}
- rwlock_read(&runtimeLock);
+ rwlock_reader_t lock(runtimeLock);
property_list_t *plist = newprotocol(proto)->instanceProperties;
- result = copyPropertyList(plist, outCount);
-
- rwlock_unlock_read(&runtimeLock);
-
- return (objc_property_t *)result;
+ return (objc_property_t *)copyPropertyList(plist, outCount);
}
return nil;
}
- rwlock_read(&runtimeLock);
+ rwlock_reader_t lock(runtimeLock);
if (proto->protocols) {
count = (unsigned int)proto->protocols->count;
result[i] = nil;
}
- rwlock_unlock_read(&runtimeLock);
-
if (outCount) *outCount = count;
return result;
}
Protocol *
objc_allocateProtocol(const char *name)
{
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
if (getProtocol(name)) {
- rwlock_unlock_write(&runtimeLock);
return nil;
}
- protocol_t *result = (protocol_t *)_calloc_internal(sizeof(protocol_t), 1);
+ protocol_t *result = (protocol_t *)calloc(sizeof(protocol_t), 1);
extern objc_class OBJC_CLASS_$___IncompleteProtocol;
Class cls = (Class)&OBJC_CLASS_$___IncompleteProtocol;
result->initProtocolIsa(cls);
result->size = sizeof(protocol_t);
// fixme mangle the name if it looks swift-y?
- result->mangledName = _strdup_internal(name);
+ result->mangledName = strdup(name);
// fixme reserve name without installing
- rwlock_unlock_write(&runtimeLock);
-
return (Protocol *)result;
}
{
protocol_t *proto = newprotocol(proto_gen);
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
extern objc_class OBJC_CLASS_$___IncompleteProtocol;
Class oldcls = (Class)&OBJC_CLASS_$___IncompleteProtocol;
if (proto->ISA() == cls) {
_objc_inform("objc_registerProtocol: protocol '%s' was already "
"registered!", proto->nameForLogging());
- rwlock_unlock_write(&runtimeLock);
return;
}
if (proto->ISA() != oldcls) {
_objc_inform("objc_registerProtocol: protocol '%s' was not allocated "
"with objc_allocateProtocol!", proto->nameForLogging());
- rwlock_unlock_write(&runtimeLock);
return;
}
- proto->initProtocolIsa(cls);
+ // NOT initProtocolIsa(). The protocol object may already
+ // have been retained and we must preserve that count.
+ proto->changeIsa(cls);
NXMapKeyCopyingInsert(protocols(), proto->mangledName, proto);
-
- rwlock_unlock_write(&runtimeLock);
}
if (!proto_gen) return;
if (!addition_gen) return;
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
if (proto->ISA() != cls) {
_objc_inform("protocol_addProtocol: modified protocol '%s' is not "
"under construction!", proto->nameForLogging());
- rwlock_unlock_write(&runtimeLock);
return;
}
if (addition->ISA() == cls) {
_objc_inform("protocol_addProtocol: added protocol '%s' is still "
"under construction!", addition->nameForLogging());
- rwlock_unlock_write(&runtimeLock);
return;
}
protocol_list_t *protolist = proto->protocols;
if (!protolist) {
protolist = (protocol_list_t *)
- _calloc_internal(1, sizeof(protocol_list_t)
+ calloc(1, sizeof(protocol_list_t)
+ sizeof(protolist->list[0]));
} else {
protolist = (protocol_list_t *)
- _realloc_internal(protolist, protocol_list_size(protolist)
+ realloc(protolist, protocol_list_size(protolist)
+ sizeof(protolist->list[0]));
}
protolist->list[protolist->count++] = (protocol_ref_t)addition;
proto->protocols = protolist;
-
- rwlock_unlock_write(&runtimeLock);
}
* Locking: acquires runtimeLock
**********************************************************************/
static void
-protocol_addMethod_nolock(method_list_t **list, SEL name, const char *types)
+protocol_addMethod_nolock(method_list_t*& list, SEL name, const char *types)
{
- if (!*list) {
- *list = (method_list_t *)
- _calloc_internal(sizeof(method_list_t), 1);
- (*list)->entsize_NEVER_USE = sizeof((*list)->first);
- setMethodListFixedUp(*list);
+ if (!list) {
+ list = (method_list_t *)calloc(sizeof(method_list_t), 1);
+ list->entsizeAndFlags = sizeof(list->first);
+ list->setFixedUp();
} else {
- size_t size = method_list_size(*list) + method_list_entsize(*list);
- *list = (method_list_t *)
- _realloc_internal(*list, size);
+ size_t size = list->byteSize() + list->entsize();
+ list = (method_list_t *)realloc(list, size);
}
- method_t *meth = method_list_nth(*list, (*list)->count++);
- meth->name = name;
- meth->types = _strdup_internal(types ? types : "");
- meth->imp = nil;
+ method_t& meth = list->get(list->count++);
+ meth.name = name;
+ meth.types = strdup(types ? types : "");
+ meth.imp = nil;
}
void
if (!proto_gen) return;
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
if (proto->ISA() != cls) {
_objc_inform("protocol_addMethodDescription: protocol '%s' is not "
"under construction!", proto->nameForLogging());
- rwlock_unlock_write(&runtimeLock);
return;
}
if (isRequiredMethod && isInstanceMethod) {
- protocol_addMethod_nolock(&proto->instanceMethods, name, types);
+ protocol_addMethod_nolock(proto->instanceMethods, name, types);
} else if (isRequiredMethod && !isInstanceMethod) {
- protocol_addMethod_nolock(&proto->classMethods, name, types);
+ protocol_addMethod_nolock(proto->classMethods, name, types);
} else if (!isRequiredMethod && isInstanceMethod) {
- protocol_addMethod_nolock(&proto->optionalInstanceMethods, name,types);
+ protocol_addMethod_nolock(proto->optionalInstanceMethods, name,types);
} else /* !isRequiredMethod && !isInstanceMethod) */ {
- protocol_addMethod_nolock(&proto->optionalClassMethods, name, types);
+ protocol_addMethod_nolock(proto->optionalClassMethods, name, types);
}
-
- rwlock_unlock_write(&runtimeLock);
}
* Locking: acquires runtimeLock
**********************************************************************/
static void
-protocol_addProperty_nolock(property_list_t **plist, const char *name,
+protocol_addProperty_nolock(property_list_t *&plist, const char *name,
const objc_property_attribute_t *attrs,
unsigned int count)
{
- if (!*plist) {
- *plist = (property_list_t *)
- _calloc_internal(sizeof(property_list_t), 1);
- (*plist)->entsize = sizeof(property_t);
+ if (!plist) {
+ plist = (property_list_t *)calloc(sizeof(property_list_t), 1);
+ plist->entsizeAndFlags = sizeof(property_t);
} else {
- *plist = (property_list_t *)
- _realloc_internal(*plist, sizeof(property_list_t)
- + (*plist)->count * (*plist)->entsize);
+ plist = (property_list_t *)
+ realloc(plist, sizeof(property_list_t)
+ + plist->count * plist->entsize());
}
- property_t *prop = property_list_nth(*plist, (*plist)->count++);
- prop->name = _strdup_internal(name);
- prop->attributes = copyPropertyAttributeString(attrs, count);
+ property_t& prop = plist->get(plist->count++);
+ prop.name = strdup(name);
+ prop.attributes = copyPropertyAttributeString(attrs, count);
}
void
if (!proto) return;
if (!name) return;
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
if (proto->ISA() != cls) {
_objc_inform("protocol_addProperty: protocol '%s' is not "
"under construction!", proto->nameForLogging());
- rwlock_unlock_write(&runtimeLock);
return;
}
if (isRequiredProperty && isInstanceProperty) {
- protocol_addProperty_nolock(&proto->instanceProperties, name, attrs, count);
+ protocol_addProperty_nolock(proto->instanceProperties, name, attrs, count);
}
//else if (isRequiredProperty && !isInstanceProperty) {
- // protocol_addProperty_nolock(&proto->classProperties, name, attrs, count);
+ // protocol_addProperty_nolock(proto->classProperties, name, attrs, count);
//} else if (!isRequiredProperty && isInstanceProperty) {
- // protocol_addProperty_nolock(&proto->optionalInstanceProperties, name, attrs, count);
+ // protocol_addProperty_nolock(proto->optionalInstanceProperties, name, attrs, count);
//} else /* !isRequiredProperty && !isInstanceProperty) */ {
- // protocol_addProperty_nolock(&proto->optionalClassProperties, name, attrs, count);
+ // protocol_addProperty_nolock(proto->optionalClassProperties, name, attrs, count);
//}
-
- rwlock_unlock_write(&runtimeLock);
}
int
objc_getClassList(Class *buffer, int bufferLen)
{
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
realizeAllClasses();
int allCount = NXCountHashTable(classes);
if (!buffer) {
- rwlock_unlock_write(&runtimeLock);
return allCount;
}
buffer[count++] = cls;
}
- rwlock_unlock_write(&runtimeLock);
-
return allCount;
}
Class *
objc_copyClassList(unsigned int *outCount)
{
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
realizeAllClasses();
}
result[count] = nil;
}
-
- rwlock_unlock_write(&runtimeLock);
if (outCount) *outCount = count;
return result;
Protocol * __unsafe_unretained *
objc_copyProtocolList(unsigned int *outCount)
{
- rwlock_read(&runtimeLock);
+ rwlock_reader_t lock(runtimeLock);
- unsigned int count, i;
- Protocol *proto;
- const char *name;
- NXMapState state;
NXMapTable *protocol_map = protocols();
- Protocol **result;
- count = NXCountMapTable(protocol_map);
+ unsigned int count = NXCountMapTable(protocol_map);
if (count == 0) {
- rwlock_unlock_read(&runtimeLock);
if (outCount) *outCount = 0;
return nil;
}
- result = (Protocol **)calloc(1 + count, sizeof(Protocol *));
+ Protocol **result = (Protocol **)malloc((count+1) * sizeof(Protocol*));
- i = 0;
- state = NXInitMapState(protocol_map);
+ unsigned int i = 0;
+ Protocol *proto;
+ const char *name;
+ NXMapState state = NXInitMapState(protocol_map);
while (NXNextMapState(protocol_map, &state,
(const void **)&name, (const void **)&proto))
{
result[i++] = nil;
assert(i == count+1);
- rwlock_unlock_read(&runtimeLock);
-
if (outCount) *outCount = count;
return result;
}
**********************************************************************/
Protocol *objc_getProtocol(const char *name)
{
- rwlock_read(&runtimeLock);
- Protocol *result = getProtocol(name);
- rwlock_unlock_read(&runtimeLock);
- return result;
+ rwlock_reader_t lock(runtimeLock);
+ return getProtocol(name);
}
return nil;
}
- rwlock_read(&runtimeLock);
+ rwlock_reader_t lock(runtimeLock);
assert(cls->isRealized());
- FOREACH_METHOD_LIST(mlist, cls, {
- count += mlist->count;
- });
+ count = cls->data()->methods.count();
if (count > 0) {
- unsigned int m;
result = (Method *)malloc((count + 1) * sizeof(Method));
- m = 0;
- FOREACH_METHOD_LIST(mlist, cls, {
- unsigned int i;
- for (i = 0; i < mlist->count; i++) {
- method_t *aMethod = method_list_nth(mlist, i);
- if (ignoreSelector(method_getName(aMethod))) {
- count--;
- continue;
- }
- result[m++] = aMethod;
+ count = 0;
+ for (auto& meth : cls->data()->methods) {
+ if (! ignoreSelector(meth.name)) {
+ result[count++] = &meth;
}
- });
- result[m] = nil;
+ }
+ result[count] = nil;
}
- rwlock_unlock_read(&runtimeLock);
-
if (outCount) *outCount = count;
return result;
}
const ivar_list_t *ivars;
Ivar *result = nil;
unsigned int count = 0;
- unsigned int i;
if (!cls) {
if (outCount) *outCount = 0;
return nil;
}
- rwlock_read(&runtimeLock);
+ rwlock_reader_t lock(runtimeLock);
assert(cls->isRealized());
if ((ivars = cls->data()->ro->ivars) && ivars->count) {
result = (Ivar *)malloc((ivars->count+1) * sizeof(Ivar));
- for (i = 0; i < ivars->count; i++) {
- ivar_t *ivar = ivar_list_nth(ivars, i);
- if (!ivar->offset) continue; // anonymous bitfield
- result[count++] = ivar;
+ for (auto& ivar : *ivars) {
+ if (!ivar.offset) continue; // anonymous bitfield
+ result[count++] = &ivar;
}
result[count] = nil;
}
-
- rwlock_unlock_read(&runtimeLock);
if (outCount) *outCount = count;
return result;
objc_property_t *
class_copyPropertyList(Class cls, unsigned int *outCount)
{
- chained_property_list *plist;
- unsigned int count = 0;
- property_t **result = nil;
-
if (!cls) {
if (outCount) *outCount = 0;
return nil;
}
- rwlock_read(&runtimeLock);
+ rwlock_reader_t lock(runtimeLock);
assert(cls->isRealized());
+ auto rw = cls->data();
- for (plist = cls->data()->properties; plist; plist = plist->next) {
- count += plist->count;
- }
-
+ property_t **result = nil;
+ unsigned int count = rw->properties.count();
if (count > 0) {
- unsigned int p;
result = (property_t **)malloc((count + 1) * sizeof(property_t *));
-
- p = 0;
- for (plist = cls->data()->properties; plist; plist = plist->next) {
- unsigned int i;
- for (i = 0; i < plist->count; i++) {
- result[p++] = &plist->list[i];
- }
+
+ count = 0;
+ for (auto& prop : rw->properties) {
+ result[count++] = &prop;
}
- result[p] = nil;
+ result[count] = nil;
}
- rwlock_unlock_read(&runtimeLock);
-
if (outCount) *outCount = count;
return (objc_property_t *)result;
}
IMP
objc_class::getLoadMethod()
{
- rwlock_assert_locked(&runtimeLock);
+ runtimeLock.assertLocked();
const method_list_t *mlist;
- uint32_t i;
assert(isRealized());
assert(ISA()->isRealized());
assert(!isMetaClass());
assert(ISA()->isMetaClass());
- mlist = ISA()->data()->ro->baseMethods;
+ mlist = ISA()->data()->ro->baseMethods();
if (mlist) {
- for (i = 0; i < mlist->count; i++) {
- method_t *m = method_list_nth(mlist, i);
- const char *name = sel_cname(m->name);
+ for (const auto& meth : *mlist) {
+ const char *name = sel_cname(meth.name);
if (0 == strcmp(name, "load")) {
- return m->imp;
+ return meth.imp;
}
}
}
const char *
_category_getClassName(Category cat)
{
- rwlock_assert_locked(&runtimeLock);
+ runtimeLock.assertLocked();
return remapClass(cat->cls)->nameForLogging();
}
Class
_category_getClass(Category cat)
{
- rwlock_read(&runtimeLock);
+ rwlock_reader_t lock(runtimeLock);
Class result = remapClass(cat->cls);
assert(result->isRealized()); // ok for call_category_loads' usage
- rwlock_unlock_read(&runtimeLock);
return result;
}
IMP
_category_getLoadMethod(Category cat)
{
- rwlock_assert_locked(&runtimeLock);
+ runtimeLock.assertLocked();
const method_list_t *mlist;
- uint32_t i;
mlist = cat->classMethods;
if (mlist) {
- for (i = 0; i < mlist->count; i++) {
- method_t *m = method_list_nth(mlist, i);
- const char *name = sel_cname(m->name);
+ for (const auto& meth : *mlist) {
+ const char *name = sel_cname(meth.name);
if (0 == strcmp(name, "load")) {
- return m->imp;
+ return meth.imp;
}
}
}
Protocol * __unsafe_unretained *
class_copyProtocolList(Class cls, unsigned int *outCount)
{
- Protocol **r;
- const protocol_list_t **p;
unsigned int count = 0;
- unsigned int i;
Protocol **result = nil;
if (!cls) {
return nil;
}
- rwlock_read(&runtimeLock);
+ rwlock_reader_t lock(runtimeLock);
assert(cls->isRealized());
- for (p = cls->data()->protocols; p && *p; p++) {
- count += (uint32_t)(*p)->count;
- }
+ count = cls->data()->protocols.count();
- if (count) {
+ if (count > 0) {
result = (Protocol **)malloc((count+1) * sizeof(Protocol *));
- r = result;
- for (p = cls->data()->protocols; p && *p; p++) {
- for (i = 0; i < (*p)->count; i++) {
- *r++ = (Protocol *)remapProtocol((*p)->list[i]);
- }
+
+ count = 0;
+ for (const auto& proto : cls->data()->protocols) {
+ result[count++] = (Protocol *)remapProtocol(proto);
}
- *r++ = nil;
+ result[count] = nil;
}
- rwlock_unlock_read(&runtimeLock);
-
if (outCount) *outCount = count;
return result;
}
const char **names;
// Need to write-lock in case demangledName() needs to realize a class.
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
classlist = _getObjc2ClassList(hi, &count);
names = (const char **)malloc((count+1) * sizeof(const char *));
count -= shift;
names[count] = nil;
- rwlock_unlock_write(&runtimeLock);
-
if (outCount) *outCount = (unsigned int)count;
return names;
}
// Class is not yet realized and name is mangled. Realize the class.
// Only objc_copyClassNamesForImage() should get here.
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
assert(realize);
if (realize) {
realizeClass((Class)this);
static method_t *findMethodInSortedMethodList(SEL key, const method_list_t *list)
{
+ assert(list);
+
const method_t * const first = &list->first;
const method_t *base = first;
const method_t *probe;
**********************************************************************/
static method_t *search_method_list(const method_list_t *mlist, SEL sel)
{
- int methodListIsFixedUp = isMethodListFixedUp(mlist);
- int methodListHasExpectedSize = mlist->getEntsize() == sizeof(method_t);
+ int methodListIsFixedUp = mlist->isFixedUp();
+ int methodListHasExpectedSize = mlist->entsize() == sizeof(method_t);
if (__builtin_expect(methodListIsFixedUp && methodListHasExpectedSize, 1)) {
return findMethodInSortedMethodList(sel, mlist);
} else {
// Linear search of unsorted method list
- method_list_t::method_iterator iter = mlist->begin();
- method_list_t::method_iterator end = mlist->end();
- for ( ; iter != end; ++iter) {
- if (iter->name == sel) return &*iter;
+ for (auto& meth : *mlist) {
+ if (meth.name == sel) return &meth;
}
}
-#ifndef NDEBUG
+#if DEBUG
// sanity-check negative results
- if (isMethodListFixedUp(mlist)) {
- method_list_t::method_iterator iter = mlist->begin();
- method_list_t::method_iterator end = mlist->end();
- for ( ; iter != end; ++iter) {
- if (iter->name == sel) {
+ if (mlist->isFixedUp()) {
+ for (auto& meth : *mlist) {
+ if (meth.name == sel) {
_objc_fatal("linear search worked when binary search did not");
}
}
static method_t *
getMethodNoSuper_nolock(Class cls, SEL sel)
{
- rwlock_assert_locked(&runtimeLock);
+ runtimeLock.assertLocked();
assert(cls->isRealized());
// fixme nil cls?
// fixme nil sel?
- FOREACH_METHOD_LIST(mlist, cls, {
- method_t *m = search_method_list(mlist, sel);
+ for (auto mlists = cls->data()->methods.beginLists(),
+ end = cls->data()->methods.endLists();
+ mlists != end;
+ ++mlists)
+ {
+ method_t *m = search_method_list(*mlists, sel);
if (m) return m;
- });
+ }
return nil;
}
{
method_t *m = nil;
- rwlock_assert_locked(&runtimeLock);
+ runtimeLock.assertLocked();
// fixme nil cls?
// fixme nil sel?
**********************************************************************/
static Method _class_getMethod(Class cls, SEL sel)
{
- method_t *m;
- rwlock_read(&runtimeLock);
- m = getMethod_nolock(cls, sel);
- rwlock_unlock_read(&runtimeLock);
- return m;
+ rwlock_reader_t lock(runtimeLock);
+ return getMethod_nolock(cls, sel);
}
* implementer is the class that owns the implementation in question.
**********************************************************************/
static void
-log_and_fill_cache(Class cls, Class implementer, IMP imp, SEL sel)
+log_and_fill_cache(Class cls, IMP imp, SEL sel, id receiver, Class implementer)
{
#if SUPPORT_MESSAGE_LOGGING
if (objcMsgLogEnabled) {
if (!cacheIt) return;
}
#endif
- cache_fill (cls, sel, imp);
+ cache_fill (cls, sel, imp, receiver);
}
Method meth;
bool triedResolver = NO;
- rwlock_assert_unlocked(&runtimeLock);
+ runtimeLock.assertUnlocked();
// Optimistic cache lookup
if (cache) {
}
if (!cls->isRealized()) {
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
realizeClass(cls);
- rwlock_unlock_write(&runtimeLock);
}
if (initialize && !cls->isInitialized()) {
// be added but ignored indefinitely because the cache was re-filled
// with the old value after the cache flush on behalf of the category.
retry:
- rwlock_read(&runtimeLock);
+ runtimeLock.read();
// Ignore GC selectors
if (ignoreSelector(sel)) {
imp = _objc_ignored_method;
- cache_fill(cls, sel, imp);
+ cache_fill(cls, sel, imp, inst);
goto done;
}
meth = getMethodNoSuper_nolock(cls, sel);
if (meth) {
- log_and_fill_cache(cls, cls, meth->imp, sel);
+ log_and_fill_cache(cls, meth->imp, sel, inst, cls);
imp = meth->imp;
goto done;
}
if (imp) {
if (imp != (IMP)_objc_msgForward_impcache) {
// Found the method in a superclass. Cache it in this class.
- log_and_fill_cache(cls, curClass, imp, sel);
+ log_and_fill_cache(cls, imp, sel, inst, curClass);
goto done;
}
else {
// Superclass method list.
meth = getMethodNoSuper_nolock(curClass, sel);
if (meth) {
- log_and_fill_cache(cls, curClass, meth->imp, sel);
+ log_and_fill_cache(cls, meth->imp, sel, inst, curClass);
imp = meth->imp;
goto done;
}
// No implementation found. Try method resolver once.
if (resolver && !triedResolver) {
- rwlock_unlock_read(&runtimeLock);
+ runtimeLock.unlockRead();
_class_resolveMethod(cls, sel, inst);
// Don't cache the result; we don't hold the lock so it may have
// changed already. Re-do the search from scratch instead.
// Use forwarding.
imp = (IMP)_objc_msgForward_impcache;
- cache_fill(cls, sel, imp);
+ cache_fill(cls, sel, imp, inst);
done:
- rwlock_unlock_read(&runtimeLock);
+ runtimeLock.unlockRead();
// paranoia: look for ignored selectors with non-ignored implementations
assert(!(ignoreSelector(sel) && imp != (IMP)&_objc_ignored_method));
// Cache miss. Search method list.
- rwlock_read(&runtimeLock);
+ rwlock_reader_t lock(runtimeLock);
meth = getMethodNoSuper_nolock(cls, sel);
if (meth) {
// Hit in method list. Cache it.
- cache_fill(cls, sel, meth->imp);
- rwlock_unlock_read(&runtimeLock);
+ cache_fill(cls, sel, meth->imp, nil);
return meth->imp;
} else {
// Miss in method list. Cache objc_msgForward.
- cache_fill(cls, sel, _objc_msgForward_impcache);
- rwlock_unlock_read(&runtimeLock);
+ cache_fill(cls, sel, _objc_msgForward_impcache, nil);
return _objc_msgForward_impcache;
}
}
**********************************************************************/
objc_property_t class_getProperty(Class cls, const char *name)
{
- property_t *result = nil;
- chained_property_list *plist;
-
if (!cls || !name) return nil;
- rwlock_read(&runtimeLock);
+ rwlock_reader_t lock(runtimeLock);
assert(cls->isRealized());
for ( ; cls; cls = cls->superclass) {
- for (plist = cls->data()->properties; plist; plist = plist->next) {
- uint32_t i;
- for (i = 0; i < plist->count; i++) {
- if (0 == strcmp(name, plist->list[i].name)) {
- result = &plist->list[i];
- goto done;
- }
+ for (auto& prop : cls->data()->properties) {
+ if (0 == strcmp(name, prop.name)) {
+ return (objc_property_t)&prop;
}
}
}
-
- done:
- rwlock_unlock_read(&runtimeLock);
-
- return (objc_property_t)result;
+
+ return nil;
}
cls = (Class)this;
metacls = cls->ISA();
- rwlock_read(&runtimeLock);
+ rwlock_reader_t lock(runtimeLock);
// Scan metaclass for custom AWZ.
// Scan metaclass for custom RR.
}
else if (metacls == classNSObject()->ISA()) {
// NSObject's metaclass AWZ is default, but we still need to check cats
- FOREACH_CATEGORY_METHOD_LIST(mlist, metacls, {
- if (methodListImplementsAWZ(mlist)) {
+ auto& methods = metacls->data()->methods;
+ for (auto mlists = methods.beginCategoryMethodLists(),
+ end = methods.endCategoryMethodLists(metacls);
+ mlists != end;
+ ++mlists)
+ {
+ if (methodListImplementsAWZ(*mlists)) {
metaCustomAWZ = YES;
inherited = NO;
break;
}
- });
+ }
}
else if (metacls->superclass->hasCustomAWZ()) {
// Superclass is custom AWZ, therefore we are too.
}
else {
// Not metaclass NSObject.
- FOREACH_METHOD_LIST(mlist, metacls, {
- if (methodListImplementsAWZ(mlist)) {
+ auto& methods = metacls->data()->methods;
+ for (auto mlists = methods.beginLists(),
+ end = methods.endLists();
+ mlists != end;
+ ++mlists)
+ {
+ if (methodListImplementsAWZ(*mlists)) {
metaCustomAWZ = YES;
inherited = NO;
break;
}
- });
+ }
}
if (!metaCustomAWZ) metacls->setHasDefaultAWZ();
}
if (cls == classNSObject()) {
// NSObject's RR is default, but we still need to check categories
- FOREACH_CATEGORY_METHOD_LIST(mlist, cls, {
- if (methodListImplementsRR(mlist)) {
+ auto& methods = cls->data()->methods;
+ for (auto mlists = methods.beginCategoryMethodLists(),
+ end = methods.endCategoryMethodLists(cls);
+ mlists != end;
+ ++mlists)
+ {
+ if (methodListImplementsRR(*mlists)) {
clsCustomRR = YES;
inherited = NO;
break;
}
- });
+ }
}
else if (!cls->superclass) {
// Custom root class
}
else {
// Not class NSObject.
- FOREACH_METHOD_LIST(mlist, cls, {
- if (methodListImplementsRR(mlist)) {
+ auto& methods = cls->data()->methods;
+ for (auto mlists = methods.beginLists(),
+ end = methods.endLists();
+ mlists != end;
+ ++mlists)
+ {
+ if (methodListImplementsRR(*mlists)) {
clsCustomRR = YES;
inherited = NO;
break;
}
- });
+ }
}
if (!clsCustomRR) cls->setHasDefaultRR();
// Update the +initialize flags.
// Do this last.
metacls->changeInfo(RW_INITIALIZED, RW_INITIALIZING);
-
- rwlock_unlock_read(&runtimeLock);
}
**********************************************************************/
BOOL _class_usesAutomaticRetainRelease(Class cls)
{
- return (cls->data()->ro->flags & RO_IS_ARR) ? YES : NO;
+ return bool(cls->data()->ro->flags & RO_IS_ARR);
}
void objc_class::setHasCustomRR(bool inherited)
{
Class cls = (Class)this;
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
if (hasCustomRR()) return;
void objc_class::setHasCustomAWZ(bool inherited)
{
Class cls = (Class)this;
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
if (hasCustomAWZ()) return;
void objc_class::setRequiresRawIsa(bool inherited)
{
Class cls = (Class)this;
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
if (requiresRawIsa()) return;
// We look for such cases here.
if (isRRSelector(meth->name)) {
- // already custom, nothing would change
- if (classNSObject()->hasCustomRR()) return;
+
+ if ((classNSObject()->isInitialized() &&
+ classNSObject()->hasCustomRR())
+ ||
+ ClassNSObjectRRSwizzled)
+ {
+ // already custom, nothing would change
+ return;
+ }
bool swizzlingNSObject = NO;
if (cls == classNSObject()) {
} else {
// Don't know the class.
// The only special case is class NSObject.
- FOREACH_METHOD_LIST(mlist, classNSObject(), {
- for (uint32_t i = 0; i < mlist->count; i++) {
- if (meth == method_list_nth(mlist, i)) {
- swizzlingNSObject = YES;
- break;
- }
+ for (const auto& meth2 : classNSObject()->data()->methods) {
+ if (meth == &meth2) {
+ swizzlingNSObject = YES;
+ break;
}
- if (swizzlingNSObject) break;
- });
+ }
}
if (swizzlingNSObject) {
if (classNSObject()->isInitialized()) {
}
}
else if (isAWZSelector(meth->name)) {
- // already custom, nothing would change
- if (classNSObject()->ISA()->hasCustomAWZ()) return;
+ Class metaclassNSObject = classNSObject()->ISA();
+
+ if ((metaclassNSObject->isInitialized() &&
+ metaclassNSObject->hasCustomAWZ())
+ ||
+ MetaclassNSObjectAWZSwizzled)
+ {
+ // already custom, nothing would change
+ return;
+ }
bool swizzlingNSObject = NO;
- if (cls == classNSObject()->ISA()) {
+ if (cls == metaclassNSObject) {
swizzlingNSObject = YES;
} else {
// Don't know the class.
// The only special case is metaclass NSObject.
- FOREACH_METHOD_LIST(mlist, classNSObject()->ISA(), {
- for (uint32_t i = 0; i < mlist->count; i++) {
- if (meth == method_list_nth(mlist, i)) {
- swizzlingNSObject = YES;
- break;
- }
+ for (const auto& meth2 : metaclassNSObject->data()->methods) {
+ if (meth == &meth2) {
+ swizzlingNSObject = YES;
+ break;
}
- if (swizzlingNSObject) break;
- });
+ }
}
if (swizzlingNSObject) {
- if (classNSObject()->ISA()->isInitialized()) {
- classNSObject()->ISA()->setHasCustomAWZ();
+ if (metaclassNSObject->isInitialized()) {
+ metaclassNSObject->setHasCustomAWZ();
} else {
// NSObject not yet +initialized, so custom RR has not yet
// been checked, and setInitialized() will not notice the
{
if (!cls) return;
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
// Can only change layout of in-construction classes.
// note: if modifications to post-construction classes were
if (!(cls->data()->flags & RW_CONSTRUCTING)) {
_objc_inform("*** Can't set ivar layout for already-registered "
"class '%s'", cls->nameForLogging());
- rwlock_unlock_write(&runtimeLock);
return;
}
class_ro_t *ro_w = make_ro_writeable(cls->data());
try_free(ro_w->ivarLayout);
- ro_w->ivarLayout = _ustrdup_internal(layout);
-
- rwlock_unlock_write(&runtimeLock);
+ ro_w->ivarLayout = ustrdupMaybeNil(layout);
}
// SPI: Instance-specific object layout.
_class_setIvarLayoutAccessor(Class cls, const uint8_t* (*accessor) (id object)) {
if (!cls) return;
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
class_ro_t *ro_w = make_ro_writeable(cls->data());
if (!(cls->data()->flags & RW_HAS_INSTANCE_SPECIFIC_LAYOUT)) try_free(ro_w->ivarLayout);
ro_w->ivarLayout = (uint8_t *)accessor;
cls->setInfo(RW_HAS_INSTANCE_SPECIFIC_LAYOUT);
-
- rwlock_unlock_write(&runtimeLock);
}
const uint8_t *
{
if (!cls) return;
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
// Can only change layout of in-construction classes.
// note: if modifications to post-construction classes were
if (!(cls->data()->flags & RW_CONSTRUCTING)) {
_objc_inform("*** Can't set weak ivar layout for already-registered "
"class '%s'", cls->nameForLogging());
- rwlock_unlock_write(&runtimeLock);
return;
}
class_ro_t *ro_w = make_ro_writeable(cls->data());
try_free(ro_w->weakIvarLayout);
- ro_w->weakIvarLayout = _ustrdup_internal(layout);
-
- rwlock_unlock_write(&runtimeLock);
+ ro_w->weakIvarLayout = ustrdupMaybeNil(layout);
}
Ivar
_class_getVariable(Class cls, const char *name, Class *memberOf)
{
- rwlock_read(&runtimeLock);
+ rwlock_reader_t lock(runtimeLock);
for ( ; cls; cls = cls->superclass) {
ivar_t *ivar = getIvar(cls, name);
if (ivar) {
- rwlock_unlock_read(&runtimeLock);
if (memberOf) *memberOf = cls;
return ivar;
}
}
- rwlock_unlock_read(&runtimeLock);
-
return nil;
}
BOOL class_conformsToProtocol(Class cls, Protocol *proto_gen)
{
protocol_t *proto = newprotocol(proto_gen);
- const protocol_list_t **plist;
- unsigned int i;
- BOOL result = NO;
if (!cls) return NO;
if (!proto_gen) return NO;
- rwlock_read(&runtimeLock);
+ rwlock_reader_t lock(runtimeLock);
assert(cls->isRealized());
- for (plist = cls->data()->protocols; plist && *plist; plist++) {
- for (i = 0; i < (*plist)->count; i++) {
- protocol_t *p = remapProtocol((*plist)->list[i]);
- if (p == proto || protocol_conformsToProtocol_nolock(p, proto)) {
- result = YES;
- goto done;
- }
+ for (const auto& proto_ref : cls->data()->protocols) {
+ protocol_t *p = remapProtocol(proto_ref);
+ if (p == proto || protocol_conformsToProtocol_nolock(p, proto)) {
+ return YES;
}
}
- done:
- rwlock_unlock_read(&runtimeLock);
-
- return result;
+ return NO;
}
* Locking: runtimeLock must be held by the caller
**********************************************************************/
static IMP
-addMethod(Class cls, SEL name, IMP imp, const char *types, BOOL replace)
+addMethod(Class cls, SEL name, IMP imp, const char *types, bool replace)
{
IMP result = nil;
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
assert(types);
assert(cls->isRealized());
if ((m = getMethodNoSuper_nolock(cls, name))) {
// already exists
if (!replace) {
- result = _method_getImplementation(m);
+ result = m->imp;
} else {
result = _method_setImplementation(cls, m, imp);
}
} else {
// fixme optimize
method_list_t *newlist;
- newlist = (method_list_t *)_calloc_internal(sizeof(*newlist), 1);
- newlist->entsize_NEVER_USE = (uint32_t)sizeof(method_t) | fixed_up_method_list;
+ newlist = (method_list_t *)calloc(sizeof(*newlist), 1);
+ newlist->entsizeAndFlags =
+ (uint32_t)sizeof(method_t) | fixed_up_method_list;
newlist->count = 1;
newlist->first.name = name;
newlist->first.types = strdup(types);
newlist->first.imp = (IMP)&_objc_ignored_method;
}
- attachMethodLists(cls, &newlist, 1, NO, NO, YES);
+ prepareMethodLists(cls, &newlist, 1, NO, NO);
+ cls->data()->methods.attachLists(&newlist, 1);
+ flushCaches(cls);
result = nil;
}
{
if (!cls) return NO;
- rwlock_write(&runtimeLock);
- IMP old = addMethod(cls, name, imp, types ?: "", NO);
- rwlock_unlock_write(&runtimeLock);
- return old ? NO : YES;
+ rwlock_writer_t lock(runtimeLock);
+ return ! addMethod(cls, name, imp, types ?: "", NO);
}
{
if (!cls) return nil;
- rwlock_write(&runtimeLock);
- IMP old = addMethod(cls, name, imp, types ?: "", YES);
- rwlock_unlock_write(&runtimeLock);
- return old;
+ rwlock_writer_t lock(runtimeLock);
+ return addMethod(cls, name, imp, types ?: "", YES);
}
if (!type) type = "";
if (name && 0 == strcmp(name, "")) name = nil;
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
assert(cls->isRealized());
// No class variables
if (cls->isMetaClass()) {
- rwlock_unlock_write(&runtimeLock);
return NO;
}
// Can only add ivars to in-construction classes.
if (!(cls->data()->flags & RW_CONSTRUCTING)) {
- rwlock_unlock_write(&runtimeLock);
return NO;
}
// Check for too-big ivar.
// fixme check for superclass ivar too?
if ((name && getIvar(cls, name)) || size > UINT32_MAX) {
- rwlock_unlock_write(&runtimeLock);
return NO;
}
ivar_list_t *oldlist, *newlist;
if ((oldlist = (ivar_list_t *)cls->data()->ro->ivars)) {
- size_t oldsize = ivar_list_size(oldlist);
- newlist = (ivar_list_t *)
- _calloc_internal(oldsize + oldlist->entsize, 1);
+ size_t oldsize = oldlist->byteSize();
+ newlist = (ivar_list_t *)calloc(oldsize + oldlist->entsize(), 1);
memcpy(newlist, oldlist, oldsize);
- _free_internal(oldlist);
+ free(oldlist);
} else {
- newlist = (ivar_list_t *)
- _calloc_internal(sizeof(ivar_list_t), 1);
- newlist->entsize = (uint32_t)sizeof(ivar_t);
+ newlist = (ivar_list_t *)calloc(sizeof(ivar_list_t), 1);
+ newlist->entsizeAndFlags = (uint32_t)sizeof(ivar_t);
}
uint32_t offset = cls->unalignedInstanceSize();
uint32_t alignMask = (1<<alignment)-1;
offset = (offset + alignMask) & ~alignMask;
- ivar_t *ivar = ivar_list_nth(newlist, newlist->count++);
+ ivar_t& ivar = newlist->get(newlist->count++);
#if __x86_64__
// Deliberately over-allocate the ivar offset variable.
// Use calloc() to clear all 64 bits. See the note in struct ivar_t.
- ivar->offset = (int32_t *)(int64_t *)_calloc_internal(sizeof(int64_t), 1);
+ ivar.offset = (int32_t *)(int64_t *)calloc(sizeof(int64_t), 1);
#else
- ivar->offset = (int32_t *)_malloc_internal(sizeof(int32_t));
+ ivar.offset = (int32_t *)malloc(sizeof(int32_t));
#endif
- *ivar->offset = offset;
- ivar->name = name ? _strdup_internal(name) : nil;
- ivar->type = _strdup_internal(type);
- ivar->alignment_raw = alignment;
- ivar->size = (uint32_t)size;
+ *ivar.offset = offset;
+ ivar.name = name ? strdup(name) : nil;
+ ivar.type = strdup(type);
+ ivar.alignment_raw = alignment;
+ ivar.size = (uint32_t)size;
ro_w->ivars = newlist;
cls->setInstanceSize((uint32_t)(offset + size));
// Ivar layout updated in registerClass.
- rwlock_unlock_write(&runtimeLock);
-
return YES;
}
BOOL class_addProtocol(Class cls, Protocol *protocol_gen)
{
protocol_t *protocol = newprotocol(protocol_gen);
- protocol_list_t *plist;
- const protocol_list_t **plistp;
if (!cls) return NO;
if (class_conformsToProtocol(cls, protocol_gen)) return NO;
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
assert(cls->isRealized());
// fixme optimize
- plist = (protocol_list_t *)
- _malloc_internal(sizeof(protocol_list_t) + sizeof(protocol_t *));
- plist->count = 1;
- plist->list[0] = (protocol_ref_t)protocol;
-
- unsigned int count = 0;
- for (plistp = cls->data()->protocols; plistp && *plistp; plistp++) {
- count++;
- }
+ protocol_list_t *protolist = (protocol_list_t *)
+ malloc(sizeof(protocol_list_t) + sizeof(protocol_t *));
+ protolist->count = 1;
+ protolist->list[0] = (protocol_ref_t)protocol;
- cls->data()->protocols = (const protocol_list_t **)
- _realloc_internal(cls->data()->protocols,
- (count+2) * sizeof(protocol_list_t *));
- cls->data()->protocols[count] = plist;
- cls->data()->protocols[count+1] = nil;
+ cls->data()->protocols.attachLists(&protolist, 1);
// fixme metaclass?
- rwlock_unlock_write(&runtimeLock);
-
return YES;
}
* Adds a property to a class.
* Locking: acquires runtimeLock
**********************************************************************/
-static BOOL
+static bool
_class_addProperty(Class cls, const char *name,
const objc_property_attribute_t *attrs, unsigned int count,
- BOOL replace)
+ bool replace)
{
- chained_property_list *plist;
-
if (!cls) return NO;
if (!name) return NO;
}
else if (prop) {
// replace existing
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
try_free(prop->attributes);
prop->attributes = copyPropertyAttributeString(attrs, count);
- rwlock_unlock_write(&runtimeLock);
return YES;
}
else {
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
assert(cls->isRealized());
- plist = (chained_property_list *)
- _malloc_internal(sizeof(*plist) + sizeof(plist->list[0]));
- plist->count = 1;
- plist->list[0].name = _strdup_internal(name);
- plist->list[0].attributes = copyPropertyAttributeString(attrs, count);
-
- plist->next = cls->data()->properties;
- cls->data()->properties = plist;
+ property_list_t *proplist = (property_list_t *)
+ malloc(sizeof(*proplist));
+ proplist->count = 1;
+ proplist->entsizeAndFlags = sizeof(proplist->first);
+ proplist->first.name = strdup(name);
+ proplist->first.attributes = copyPropertyAttributeString(attrs, count);
- rwlock_unlock_write(&runtimeLock);
+ cls->data()->properties.attachLists(&proplist, 1);
return YES;
}
**********************************************************************/
Class
look_up_class(const char *name,
- BOOL includeUnconnected __attribute__((unused)),
- BOOL includeClassHandler __attribute__((unused)))
+ bool includeUnconnected __attribute__((unused)),
+ bool includeClassHandler __attribute__((unused)))
{
if (!name) return nil;
- rwlock_read(&runtimeLock);
- Class result = getClass(name);
- BOOL unrealized = result && !result->isRealized();
- rwlock_unlock_read(&runtimeLock);
+ Class result;
+ bool unrealized;
+ {
+ rwlock_reader_t lock(runtimeLock);
+ result = getClass(name);
+ unrealized = result && !result->isRealized();
+ }
if (unrealized) {
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
realizeClass(result);
- rwlock_unlock_write(&runtimeLock);
}
return result;
}
{
Class duplicate;
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
assert(original->isRealized());
assert(!original->isMetaClass());
duplicate->initClassIsa(original->ISA());
duplicate->superclass = original->superclass;
- duplicate->cache.setEmpty();
+ duplicate->cache.initializeToEmpty();
- class_rw_t *rw = (class_rw_t *)_calloc_internal(sizeof(*original->data()), 1);
+ class_rw_t *rw = (class_rw_t *)calloc(sizeof(*original->data()), 1);
rw->flags = (original->data()->flags | RW_COPIED_RO | RW_REALIZING);
rw->version = original->data()->version;
rw->firstSubclass = nil;
duplicate->setData(rw);
rw->ro = (class_ro_t *)
- _memdup_internal(original->data()->ro, sizeof(*original->data()->ro));
- *(char **)&rw->ro->name = _strdup_internal(name);
-
- if (original->data()->flags & RW_METHOD_ARRAY) {
- rw->method_lists = (method_list_t **)
- _memdup_internal(original->data()->method_lists,
- malloc_size(original->data()->method_lists));
- method_list_t **mlistp;
- for (mlistp = rw->method_lists; *mlistp; mlistp++) {
- *mlistp = (method_list_t *)
- _memdup_internal(*mlistp, method_list_size(*mlistp));
- }
- } else {
- if (original->data()->method_list) {
- rw->method_list = (method_list_t *)
- _memdup_internal(original->data()->method_list,
- method_list_size(original->data()->method_list));
- }
- }
+ memdup(original->data()->ro, sizeof(*original->data()->ro));
+ *(char **)&rw->ro->name = strdup(name);
+
+ rw->methods = original->data()->methods.duplicate();
// fixme dies when categories are added to the base
rw->properties = original->data()->properties;
duplicate->clearInfo(RW_REALIZING);
- rwlock_unlock_write(&runtimeLock);
-
return duplicate;
}
static void objc_initializeClassPair_internal(Class superclass, const char *name, Class cls, Class meta)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
class_ro_t *cls_ro_w, *meta_ro_w;
- cls->cache.setEmpty();
- meta->cache.setEmpty();
+ cls->cache.initializeToEmpty();
+ meta->cache.initializeToEmpty();
- cls->setData((class_rw_t *)_calloc_internal(sizeof(class_rw_t), 1));
- meta->setData((class_rw_t *)_calloc_internal(sizeof(class_rw_t), 1));
- cls_ro_w = (class_ro_t *)_calloc_internal(sizeof(class_ro_t), 1);
- meta_ro_w = (class_ro_t *)_calloc_internal(sizeof(class_ro_t), 1);
+ cls->setData((class_rw_t *)calloc(sizeof(class_rw_t), 1));
+ meta->setData((class_rw_t *)calloc(sizeof(class_rw_t), 1));
+ cls_ro_w = (class_ro_t *)calloc(sizeof(class_ro_t), 1);
+ meta_ro_w = (class_ro_t *)calloc(sizeof(class_ro_t), 1);
cls->data()->ro = cls_ro_w;
meta->data()->ro = meta_ro_w;
meta->setInstanceSize(meta_ro_w->instanceStart);
}
- cls_ro_w->name = _strdup_internal(name);
- meta_ro_w->name = _strdup_internal(name);
+ cls_ro_w->name = strdup(name);
+ meta_ro_w->name = strdup(name);
cls_ro_w->ivarLayout = &UnsetLayout;
cls_ro_w->weakIvarLayout = &UnsetLayout;
**********************************************************************/
Class objc_initializeClassPair(Class superclass, const char *name, Class cls, Class meta)
{
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
// Fail if the class name is in use.
// Fail if the superclass isn't kosher.
if (getClass(name) || !verifySuperclass(superclass, true/*rootOK*/)) {
- rwlock_unlock_write(&runtimeLock);
return nil;
}
objc_initializeClassPair_internal(superclass, name, cls, meta);
- rwlock_unlock_write(&runtimeLock);
return cls;
}
{
Class cls, meta;
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
// Fail if the class name is in use.
// Fail if the superclass isn't kosher.
if (getClass(name) || !verifySuperclass(superclass, true/*rootOK*/)) {
- rwlock_unlock_write(&runtimeLock);
return nil;
}
// fixme mangle the name if it looks swift-y?
objc_initializeClassPair_internal(superclass, name, cls, meta);
- rwlock_unlock_write(&runtimeLock);
-
return cls;
}
**********************************************************************/
void objc_registerClassPair(Class cls)
{
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
if ((cls->data()->flags & RW_CONSTRUCTED) ||
(cls->ISA()->data()->flags & RW_CONSTRUCTED))
{
_objc_inform("objc_registerClassPair: class '%s' was already "
"registered!", cls->data()->ro->name);
- rwlock_unlock_write(&runtimeLock);
return;
}
_objc_inform("objc_registerClassPair: class '%s' was not "
"allocated with objc_allocateClassPair!",
cls->data()->ro->name);
- rwlock_unlock_write(&runtimeLock);
return;
}
else if (ro_w->ivars == nil) {
// No local ivars. Use superclass's layouts.
ro_w->ivarLayout =
- _ustrdup_internal(supercls->data()->ro->ivarLayout);
+ ustrdupMaybeNil(supercls->data()->ro->ivarLayout);
}
else {
// Has local ivars. Build layouts based on superclass.
layout_bitmap_create(supercls->data()->ro->ivarLayout,
supercls->unalignedInstanceSize(),
cls->unalignedInstanceSize(), NO);
- uint32_t i;
- for (i = 0; i < ro_w->ivars->count; i++) {
- ivar_t *ivar = ivar_list_nth(ro_w->ivars, i);
- if (!ivar->offset) continue; // anonymous bitfield
+ for (const auto& ivar : *ro_w->ivars) {
+ if (!ivar.offset) continue; // anonymous bitfield
- layout_bitmap_set_ivar(bitmap, ivar->type, *ivar->offset);
+ layout_bitmap_set_ivar(bitmap, ivar.type, *ivar.offset);
}
ro_w->ivarLayout = layout_string_create(bitmap);
layout_bitmap_free(bitmap);
else if (ro_w->ivars == nil) {
// No local ivars. Use superclass's layout.
ro_w->weakIvarLayout =
- _ustrdup_internal(supercls->data()->ro->weakIvarLayout);
+ ustrdupMaybeNil(supercls->data()->ro->weakIvarLayout);
}
else {
// Has local ivars. Build layout based on superclass.
// No way to add weak ivars yet.
ro_w->weakIvarLayout =
- _ustrdup_internal(supercls->data()->ro->weakIvarLayout);
+ ustrdupMaybeNil(supercls->data()->ro->weakIvarLayout);
}
}
addNamedClass(cls, cls->data()->ro->name);
addRealizedClass(cls);
addRealizedMetaclass(cls->ISA());
-
- rwlock_unlock_write(&runtimeLock);
}
**********************************************************************/
Class objc_readClassPair(Class bits, const struct objc_image_info *info)
{
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
// No info bits are significant yet.
(void)info;
const char *name = bits->mangledName();
bool rootOK = bits->data()->flags & RO_ROOT;
if (getClass(name) || !verifySuperclass(bits->superclass, rootOK)){
- rwlock_unlock_write(&runtimeLock);
return nil;
}
cls->nameForLogging(), bits, cls);
}
realizeClass(cls);
-
- rwlock_unlock_write(&runtimeLock);
return cls;
}
* Call this before free_class.
* Locking: runtimeLock must be held by the caller.
**********************************************************************/
-static void detach_class(Class cls, BOOL isMeta)
+static void detach_class(Class cls, bool isMeta)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
// categories not yet attached to this class
- category_list *cats;
- cats = unattachedCategoriesForClass(cls);
- if (cats) free(cats);
+ removeAllUnattachedCategoriesForClass(cls);
// superclass's subclass list
if (cls->isRealized()) {
**********************************************************************/
static void free_class(Class cls)
{
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
if (! cls->isRealized()) return;
- uint32_t i;
-
- if (cls->cache.canBeFreed()) {
- free(cls->cache.buckets());
- }
+ auto rw = cls->data();
+ auto ro = rw->ro;
- FOREACH_METHOD_LIST(mlist, cls, {
- for (i = 0; i < mlist->count; i++) {
- method_t *m = method_list_nth(mlist, i);
- try_free(m->types);
- }
- try_free(mlist);
- });
- if (cls->data()->flags & RW_METHOD_ARRAY) {
- try_free(cls->data()->method_lists);
- }
-
- const ivar_list_t *ilist = cls->data()->ro->ivars;
- if (ilist) {
- for (i = 0; i < ilist->count; i++) {
- const ivar_t *ivar = ivar_list_nth(ilist, i);
- try_free(ivar->offset);
- try_free(ivar->name);
- try_free(ivar->type);
- }
- try_free(ilist);
- }
+ cache_delete(cls);
- const protocol_list_t **plistp;
- for (plistp = cls->data()->protocols; plistp && *plistp; plistp++) {
- try_free(*plistp);
+ for (auto& meth : rw->methods) {
+ try_free(meth.types);
}
- try_free(cls->data()->protocols);
+ rw->methods.tryFree();
- const chained_property_list *proplist = cls->data()->properties;
- while (proplist) {
- for (i = 0; i < proplist->count; i++) {
- const property_t *prop = proplist->list+i;
- try_free(prop->name);
- try_free(prop->attributes);
- }
- {
- const chained_property_list *temp = proplist;
- proplist = proplist->next;
- try_free(temp);
+ const ivar_list_t *ivars = ro->ivars;
+ if (ivars) {
+ for (auto& ivar : *ivars) {
+ try_free(ivar.offset);
+ try_free(ivar.name);
+ try_free(ivar.type);
}
+ try_free(ivars);
+ }
+
+ for (auto& prop : rw->properties) {
+ try_free(prop.name);
+ try_free(prop.attributes);
}
+ rw->properties.tryFree();
+
+ rw->protocols.tryFree();
- try_free(cls->data()->ro->ivarLayout);
- try_free(cls->data()->ro->weakIvarLayout);
- try_free(cls->data()->ro->name);
- try_free(cls->data()->ro);
- try_free(cls->data());
+ try_free(ro->ivarLayout);
+ try_free(ro->weakIvarLayout);
+ try_free(ro->name);
+ try_free(ro);
+ try_free(rw);
try_free(cls);
}
void objc_disposeClassPair(Class cls)
{
- rwlock_write(&runtimeLock);
+ rwlock_writer_t lock(runtimeLock);
if (!(cls->data()->flags & (RW_CONSTRUCTED|RW_CONSTRUCTING)) ||
!(cls->ISA()->data()->flags & (RW_CONSTRUCTED|RW_CONSTRUCTING)))
_objc_inform("objc_disposeClassPair: class '%s' was not "
"allocated with objc_allocateClassPair!",
cls->data()->ro->name);
- rwlock_unlock_write(&runtimeLock);
return;
}
if (cls->isMetaClass()) {
_objc_inform("objc_disposeClassPair: class '%s' is a metaclass, "
"not a class!", cls->data()->ro->name);
- rwlock_unlock_write(&runtimeLock);
return;
}
detach_class(cls, NO);
free_class(cls->ISA());
free_class(cls);
-
- rwlock_unlock_write(&runtimeLock);
}
static __attribute__((always_inline))
id
-_class_createInstanceFromZone(Class cls, size_t extraBytes, void *zone)
+_class_createInstanceFromZone(Class cls, size_t extraBytes, void *zone,
+ bool cxxConstruct = true,
+ size_t *outAllocatedSize = nil)
{
if (!cls) return nil;
bool fast = cls->canAllocIndexed();
size_t size = cls->instanceSize(extraBytes);
+ if (outAllocatedSize) *outAllocatedSize = size;
id obj;
if (!UseGC && !zone && fast) {
#endif
if (zone) {
obj = (id)malloc_zone_calloc ((malloc_zone_t *)zone, 1, size);
- } else {
+ } else {
obj = (id)calloc(1, size);
}
if (!obj) return nil;
obj->initIsa(cls);
}
- if (hasCxxCtor) {
+ if (cxxConstruct && hasCxxCtor) {
obj = _objc_constructOrFree(obj, cls);
}
results, num_requested);
}
-static BOOL classOrSuperClassesUseARR(Class cls) {
+static bool classOrSuperClassesUseARR(Class cls) {
while (cls) {
if (_class_usesAutomaticRetainRelease(cls)) return true;
cls = cls->superclass;
static id
_object_copyFromZone(id oldObj, size_t extraBytes, void *zone)
{
- id obj;
- size_t size;
-
if (!oldObj) return nil;
if (oldObj->isTaggedPointer()) return oldObj;
- size = oldObj->ISA()->instanceSize(extraBytes);
-#if SUPPORT_GC
- if (UseGC) {
- obj = (id) auto_zone_allocate_object(gc_zone, size,
- AUTO_OBJECT_SCANNED, 0, 1);
- } else
-#endif
- if (zone) {
- obj = (id) malloc_zone_calloc((malloc_zone_t *)zone, size, 1);
- } else {
- obj = (id) calloc(1, size);
- }
+ // fixme this doesn't handle C++ ivars correctly (#4619414)
+
+ Class cls = oldObj->ISA();
+ size_t size;
+ id obj = _class_createInstanceFromZone(cls, extraBytes, zone, false, &size);
if (!obj) return nil;
- // fixme this doesn't handle C++ ivars correctly (#4619414)
- objc_memmove_collectable(obj, oldObj, size);
+ // Copy everything except the isa, which was already set above.
+ uint8_t *copyDst = (uint8_t *)obj + sizeof(Class);
+ uint8_t *copySrc = (uint8_t *)oldObj + sizeof(Class);
+ size_t copySize = size - sizeof(Class);
+#if SUPPORT_GC
+ objc_memmove_collectable(copyDst, copySrc, copySize);
+#else
+ memmove(copyDst, copySrc, copySize);
+#endif
#if SUPPORT_GC
if (UseGC)
gc_fixup_weakreferences(obj, oldObj);
else
#endif
- if (classOrSuperClassesUseARR(obj->ISA()))
+ if (classOrSuperClassesUseARR(cls))
arr_fixup_copied_references(obj, oldObj);
return obj;
{
Class oldSuper;
- rwlock_assert_writing(&runtimeLock);
+ runtimeLock.assertWriting();
assert(cls->isRealized());
assert(newSuper->isRealized());
addSubclass(newSuper->ISA(), cls->ISA());
// Flush subclass's method caches.
- // If subclass is not yet +initialized then its cache will be empty.
- // Otherwise this is very slow for sel-side caches.
- if (cls->isInitialized() || cls->ISA()->isInitialized()) {
- flushCaches(cls);
- }
+ flushCaches(cls);
return oldSuper;
}
Class class_setSuperclass(Class cls, Class newSuper)
{
- Class oldSuper;
-
- rwlock_write(&runtimeLock);
- oldSuper = setSuperclass(cls, newSuper);
- rwlock_unlock_write(&runtimeLock);
-
- return oldSuper;
+ rwlock_writer_t lock(runtimeLock);
+ return setSuperclass(cls, newSuper);
}
IMP getLoadMethod();
+ bool isFuture();
+
bool isConnected();
const char *mangledName() { return name; }
extern void _objc_insertMethods(Class cls, struct old_method_list *mlist, struct old_category *cat);
extern void _objc_removeMethods(Class cls, struct old_method_list *mlist);
extern void _objc_flush_caches (Class cls);
-extern BOOL _class_addProperties(Class cls, struct old_property_list *additions);
-extern BOOL _class_hasLoadMethod(Class cls);
-extern void change_class_references(Class imposter, Class original, Class copy, BOOL changeSuperRefs);
+extern bool _class_addProperties(Class cls, struct old_property_list *additions);
+extern bool _class_hasLoadMethod(Class cls);
+extern void change_class_references(Class imposter, Class original, Class copy, bool changeSuperRefs);
extern void flush_marked_caches(void);
-extern void set_superclass(Class cls, Class supercls, BOOL cls_is_new);
+extern void set_superclass(Class cls, Class supercls, bool cls_is_new);
extern void try_free(const void *p);
extern struct old_property *property_list_nth(const struct old_property_list *plist, uint32_t i);
extern struct old_property **copyPropertyList(struct old_property_list *plist, unsigned int *outCount);
-extern struct objc_method_description * lookup_protocol_method(struct old_protocol *proto, SEL aSel, BOOL isRequiredMethod, BOOL isInstanceMethod, BOOL recursive);
+extern struct objc_method_description * lookup_protocol_method(struct old_protocol *proto, SEL aSel, bool isRequiredMethod, bool isInstanceMethod, bool recursive);
// used by flush_caches outside objc-cache.m
extern void _cache_flush(Class cls);
static inline NXMapTable *pendingClassRefsMapTable(void);
static inline NXMapTable *pendingSubclassesMapTable(void);
static void pendClassInstallation(Class cls, const char *superName);
-static void pendClassReference(Class *ref, const char *className, BOOL isMeta);
+static void pendClassReference(Class *ref, const char *className, bool isMeta);
static void resolve_references_to_class(Class cls);
static void resolve_subclasses_of_class(Class cls);
static void really_connect_class(Class cls, Class supercls);
-static BOOL connect_class(Class cls);
-static void map_method_descs (struct objc_method_description_list * methods, BOOL copy);
+static bool connect_class(Class cls);
+static void map_method_descs (struct objc_method_description_list * methods, bool copy);
static void _objcTweakMethodListPointerForClass(Class cls);
static inline void _objc_add_category(Class cls, old_category *category, int version);
-static BOOL _objc_add_category_flush_caches(Class cls, old_category *category, int version);
+static bool _objc_add_category_flush_caches(Class cls, old_category *category, int version);
static _objc_unresolved_category *reverse_cat(_objc_unresolved_category *cat);
static void resolve_categories_for_class(Class cls);
-static BOOL _objc_register_category(old_category *cat, int version);
+static bool _objc_register_category(old_category *cat, int version);
// Function called when a class is loaded from an image
// about 520 classes. Larger apps (like IB or WOB) have more like
// 800 classes. Some customers have massive quantities of classes.
// Foundation-only programs aren't likely to notice the ~6K loss.
- class_hash = NXCreateHashTableFromZone (classHashPrototype,
- 16,
- nil,
- _objc_internal_zone ());
+ class_hash = NXCreateHashTable(classHashPrototype, 16, nil);
_objc_debug_class_hash = class_hash;
}
Class cls;
int cnt, num;
- mutex_lock(&classLock);
- if (!class_hash) {
- mutex_unlock(&classLock);
- return 0;
- }
+ mutex_locker_t lock(classLock);
+ if (!class_hash) return 0;
+
num = NXCountHashTable(class_hash);
- if (nil == buffer) {
- mutex_unlock(&classLock);
- return num;
- }
+ if (nil == buffer) return num;
+
cnt = 0;
state = NXInitHashState(class_hash);
while (cnt < bufferLen &&
{
buffer[cnt++] = cls;
}
- mutex_unlock(&classLock);
+
return num;
}
Class *result;
unsigned int count;
- mutex_lock(&classLock);
+ mutex_locker_t lock(classLock);
result = nil;
count = class_hash ? NXCountHashTable(class_hash) : 0;
}
result[count] = nil;
}
- mutex_unlock(&classLock);
if (outCount) *outCount = count;
return result;
NXMapState state;
Protocol **result;
- mutex_lock(&classLock);
+ mutex_locker_t lock(classLock);
count = NXCountMapTable(protocol_map);
if (count == 0) {
- mutex_unlock(&classLock);
if (outCount) *outCount = 0;
return nil;
}
result[i++] = nil;
assert(i == count+1);
- mutex_unlock(&classLock);
-
if (outCount) *outCount = count;
return result;
}
{
if (!future_class_to_original_class_map) {
future_class_to_original_class_map =
- NXCreateMapTableFromZone (NXPtrValueMapPrototype, FUTURE_COUNT,
- _objc_internal_zone ());
+ NXCreateMapTable(NXPtrValueMapPrototype, FUTURE_COUNT);
original_class_to_future_class_map =
- NXCreateMapTableFromZone (NXPtrValueMapPrototype, FUTURE_COUNT,
- _objc_internal_zone ());
+ NXCreateMapTable(NXPtrValueMapPrototype, FUTURE_COUNT);
}
NXMapInsert (future_class_to_original_class_map,
// CF requests about 20 future classes, plus HIToolbox has one.
if (!future_class_hash) {
future_class_hash =
- NXCreateHashTableFromZone(classHashPrototype, FUTURE_COUNT,
- nil, _objc_internal_zone());
+ NXCreateHashTable(classHashPrototype, FUTURE_COUNT, nil);
}
- cls->name = _strdup_internal(name);
+ cls->name = strdup(name);
NXHashInsert(future_class_hash, cls);
if (PrintFuture) {
}
-/***********************************************************************
-* objc_setFutureClass.
-* Like objc_getFutureClass, but uses the provided memory block.
-* If the class already exists, a posing-like substitution is performed.
-* Not thread safe.
-**********************************************************************/
-void objc_setFutureClass(Class cls, const char *name)
+BOOL _class_isFutureClass(Class cls)
{
- Class oldcls;
- Class newcls = cls; // Not a real class!
-
- if ((oldcls = look_up_class(name, NO/*unconnected*/, NO/*classhandler*/))) {
- setOriginalClassForFutureClass(newcls, oldcls);
- // fixme hack
- memcpy(newcls, oldcls, sizeof(struct objc_class));
- newcls->info &= ~CLS_EXT;
-
- mutex_lock(&classLock);
- NXHashRemove(class_hash, oldcls);
- objc_removeRegisteredClass(oldcls);
- change_class_references(newcls, oldcls, nil, YES);
- NXHashInsert(class_hash, newcls);
- objc_addRegisteredClass(newcls);
- mutex_unlock(&classLock);
- } else {
- makeFutureClass(newcls, name);
- }
+ return cls && cls->isFuture();
}
-
-BOOL _class_isFutureClass(Class cls)
+bool objc_class::isFuture()
{
- return cls && future_class_hash && NXHashGet(future_class_hash, cls);
+ return future_class_hash && NXHashGet(future_class_hash, this);
}
**********************************************************************/
Protocol *objc_getProtocol(const char *name)
{
- Protocol *result;
+ mutex_locker_t lock(classLock);
if (!protocol_map) return nil;
- mutex_lock(&classLock);
- result = (Protocol *)NXMapGet(protocol_map, name);
- mutex_unlock(&classLock);
- return result;
+ return (Protocol *)NXMapGet(protocol_map, name);
}
* 3. classLoader callback
* 4. classHandler callback (optional)
**********************************************************************/
-Class look_up_class(const char *aClassName, BOOL includeUnconnected, BOOL includeClassHandler)
+Class look_up_class(const char *aClassName, bool includeUnconnected,
+ bool includeClassHandler)
{
- BOOL includeClassLoader = YES; // class loader cannot be skipped
+ bool includeClassLoader = YES; // class loader cannot be skipped
Class result = nil;
struct objc_class query;
if (!result && class_hash) {
// Check ordinary classes
- mutex_lock (&classLock);
+ mutex_locker_t lock(classLock);
result = (Class)NXHashGet(class_hash, &query);
- mutex_unlock (&classLock);
}
if (!result && includeUnconnected && unconnected_class_hash) {
// Check not-yet-connected classes
- mutex_lock(&classLock);
+ mutex_locker_t lock(classLock);
result = (Class)NXHashGet(unconnected_class_hash, &query);
- mutex_unlock(&classLock);
}
if (!result && includeClassLoader && _objc_classLoader) {
**********************************************************************/
bool objc_class::isConnected()
{
- bool result;
- mutex_lock(&classLock);
- result = NXHashMember(class_hash, this);
- mutex_unlock(&classLock);
- return result;
+ mutex_locker_t lock(classLock);
+ return NXHashMember(class_hash, this);
}
{
// Allocate table if needed
if (!pendingClassRefsMap) {
- pendingClassRefsMap =
- NXCreateMapTableFromZone(NXStrValueMapPrototype,
- 10, _objc_internal_zone ());
+ pendingClassRefsMap = NXCreateMapTable(NXStrValueMapPrototype, 10);
}
// Return table pointer
{
// Allocate table if needed
if (!pendingSubclassesMap) {
- pendingSubclassesMap =
- NXCreateMapTableFromZone(NXStrValueMapPrototype,
- 10, _objc_internal_zone ());
+ pendingSubclassesMap = NXCreateMapTable(NXStrValueMapPrototype, 10);
}
// Return table pointer
}
// Create entry referring to this class
- pending = (PendingSubclass *)_malloc_internal(sizeof(PendingSubclass));
+ pending = (PendingSubclass *)malloc(sizeof(PendingSubclass));
pending->subclass = cls;
// Link new entry into head of list of entries for this class
* pendClassReference
* Fix up a class ref when the class with the given name becomes connected.
**********************************************************************/
-static void pendClassReference(Class *ref, const char *className, BOOL isMeta)
+static void pendClassReference(Class *ref, const char *className, bool isMeta)
{
NXMapTable *table;
PendingClassRef *pending;
table = pendingClassRefsMapTable ();
// Create entry containing the class reference
- pending = (PendingClassRef *)_malloc_internal(sizeof(PendingClassRef));
+ pending = (PendingClassRef *)malloc(sizeof(PendingClassRef));
pending->ref = ref;
if (isMeta) {
pending->ref = (Class *)((uintptr_t)pending->ref | 1);
while (pending) {
PendingClassRef *next = pending->next;
if (pending->ref) {
- BOOL isMeta = ((uintptr_t)pending->ref & 1) ? YES : NO;
+ bool isMeta = (uintptr_t)pending->ref & 1;
Class *ref =
(Class *)((uintptr_t)pending->ref & ~(uintptr_t)1);
*ref = isMeta ? cls->ISA() : cls;
}
- _free_internal(pending);
+ free(pending);
pending = next;
}
while (pending) {
PendingSubclass *next = pending->next;
if (pending->subclass) connect_class(pending->subclass);
- _free_internal(pending);
+ free(pending);
pending = next;
}
}
if (UseGC && supercls &&
(cls->info & CLS_EXT) && (supercls->info & CLS_EXT))
{
- BOOL layoutChanged;
+ bool layoutChanged;
layout_bitmap ivarBitmap =
layout_bitmap_create(cls->ivar_layout,
cls->instance_size,
if (layoutChanged) {
layout_bitmap weakBitmap = {};
- BOOL weakLayoutChanged = NO;
+ bool weakLayoutChanged = NO;
if (cls->ext && cls->ext->weak_ivar_layout) {
// weak -> strong: strong bits should be cleared in weak layout
// Done!
cls->info |= CLS_CONNECTED;
- mutex_lock(&classLock);
-
- // Update hash tables.
- NXHashRemove(unconnected_class_hash, cls);
- oldCls = (Class)NXHashInsert(class_hash, cls);
- objc_addRegisteredClass(cls);
-
- // Delete unconnected_class_hash if it is now empty.
- if (NXCountHashTable(unconnected_class_hash) == 0) {
- NXFreeHashTable(unconnected_class_hash);
- unconnected_class_hash = nil;
- }
-
- // No duplicate classes allowed.
- // Duplicates should have been rejected by _objc_read_classes_from_image.
- assert(!oldCls);
-
- mutex_unlock(&classLock);
+ {
+ mutex_locker_t lock(classLock);
+
+ // Update hash tables.
+ NXHashRemove(unconnected_class_hash, cls);
+ oldCls = (Class)NXHashInsert(class_hash, cls);
+ objc_addRegisteredClass(cls);
+
+ // Delete unconnected_class_hash if it is now empty.
+ if (NXCountHashTable(unconnected_class_hash) == 0) {
+ NXFreeHashTable(unconnected_class_hash);
+ unconnected_class_hash = nil;
+ }
+
+ // No duplicate classes allowed.
+ // Duplicates should have been rejected by _objc_read_classes_from_image
+ assert(!oldCls);
+ }
// Fix up pended class refs to this class, if any
resolve_references_to_class(cls);
* Returns FALSE if cls could not be connected for some reason
* (missing superclass or still-unconnected superclass)
**********************************************************************/
-static BOOL connect_class(Class cls)
+static bool connect_class(Class cls)
{
if (cls->isConnected()) {
// This class is already connected to its superclass.
* installation.
* Returns YES if some method caches now need to be flushed.
**********************************************************************/
-static BOOL _objc_read_categories_from_image (header_info * hi)
+static bool _objc_read_categories_from_image (header_info * hi)
{
Module mods;
size_t midx;
- BOOL needFlush = NO;
+ bool needFlush = NO;
if (_objcHeaderIsReplacement(hi)) {
// Ignore any categories in this image
// If other Objective-C libraries are found, immediately resize
// class_hash, assuming that Foundation and AppKit are about
// to add lots of classes.
- mutex_lock(&classLock);
- if (hi->mhdr != libobjc_header && _NXHashCapacity(class_hash) < 1024) {
- _NXHashRehashToCapacity(class_hash, 1024);
+ {
+ mutex_locker_t lock(classLock);
+ if (hi->mhdr != libobjc_header && _NXHashCapacity(class_hash) < 1024) {
+ _NXHashRehashToCapacity(class_hash, 1024);
+ }
}
- mutex_unlock(&classLock);
// Major loop - process all modules in the image
mods = hi->mod_ptr;
for (index = 0; index < mods[midx].symtab->cls_def_cnt; index += 1)
{
Class newCls, oldCls;
- BOOL rejected;
+ bool rejected;
// Locate the class description pointer
newCls = (Class)mods[midx].symtab->defs[index];
if (_class_hasLoadMethod(newCls)) {
newCls->ISA()->info |= CLS_HAS_LOAD_METHOD;
}
-
- // Install into unconnected_class_hash.
- mutex_lock(&classLock);
- if (future_class_hash) {
- Class futureCls = (Class)
- NXHashRemove(future_class_hash, newCls);
- if (futureCls) {
- // Another class structure for this class was already
- // prepared by objc_getFutureClass(). Use it instead.
- _free_internal((char *)futureCls->name);
- memcpy(futureCls, newCls, sizeof(objc_class));
- setOriginalClassForFutureClass(futureCls, newCls);
- newCls = futureCls;
-
- if (NXCountHashTable(future_class_hash) == 0) {
- NXFreeHashTable(future_class_hash);
- future_class_hash = nil;
+ // Install into unconnected_class_hash.
+ {
+ mutex_locker_t lock(classLock);
+
+ if (future_class_hash) {
+ Class futureCls = (Class)
+ NXHashRemove(future_class_hash, newCls);
+ if (futureCls) {
+ // Another class structure for this class was already
+ // prepared by objc_getFutureClass(). Use it instead.
+ free((char *)futureCls->name);
+ memcpy(futureCls, newCls, sizeof(objc_class));
+ setOriginalClassForFutureClass(futureCls, newCls);
+ newCls = futureCls;
+
+ if (NXCountHashTable(future_class_hash) == 0) {
+ NXFreeHashTable(future_class_hash);
+ future_class_hash = nil;
+ }
}
}
+
+ if (!unconnected_class_hash) {
+ unconnected_class_hash =
+ NXCreateHashTable(classHashPrototype, 128, nil);
+ }
+
+ if ((oldCls = (Class)NXHashGet(class_hash, newCls)) ||
+ (oldCls = (Class)NXHashGet(unconnected_class_hash, newCls)))
+ {
+ // Another class with this name exists. Complain and reject.
+ inform_duplicate(newCls->name, oldCls, newCls);
+ rejected = YES;
+ }
+ else {
+ NXHashInsert(unconnected_class_hash, newCls);
+ rejected = NO;
+ }
}
- if (!unconnected_class_hash) {
- unconnected_class_hash =
- NXCreateHashTableFromZone(classHashPrototype, 128,
- nil, _objc_internal_zone());
- }
-
- if ((oldCls = (Class)NXHashGet(class_hash, newCls)) ||
- (oldCls = (Class)NXHashGet(unconnected_class_hash, newCls)))
- {
- // Another class with this name exists. Complain and reject.
- inform_duplicate(newCls->name, oldCls, newCls);
- rejected = YES;
- }
- else {
- NXHashInsert(unconnected_class_hash, newCls);
- rejected = NO;
- }
-
- mutex_unlock(&classLock);
-
if (!rejected) {
// Attach pended categories for this class, if any
resolve_categories_for_class(newCls);
unsigned int index;
unsigned int midx;
Module mods;
- BOOL replacement = _objcHeaderIsReplacement(hi);
+ bool replacement = _objcHeaderIsReplacement(hi);
// Major loop - process all modules in the image
mods = hi->mod_ptr;
{
Class cls = (Class)mods[midx].symtab->defs[index];
if (! replacement) {
- BOOL connected;
+ bool connected;
Class futureCls = getFutureClassForOriginalClass(cls);
if (futureCls) {
// objc_getFutureClass() requested a different class
* not yet exist, the reference is added to a list of pending references
* to be fixed up at a later date.
**********************************************************************/
-static void fix_class_ref(Class *ref, const char *name, BOOL isMeta)
+static void fix_class_ref(Class *ref, const char *name, bool isMeta)
{
Class cls;
* can still be used after the bundle's data segment is unmapped.
* Returns YES if dst was written to, NO if it was unchanged.
**********************************************************************/
-static inline void map_selrefs(SEL *sels, size_t count, BOOL copy)
+static inline void map_selrefs(SEL *sels, size_t count, bool copy)
{
size_t index;
* for registering selectors from unloadable bundles, so the selector
* can still be used after the bundle's data segment is unmapped.
**********************************************************************/
-static void map_method_descs (struct objc_method_description_list * methods, BOOL copy)
+static void map_method_descs (struct objc_method_description_list * methods, bool copy)
{
int index;
**********************************************************************/
struct objc_method_description *
lookup_protocol_method(old_protocol *proto, SEL aSel,
- BOOL isRequiredMethod, BOOL isInstanceMethod,
- BOOL recursive)
+ bool isRequiredMethod, bool isInstanceMethod,
+ bool recursive)
{
struct objc_method_description *m = nil;
old_protocol_ext *ext;
{
Class cls = objc_getClass("__IncompleteProtocol");
- mutex_lock(&classLock);
+ mutex_locker_t lock(classLock);
- if (NXMapGet(protocol_map, name)) {
- mutex_unlock(&classLock);
- return nil;
- }
+ if (NXMapGet(protocol_map, name)) return nil;
old_protocol *result = (old_protocol *)
- _calloc_internal(1, sizeof(old_protocol)
+ calloc(1, sizeof(old_protocol)
+ sizeof(old_protocol_ext));
old_protocol_ext *ext = (old_protocol_ext *)(result+1);
result->isa = cls;
- result->protocol_name = _strdup_internal(name);
+ result->protocol_name = strdup(name);
ext->size = sizeof(old_protocol_ext);
// fixme reserve name without installing
NXMapInsert(protocol_ext_map, result, result+1);
- mutex_unlock(&classLock);
-
return (Protocol *)result;
}
Class oldcls = objc_getClass("__IncompleteProtocol");
Class cls = objc_getClass("Protocol");
- mutex_lock(&classLock);
+ mutex_locker_t lock(classLock);
if (proto->isa == cls) {
_objc_inform("objc_registerProtocol: protocol '%s' was already "
"registered!", proto->protocol_name);
- mutex_unlock(&classLock);
return;
}
if (proto->isa != oldcls) {
_objc_inform("objc_registerProtocol: protocol '%s' was not allocated "
"with objc_allocateProtocol!", proto->protocol_name);
- mutex_unlock(&classLock);
return;
}
proto->isa = cls;
NXMapKeyCopyingInsert(protocol_map, proto->protocol_name, proto);
-
- mutex_unlock(&classLock);
}
if (!proto_gen) return;
if (!addition_gen) return;
- mutex_lock(&classLock);
+ mutex_locker_t lock(classLock);
if (proto->isa != cls) {
_objc_inform("protocol_addProtocol: modified protocol '%s' is not "
"under construction!", proto->protocol_name);
- mutex_unlock(&classLock);
return;
}
if (addition->isa == cls) {
_objc_inform("protocol_addProtocol: added protocol '%s' is still "
"under construction!", addition->protocol_name);
- mutex_unlock(&classLock);
return;
}
size_t size = sizeof(old_protocol_list)
+ protolist->count * sizeof(protolist->list[0]);
protolist = (old_protocol_list *)
- _realloc_internal(protolist, size);
+ realloc(protolist, size);
} else {
protolist = (old_protocol_list *)
- _calloc_internal(1, sizeof(old_protocol_list));
+ calloc(1, sizeof(old_protocol_list));
}
protolist->list[protolist->count++] = addition;
proto->protocol_list = protolist;
-
- mutex_unlock(&classLock);
}
{
if (!*list) {
*list = (struct objc_method_description_list *)
- _calloc_internal(sizeof(struct objc_method_description_list), 1);
+ calloc(sizeof(struct objc_method_description_list), 1);
} else {
size_t size = sizeof(struct objc_method_description_list)
+ (*list)->count * sizeof(struct objc_method_description);
*list = (struct objc_method_description_list *)
- _realloc_internal(*list, size);
+ realloc(*list, size);
}
struct objc_method_description *desc = &(*list)->list[(*list)->count++];
desc->name = name;
- desc->types = _strdup_internal(types ?: "");
+ desc->types = strdup(types ?: "");
}
void
if (!proto_gen) return;
- mutex_lock(&classLock);
+ mutex_locker_t lock(classLock);
if (proto->isa != cls) {
_objc_inform("protocol_addMethodDescription: protocol '%s' is not "
"under construction!", proto->protocol_name);
- mutex_unlock(&classLock);
return;
}
old_protocol_ext *ext = (old_protocol_ext *)(proto+1);
_protocol_addMethod(&ext->optional_class_methods, name, types);
}
-
- mutex_unlock(&classLock);
}
{
if (!*plist) {
*plist = (old_property_list *)
- _calloc_internal(sizeof(old_property_list), 1);
+ calloc(sizeof(old_property_list), 1);
(*plist)->entsize = sizeof(old_property);
} else {
*plist = (old_property_list *)
- _realloc_internal(*plist, sizeof(old_property_list)
+ realloc(*plist, sizeof(old_property_list)
+ (*plist)->count * (*plist)->entsize);
}
old_property *prop = property_list_nth(*plist, (*plist)->count++);
- prop->name = _strdup_internal(name);
+ prop->name = strdup(name);
prop->attributes = copyPropertyAttributeString(attrs, count);
}
if (!proto) return;
if (!name) return;
- mutex_lock(&classLock);
+ mutex_locker_t lock(classLock);
if (proto->isa != cls) {
_objc_inform("protocol_addProperty: protocol '%s' is not "
"under construction!", proto->protocol_name);
- mutex_unlock(&classLock);
return;
}
//} else /* !isRequiredProperty && !isInstanceProperty) */ {
// _protocol_addProperty(&ext->optional_class_properties, name, attrs, count);
//}
-
- mutex_unlock(&classLock);
}
* specified image, selectorize the method names and add to the protocol hash.
**********************************************************************/
-static BOOL versionIsExt(uintptr_t version, const char *names, size_t size)
+static bool versionIsExt(uintptr_t version, const char *names, size_t size)
{
// CodeWarrior used isa field for string "Protocol"
// from section __OBJC,__class_names. rdar://4951638
}
static void fix_protocol(old_protocol *proto, Class protocolClass,
- BOOL isBundle, const char *names, size_t names_size)
+ bool isBundle, const char *names, size_t names_size)
{
uintptr_t version;
if (!proto) return;
const char *names;
size_t names_size;
- mutex_lock(&classLock);
+ mutex_locker_t lock(classLock);
// Allocate the protocol registry if necessary.
if (!protocol_map) {
protocol_map =
- NXCreateMapTableFromZone(NXStrValueMapPrototype, 32,
- _objc_internal_zone());
+ NXCreateMapTable(NXStrValueMapPrototype, 32);
}
if (!protocol_ext_map) {
protocol_ext_map =
- NXCreateMapTableFromZone(NXPtrValueMapPrototype, 32,
- _objc_internal_zone());
+ NXCreateMapTable(NXPtrValueMapPrototype, 32);
}
protos = _getObjcProtocols(hi, &count);
for (i = 0; i < count; i++) {
fix_protocol(protos[i], protocolClass, isBundle, names, names_size);
}
-
- mutex_unlock(&classLock);
}
size_t count;
SEL *sels;
+ bool preoptimized = hi->isPreoptimized();
+# if SUPPORT_IGNORED_SELECTOR_CONSTANT
+ // shared cache can't fix constant ignored selectors
+ if (UseGC) preoptimized = NO;
+# endif
+
if (PrintPreopt) {
- if (sel_preoptimizationValid(hi)) {
+ if (preoptimized) {
_objc_inform("PREOPTIMIZATION: honoring preoptimized selectors in %s",
hi->fname);
}
}
}
- if (sel_preoptimizationValid(hi)) return;
+ if (preoptimized) return;
sels = _getObjcSelectorRefs (hi, &count);
map_selrefs(sels, count, headerIsBundle(hi));
}
-static inline BOOL _is_threaded() {
+static inline bool _is_threaded() {
#if TARGET_OS_WIN32
return YES;
#else
void
unmap_image(const struct mach_header *mh, intptr_t vmaddr_slide)
{
- recursive_mutex_lock(&loadMethodLock);
+ recursive_mutex_locker_t lock(loadMethodLock);
unmap_image_nolock(mh);
- recursive_mutex_unlock(&loadMethodLock);
}
* Calls ABI-agnostic code after taking ABI-specific locks.
**********************************************************************/
const char *
-map_images(enum dyld_image_states state, uint32_t infoCount,
- const struct dyld_image_info infoList[])
+map_2_images(enum dyld_image_states state, uint32_t infoCount,
+ const struct dyld_image_info infoList[])
{
- const char *err;
-
- recursive_mutex_lock(&loadMethodLock);
- err = map_images_nolock(state, infoCount, infoList);
- recursive_mutex_unlock(&loadMethodLock);
-
- return err;
+ recursive_mutex_locker_t lock(loadMethodLock);
+ return map_images_nolock(state, infoCount, infoList);
}
load_images(enum dyld_image_states state, uint32_t infoCount,
const struct dyld_image_info infoList[])
{
- BOOL found;
+ bool found;
- recursive_mutex_lock(&loadMethodLock);
+ recursive_mutex_locker_t lock(loadMethodLock);
// Discover +load methods
found = load_images_nolock(state, infoCount, infoList);
call_load_methods();
}
- recursive_mutex_unlock(&loadMethodLock);
-
return nil;
}
#endif
void _read_images(header_info **hList, uint32_t hCount)
{
uint32_t i;
- BOOL categoriesLoaded = NO;
+ bool categoriesLoaded = NO;
if (!class_hash) _objc_init_class_hash();
// But not if any other threads are running - they might
// call a category method before the fixups below are complete.
if (!_is_threaded()) {
- BOOL needFlush = NO;
+ bool needFlush = NO;
for (i = 0; i < hCount; i++) {
needFlush |= _objc_read_categories_from_image(hList[i]);
}
// But not if this is the only thread - it's more
// efficient to attach categories earlier if safe.
if (!categoriesLoaded) {
- BOOL needFlush = NO;
+ bool needFlush = NO;
for (i = 0; i < hCount; i++) {
needFlush |= _objc_read_categories_from_image(hList[i]);
}
cls->info |= CLS_LOADED;
}
-void prepare_load_methods(header_info *hi)
+bool hasLoadMethods(const headerType *mhdr)
+{
+ return true;
+}
+
+void prepare_load_methods(const headerType *mhdr)
{
Module mods;
unsigned int midx;
-
+
+ header_info *hi;
+ for (hi = FirstHeader; hi; hi = hi->next) {
+ if (mhdr == hi->mhdr) break;
+ }
+ if (!hi) return;
if (_objcHeaderIsReplacement(hi)) {
// Ignore any classes in this image
// Process each class ref
for (i = 0; i < count; i++) {
if ((uintptr_t)(refs[i]) >= start && (uintptr_t)(refs[i]) < end) {
- pendClassReference(&refs[i], refs[i]->name,
- (refs[i]->info & CLS_META) ? YES : NO);
+ pendClassReference(&refs[i], refs[i]->name,
+ refs[i]->info & CLS_META);
refs[i] = nil;
}
}
unsigned int midx;
Module mods;
- mutex_lock(&classLock);
+ mutex_locker_t lock(classLock);
// Major loop - process all modules in the image
mods = hi->mod_ptr;
other_refs = _getObjcClassRefs(other_hi, &count);
rependClassReferences(other_refs, count, seg, seg+seg_size);
}
-
- mutex_unlock(&classLock);
}
_objc_inform("UNLOAD DEBUG: unloading image '%s' [%p..%p]",
hi->fname, (void *)seg, (void*)(seg+seg_size));
- mutex_lock(&classLock);
+ mutex_locker_t lock(classLock);
// Make sure the image contains no categories on surviving classes.
{
}
}
}
-
- mutex_unlock(&classLock);
}
**********************************************************************/
void _unload_image(header_info *hi)
{
- recursive_mutex_assert_locked(&loadMethodLock);
+ loadMethodLock.assertLocked();
// Cleanup:
// Remove image's classes from the class list and free auxiliary data.
OBJC_WARN_DEPRECATED;
// Synchronize access to hash table
- mutex_lock (&classLock);
+ mutex_locker_t lock(classLock);
// Make sure both the class and the metaclass have caches!
// Clear all bits of the info fields except CLS_CLASS and CLS_META.
cls->superclass->clearInfo(CLS_LEAF);
cls->superclass->ISA()->clearInfo(CLS_LEAF);
}
-
- // Desynchronize
- mutex_unlock (&classLock);
}
/***********************************************************************
// Allocate and zero a method list array
mallocSize = sizeof(old_method_list *) * initialEntries;
- ptr = (old_method_list **) _calloc_internal(1, mallocSize);
+ ptr = (old_method_list **) calloc(1, mallocSize);
// Insert the existing list into the array
ptr[initialEntries - 1] = END_OF_METHODS_LIST;
newSize = oldSize + sizeof(old_method_list *); // only increase by 1
// Grow the method list array by one.
- // This block may be from user code; don't use _realloc_internal
*list = (old_method_list **)realloc(*list, newSize);
// Zero out addition part of new array
* methods into the class it augments, and flush the class' method cache.
* Return YES if some method caches now need to be flushed.
**********************************************************************/
-static BOOL _objc_add_category_flush_caches(Class cls, old_category *category, int version)
+static bool _objc_add_category_flush_caches(Class cls, old_category *category, int version)
{
- BOOL needFlush = NO;
+ bool needFlush = NO;
// Install the category's methods into its intended class
- mutex_lock(&methodListLock);
- _objc_add_category (cls, category, version);
- mutex_unlock(&methodListLock);
+ {
+ mutex_locker_t lock(methodListLock);
+ _objc_add_category (cls, category, version);
+ }
// Queue for cache flushing so category's methods can get called
if (category->instance_methods) {
// Delink and reclaim this registration
next = pending->next;
- _free_internal(pending);
+ free(pending);
pending = next;
}
}
* they were discovered.
* Returns YES if some method caches now need to be flushed.
**********************************************************************/
-static BOOL _objc_register_category(old_category *cat, int version)
+static bool _objc_register_category(old_category *cat, int version)
{
_objc_unresolved_category * new_cat;
_objc_unresolved_category * old;
// Create category lookup table if needed
if (!category_hash)
- category_hash = NXCreateMapTableFromZone (NXStrValueMapPrototype,
- 128,
- _objc_internal_zone ());
+ category_hash = NXCreateMapTable(NXStrValueMapPrototype, 128);
// Locate an existing list of categories, if any, for the class.
old = (_objc_unresolved_category *)
// The category list is built backwards, and is reversed again
// by resolve_categories_for_class().
new_cat = (_objc_unresolved_category *)
- _malloc_internal(sizeof(_objc_unresolved_category));
+ malloc(sizeof(_objc_unresolved_category));
new_cat->next = old;
new_cat->cat = cat;
new_cat->version = version;
/***********************************************************************
* Lock management
**********************************************************************/
-rwlock_t selLock = {};
-mutex_t classLock = MUTEX_INITIALIZER;
-mutex_t methodListLock = MUTEX_INITIALIZER;
-mutex_t cacheUpdateLock = MUTEX_INITIALIZER;
-recursive_mutex_t loadMethodLock = RECURSIVE_MUTEX_INITIALIZER;
+rwlock_t selLock;
+mutex_t classLock;
+mutex_t methodListLock;
+mutex_t cacheUpdateLock;
+recursive_mutex_t loadMethodLock;
void lock_init(void)
{
- rwlock_init(&selLock);
- recursive_mutex_init(&loadMethodLock);
}
header_info *LastHeader = 0; // NULL means invalid; recompute it
int HeaderCount = 0;
-uint32_t AppSDKVersion = 0;
-
/***********************************************************************
* objc_getClass. Return the id of the named class. If the class does
bool PrintHelp = false;
bool PrintOptions = false;
+ bool maybeMallocDebugging = false;
// Scan environ[] directly instead of calling getenv() a lot.
// This optimizes the case where none are set.
for (char **p = *_NSGetEnviron(); *p != nil; p++) {
+ if (0 == strncmp(*p, "Malloc", 6) || 0 == strncmp(*p, "DYLD", 4) ||
+ 0 == strncmp(*p, "NSZombiesEnabled", 16))
+ {
+ maybeMallocDebugging = true;
+ }
+
if (0 != strncmp(*p, "OBJC_", 5)) continue;
if (0 == strncmp(*p, "OBJC_HELP=", 10)) {
}
}
+ // Special case: enable some autorelease pool debugging
+ // when some malloc debugging is enabled
+ // and OBJC_DEBUG_POOL_ALLOCATION is not set to something other than NO.
+ if (maybeMallocDebugging) {
+ const char *insert = getenv("DYLD_INSERT_LIBRARIES");
+ const char *zombie = getenv("NSZombiesEnabled");
+ const char *pooldebug = getenv("OBJC_DEBUG_POOL_ALLOCATION");
+ if ((getenv("MallocStackLogging")
+ || getenv("MallocStackLoggingNoCompact")
+ || (zombie && (*zombie == 'Y' || *zombie == 'y'))
+ || (insert && strstr(insert, "libgmalloc")))
+ &&
+ (!pooldebug || 0 == strcmp(pooldebug, "YES")))
+ {
+ DebugPoolAllocation = true;
+ }
+ }
+
// Print OBJC_HELP and OBJC_PRINT_OPTIONS output.
if (PrintHelp || PrintOptions) {
if (PrintHelp) {
**********************************************************************/
void
logReplacedMethod(const char *className, SEL s,
- BOOL isMeta, const char *catName,
+ bool isMeta, const char *catName,
IMP oldImp, IMP newImp)
{
const char *oldImage = "??";
* If the data doesn't exist yet and create is NO, return NULL.
* If the data doesn't exist yet and create is YES, allocate and return it.
**********************************************************************/
-_objc_pthread_data *_objc_fetch_pthread_data(BOOL create)
+_objc_pthread_data *_objc_fetch_pthread_data(bool create)
{
_objc_pthread_data *data;
data = (_objc_pthread_data *)tls_get(_objc_pthread_key);
if (!data && create) {
data = (_objc_pthread_data *)
- _calloc_internal(1, sizeof(_objc_pthread_data));
+ calloc(1, sizeof(_objc_pthread_data));
tls_set(_objc_pthread_key, data);
}
// add further cleanup here...
- _free_internal(data);
+ free(data);
}
}
DWORD charactersCopied;
Class origCls;
HMODULE classModule;
- BOOL res;
+ bool res;
#endif
if (!cls) return NULL;
#if SUPPORT_GC
id objc_getAssociatedObject_gc(id object, const void *key) {
+ // auto_zone doesn't handle tagged pointer objects. Track it ourselves.
+ if (object->isTaggedPointer()) return objc_getAssociatedObject_non_gc(object, key);
+
return (id)auto_zone_get_associative_ref(gc_zone, object, (void *)key);
}
void objc_setAssociatedObject_gc(id object, const void *key, id value, objc_AssociationPolicy policy) {
+ // auto_zone doesn't handle tagged pointer objects. Track it ourselves.
+ if (object->isTaggedPointer()) return objc_setAssociatedObject_non_gc(object, key, value, policy);
+
if ((policy & OBJC_ASSOCIATION_COPY_NONATOMIC) == OBJC_ASSOCIATION_COPY_NONATOMIC) {
value = ((id(*)(id, SEL))objc_msgSend)(value, SEL_copy);
}
BOOL sel_isMapped(SEL name)
{
- SEL result;
+ SEL sel;
if (!name) return NO;
#if SUPPORT_IGNORED_SELECTOR_CONSTANT
if ((uintptr_t)name == kIgnore) return YES;
#endif
- result = _objc_search_builtins((const char *)name);
- if (result) return YES;
+ sel = _objc_search_builtins((const char *)name);
+ if (sel) return YES;
- rwlock_read(&selLock);
+ rwlock_reader_t lock(selLock);
if (_objc_selectors) {
- result = __objc_sel_set_get(_objc_selectors, name);
+ sel = __objc_sel_set_get(_objc_selectors, name);
}
- rwlock_unlock_read(&selLock);
- return result ? YES : NO;
+ return bool(sel);
}
static SEL __sel_registerName(const char *name, int lock, int copy)
{
SEL result = 0;
- if (lock) rwlock_assert_unlocked(&selLock);
- else rwlock_assert_writing(&selLock);
+ if (lock) selLock.assertUnlocked();
+ else selLock.assertWriting();
if (!name) return (SEL)0;
result = _objc_search_builtins(name);
if (result) return result;
- if (lock) rwlock_read(&selLock);
+ if (lock) selLock.read();
if (_objc_selectors) {
result = __objc_sel_set_get(_objc_selectors, (SEL)name);
}
- if (lock) rwlock_unlock_read(&selLock);
+ if (lock) selLock.unlockRead();
if (result) return result;
// No match. Insert.
- if (lock) rwlock_write(&selLock);
+ if (lock) selLock.write();
if (!_objc_selectors) {
_objc_selectors = __objc_sel_set_create(SelrefCount);
result = __objc_sel_set_get(_objc_selectors, (SEL)name);
}
if (!result) {
- result = (SEL)(copy ? _strdup_internal(name) : name);
+ result = (SEL)(copy ? strdup(name) : name);
__objc_sel_set_add(_objc_selectors, result);
#if defined(DUMP_UNKNOWN_SELECTORS)
printf("\t\"%s\",\n", name);
#endif
}
- if (lock) rwlock_unlock_write(&selLock);
+ if (lock) selLock.unlockWrite();
return result;
}
return __sel_registerName(name, 1, 1); // YES lock, YES copy
}
-SEL sel_registerNameNoLock(const char *name, BOOL copy) {
+SEL sel_registerNameNoLock(const char *name, bool copy) {
return __sel_registerName(name, 0, copy); // NO lock, maybe copy
}
void sel_lock(void)
{
- rwlock_write(&selLock);
+ selLock.write();
}
void sel_unlock(void)
{
- rwlock_unlock_write(&selLock);
+ selLock.unlockWrite();
}
BOOL sel_isEqual(SEL lhs, SEL rhs)
{
- return (lhs == rhs) ? YES : NO;
-}
-
-
-/***********************************************************************
-* sel_preoptimizationValid
-* Return YES if this image's selector fixups are valid courtesy
-* of the dyld shared cache.
-**********************************************************************/
-BOOL sel_preoptimizationValid(const header_info *hi)
-{
-#if !SUPPORT_PREOPT
-
- return NO;
-
-#else
-
-# if SUPPORT_IGNORED_SELECTOR_CONSTANT
- // shared cache can't fix constant ignored selectors
- if (UseGC) return NO;
-# endif
-
- // preoptimization disabled for some reason
- if (!isPreoptimized()) return NO;
-
- // image not from shared cache, or not fixed inside shared cache
- if (!_objcHeaderOptimizedByDyld(hi)) return NO;
-
- return YES;
-
-#endif
+ return bool(lhs == rhs);
}
* sel_init
* Initialize selector tables and register selectors used internally.
**********************************************************************/
-void sel_init(BOOL wantsGC, size_t selrefCount)
+void sel_init(bool wantsGC, size_t selrefCount)
{
// save this value for later
SelrefCount = selrefCount;
uint32_t idx;
struct __objc_sel_set *sset = (struct __objc_sel_set *)
- _malloc_internal(sizeof(struct __objc_sel_set));
+ malloc(sizeof(struct __objc_sel_set));
if (!sset) _objc_fatal("objc_sel_set failure");
sset->_count = 0;
if (SIZE <= idx) _objc_fatal("objc_sel_set failure");
sset->_capacity = __objc_sel_set_capacities[idx];
sset->_bucketsNum = __objc_sel_set_buckets[idx];
- sset->_buckets = (SEL *)_calloc_internal(sset->_bucketsNum, sizeof(SEL));
+ sset->_buckets = (SEL *)calloc(sset->_bucketsNum, sizeof(SEL));
if (!sset->_buckets) _objc_fatal("objc_sel_set failure");
return sset;
}
sset->_capacity = __objc_sel_set_capacities[idx];
sset->_bucketsNum = __objc_sel_set_buckets[idx];
sset->_buckets = (SEL *)
- _calloc_internal(sset->_bucketsNum, sizeof(SEL));
+ calloc(sset->_bucketsNum, sizeof(SEL));
if (!sset->_buckets) _objc_fatal("objc_sel_set failure");
for (idx = 0; idx < oldnbuckets; idx++) {
SEL currentSel = oldbuckets[idx];
sset->_buckets[nomatch] = currentSel;
}
}
- _free_internal(oldbuckets);
+ free(oldbuckets);
}
{
uint32_t nomatch = __objc_sel_set_findBuckets(sset, value).nomatch;
+#include <TargetConditionals.h>
#include <mach/vm_param.h>
+
+#if __LP64__
+# define PTR(x) .quad x
+#else
+# define PTR(x) .long x
+#endif
+
.section __TEXT,__objc_opt_ro
.align 3
.private_extern __objc_opt_data
__objc_opt_data:
-.long 12 /* table.version */
+.long 13 /* table.version */
.long 0 /* table.selopt_offset */
.long 0 /* table.headeropt_offset */
.long 0 /* table.clsopt_offset */
.space PAGE_MAX_SIZE-16
-/* space for selopt, smax/capacity=262144, blen/mask=131071+1 */
-.space 131072 /* mask tab */
-.space 262144 /* checkbytes */
-.space 262144*4 /* offsets */
+/* space for selopt, smax/capacity=262144, blen/mask=262143+1 */
+.space 262144 /* mask tab */
+.space 524288 /* checkbytes */
+.space 524288*4 /* offsets */
/* space for clsopt, smax/capacity=32768, blen/mask=16383+1 */
.space 16384 /* mask tab */
-.space 32768 /* checkbytes */
-.space 32768*12 /* offsets to name and class and header_info */
-.space PAGE_MAX_SIZE /* some duplicate classes */
+.space 32768 /* checkbytes */
+.space 32768*12 /* offsets to name and class and header_info */
+.space PAGE_MAX_SIZE /* some duplicate classes */
+
+/* space for protocolopt, smax/capacity=8192, blen/mask=4095+1 */
+.space 4096 /* mask tab */
+.space 8192 /* checkbytes */
+.space 8192*4 /* offsets */
.section __DATA,__objc_opt_rw
__objc_opt_rw_data:
/* space for header_info structures */
.space 32768
+
+/* space for 8192 protocols */
+#if __LP64__
+.space 8192 * 11 * 8
+#else
+.space 8192 * 11 * 4
+#endif
+
+
+/* section of pointers that the shared cache optimizer wants to know about */
+.section __DATA,__objc_opt_ptrs
+.align 3
+
+#if TARGET_OS_MAC && !TARGET_OS_IPHONE && __i386__
+// old ABI
+.globl .objc_class_name_Protocol
+PTR(.objc_class_name_Protocol)
+#else
+// new ABI
+.globl _OBJC_CLASS_$_Protocol
+PTR(_OBJC_CLASS_$_Protocol)
+#endif
* sel_init
* Initialize selector tables and register selectors used internally.
**********************************************************************/
-void sel_init(BOOL wantsGC, size_t selrefCount)
+void sel_init(bool wantsGC, size_t selrefCount)
{
// save this value for later
SelrefCount = selrefCount;
static SEL sel_alloc(const char *name, bool copy)
{
- rwlock_assert_writing(&selLock);
- return (SEL)(copy ? _strdup_internal(name) : name);
+ selLock.assertWriting();
+ return (SEL)(copy ? strdup(name) : name);
}
if (sel == search_builtins(name)) return YES;
- bool result = false;
- rwlock_read(&selLock);
- if (namedSelectors) result = (sel == (SEL)NXMapGet(namedSelectors, name));
- rwlock_unlock_read(&selLock);
-
- return result;
+ rwlock_reader_t lock(selLock);
+ if (namedSelectors) {
+ return (sel == (SEL)NXMapGet(namedSelectors, name));
+ }
+ return false;
}
{
SEL result = 0;
- if (lock) rwlock_assert_unlocked(&selLock);
- else rwlock_assert_writing(&selLock);
+ if (lock) selLock.assertUnlocked();
+ else selLock.assertWriting();
if (!name) return (SEL)0;
result = search_builtins(name);
if (result) return result;
- if (lock) rwlock_read(&selLock);
+ if (lock) selLock.read();
if (namedSelectors) {
result = (SEL)NXMapGet(namedSelectors, name);
}
- if (lock) rwlock_unlock_read(&selLock);
+ if (lock) selLock.unlockRead();
if (result) return result;
// No match. Insert.
- if (lock) rwlock_write(&selLock);
+ if (lock) selLock.write();
if (!namedSelectors) {
namedSelectors = NXCreateMapTable(NXStrValueMapPrototype,
NXMapInsert(namedSelectors, sel_getName(result), result);
}
- if (lock) rwlock_unlock_write(&selLock);
+ if (lock) selLock.unlockWrite();
return result;
}
return __sel_registerName(name, 1, 1); // YES lock, YES copy
}
-SEL sel_registerNameNoLock(const char *name, BOOL copy) {
+SEL sel_registerNameNoLock(const char *name, bool copy) {
return __sel_registerName(name, 0, copy); // NO lock, maybe copy
}
void sel_lock(void)
{
- rwlock_write(&selLock);
+ selLock.write();
}
void sel_unlock(void)
{
- rwlock_unlock_write(&selLock);
+ selLock.unlockWrite();
}
BOOL sel_isEqual(SEL lhs, SEL rhs)
{
- return (lhs == rhs) ? YES : NO;
-}
-
-
-/***********************************************************************
-* sel_preoptimizationValid
-* Return YES if this image's selector fixups are valid courtesy
-* of the dyld shared cache.
-**********************************************************************/
-BOOL sel_preoptimizationValid(const header_info *hi)
-{
-#if !SUPPORT_PREOPT
-
- return NO;
-
-#else
-
- // preoptimization disabled for some reason
- if (!isPreoptimized()) return NO;
-
- // image not from shared cache, or not fixed inside shared cache
- if (!_objcHeaderOptimizedByDyld(hi)) return NO;
-
- return YES;
-
-#endif
+ return bool(lhs == rhs);
}
typedef struct SyncData {
struct SyncData* nextData;
- id object;
- int threadCount; // number of THREADS using this block
- recursive_mutex_t mutex;
+ DisguisedPtr<objc_object> object;
+ int32_t threadCount; // number of THREADS using this block
+ recursive_mutex_t mutex;
} SyncData;
typedef struct {
SYNC_COUNT_DIRECT_KEY == SyncCacheItem.lockCount
*/
-typedef struct {
+struct SyncList {
SyncData *data;
spinlock_t lock;
- char align[64 - sizeof (spinlock_t) - sizeof (SyncData *)];
-} SyncList __attribute__((aligned(64)));
-// aligned to put locks on separate cache lines
+ SyncList() : data(nil) { }
+};
// Use multiple parallel lists to decrease contention among unrelated objects.
-#define COUNT 16
-#define HASH(obj) ((((uintptr_t)(obj)) >> 5) & (COUNT - 1))
-#define LOCK_FOR_OBJ(obj) sDataLists[HASH(obj)].lock
-#define LIST_FOR_OBJ(obj) sDataLists[HASH(obj)].data
-static SyncList sDataLists[COUNT];
+#define LOCK_FOR_OBJ(obj) sDataLists[obj].lock
+#define LIST_FOR_OBJ(obj) sDataLists[obj].data
+static StripedMap<SyncList> sDataLists;
enum usage { ACQUIRE, RELEASE, CHECK };
-static SyncCache *fetch_cache(BOOL create)
+static SyncCache *fetch_cache(bool create)
{
_objc_pthread_data *data;
#if SUPPORT_DIRECT_THREAD_KEYS
// Check per-thread single-entry fast cache for matching object
- BOOL fastCacheOccupied = NO;
+ bool fastCacheOccupied = NO;
SyncData *data = (SyncData *)tls_get_direct(SYNC_DATA_DIRECT_KEY);
if (data) {
fastCacheOccupied = YES;
result = data;
lockCount = (uintptr_t)tls_get_direct(SYNC_COUNT_DIRECT_KEY);
- require_action_string(result->threadCount > 0, fastcache_done,
- result = NULL, "id2data fastcache is buggy");
- require_action_string(lockCount > 0, fastcache_done,
- result = NULL, "id2data fastcache is buggy");
+ if (result->threadCount <= 0 || lockCount <= 0) {
+ _objc_fatal("id2data fastcache is buggy");
+ }
switch(why) {
case ACQUIRE: {
break;
}
- fastcache_done:
- return result;
+ return result;
}
}
#endif
// Found a match.
result = item->data;
- require_action_string(result->threadCount > 0, cache_done,
- result = NULL, "id2data cache is buggy");
- require_action_string(item->lockCount > 0, cache_done,
- result = NULL, "id2data cache is buggy");
+ if (result->threadCount <= 0 || item->lockCount <= 0) {
+ _objc_fatal("id2data cache is buggy");
+ }
switch(why) {
case ACQUIRE:
break;
}
- cache_done:
return result;
}
}
// We could keep the nodes in some hash table if we find that there are
// more than 20 or so distinct locks active, but we don't do that now.
- spinlock_lock(lockp);
+ lockp->lock();
{
SyncData* p;
// an unused one was found, use it
if ( firstUnused != NULL ) {
result = firstUnused;
- result->object = object;
+ result->object = (objc_object *)object;
result->threadCount = 1;
goto done;
}
}
-
+
// malloc a new SyncData and add to list.
// XXX calling malloc with a global lock held is bad practice,
// might be worth releasing the lock, mallocing, and searching again.
// But since we never free these guys we won't be stuck in malloc very often.
result = (SyncData*)calloc(sizeof(SyncData), 1);
- result->object = object;
+ result->object = (objc_object *)object;
result->threadCount = 1;
- recursive_mutex_init(&result->mutex);
+ new (&result->mutex) recursive_mutex_t();
result->nextData = *listp;
*listp = result;
done:
- spinlock_unlock(lockp);
+ lockp->unlock();
if (result) {
// Only new ACQUIRE should get here.
// All RELEASE and CHECK and recursive ACQUIRE are
// handled by the per-thread caches above.
-
- require_string(result != NULL, really_done, "id2data is buggy");
- require_action_string(why == ACQUIRE, really_done,
- result = NULL, "id2data is buggy");
- require_action_string(result->object == object, really_done,
- result = NULL, "id2data is buggy");
+ if (why == RELEASE) {
+ // Probably some thread is incorrectly exiting
+ // while the object is held by another thread.
+ return nil;
+ }
+ if (why != ACQUIRE) _objc_fatal("id2data is buggy");
+ if (result->object != object) _objc_fatal("id2data is buggy");
#if SUPPORT_DIRECT_THREAD_KEYS
if (!fastCacheOccupied) {
}
}
- really_done:
return result;
}
if (obj) {
SyncData* data = id2data(obj, ACQUIRE);
- require_action_string(data != NULL, done, result = OBJC_SYNC_NOT_INITIALIZED, "id2data failed");
-
- result = recursive_mutex_lock(&data->mutex);
- require_noerr_string(result, done, "mutex_lock failed");
+ assert(data);
+ data->mutex.lock();
} else {
// @synchronized(nil) does nothing
if (DebugNilSync) {
objc_sync_nil();
}
-done:
return result;
}
if (obj) {
SyncData* data = id2data(obj, RELEASE);
- require_action_string(data != NULL, done, result = OBJC_SYNC_NOT_OWNING_THREAD_ERROR, "id2data failed");
-
- result = recursive_mutex_unlock(&data->mutex);
- require_noerr_string(result, done, "mutex_unlock failed");
+ if (!data) {
+ result = OBJC_SYNC_NOT_OWNING_THREAD_ERROR;
+ } else {
+ bool okay = data->mutex.tryUnlock();
+ if (!okay) {
+ result = OBJC_SYNC_NOT_OWNING_THREAD_ERROR;
+ }
+ }
} else {
// @synchronized(nil) does nothing
}
-done:
- if ( result == RECURSIVE_MUTEX_NOT_LOCKED )
- result = OBJC_SYNC_NOT_OWNING_THREAD_ERROR;
return result;
}
{
unsigned nargs = 0;
int self_offset = 0;
- BOOL offset_is_negative = NO;
+ bool offset_is_negative = NO;
// First, skip the return type
typedesc = SkipFirstType (typedesc);
};
/// Adds an (object, weak pointer) pair to the weak table.
-id weak_register_no_lock(weak_table_t *weak_table, id referent, id *referrer);
+id weak_register_no_lock(weak_table_t *weak_table, id referent,
+ id *referrer, bool crashIfDeallocating);
/// Removes an (object, weak pointer) pair from the weak table.
void weak_unregister_no_lock(weak_table_t *weak_table, id referent, id *referrer);
-#if !NDEBUG
+#if DEBUG
/// Returns true if an object is weakly referenced somewhere.
bool weak_is_registered_no_lock(weak_table_t *weak_table, id referent);
#endif
entry->mask = new_size - 1;
entry->referrers = (weak_referrer_t *)
- _calloc_internal(TABLE_SIZE(entry), sizeof(weak_referrer_t));
+ calloc(TABLE_SIZE(entry), sizeof(weak_referrer_t));
entry->num_refs = 0;
entry->max_hash_displacement = 0;
}
// Insert
append_referrer(entry, new_referrer);
- if (old_refs) _free_internal(old_refs);
+ if (old_refs) free(old_refs);
}
/**
// Couldn't insert inline. Allocate out of line.
weak_referrer_t *new_referrers = (weak_referrer_t *)
- _calloc_internal(WEAK_INLINE_COUNT, sizeof(weak_referrer_t));
+ calloc(WEAK_INLINE_COUNT, sizeof(weak_referrer_t));
// This constructed table is invalid, but grow_refs_and_insert
// will fix it and rehash it.
for (size_t i = 0; i < WEAK_INLINE_COUNT; i++) {
weak_entry_t *old_entries = weak_table->weak_entries;
weak_entry_t *new_entries = (weak_entry_t *)
- _calloc_internal(new_size, sizeof(weak_entry_t));
+ calloc(new_size, sizeof(weak_entry_t));
weak_table->mask = new_size - 1;
weak_table->weak_entries = new_entries;
weak_entry_insert(weak_table, entry);
}
}
- _free_internal(old_entries);
+ free(old_entries);
}
}
static void weak_entry_remove(weak_table_t *weak_table, weak_entry_t *entry)
{
// remove entry
- if (entry->out_of_line) _free_internal(entry->referrers);
+ if (entry->out_of_line) free(entry->referrers);
bzero(entry, sizeof(*entry));
weak_table->num_entries--;
* @param referrer The weak pointer address.
*/
id
-weak_register_no_lock(weak_table_t *weak_table, id referent_id, id *referrer_id)
+weak_register_no_lock(weak_table_t *weak_table, id referent_id,
+ id *referrer_id, bool crashIfDeallocating)
{
objc_object *referent = (objc_object *)referent_id;
objc_object **referrer = (objc_object **)referrer_id;
}
if (deallocating) {
- _objc_fatal("Cannot form weak reference to instance (%p) of "
- "class %s. It is possible that this object was "
- "over-released, or is in the process of deallocation.",
- (void*)referent, object_getClassName((id)referent));
+ if (crashIfDeallocating) {
+ _objc_fatal("Cannot form weak reference to instance (%p) of "
+ "class %s. It is possible that this object was "
+ "over-released, or is in the process of deallocation.",
+ (void*)referent, object_getClassName((id)referent));
+ } else {
+ return nil;
+ }
}
// now remember it and where it is being stored
}
-#if !NDEBUG
+#if DEBUG
bool
weak_is_registered_no_lock(weak_table_t *weak_table, id referent_id)
{
#define OBJC_BOOL_DEFINED
/// Type to represent a boolean value.
-#if !defined(OBJC_HIDE_64) && TARGET_OS_IPHONE && __LP64__
+#if (TARGET_OS_IPHONE && __LP64__) || TARGET_OS_WATCH
+#define OBJC_BOOL_IS_BOOL 1
typedef bool BOOL;
#else
+#define OBJC_BOOL_IS_CHAR 1
typedef signed char BOOL;
// BOOL is explicitly signed so @encode(BOOL) == "c" rather than "C"
// even if -funsigned-char is used.
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0)
OBJC_ARC_UNAVAILABLE;
-/**
- * Used by CoreFoundation's toll-free bridging.
- *
- * @warning Do not call this function yourself.
- */
-OBJC_EXPORT void objc_setFutureClass(Class cls, const char *name)
- __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0)
- OBJC_ARC_UNAVAILABLE;
-
/* Instantiating Classes */
* Policies related to associative references.
* These are options to objc_setAssociatedObject()
*/
-enum {
+typedef OBJC_ENUM(uintptr_t, objc_AssociationPolicy) {
OBJC_ASSOCIATION_ASSIGN = 0, /**< Specifies a weak reference to the associated object. */
OBJC_ASSOCIATION_RETAIN_NONATOMIC = 1, /**< Specifies a strong reference to the associated object.
* The association is not made atomically. */
* The association is made atomically. */
};
-/// Type to specify the behavior of an association.
-typedef uintptr_t objc_AssociationPolicy;
-
/**
* Sets an associated value for a given object using a given key and association policy.
*
@protocol SuperProto2 @end
@protocol UnrelatedProto @end
+
+void Crash(id self, SEL _cmd)
+{
+ fail("%c[%s %s] called unexpectedly",
+ class_isMetaClass(object_getClass(self)) ? '+' : '-',
+ object_getClassName(self), sel_getName(_cmd));
+}
+
+
int main()
{
Protocol *proto, *proto2;
objc_property_t *proplist;
unsigned int count;
+ // If objc_registerProtocol() fails to preserve the retain count
+ // then ARC will deallocate Protocol objects too early.
+ class_replaceMethod(objc_getClass("Protocol"),
+ sel_registerName("dealloc"), (IMP)Crash, "v@:");
+ class_replaceMethod(objc_getClass("__IncompleteProtocol"),
+ sel_registerName("dealloc"), (IMP)Crash, "v@:");
+
// make sure binary contains hard copies of these protocols
proto = @protocol(SuperProto);
proto = @protocol(SuperProto2);
-// TEST_CONFIG SDK=macos
+// TEST_CONFIG OS=macosx
// TEST_CFLAGS -framework AppleScriptObjC -framework Foundation
// Verify that trivial AppleScriptObjC apps run with GC off.
static id weak;
static id weak2;
+static id weak3;
+static id weak4;
static bool did_dealloc;
static int state;
@interface Test : NSObject @end
@implementation Test
-(void)dealloc {
+ testprintf("Weak storeOrNil does not crash while deallocating\n");
+ weak4 = (id)0x100; // old value must not be used
+ id result = objc_initWeakOrNil(&weak4, self);
+ testassert(result == nil);
+ testassert(weak4 == nil);
+ result = objc_storeWeakOrNil(&weak4, self);
+ testassert(result == nil);
+ testassert(weak4 == nil);
+
// The value returned by objc_loadWeak() is now nil,
// but the storage is not yet cleared.
testassert(weak == self);
testassert(objc_loadWeakRetained(&weak) == nil);
testassert(objc_loadWeakRetained(&weak2) == nil);
+ testprintf("Weak storeOrNil does not crash while deallocating\n");
+ id result = objc_storeWeakOrNil(&weak, self);
+ testassert(result == nil);
+
testprintf("Weak store crashes while deallocating\n");
objc_storeWeak(&weak, self);
fail("objc_storeWeak of deallocating value should have crashed");
@end
-void cycle(Class cls, Test *obj, Test *obj2)
+void cycle(Class cls, Test *obj, Test *obj2, bool storeOrNil)
{
testprintf("Cycling class %s\n", class_getName(cls));
id result;
+ id (*storeWeak)(id *location, id obj);
+ id (*initWeak)(id *location, id obj);
+ if (storeOrNil) {
+ testprintf("Using objc_storeWeakOrNil\n");
+ storeWeak = objc_storeWeakOrNil;
+ initWeak = objc_initWeakOrNil;
+ } else {
+ testprintf("Using objc_storeWeak\n");
+ storeWeak = objc_storeWeak;
+ initWeak = objc_initWeak;
+ }
+
// state counts calls to custom weak methods
// Difference test classes have different expected values.
int storeTarget;
testprintf("Weak assignment\n");
state = 0;
- result = objc_storeWeak(&weak, obj);
+ result = storeWeak(&weak, obj);
testassert(state == storeTarget);
testassert(result == obj);
testassert(weak == obj);
testprintf("Weak assignment to the same value\n");
state = 0;
- result = objc_storeWeak(&weak, obj);
+ result = storeWeak(&weak, obj);
testassert(state == storeTarget);
testassert(result == obj);
testassert(weak == obj);
testprintf("Weak assignment to different value\n");
state = 0;
- result = objc_storeWeak(&weak, obj2);
+ result = storeWeak(&weak, obj2);
testassert(state == storeTarget);
testassert(result == obj2);
testassert(weak == obj2);
testprintf("Weak assignment to NULL\n");
state = 0;
- result = objc_storeWeak(&weak, NULL);
+ result = storeWeak(&weak, NULL);
testassert(state == 0);
testassert(result == NULL);
testassert(weak == NULL);
testprintf("Weak re-assignment to NULL\n");
state = 0;
- result = objc_storeWeak(&weak, NULL);
+ result = storeWeak(&weak, NULL);
testassert(state == 0);
testassert(result == NULL);
testassert(weak == NULL);
testprintf("Weak move\n");
state = 0;
- result = objc_storeWeak(&weak, obj);
+ result = storeWeak(&weak, obj);
testassert(state == storeTarget);
testassert(result == obj);
testassert(weak == obj);
objc_moveWeak(&weak2, &weak);
testassert(weak == nil);
testassert(weak2 == obj);
- objc_storeWeak(&weak2, NULL);
+ storeWeak(&weak2, NULL);
testprintf("Weak copy\n");
state = 0;
- result = objc_storeWeak(&weak, obj);
+ result = storeWeak(&weak, obj);
testassert(state == storeTarget);
testassert(result == obj);
testassert(weak == obj);
objc_copyWeak(&weak2, &weak);
testassert(weak == obj);
testassert(weak2 == obj);
- objc_storeWeak(&weak, NULL);
- objc_storeWeak(&weak2, NULL);
+ storeWeak(&weak, NULL);
+ storeWeak(&weak2, NULL);
testprintf("Weak clear\n");
id obj3 = [cls new];
state = 0;
- result = objc_storeWeak(&weak, obj3);
+ result = storeWeak(&weak, obj3);
testassert(state == storeTarget);
testassert(result == obj3);
testassert(weak == obj3);
state = 0;
- result = objc_storeWeak(&weak2, obj3);
+ result = storeWeak(&weak2, obj3);
testassert(state == storeTarget);
testassert(result == obj3);
testassert(weak2 == obj3);
testassert(did_dealloc);
testassert(weak == NULL);
testassert(weak2 == NULL);
+
+
+ testprintf("Weak init and destroy\n");
+
+ id obj4 = [cls new];
+
+ state = 0;
+ weak = (id)0x100; // old value must not be used
+ result = initWeak(&weak, obj4);
+ testassert(state == storeTarget);
+ testassert(result == obj4);
+ testassert(weak == obj4);
+
+ state = 0;
+ weak2 = (id)0x100; // old value must not be used
+ result = initWeak(&weak2, obj4);
+ testassert(state == storeTarget);
+ testassert(result == obj4);
+ testassert(weak2 == obj4);
+
+ state = 0;
+ weak3 = (id)0x100; // old value must not be used
+ result = initWeak(&weak3, obj4);
+ testassert(state == storeTarget);
+ testassert(result == obj4);
+ testassert(weak3 == obj4);
+
+ state = 0;
+ objc_destroyWeak(&weak3);
+ testassert(state == 0);
+ testassert(weak3 == obj4); // storage is unchanged
+
+ did_dealloc = false;
+ [obj4 release];
+ testassert(did_dealloc);
+ testassert(weak == NULL); // not destroyed earlier so cleared now
+ testassert(weak2 == NULL); // not destroyed earlier so cleared now
+ testassert(weak3 == obj4); // destroyed earlier so not cleared now
+
+ objc_destroyWeak(&weak);
+ objc_destroyWeak(&weak2);
}
void test_class(Class cls)
{
+ // prime strong and weak side tables before leak checking
+ Test *prime[256] = {nil};
+ for (size_t i = 0; i < sizeof(prime)/sizeof(prime[0]); i++) {
+ objc_storeWeak(&prime[i], [cls new]);
+ }
+
Test *obj = [cls new];
Test *obj2 = [cls new];
for (int i = 0; i < 100000; i++) {
- if (i == 10) leak_mark();
- cycle(cls, obj, obj2);
+ cycle(cls, obj, obj2, false);
+ cycle(cls, obj, obj2, true);
}
- // allow some slop for [Test new] inside cycle()
- // to land in different side table stripes
- leak_check(8192);
-
+ leak_mark();
+ for (int i = 0; i < 100000; i++) {
+ cycle(cls, obj, obj2, false);
+ cycle(cls, obj, obj2, true);
+ }
+ // allow some slop for side table expansion
+ // 5120 is common with this configuration
+ leak_check(6000);
// rdar://14105994
id weaks[8];
// for OBJC2 mac only
-/* TEST_CONFIG SDK=macos ARCH=x86_64
+/* TEST_CONFIG OS=macosx ARCH=x86_64
TEST_CRASHES
TEST_RUN_OUTPUT
#endif
struct bucket_t {
- void *sel;
- void *imp;
+ uintptr_t sel;
+ uintptr_t imp;
};
struct cache_t {
id obj = [cls new];
[obj self];
- // Test objc_msgSend.
struct cache_t *cache = &((__bridge struct class_t *)cls)->cache;
- cache->mask = 0;
- cache->buckets[0].sel = (void*)~0;
- cache->buckets[0].imp = (void*)~0;
- cache->buckets[1].sel = (void*)(uintptr_t)1;
- cache->buckets[1].imp = (void*)cache->buckets;
+
+# define COUNT 4
+ struct bucket_t *buckets = calloc(sizeof(struct bucket_t), COUNT+1);
+ for (int i = 0; i < COUNT; i++) {
+ buckets[i].sel = ~0;
+ buckets[i].imp = ~0;
+ }
+ buckets[COUNT].sel = 1;
+ buckets[COUNT].imp = (uintptr_t)buckets;
+
+ cache->mask = COUNT-1;
+ cache->occupied = 0;
+ cache->buckets = buckets;
fprintf(stderr, "crash now\n");
[obj self];
+++ /dev/null
-/*
-TEST_CRASHES
-TEST_RUN_OUTPUT
-objc1
-OK: badCache2.m
-OR
-crash now
-objc\[\d+\]: Method cache corrupted.*
-objc\[\d+\]: .*
-objc\[\d+\]: .*
-objc\[\d+\]: .*
-objc\[\d+\]: .*
-objc\[\d+\]: Method cache corrupted\.
-CRASHED: SIG(ILL|TRAP)
-END
-*/
-
-
-#include "test.h"
-
-#if !__OBJC2__ || __arm__
-
-int main()
-{
- fprintf(stderr, "objc1\n");
- succeed(__FILE__);
-}
-
-#else
-
-#include "testroot.i"
-
-#if __LP64__
-typedef uint32_t mask_t;
-#else
-typedef uint16_t mask_t;
-#endif
-
-struct bucket_t {
- void *sel;
- void *imp;
-};
-
-struct cache_t {
- struct bucket_t *buckets;
- mask_t mask;
- mask_t occupied;
-};
-
-struct class_t {
- void *isa;
- void *supercls;
- struct cache_t cache;
-};
-
-@interface Subclass : TestRoot @end
-@implementation Subclass @end
-
-int main()
-{
- Class cls = [TestRoot class];
- id obj = [cls new];
- [obj self];
-
- // Test cache::find by clobbering the cache and then adding a method
- struct cache_t *cache = &((__bridge struct class_t *)cls)->cache;
- cache->mask = 0;
- cache->buckets[0].sel = (void*)~0;
- cache->buckets[0].imp = (void*)~0;
-
- fprintf(stderr, "crash now\n");
- class_addMethod(cls, @selector(fake:o:rama:), nil, nil);
-
- fail("should have crashed");
-}
-
-#endif
testassert(rc == 1);
testassert([o retainCount] == rc);
- objc_storeWeak(&w, nil);
-
-
testprintf("dealloc\n");
testassert(TestRootDealloc == 0);
+ testassert(w != nil);
[o release];
testassert(TestRootDealloc == 1);
-
+ testassert(w == nil);
succeed(__FILE__);
}
// TEST_CONFIG
#if USE_FOUNDATION
+#include <Foundation/Foundation.h>
#define SUPERCLASS NSObject
#define FILENAME "nscdtors.mm"
#else
int main()
{
+ if (objc_collectingEnabled()) {
+ testwarn("rdar://19042235 test disabled in GC because it is slow");
+ succeed(FILENAME);
+ }
+
for (int i = 0; i < 1000; i++) {
testonthread(^{ test_single(); });
testonthread(^{ test_inplace(); });
{
int count = 1000;
- cycle();
- cycle();
+ testonthread(^{ cycle(); });
+ testonthread(^{ cycle(); });
+ testonthread(^{ cycle(); });
leak_mark();
while (count--) {
testonthread(^{ cycle(); });
}
- leak_check(256); // fixme should be 0
+#if __OBJC_GC__
+ testwarn("rdar://19042235 possible leaks suppressed under GC");
+ leak_check(16000);
+#else
+ leak_check(0);
+#endif
succeed(__FILE__);
}
#ifndef OBJC_NO_GC
#include <auto_zone.h>
#else
-static void* objc_collectableZone(void) { return NULL; }
static BOOL auto_zone_is_valid_pointer(void *a, void *b) { return a||b; }
#endif
#include "test.h"
--- /dev/null
+/*
+
+TEST_CONFIG MEM=mrc
+TEST_ENV OBJC_PRINT_CUSTOM_RR=YES OBJC_PRINT_CUSTOM_AWZ=YES
+
+TEST_BUILD
+ $C{COMPILE} $DIR/customrr-nsobject.m -o customrr-nsobject-awz.out -DSWIZZLE_AWZ=1
+END
+
+TEST_RUN_OUTPUT
+objc\[\d+\]: CUSTOM AWZ: NSObject \(meta\)
+OK: customrr-nsobject-awz.out
+END
+
+*/
+
--- /dev/null
+/*
+
+TEST_CONFIG MEM=mrc
+TEST_ENV OBJC_PRINT_CUSTOM_RR=YES OBJC_PRINT_CUSTOM_AWZ=YES
+
+TEST_BUILD
+ $C{COMPILE} $DIR/customrr-nsobject.m -o customrr-nsobject-none.out
+END
+
+TEST_RUN_OUTPUT
+OK: customrr-nsobject-none.out
+END
+
+*/
+
--- /dev/null
+/*
+
+TEST_CONFIG MEM=mrc
+TEST_ENV OBJC_PRINT_CUSTOM_RR=YES OBJC_PRINT_CUSTOM_AWZ=YES
+
+TEST_BUILD
+ $C{COMPILE} $DIR/customrr-nsobject.m -o customrr-nsobject-rr.out -DSWIZZLE_RELEASE=1
+END
+
+TEST_RUN_OUTPUT
+objc\[\d+\]: CUSTOM RR: NSObject
+OK: customrr-nsobject-rr.out
+END
+
+*/
+
--- /dev/null
+/*
+
+TEST_CONFIG MEM=mrc
+TEST_ENV OBJC_PRINT_CUSTOM_RR=YES OBJC_PRINT_CUSTOM_AWZ=YES
+
+TEST_BUILD
+ $C{COMPILE} $DIR/customrr-nsobject.m -o customrr-nsobject-rrawz.out -DSWIZZLE_RELEASE=1 -DSWIZZLE_AWZ=1
+END
+
+TEST_RUN_OUTPUT
+objc\[\d+\]: CUSTOM AWZ: NSObject \(meta\)
+objc\[\d+\]: CUSTOM RR: NSObject
+OK: customrr-nsobject-rrawz.out
+END
+
+*/
+
--- /dev/null
+// This file is used in the customrr-nsobject-*.m tests
+
+#include "test.h"
+#include <objc/NSObject.h>
+
+#if __OBJC2__
+# define BYPASS 1
+#else
+// old ABI does not implement the optimization
+# define BYPASS 0
+#endif
+
+static int Retains;
+static int Releases;
+static int Autoreleases;
+static int PlusInitializes;
+static int Allocs;
+static int AllocWithZones;
+
+id (*RealRetain)(id self, SEL _cmd);
+void (*RealRelease)(id self, SEL _cmd);
+id (*RealAutorelease)(id self, SEL _cmd);
+id (*RealAlloc)(id self, SEL _cmd);
+id (*RealAllocWithZone)(id self, SEL _cmd, void *zone);
+
+id HackRetain(id self, SEL _cmd) { Retains++; return RealRetain(self, _cmd); }
+void HackRelease(id self, SEL _cmd) { Releases++; return RealRelease(self, _cmd); }
+id HackAutorelease(id self, SEL _cmd) { Autoreleases++; return RealAutorelease(self, _cmd); }
+
+id HackAlloc(Class self, SEL _cmd) { Allocs++; return RealAlloc(self, _cmd); }
+id HackAllocWithZone(Class self, SEL _cmd, void *zone) { AllocWithZones++; return RealAllocWithZone(self, _cmd, zone); }
+
+void HackPlusInitialize(id self __unused, SEL _cmd __unused) { PlusInitializes++; }
+
+
+int main(int argc __unused, char **argv)
+{
+ Class cls = objc_getClass("NSObject");
+ Method meth;
+
+ meth = class_getClassMethod(cls, @selector(initialize));
+ method_setImplementation(meth, (IMP)HackPlusInitialize);
+
+ // We either swizzle the method normally (testing that it properly
+ // disables optimizations), or we hack the implementation into place
+ // behind objc's back (so we can see whether it got called with the
+ // optimizations still enabled).
+
+ meth = class_getClassMethod(cls, @selector(allocWithZone:));
+ RealAllocWithZone = (typeof(RealAllocWithZone))method_getImplementation(meth);
+#if SWIZZLE_AWZ
+ method_setImplementation(meth, (IMP)HackAllocWithZone);
+#else
+ ((IMP *)meth)[2] = (IMP)HackAllocWithZone;
+#endif
+
+ meth = class_getInstanceMethod(cls, @selector(release));
+ RealRelease = (typeof(RealRelease))method_getImplementation(meth);
+#if SWIZZLE_RELEASE
+ method_setImplementation(meth, (IMP)HackRelease);
+#else
+ ((IMP *)meth)[2] = (IMP)HackRelease;
+#endif
+
+ // These other methods get hacked for counting purposes only
+
+ meth = class_getInstanceMethod(cls, @selector(retain));
+ RealRetain = (typeof(RealRetain))method_getImplementation(meth);
+ ((IMP *)meth)[2] = (IMP)HackRetain;
+
+ meth = class_getInstanceMethod(cls, @selector(autorelease));
+ RealAutorelease = (typeof(RealAutorelease))method_getImplementation(meth);
+ ((IMP *)meth)[2] = (IMP)HackAutorelease;
+
+ meth = class_getClassMethod(cls, @selector(alloc));
+ RealAlloc = (typeof(RealAlloc))method_getImplementation(meth);
+ ((IMP *)meth)[2] = (IMP)HackAlloc;
+
+ // Verify that the swizzles occurred before +initialize by provoking it now
+ testassert(PlusInitializes == 0);
+ [NSObject self];
+ testassert(PlusInitializes == 1);
+
+#if !__OBJC2__
+ // hack: fool the expected output because old ABI doesn't optimize this
+# if SWIZZLE_AWZ
+ fprintf(stderr, "objc[1234]: CUSTOM AWZ: NSObject (meta)\n");
+# endif
+# if SWIZZLE_RELEASE
+ fprintf(stderr, "objc[1234]: CUSTOM RR: NSObject\n");
+# endif
+#endif
+
+ id obj;
+
+ Allocs = 0;
+ AllocWithZones = 0;
+ obj = objc_alloc(cls);
+#if SWIZZLE_AWZ || !BYPASS
+ testprintf("swizzled AWZ should be called\n");
+ testassert(Allocs == 1);
+ testassert(AllocWithZones == 1);
+#else
+ testprintf("unswizzled AWZ should be bypassed\n");
+ testassert(Allocs == 0);
+ testassert(AllocWithZones == 0);
+#endif
+
+ Allocs = 0;
+ AllocWithZones = 0;
+ obj = [NSObject alloc];
+#if SWIZZLE_AWZ || !BYPASS
+ testprintf("swizzled AWZ should be called\n");
+ testassert(Allocs == 1);
+ testassert(AllocWithZones == 1);
+#else
+ testprintf("unswizzled AWZ should be bypassed\n");
+ testassert(Allocs == 1);
+ testassert(AllocWithZones == 0);
+#endif
+
+ Retains = 0;
+ objc_retain(obj);
+#if SWIZZLE_RELEASE || !BYPASS
+ testprintf("swizzled release should force retain\n");
+ testassert(Retains == 1);
+#else
+ testprintf("unswizzled release should bypass retain\n");
+ testassert(Retains == 0);
+#endif
+
+ Releases = 0;
+ Autoreleases = 0;
+ PUSH_POOL {
+ objc_autorelease(obj);
+#if SWIZZLE_RELEASE || !BYPASS
+ testprintf("swizzled release should force autorelease\n");
+ testassert(Autoreleases == 1);
+#else
+ testprintf("unswizzled release should bypass autorelease\n");
+ testassert(Autoreleases == 0);
+#endif
+ } POP_POOL
+
+#if SWIZZLE_RELEASE || !BYPASS
+ testprintf("swizzled release should be called\n");
+ testassert(Releases == 1);
+#else
+ testprintf("unswizzled release should be bypassed\n");
+ testassert(Releases == 0);
+#endif
+
+ succeed(basename(argv[0]));
+}
/* TEST_BUILD_OUTPUT
.*designatedinit.m:\d+:\d+: warning: designated initializer should only invoke a designated initializer on 'super'.*
.*designatedinit.m:\d+:\d+: note: .*
-.*designatedinit.m:\d+:\d+: warning: designated initializer missing a 'super' call to a designated initializer of the super class.*
-.*designatedinit.m:\d+:\d+: note: .*
.*designatedinit.m:\d+:\d+: warning: method override for the designated initializer of the superclass '-init' not found.*
.*NSObject.h:\d+:\d+: note: .*
END */
+#define NS_ENFORCE_NSOBJECT_DESIGNATED_INITIALIZER 1
#include "test.h"
#include <objc/NSObject.h>
TEST_RUN_OUTPUT
objc\[\d+\]: Class GKScore is implemented in both [^\s]+ and [^\s]+ One of the two will be used. Which one is undefined.
CRASHED: SIG(ILL|TRAP)
+OR
+OK: duplicatedClasses.m
END
*/
int main()
{
+ if (objc_collectingEnabled()) {
+ testwarn("rdar://19042235 test disabled because GameKit is not GC");
+ succeed(__FILE__);
+ }
void *dl = dlopen("/System/Library/Frameworks/GameKit.framework/GameKit", RTLD_LAZY);
if (!dl) fail("couldn't open GameKit");
fail("should have crashed already");
/*
rdar://8553305
-TEST_CONFIG SDK=iphoneos
+TEST_CONFIG OS=iphoneos
TEST_CRASHES
TEST_BUILD
/*
rdar://8553305
-TEST_CONFIG SDK=iphoneos
+TEST_CONFIG OS=iphoneos
TEST_CRASHES
TEST_BUILD
/*
rdar://8553305
-TEST_CONFIG SDK=iphoneos
+TEST_CONFIG OS=iphoneos
TEST_CRASHES
TEST_BUILD
/*
rdar://8553305
-TEST_CONFIG SDK=iphoneos
+TEST_CONFIG OS=iphoneos
TEST_CRASHES
TEST_BUILD
/*
rdar://8553305
-TEST_CONFIG SDK=iphoneos
+TEST_CONFIG OS=iphoneos
TEST_CRASHES
TEST_BUILD
/*
rdar://8553305
-TEST_CONFIG SDK=iphoneos
+TEST_CONFIG OS=iphoneos
TEST_CRASHES
TEST_BUILD
/*
rdar://8553305
-TEST_CONFIG SDK=iphoneos
+TEST_CONFIG OS=iphoneos
TEST_CRASHES
TEST_BUILD
/*
rdar://8553305
-TEST_CONFIG SDK=iphoneos
+TEST_CONFIG OS=iphoneos
TEST_CRASHES
TEST_BUILD
/*
rdar://8553305
-TEST_CONFIG SDK=iphoneos
+TEST_CONFIG OS=iphoneos
TEST_CRASHES
TEST_BUILD
/*
rdar://8553305
-TEST_CONFIG SDK=iphoneos
+TEST_CONFIG OS=iphoneos
TEST_CRASHES
TEST_BUILD
/*
rdar://8553305
-TEST_CONFIG SDK=iphoneos
+TEST_DISABLED rdar://19200100
+
+TEST_CONFIG OS=iphoneos
TEST_CRASHES
TEST_BUILD
#define str2(x) str(x)
__BEGIN_DECLS
-id nop(id self) { return self; }
+// not id to avoid ARC operations because the class doesn't implement RR methods
+void* nop(void* self) { return self; }
__END_DECLS
asm(
#elif defined(__i386__)
asm(".text \n _getSP: movl %esp, %eax \n ret \n");
#elif defined(__arm__)
- asm(".text \n _getSP: mov r0, sp \n bx lr \n");
+ asm(".text \n .thumb \n .thumb_func _getSP \n "
+ "_getSP: mov r0, sp \n bx lr \n");
#elif defined(__arm64__)
asm(".text \n _getSP: mov x0, sp \n ret \n");
#else
Class oldTestRoot;
Class oldSub1;
Class newSub1;
-#if !__OBJC2__
- Class oldSub2;
- Class newSub2;
- uintptr_t buf[20];
-#endif
// objc_getFutureClass with existing class
oldTestRoot = objc_getFutureClass("TestRoot");
// objc_getFutureClass a second time
testassert(oldSub1 == objc_getFutureClass("Sub1"));
-#if !__OBJC2__
- // objc_setFutureClass with existing class
- oldSub2 = objc_getClass("Sub2");
- testassert(oldSub2 == [Sub2 class]);
- testassert(oldSub2 == class_getSuperclass(objc_getClass("SubSub2")));
- objc_setFutureClass((Class)buf, "Sub2");
- testassert(0 == strcmp(class_getName((Class)buf), "Sub2"));
- newSub2 = objc_getClass("Sub2");
- testassert(newSub2 == (Class)buf);
- testassert(newSub2 != oldSub2);
- // check classrefs
- testassert(newSub2 == [Sub2 class]);
- testassert(newSub2 == [newSub2 class]);
- testassert(newSub2 == [newSub2 classref]);
- testassert(newSub2 != [oldSub2 class]);
- // check superclass chains
- testassert(newSub2 == class_getSuperclass(objc_getClass("SubSub2")));
-#else
- // 64-bit ABI ignores objc_setFutureClass.
-#endif
-
// Load class Sub1
dlopen("future2.dylib", 0);
testassert(1 == [oldSub1 method]);
testassert(1 == [newSub1 method]);
-#if !__OBJC2__
- testassert(2 == [newSub2 method]);
- testassert(2 == [oldSub2 method]);
- testassert(3 == [SubSub2 method]);
-#endif
succeed(__FILE__);
}
// gc-off app loading gc-off dylib: should work
/*
-TEST_CONFIG MEM=mrc,arc SDK=macos
+TEST_CONFIG MEM=mrc,arc OS=macosx
TEST_BUILD
$C{COMPILE_C} $DIR/gc.c -dynamiclib -o libnoobjc.dylib
// gc-on app loading gc-off dylib: should crash
/*
-TEST_CONFIG MEM=gc SDK=macos
+TEST_CONFIG MEM=gc OS=macosx
TEST_CRASHES
TEST_RUN_OUTPUT
/*
-TEST_CONFIG SDK=macos
+TEST_CONFIG OS=macosx
TEST_BUILD
$C{COMPILE_C} $DIR/gc.c -dynamiclib -o libnoobjc.dylib
// linker sees librequiresgc.fake.dylib, runtime uses librequiresgc.dylib
/*
-TEST_CONFIG MEM=mrc,arc SDK=macos
+TEST_CONFIG MEM=mrc,arc OS=macosx
TEST_CRASHES
TEST_RUN_OUTPUT
// linker sees librequiresgc.fake.dylib, runtime uses librequiresgc.dylib
/*
-TEST_CONFIG MEM=gc SDK=macos
+TEST_CONFIG MEM=gc OS=macosx
TEST_BUILD
$C{COMPILE_C} $DIR/gc.c -dynamiclib -o libnoobjc.dylib
/*
-TEST_CONFIG SDK=macos
+TEST_CONFIG OS=macosx
TEST_BUILD
$C{COMPILE_C} $DIR/gc.c -dynamiclib -o libnoobjc.dylib
/*
-TEST_CONFIG SDK=macos
+TEST_CONFIG OS=macosx
TEST_BUILD
$C{COMPILE_C} $DIR/gc.c -dynamiclib -o libnoobjc.dylib
@implementation Empty
+(id)class { return self; }
+(void)initialize { }
-+(id)forward:(SEL)sel :(marg_list)margs {
- (void)sel; (void)margs;
- state = 1;
- return nil;
-}
@end
+void *forward_handler(id obj, SEL _cmd) {
+ testassert(obj == [Empty class]);
+ testassert(_cmd == @selector(ordinary));
+ state = 1;
+ return nil;
+}
+
@interface Empty (Unimplemented)
+(id)ordinary;
+(id)retain;
{
Class cls;
+ objc_setForwardHandler((void*)&forward_handler, nil);
+
// Test selector API
testassert(sel_registerName("retain") == @selector(retain));
leak_mark();
count = 10000000;
testonthread(testblock);
+#if __OBJC_GC__
+ testwarn("rdar://19042235 possible leaks suppressed under GC");
+ leak_check(2000);
+#else
leak_check(0);
+#endif
}
succeed(__FILE__);
--- /dev/null
+// TEST_CONFIG MEM=arc
+// TEST_CFLAGS -framework Foundation
+
+// Problem: If weak reference operations provoke +initialize, the runtime
+// can deadlock (recursive weak lock, or lock inversion between weak lock
+// and +initialize lock).
+// Solution: object_setClass() and objc_storeWeak() perform +initialize
+// if needed so that no weakly-referenced object can ever have an
+// un-+initialized isa.
+
+#include <Foundation/Foundation.h>
+#include <objc/objc-internal.h>
+#include "test.h"
+
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+#pragma clang diagnostic ignored "-Warc-unsafe-retained-assign"
+
+// This is StripedMap's pointer hash
+// Returns the stripe index (0..63) for obj's address, mirroring the
+// runtime's StripedMap pointer hash so the test can target a specific
+// striped weak lock.
+uintptr_t hash(id obj) {
+ uintptr_t addr = (uintptr_t)obj;
+ return ((addr >> 4) ^ (addr >> 9)) % 64;
+}
+
+// True if o1 and o2 hash to the same stripe, i.e. weak operations on
+// them would contend for the same striped weak lock.
+bool sameAlignment(id o1, id o2)
+{
+ return hash(o1) == hash(o2);
+}
+
+// Return a new string object that uses the same striped weak locks as `obj`.
+NSMutableString *newAlignedString(id obj)
+{
+ NSMutableArray *strings = [NSMutableArray new];
+ NSMutableString *result;
+ // Allocate strings until one lands on obj's lock stripe.
+ // NOTE(review): each candidate is kept alive in `strings`, presumably
+ // so the allocator cannot hand back the same (failing) address on the
+ // next iteration — confirm.
+ do {
+ result = [NSMutableString new];
+ [strings addObject:result];
+ } while (!sameAlignment(obj, result));
+ return result;
+}
+
+
+__weak NSObject *weak1;
+__weak NSMutableString *weak2;
+NSMutableString *strong2;
+
+@interface A : NSObject @end
+@implementation A
+// Runs on first use of A; performs a weak store (weak store #2) while
+// the runtime is still inside +initialize for this class.
++(void)initialize {
+ weak2 = strong2; // weak store #2
+ strong2 = nil;
+}
+@end
+
+void testA()
+{
+ // Weak store #1 provokes +initialize which performs weak store #2.
+ // Solution: weak store #1 runs +initialize if needed
+ // without holding locks.
+ @autoreleasepool {
+ A *obj = [A new];
+ strong2 = newAlignedString(obj);
+ // NOTE(review): KVO registration presumably swaps obj's isa to a
+ // dynamically-created, not-yet-initialized subclass before the weak
+ // store below — confirm against Foundation's KVO behavior.
+ [obj addObserver:obj forKeyPath:@"foo" options:0 context:0];
+ weak1 = obj; // weak store #1
+ [obj removeObserver:obj forKeyPath:@"foo"];
+ obj = nil;
+ }
+}
+
+
+__weak NSObject *weak3;
+__weak NSMutableString *weak4;
+NSMutableString *strong4;
+
+@interface B : NSObject @end
+@implementation B
+// Runs during the weak load in testB; performs a weak store
+// (weak store #4) while the runtime is initializing this class.
++(void)initialize {
+ weak4 = strong4; // weak store #4
+ strong4 = nil;
+}
+@end
+
+
+void testB()
+{
+ // Weak load #3 provokes +initialize which performs weak store #4.
+ // Solution: object_setClass() runs +initialize if needed
+ // without holding locks.
+ @autoreleasepool {
+ B *obj = [B new];
+ strong4 = newAlignedString(obj);
+ weak3 = obj;
+ // NOTE(review): KVO here presumably changes obj's class so the weak
+ // load below encounters a not-yet-initialized isa — confirm.
+ [obj addObserver:obj forKeyPath:@"foo" options:0 context:0];
+ [weak3 self]; // weak load #3
+ [obj removeObserver:obj forKeyPath:@"foo"];
+ obj = nil;
+ }
+}
+
+
+__weak id weak5;
+
+@interface C : NSObject @end
+@implementation C
+// Performs a weak store of a new C instance while C's own +initialize
+// is still in progress.
++(void)initialize {
+ weak5 = [self new];
+}
+@end
+
+void testC()
+{
+ // +initialize performs a weak store of itself.
+ // Make sure the retry in objc_storeWeak() doesn't spin.
+ @autoreleasepool {
+ [C self]; // first message to C triggers +initialize
+ }
+}
+
+
+int main()
+{
+ // Each scenario would deadlock without the fix; turn a hang into a
+ // crash so the test harness reports failure promptly.
+ alarm(10); // replace hangs with crashes
+
+ testA();
+ testB();
+ testC();
+
+ succeed(__FILE__);
+}
+
-// TEST_CONFIG MEM=gc SDK=macos
+// TEST_CONFIG MEM=gc OS=macosx
#include "test.h"
#include <string.h>
--- /dev/null
+/*
+TEST_BUILD
+ $C{COMPILE} $DIR/load-noobjc.m -o load-noobjc.out
+ $C{COMPILE} $DIR/load-noobjc2.m -o libload-noobjc2.dylib -bundle -bundle_loader load-noobjc.out
+ $C{COMPILE} $DIR/load-noobjc3.m -o libload-noobjc3.dylib -bundle -bundle_loader load-noobjc.out
+END
+*/
+
+#include "test.h"
+
+#if !__OBJC2__
+// old runtime can't fix this deadlock
+
+int main()
+{
+ succeed(__FILE__);
+}
+
+#else
+
+#include <dlfcn.h>
+
+int state = 0;
+semaphore_t go;
+
+// Secondary thread: dlopen a bundle whose +load signals `go` and then
+// blocks forever (libload-noobjc2), so this dlopen never returns and the
+// runtime's +load machinery stays busy.
+void *thread(void *arg __unused)
+{
+ objc_registerThreadWithCollector();
+ dlopen("libload-noobjc2.dylib", RTLD_LAZY);
+ fail("dlopen should not have returned");
+}
+
+int main()
+{
+ semaphore_create(mach_task_self(), &go, SYNC_POLICY_FIFO, 0);
+
+ pthread_t th;
+ pthread_create(&th, nil, &thread, nil);
+
+ // Wait for thread to stop in libload-noobjc2's +load method.
+ semaphore_wait(go);
+
+ // Run noobjc3's constructor function.
+ // There's no objc code here so it shouldn't require the +load lock.
+ void *dlh = dlopen("libload-noobjc3.dylib", RTLD_LAZY);
+ testassert(dlh);
+ testassert(state == 1); // constructor in libload-noobjc3 ran
+
+ succeed(__FILE__);
+}
+
+#endif
--- /dev/null
+#include "test.h"
+#if __OBJC2__
+
+extern semaphore_t go;
+
+OBJC_ROOT_CLASS
+@interface noobjc @end
+@implementation noobjc
+// Signal the bundle-loader test that +load is running, then block
+// forever so this +load never completes.
++(void)load
+{
+ semaphore_signal(go);
+ while (1) sleep(1);
+}
+@end
+
+#endif
--- /dev/null
+#include "test.h"
+
+#if __OBJC2__
+
+extern int state;
+
+// Plain C constructor with no Objective-C content; sets `state` so the
+// main test can verify it ran without needing the +load lock.
+__attribute__((constructor))
+static void ctor(void)
+{
+ state = 1;
+}
+
+#endif
-// TEST_CFLAGS -Wno-unused-parameter
+// TEST_CFLAGS -Wno-unused-parameter -Wundeclared-selector
#include "test.h"
#include "testroot.i"
#include <objc/runtime.h>
#include <objc/objc-internal.h>
#include <objc/objc-abi.h>
+#include <simd/simd.h>
+
+// rdar://21694990 simd.h should have a vector_equal(a, b) function
+// Elementwise equality for vector_ulong2: true only if both lanes match.
+static bool vector_equal(vector_ulong2 lhs, vector_ulong2 rhs) {
+ return vector_all(lhs == rhs);
+}
#if __arm64__
// no stret dispatchers
// struct stret (*stretmsg0)(id, SEL) __attribute__((unused));
double (*fpmsg0)(id, SEL) __attribute__((unused));
long double (*lfpmsg0)(id, SEL) __attribute__((unused));
+vector_ulong2 (*vecmsg0)(id, SEL) __attribute__((unused));
+#define VEC1 ((vector_ulong2){1, 1})
+#define VEC2 ((vector_ulong2){2, 2})
+#define VEC3 ((vector_ulong2){3, 3})
+#define VEC4 ((vector_ulong2){4, 4})
+#define VEC5 ((vector_ulong2){5, 5})
+#define VEC6 ((vector_ulong2){6, 6})
+#define VEC7 ((vector_ulong2){7, 7})
+#define VEC8 ((vector_ulong2){8, 8})
#define CHECK_ARGS(sel) \
do { \
testassert(self == SELF); \
- testassert(_cmd == sel_registerName(#sel "::::::::::::::::::::::::::::"));\
+ testassert(_cmd == sel_registerName(#sel "::::::::::::::::::::::::::::::::::::"));\
+ testassert(vector_all(v1 == 1)); \
+ testassert(vector_all(v2 == 2)); \
+ testassert(vector_all(v3 == 3)); \
+ testassert(vector_all(v4 == 4)); \
+ testassert(vector_all(v5 == 5)); \
+ testassert(vector_all(v6 == 6)); \
+ testassert(vector_all(v7 == 7)); \
+ testassert(vector_all(v8 == 8)); \
testassert(i1 == 1); \
testassert(i2 == 2); \
testassert(i3 == 3); \
long long LL_RESULT = __LONG_LONG_MAX__ - 2LL*__INT_MAX__;
double FP_RESULT = __DBL_MIN__ + __DBL_EPSILON__;
long double LFP_RESULT = __LDBL_MIN__ + __LDBL_EPSILON__;
+vector_ulong2 VEC_RESULT = { 0x1234567890abcdefULL, 0xfedcba0987654321ULL };
// STRET_RESULT in test.h
static struct stret zero;
};
+@interface Super (Prototypes)
+
+// Method prototypes to pacify -Wundeclared-selector.
+
+-(id)idret:
+ (vector_ulong2)v1 :(vector_ulong2)v2 :(vector_ulong2)v3 :(vector_ulong2)v4 :(vector_ulong2)v5 :(vector_ulong2)v6 :(vector_ulong2)v7 :(vector_ulong2)v8 :(int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15;
+
+-(long long)llret:
+ (vector_ulong2)v1 :(vector_ulong2)v2 :(vector_ulong2)v3 :(vector_ulong2)v4 :(vector_ulong2)v5 :(vector_ulong2)v6 :(vector_ulong2)v7 :(vector_ulong2)v8 :(int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15;
+
+-(struct stret)stret:
+ (vector_ulong2)v1 :(vector_ulong2)v2 :(vector_ulong2)v3 :(vector_ulong2)v4 :(vector_ulong2)v5 :(vector_ulong2)v6 :(vector_ulong2)v7 :(vector_ulong2)v8 :(int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15;
+
+-(double)fpret:
+ (vector_ulong2)v1 :(vector_ulong2)v2 :(vector_ulong2)v3 :(vector_ulong2)v4 :(vector_ulong2)v5 :(vector_ulong2)v6 :(vector_ulong2)v7 :(vector_ulong2)v8 :(int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15;
+
+-(long double)lfpret:
+ (vector_ulong2)v1 :(vector_ulong2)v2 :(vector_ulong2)v3 :(vector_ulong2)v4 :(vector_ulong2)v5 :(vector_ulong2)v6 :(vector_ulong2)v7 :(vector_ulong2)v8 :(int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15;
+
+-(vector_ulong2)vecret:
+ (vector_ulong2)v1 :(vector_ulong2)v2 :(vector_ulong2)v3 :(vector_ulong2)v4 :(vector_ulong2)v5 :(vector_ulong2)v6 :(vector_ulong2)v7 :(vector_ulong2)v8 :(int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15;
+
+@end
+
+
+// Zero all volatile registers.
+#if __cplusplus
+extern "C"
+#endif
+void stomp(void);
+
+#if __x86_64__
+asm("\n .text"
+ "\n .globl _stomp"
+ "\n _stomp:"
+ "\n mov $0, %rax"
+ "\n mov $0, %rcx"
+ "\n mov $0, %rdx"
+ "\n mov $0, %rsi"
+ "\n mov $0, %rdi"
+ "\n mov $0, %r8"
+ "\n mov $0, %r9"
+ "\n mov $0, %r10"
+ "\n mov $0, %r11"
+ "\n xorps %xmm0, %xmm0"
+ "\n xorps %xmm1, %xmm1"
+ "\n xorps %xmm2, %xmm2"
+ "\n xorps %xmm3, %xmm3"
+ "\n xorps %xmm4, %xmm4"
+ "\n xorps %xmm5, %xmm5"
+ "\n xorps %xmm6, %xmm6"
+ "\n xorps %xmm7, %xmm7"
+ "\n xorps %xmm8, %xmm8"
+ "\n xorps %xmm9, %xmm9"
+ "\n xorps %xmm10, %xmm10"
+ "\n xorps %xmm11, %xmm11"
+ "\n xorps %xmm12, %xmm12"
+ "\n xorps %xmm13, %xmm13"
+ "\n xorps %xmm14, %xmm14"
+ "\n xorps %xmm15, %xmm15"
+ "\n ret");
+
+#elif __i386__
+asm("\n .text"
+ "\n .globl _stomp"
+ "\n _stomp:"
+ "\n mov $0, %eax"
+ "\n mov $0, %ecx"
+ "\n mov $0, %edx"
+ "\n xorps %xmm0, %xmm0"
+ "\n xorps %xmm1, %xmm1"
+ "\n xorps %xmm2, %xmm2"
+ "\n xorps %xmm3, %xmm3"
+ "\n xorps %xmm4, %xmm4"
+ "\n xorps %xmm5, %xmm5"
+ "\n xorps %xmm6, %xmm6"
+ "\n xorps %xmm7, %xmm7"
+ "\n ret");
+
+#elif __arm64__
+asm("\n .text"
+ "\n .globl _stomp"
+ "\n _stomp:"
+ "\n mov x0, #0"
+ "\n mov x1, #0"
+ "\n mov x2, #0"
+ "\n mov x3, #0"
+ "\n mov x4, #0"
+ "\n mov x5, #0"
+ "\n mov x6, #0"
+ "\n mov x7, #0"
+ "\n mov x8, #0"
+ "\n mov x9, #0"
+ "\n mov x10, #0"
+ "\n mov x11, #0"
+ "\n mov x12, #0"
+ "\n mov x13, #0"
+ "\n mov x14, #0"
+ "\n mov x15, #0"
+ "\n mov x16, #0"
+ "\n mov x17, #0"
+ "\n movi d0, #0"
+ "\n movi d1, #0"
+ "\n movi d2, #0"
+ "\n movi d3, #0"
+ "\n movi d4, #0"
+ "\n movi d5, #0"
+ "\n movi d6, #0"
+ "\n movi d7, #0"
+ "\n ret"
+ );
+
+#elif __arm__
+asm("\n .text"
+ "\n .globl _stomp"
+ "\n .thumb_func _stomp"
+ "\n _stomp:"
+ "\n mov r0, #0"
+ "\n mov r1, #0"
+ "\n mov r2, #0"
+ "\n mov r3, #0"
+ "\n mov r9, #0"
+ "\n mov r12, #0"
+ "\n vmov.i32 q0, #0"
+ "\n vmov.i32 q1, #0"
+ "\n vmov.i32 q2, #0"
+ "\n vmov.i32 q3, #0"
+ "\n vmov.i32 q4, #0"
+ "\n vmov.i32 q5, #0"
+ "\n vmov.i32 q6, #0"
+ "\n vmov.i32 q7, #0"
+ "\n vmov.i32 q8, #0"
+ "\n vmov.i32 q9, #0"
+ "\n vmov.i32 q10, #0"
+ "\n vmov.i32 q11, #0"
+ "\n vmov.i32 q12, #0"
+ "\n vmov.i32 q13, #0"
+ "\n vmov.i32 q14, #0"
+ "\n vmov.i32 q15, #0"
+ "\n bx lr"
+ );
+
+#else
+# error unknown architecture
+#endif
+
+
@implementation Super
-(struct stret)stret { return STRET_RESULT; }
--(id)idret:
- (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
+// The IMPL_ methods are not called directly. Instead the non IMPL_ name is
+// called. The resolver function installs the real method. This allows
+// the resolver function to stomp on registers to help test register
+// preservation in the uncached path.
+
++(BOOL) resolveInstanceMethod:(SEL)sel
+{
+ const char *name = sel_getName(sel);
+ // Only resolve the many-argument test selectors (8+ consecutive colons).
+ if (! strstr(name, "::::::::")) return false;
+
+ testprintf("resolving %s\n", name);
+
+ // Clobber volatile registers so the uncached dispatch path must have
+ // preserved the message's register arguments (see comment above).
+ stomp();
+ char *realName;
+ asprintf(&realName, "IMPL_%s", name);
+ SEL realSel = sel_registerName(realName);
+ free(realName);
+
+ // Install the IMPL_-prefixed implementation under the requested name.
+ // class_getMethodImplementation returns the forwarding IMP when no
+ // such method exists; decline to resolve in that case.
+ IMP imp = class_getMethodImplementation(self, realSel);
+ if (imp == &_objc_msgForward) return false;
+ return class_addMethod(self, sel, imp, "");
+}
+
+-(id)IMPL_idret:
+(vector_ulong2)v1 :(vector_ulong2)v2 :(vector_ulong2)v3 :(vector_ulong2)v4 :(vector_ulong2)v5 :(vector_ulong2)v6 :(vector_ulong2)v7 :(vector_ulong2)v8 :(int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
CHECK_ARGS(idret);
state = 1;
return ID_RESULT;
}
--(long long)llret:
- (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
+-(long long)IMPL_llret:
+ (vector_ulong2)v1 :(vector_ulong2)v2 :(vector_ulong2)v3 :(vector_ulong2)v4 :(vector_ulong2)v5 :(vector_ulong2)v6 :(vector_ulong2)v7 :(vector_ulong2)v8 :(int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
CHECK_ARGS(llret);
state = 2;
return LL_RESULT;
}
--(struct stret)stret:
- (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
+-(struct stret)IMPL_stret:
+ (vector_ulong2)v1 :(vector_ulong2)v2 :(vector_ulong2)v3 :(vector_ulong2)v4 :(vector_ulong2)v5 :(vector_ulong2)v6 :(vector_ulong2)v7 :(vector_ulong2)v8 :(int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
CHECK_ARGS(stret);
state = 3;
return STRET_RESULT;
}
--(double)fpret:
- (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
+-(double)IMPL_fpret:
+ (vector_ulong2)v1 :(vector_ulong2)v2 :(vector_ulong2)v3 :(vector_ulong2)v4 :(vector_ulong2)v5 :(vector_ulong2)v6 :(vector_ulong2)v7 :(vector_ulong2)v8 :(int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
CHECK_ARGS(fpret);
state = 4;
return FP_RESULT;
}
--(long double)lfpret:
- (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
+-(long double)IMPL_lfpret:
+ (vector_ulong2)v1 :(vector_ulong2)v2 :(vector_ulong2)v3 :(vector_ulong2)v4 :(vector_ulong2)v5 :(vector_ulong2)v6 :(vector_ulong2)v7 :(vector_ulong2)v8 :(int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
CHECK_ARGS(lfpret);
state = 5;
return LFP_RESULT;
}
+-(vector_ulong2)IMPL_vecret:
+ (vector_ulong2)v1 :(vector_ulong2)v2 :(vector_ulong2)v3 :(vector_ulong2)v4 :(vector_ulong2)v5 :(vector_ulong2)v6 :(vector_ulong2)v7 :(vector_ulong2)v8 :(int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
+{
+ CHECK_ARGS(vecret);
+ state = 6;
+ return VEC_RESULT;
+}
+
-(id)idret_noarg
{
return LFP_RESULT;
}
+-(vector_ulong2)vecret_noarg
+{
+ CHECK_ARGS_NOARG(vecret);
+ state = 16;
+ return VEC_RESULT;
+}
+
-(void)voidret_nop
{
return LFP_RESULT;
}
+-(vector_ulong2)vecret_nop
+{
+ return VEC_RESULT;
+}
+
#define STRET_IMP(n) \
+(struct stret_##n)stret_##n##_zero \
{ \
+(id)idret:
- (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
+ (vector_ulong2)v1 :(vector_ulong2)v2 :(vector_ulong2)v3 :(vector_ulong2)v4 :(vector_ulong2)v5 :(vector_ulong2)v6 :(vector_ulong2)v7 :(vector_ulong2)v8 :(int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
fail("+idret called instead of -idret");
CHECK_ARGS(idret);
}
+(long long)llret:
- (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
+ (vector_ulong2)v1 :(vector_ulong2)v2 :(vector_ulong2)v3 :(vector_ulong2)v4 :(vector_ulong2)v5 :(vector_ulong2)v6 :(vector_ulong2)v7 :(vector_ulong2)v8 :(int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
fail("+llret called instead of -llret");
CHECK_ARGS(llret);
}
+(struct stret)stret:
- (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
+ (vector_ulong2)v1 :(vector_ulong2)v2 :(vector_ulong2)v3 :(vector_ulong2)v4 :(vector_ulong2)v5 :(vector_ulong2)v6 :(vector_ulong2)v7 :(vector_ulong2)v8 :(int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
fail("+stret called instead of -stret");
CHECK_ARGS(stret);
}
+(double)fpret:
- (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
+ (vector_ulong2)v1 :(vector_ulong2)v2 :(vector_ulong2)v3 :(vector_ulong2)v4 :(vector_ulong2)v5 :(vector_ulong2)v6 :(vector_ulong2)v7 :(vector_ulong2)v8 :(int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
fail("+fpret called instead of -fpret");
CHECK_ARGS(fpret);
}
+(long double)lfpret:
- (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
+ (vector_ulong2)v1 :(vector_ulong2)v2 :(vector_ulong2)v3 :(vector_ulong2)v4 :(vector_ulong2)v5 :(vector_ulong2)v6 :(vector_ulong2)v7 :(vector_ulong2)v8 :(int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
fail("+lfpret called instead of -lfpret");
CHECK_ARGS(lfpret);
CHECK_ARGS_NOARG(lfpret);
}
++(vector_ulong2)vecret_noarg
+{
+ fail("+vecret_noarg called instead of -vecret_noarg");
+ CHECK_ARGS_NOARG(vecret);
+}
+
@end
@implementation Sub
--(id)idret:
- (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
+-(id)IMPL_idret:
+ (vector_ulong2)v1 :(vector_ulong2)v2 :(vector_ulong2)v3 :(vector_ulong2)v4 :(vector_ulong2)v5 :(vector_ulong2)v6 :(vector_ulong2)v7 :(vector_ulong2)v8 :(int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
id result;
CHECK_ARGS(idret);
state = 100;
- result = [super idret:i1:i2:i3:i4:i5:i6:i7:i8:i9:i10:i11:i12:i13:f1:f2:f3:f4:f5:f6:f7:f8:f9:f10:f11:f12:f13:f14:f15];
+ result = [super idret:v1:v2:v3:v4:v5:v6:v7:v8:i1:i2:i3:i4:i5:i6:i7:i8:i9:i10:i11:i12:i13:f1:f2:f3:f4:f5:f6:f7:f8:f9:f10:f11:f12:f13:f14:f15];
testassert(state == 1);
testassert(result == ID_RESULT);
state = 101;
return result;
}
--(long long)llret:
- (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
+-(long long)IMPL_llret:
+ (vector_ulong2)v1 :(vector_ulong2)v2 :(vector_ulong2)v3 :(vector_ulong2)v4 :(vector_ulong2)v5 :(vector_ulong2)v6 :(vector_ulong2)v7 :(vector_ulong2)v8 :(int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
long long result;
CHECK_ARGS(llret);
state = 100;
- result = [super llret:i1:i2:i3:i4:i5:i6:i7:i8:i9:i10:i11:i12:i13:f1:f2:f3:f4:f5:f6:f7:f8:f9:f10:f11:f12:f13:f14:f15];
+ result = [super llret:v1:v2:v3:v4:v5:v6:v7:v8:i1:i2:i3:i4:i5:i6:i7:i8:i9:i10:i11:i12:i13:f1:f2:f3:f4:f5:f6:f7:f8:f9:f10:f11:f12:f13:f14:f15];
testassert(state == 2);
testassert(result == LL_RESULT);
state = 102;
return result;
}
--(struct stret)stret:
- (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
+-(struct stret)IMPL_stret:
+ (vector_ulong2)v1 :(vector_ulong2)v2 :(vector_ulong2)v3 :(vector_ulong2)v4 :(vector_ulong2)v5 :(vector_ulong2)v6 :(vector_ulong2)v7 :(vector_ulong2)v8 :(int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
struct stret result;
CHECK_ARGS(stret);
state = 100;
- result = [super stret:i1:i2:i3:i4:i5:i6:i7:i8:i9:i10:i11:i12:i13:f1:f2:f3:f4:f5:f6:f7:f8:f9:f10:f11:f12:f13:f14:f15];
+ result = [super stret:v1:v2:v3:v4:v5:v6:v7:v8:i1:i2:i3:i4:i5:i6:i7:i8:i9:i10:i11:i12:i13:f1:f2:f3:f4:f5:f6:f7:f8:f9:f10:f11:f12:f13:f14:f15];
testassert(state == 3);
testassert(stret_equal(result, STRET_RESULT));
state = 103;
return result;
}
--(double)fpret:
- (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
+-(double)IMPL_fpret:
+ (vector_ulong2)v1 :(vector_ulong2)v2 :(vector_ulong2)v3 :(vector_ulong2)v4 :(vector_ulong2)v5 :(vector_ulong2)v6 :(vector_ulong2)v7 :(vector_ulong2)v8 :(int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
double result;
CHECK_ARGS(fpret);
state = 100;
- result = [super fpret:i1:i2:i3:i4:i5:i6:i7:i8:i9:i10:i11:i12:i13:f1:f2:f3:f4:f5:f6:f7:f8:f9:f10:f11:f12:f13:f14:f15];
+ result = [super fpret:v1:v2:v3:v4:v5:v6:v7:v8:i1:i2:i3:i4:i5:i6:i7:i8:i9:i10:i11:i12:i13:f1:f2:f3:f4:f5:f6:f7:f8:f9:f10:f11:f12:f13:f14:f15];
testassert(state == 4);
testassert(result == FP_RESULT);
state = 104;
return result;
}
--(long double)lfpret:
- (int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
+-(long double)IMPL_lfpret:
+ (vector_ulong2)v1 :(vector_ulong2)v2 :(vector_ulong2)v3 :(vector_ulong2)v4 :(vector_ulong2)v5 :(vector_ulong2)v6 :(vector_ulong2)v7 :(vector_ulong2)v8 :(int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
{
long double result;
CHECK_ARGS(lfpret);
state = 100;
- result = [super lfpret:i1:i2:i3:i4:i5:i6:i7:i8:i9:i10:i11:i12:i13:f1:f2:f3:f4:f5:f6:f7:f8:f9:f10:f11:f12:f13:f14:f15];
+ result = [super lfpret:v1:v2:v3:v4:v5:v6:v7:v8:i1:i2:i3:i4:i5:i6:i7:i8:i9:i10:i11:i12:i13:f1:f2:f3:f4:f5:f6:f7:f8:f9:f10:f11:f12:f13:f14:f15];
testassert(state == 5);
testassert(result == LFP_RESULT);
state = 105;
return result;
}
+-(vector_ulong2)IMPL_vecret:
+ (vector_ulong2)v1 :(vector_ulong2)v2 :(vector_ulong2)v3 :(vector_ulong2)v4 :(vector_ulong2)v5 :(vector_ulong2)v6 :(vector_ulong2)v7 :(vector_ulong2)v8 :(int)i1 :(int)i2 :(int)i3 :(int)i4 :(int)i5 :(int)i6 :(int)i7 :(int)i8 :(int)i9 :(int)i10 :(int)i11 :(int)i12 :(int)i13 :(double)f1 :(double)f2 :(double)f3 :(double)f4 :(double)f5 :(double)f6 :(double)f7 :(double)f8 :(double)f9 :(double)f10 :(double)f11 :(double)f12 :(double)f13 :(double)f14 :(double)f15
+{
+ vector_ulong2 result;
+ CHECK_ARGS(vecret);
+ state = 100;
+ result = [super vecret:v1:v2:v3:v4:v5:v6:v7:v8:i1:i2:i3:i4:i5:i6:i7:i8:i9:i10:i11:i12:i13:f1:f2:f3:f4:f5:f6:f7:f8:f9:f10:f11:f12:f13:f14:f15];
+ testassert(state == 6);
+ testassert(vector_equal(result, VEC_RESULT));
+ state = 106;
+ return result;
+}
+
-(id)idret_noarg
{
return result;
}
+-(vector_ulong2)vecret_noarg
+{
+ vector_ulong2 result;
+ CHECK_ARGS_NOARG(vecret);
+ state = 100;
+ result = [super vecret_noarg];
+ testassert(state == 16);
+ testassert(vector_equal(result, VEC_RESULT));
+ state = 116;
+ return result;
+}
+
@end
void test_dw(const char *name, id sub, id tagged, bool stret,
int uncaughtAllowed)
{
- SEL sel = @selector(a);
testprintf("DWARF FOR %s%s\n", name, stret ? " (stret)" : "");
// sel_registerName() never returns those alignments because they
// differ from malloc's alignment. So we create lots of compiled-in
// SELs here and hope something fits.
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wundeclared-selector"
+ SEL sel = @selector(a);
SEL lotsOfSels[] = {
@selector(a1), @selector(a2), @selector(a3), @selector(a4),
@selector(a5), @selector(a6), @selector(a7), @selector(a8),
@selector(CCCa), @selector(CCCb), @selector(CCCc), @selector(CCCd),
@selector(CCCe), @selector(CCCf), @selector(CCCg), @selector(CCCh),
};
+#pragma clang diagnostic pop
+
+ {
+ IMP imp = stret ? (IMP)test_dw_forward_stret : (IMP)test_dw_forward;
+ Class cls = object_getClass(sub);
+ Class tagcls = object_getClass(tagged);
+ class_replaceMethod(cls, sel, imp, "");
+ class_replaceMethod(tagcls, sel, imp, "");
+ for (size_t i = 0; i < sizeof(lotsOfSels)/sizeof(lotsOfSels[0]); i++) {
+ class_replaceMethod(cls, lotsOfSels[i], imp, "");
+ class_replaceMethod(tagcls, lotsOfSels[i], imp, "");
+ }
+ }
+
#define ALIGNCOUNT 16
SEL sels[ALIGNCOUNT][2] = {{0}};
for (int align = 0; align < ALIGNCOUNT; align++) {
// implementation's cache scan direction
_objc_flush_caches(cache_cls);
- for (int x2 = 0; x2 < 1; x2++) {
+ for (int x2 = 0; x2 < 8; x2++) {
for (int s = 0; s < 4; s++) {
int align = (a+s) % ALIGNCOUNT;
CALLIT(sub_arg, sels[align][0], sels[align][0], fn, stret);
}
_objc_flush_caches(cache_cls);
- for (int x2 = 0; x2 < 1; x2++) {
+ for (int x2 = 0; x2 < 8; x2++) {
for (int s = 0; s < 4; s++) {
int align = abs(a-s) % ALIGNCOUNT;
CALLIT(sub_arg, sels[align][0], sels[align][0], fn, stret);
struct stret stretval;
double fpval;
long double lfpval;
+ vector_ulong2 vecval;
// message uncached
// message uncached long long
testprintf("idret\n");
state = 0;
idval = nil;
- idval = [receiver idret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
+ idval = [receiver idret :VEC1:VEC2:VEC3:VEC4:VEC5:VEC6:VEC7:VEC8:1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
testassert(state == 101);
testassert(idval == ID_RESULT);
testprintf("llret\n");
llval = 0;
- llval = [receiver llret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
+ llval = [receiver llret :VEC1:VEC2:VEC3:VEC4:VEC5:VEC6:VEC7:VEC8:1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
testassert(state == 102);
testassert(llval == LL_RESULT);
testprintf("stret\n");
stretval = zero;
- stretval = [receiver stret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
+ stretval = [receiver stret :VEC1:VEC2:VEC3:VEC4:VEC5:VEC6:VEC7:VEC8:1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
testassert(state == 103);
testassert(stret_equal(stretval, STRET_RESULT));
testprintf("fpret\n");
fpval = 0;
- fpval = [receiver fpret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
+ fpval = [receiver fpret :VEC1:VEC2:VEC3:VEC4:VEC5:VEC6:VEC7:VEC8:1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
testassert(state == 104);
testassert(fpval == FP_RESULT);
testprintf("lfpret\n");
lfpval = 0;
- lfpval = [receiver lfpret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
+ lfpval = [receiver lfpret :VEC1:VEC2:VEC3:VEC4:VEC5:VEC6:VEC7:VEC8:1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
testassert(state == 105);
testassert(lfpval == LFP_RESULT);
+
+ testprintf("vecret\n");
+ vecval = 0;
+ vecval = [receiver vecret :VEC1:VEC2:VEC3:VEC4:VEC5:VEC6:VEC7:VEC8:1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
+ testassert(state == 106);
+ testassert(vector_equal(vecval, VEC_RESULT));
#if __OBJC2__
// explicitly call noarg messenger, even if compiler doesn't emit it
testassert(state == 111);
testassert(idval == ID_RESULT);
- llval = 0;
testprintf("llret noarg\n");
+ llval = 0;
llval = ((typeof(llmsg0))objc_msgSend_noarg)(receiver, @selector(llret_noarg));
testassert(state == 112);
testassert(llval == LL_RESULT);
no objc_msgSend_stret_noarg
stretval = zero;
stretval = ((typeof(stretmsg0))objc_msgSend_stret_noarg)(receiver, @selector(stret_noarg));
- stretval = [receiver stret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
+ stretval = [receiver stret :VEC1:VEC2:VEC3:VEC4:VEC5:VEC6:VEC7:VEC8:1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
testassert(state == 113);
testassert(stret_equal(stretval, STRET_RESULT));
*/
fpval = ((typeof(fpmsg0))objc_msgSend_noarg)(receiver, @selector(fpret_noarg));
testassert(state == 114);
testassert(fpval == FP_RESULT);
+
+ testprintf("vecret noarg\n");
+ vecval = 0;
+ vecval = ((typeof(vecmsg0))objc_msgSend_noarg)(receiver, @selector(vecret_noarg));
+ testassert(state == 116);
+ testassert(vector_equal(vecval, VEC_RESULT));
# endif
# if !__i386__ && !__x86_64__
testprintf("lfpret noarg\n");
struct stret stretval;
double fpval;
long double lfpval;
+ vector_ulong2 vecval;
#if __x86_64__
struct stret *stretptr;
Method stretmethod;
Method fpmethod;
Method lfpmethod;
-
- id (*idfn)(id, Method, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double);
- long long (*llfn)(id, Method, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double);
- struct stret (*stretfn)(id, Method, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double);
- double (*fpfn)(id, Method, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double);
- long double (*lfpfn)(id, Method, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double);
-
- id (*idmsg)(id, SEL, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double) __attribute__((unused));
- id (*idmsgsuper)(struct objc_super *, SEL, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double) __attribute__((unused));
- long long (*llmsg)(id, SEL, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double) __attribute__((unused));
- struct stret (*stretmsg)(id, SEL, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double) __attribute__((unused));
- struct stret (*stretmsgsuper)(struct objc_super *, SEL, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double) __attribute__((unused));
- double (*fpmsg)(id, SEL, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double) __attribute__((unused));
- long double (*lfpmsg)(id, SEL, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double) __attribute__((unused));
+ Method vecmethod;
+
+ id (*idfn)(id, Method, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double);
+ long long (*llfn)(id, Method, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double);
+ struct stret (*stretfn)(id, Method, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double);
+ double (*fpfn)(id, Method, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double);
+ long double (*lfpfn)(id, Method, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double);
+ vector_ulong2 (*vecfn)(id, Method, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double);
+
+ id (*idmsg)(id, SEL, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double) __attribute__((unused));
+ id (*idmsgsuper)(struct objc_super *, SEL, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double) __attribute__((unused));
+ long long (*llmsg)(id, SEL, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double) __attribute__((unused));
+ struct stret (*stretmsg)(id, SEL, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double) __attribute__((unused));
+ struct stret (*stretmsgsuper)(struct objc_super *, SEL, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double) __attribute__((unused));
+ double (*fpmsg)(id, SEL, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double) __attribute__((unused));
+ long double (*lfpmsg)(id, SEL, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double) __attribute__((unused));
+ vector_ulong2 (*vecmsg)(id, SEL, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double) __attribute__((unused));
// get +initialize out of the way
[Sub class];
test_basic(tagged);
#endif
- idmethod = class_getInstanceMethod([Super class], @selector(idret::::::::::::::::::::::::::::));
+ idmethod = class_getInstanceMethod([Super class], @selector(idret::::::::::::::::::::::::::::::::::::));
testassert(idmethod);
- llmethod = class_getInstanceMethod([Super class], @selector(llret::::::::::::::::::::::::::::));
+ llmethod = class_getInstanceMethod([Super class], @selector(llret::::::::::::::::::::::::::::::::::::));
testassert(llmethod);
- stretmethod = class_getInstanceMethod([Super class], @selector(stret::::::::::::::::::::::::::::));
+ stretmethod = class_getInstanceMethod([Super class], @selector(stret::::::::::::::::::::::::::::::::::::));
testassert(stretmethod);
- fpmethod = class_getInstanceMethod([Super class], @selector(fpret::::::::::::::::::::::::::::));
+ fpmethod = class_getInstanceMethod([Super class], @selector(fpret::::::::::::::::::::::::::::::::::::));
testassert(fpmethod);
- lfpmethod = class_getInstanceMethod([Super class], @selector(lfpret::::::::::::::::::::::::::::));
+ lfpmethod = class_getInstanceMethod([Super class], @selector(lfpret::::::::::::::::::::::::::::::::::::));
testassert(lfpmethod);
+ vecmethod = class_getInstanceMethod([Super class], @selector(vecret::::::::::::::::::::::::::::::::::::));
+ testassert(vecmethod);
- idfn = (id (*)(id, Method, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double)) method_invoke;
- llfn = (long long (*)(id, Method, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double)) method_invoke;
- stretfn = (struct stret (*)(id, Method, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double)) method_invoke_stret;
- fpfn = (double (*)(id, Method, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double)) method_invoke;
- lfpfn = (long double (*)(id, Method, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double)) method_invoke;
+ idfn = (id (*)(id, Method, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double)) method_invoke;
+ llfn = (long long (*)(id, Method, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double)) method_invoke;
+ stretfn = (struct stret (*)(id, Method, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double)) method_invoke_stret;
+ fpfn = (double (*)(id, Method, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double)) method_invoke;
+ lfpfn = (long double (*)(id, Method, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double)) method_invoke;
+ vecfn = (vector_ulong2 (*)(id, Method, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, vector_ulong2, int, int, int, int, int, int, int, int, int, int, int, int, int, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double)) method_invoke;
// cached message performance
// catches failure to cache or (abi=2) failure to fixup (#5584187)
[sub stret_nop];
[sub fpret_nop];
[sub lfpret_nop];
+ [sub vecret_nop];
[sub voidret_nop];
[sub voidret_nop2];
[sub llret_nop];
[sub stret_nop];
[sub fpret_nop];
[sub lfpret_nop];
+ [sub vecret_nop];
[sub voidret_nop];
[sub voidret_nop2];
[sub llret_nop];
[sub stret_nop];
[sub fpret_nop];
[sub lfpret_nop];
+ [sub vecret_nop];
// Some of these times have high variance on some compilers.
// The errors we're trying to catch should be catastrophically slow,
}
totalTime = mach_absolute_time() - startTime;
timecheck("stret ", totalTime, targetTime * 0.7, targetTime * 5.0);
-
+
startTime = mach_absolute_time();
ALIGN_();
for (i = 0; i < COUNT; i++) {
}
totalTime = mach_absolute_time() - startTime;
timecheck("fpret ", totalTime, targetTime * 0.7, targetTime * 4.0);
-
+
startTime = mach_absolute_time();
ALIGN_();
for (i = 0; i < COUNT; i++) {
totalTime = mach_absolute_time() - startTime;
timecheck("lfpret", totalTime, targetTime * 0.7, targetTime * 4.0);
+ startTime = mach_absolute_time();
+ ALIGN_();
+ for (i = 0; i < COUNT; i++) {
+ [sub vecret_nop];
+ }
+ totalTime = mach_absolute_time() - startTime;
+ timecheck("vecret", totalTime, targetTime * 0.7, targetTime * 4.0);
+
#if __arm64__
// Removing this testwarn(), or changing voidret_nop to nop;ret,
// changes the voidret_nop and stret_nop times above by a factor of 2.
state = 0;
idval = nil;
- idval = (*idfn)(sup, idmethod, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ idval = (*idfn)(sup, idmethod, VEC1, VEC2, VEC3, VEC4, VEC5, VEC6, VEC7, VEC8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
testassert(state == 1);
testassert(idval == ID_RESULT);
-
+
llval = 0;
- llval = (*llfn)(sup, llmethod, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ llval = (*llfn)(sup, llmethod, VEC1, VEC2, VEC3, VEC4, VEC5, VEC6, VEC7, VEC8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
testassert(state == 2);
testassert(llval == LL_RESULT);
-
+
stretval = zero;
- stretval = (*stretfn)(sup, stretmethod, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ stretval = (*stretfn)(sup, stretmethod, VEC1, VEC2, VEC3, VEC4, VEC5, VEC6, VEC7, VEC8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
testassert(state == 3);
testassert(stret_equal(stretval, STRET_RESULT));
-
+
fpval = 0;
- fpval = (*fpfn)(sup, fpmethod, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ fpval = (*fpfn)(sup, fpmethod, VEC1, VEC2, VEC3, VEC4, VEC5, VEC6, VEC7, VEC8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
testassert(state == 4);
testassert(fpval == FP_RESULT);
-
+
lfpval = 0;
- lfpval = (*lfpfn)(sup, lfpmethod, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ lfpval = (*lfpfn)(sup, lfpmethod, VEC1, VEC2, VEC3, VEC4, VEC5, VEC6, VEC7, VEC8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
testassert(state == 5);
testassert(lfpval == LFP_RESULT);
+ vecval = 0;
+ vecval = (*vecfn)(sup, vecmethod, VEC1, VEC2, VEC3, VEC4, VEC5, VEC6, VEC7, VEC8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ testassert(state == 6);
+ testassert(vector_equal(vecval, VEC_RESULT));
+
// message to nil
// message to nil long long
state = 0;
idval = ID_RESULT;
- idval = [(id)NIL_RECEIVER idret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
+ idval = [(id)NIL_RECEIVER idret :VEC1:VEC2:VEC3:VEC4:VEC5:VEC6:VEC7:VEC8:1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
testassert(state == 0);
testassert(idval == nil);
state = 0;
llval = LL_RESULT;
- llval = [(id)NIL_RECEIVER llret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
+ llval = [(id)NIL_RECEIVER llret :VEC1:VEC2:VEC3:VEC4:VEC5:VEC6:VEC7:VEC8:1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
testassert(state == 0);
testassert(llval == 0LL);
state = 0;
stretval = zero;
- stretval = [(id)NIL_RECEIVER stret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
+ stretval = [(id)NIL_RECEIVER stret :VEC1:VEC2:VEC3:VEC4:VEC5:VEC6:VEC7:VEC8:1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
testassert(state == 0);
#if __clang__
testassert(0 == memcmp(&stretval, &zero, sizeof(stretval)));
state = 0;
fpval = FP_RESULT;
- fpval = [(id)NIL_RECEIVER fpret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
+ fpval = [(id)NIL_RECEIVER fpret :VEC1:VEC2:VEC3:VEC4:VEC5:VEC6:VEC7:VEC8:1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
testassert(state == 0);
testassert(fpval == 0.0);
state = 0;
lfpval = LFP_RESULT;
- lfpval = [(id)NIL_RECEIVER lfpret :1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
+ lfpval = [(id)NIL_RECEIVER lfpret :VEC1:VEC2:VEC3:VEC4:VEC5:VEC6:VEC7:VEC8:1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
testassert(state == 0);
testassert(lfpval == 0.0);
+
+ state = 0;
+ vecval = VEC_RESULT;
+ vecval = [(id)NIL_RECEIVER vecret :VEC1:VEC2:VEC3:VEC4:VEC5:VEC6:VEC7:VEC8:1:2:3:4:5:6:7:8:9:10:11:12:13:1.0:2.0:3.0:4.0:5.0:6.0:7.0:8.0:9.0:10.0:11.0:12.0:13.0:14.0:15.0];
+ testassert(state == 0);
+ testassert(vector_all(vecval == 0));
// message to nil, different struct types
// This verifies that ordinary objc_msgSend() erases enough registers
fpval = ((typeof(fpmsg0))objc_msgSend_noarg)(nil, @selector(fpret_noarg));
testassert(state == 0);
testassert(fpval == 0.0);
+
+ state = 0;
+ vecval = VEC_RESULT;
+ vecval = ((typeof(vecmsg0))objc_msgSend_noarg)(nil, @selector(vecret_noarg));
+ testassert(state == 0);
+ testassert(vector_all(vecval == 0));
# endif
# if !__i386__ && !__x86_64__
state = 0;
state = 100;
idval = nil;
- idval = ((id(*)(struct objc_super *, SEL, int,int,int,int,int,int,int,int,int,int,int,int,int, double,double,double,double,double,double,double,double,double,double,double,double,double,double,double))objc_msgSendSuper2) (&sup_st, @selector(idret::::::::::::::::::::::::::::), 1,2,3,4,5,6,7,8,9,10,11,12,13, 1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0);
+ idval = ((id(*)(struct objc_super *, SEL, vector_ulong2,vector_ulong2,vector_ulong2,vector_ulong2,vector_ulong2,vector_ulong2,vector_ulong2,vector_ulong2, int,int,int,int,int,int,int,int,int,int,int,int,int, double,double,double,double,double,double,double,double,double,double,double,double,double,double,double))objc_msgSendSuper2) (&sup_st, @selector(idret::::::::::::::::::::::::::::::::::::), VEC1,VEC2,VEC3,VEC4,VEC5,VEC6,VEC7,VEC8, 1,2,3,4,5,6,7,8,9,10,11,12,13, 1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0);
testassert(state == 1);
testassert(idval == ID_RESULT);
testassert(sup_st.receiver == sub);
state = 100;
stretval = zero;
- stretval = ((struct stret(*)(struct objc_super *, SEL, int,int,int,int,int,int,int,int,int,int,int,int,int, double,double,double,double,double,double,double,double,double,double,double,double,double,double,double))objc_msgSendSuper2_stret) (&sup_st, @selector(stret::::::::::::::::::::::::::::), 1,2,3,4,5,6,7,8,9,10,11,12,13, 1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0);
+ stretval = ((struct stret(*)(struct objc_super *, SEL, vector_ulong2,vector_ulong2,vector_ulong2,vector_ulong2,vector_ulong2,vector_ulong2,vector_ulong2,vector_ulong2, int,int,int,int,int,int,int,int,int,int,int,int,int, double,double,double,double,double,double,double,double,double,double,double,double,double,double,double))objc_msgSendSuper2_stret) (&sup_st, @selector(stret::::::::::::::::::::::::::::::::::::), VEC1,VEC2,VEC3,VEC4,VEC5,VEC6,VEC7,VEC8, 1,2,3,4,5,6,7,8,9,10,11,12,13, 1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0);
testassert(state == 3);
testassert(stret_equal(stretval, STRET_RESULT));
testassert(sup_st.receiver == sub);
state = 0;
idmsg = (typeof(idmsg))objc_msgSend_debug;
idval = nil;
- idval = (*idmsg)(sub, @selector(idret::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ idval = (*idmsg)(sub, @selector(idret::::::::::::::::::::::::::::::::::::), VEC1, VEC2, VEC3, VEC4, VEC5, VEC6, VEC7, VEC8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
testassert(state == 101);
testassert(idval == ID_RESULT);
state = 0;
llmsg = (typeof(llmsg))objc_msgSend_debug;
llval = 0;
- llval = (*llmsg)(sub, @selector(llret::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ llval = (*llmsg)(sub, @selector(llret::::::::::::::::::::::::::::::::::::), VEC1, VEC2, VEC3, VEC4, VEC5, VEC6, VEC7, VEC8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
testassert(state == 102);
testassert(llval == LL_RESULT);
state = 0;
stretmsg = (typeof(stretmsg))objc_msgSend_stret_debug;
stretval = zero;
- stretval = (*stretmsg)(sub, @selector(stret::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ stretval = (*stretmsg)(sub, @selector(stret::::::::::::::::::::::::::::::::::::), VEC1, VEC2, VEC3, VEC4, VEC5, VEC6, VEC7, VEC8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
testassert(state == 103);
testassert(stret_equal(stretval, STRET_RESULT));
sup_st.super_class = object_getClass(sub);
idmsgsuper = (typeof(idmsgsuper))objc_msgSendSuper2_debug;
idval = nil;
- idval = (*idmsgsuper)(&sup_st, @selector(idret::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ idval = (*idmsgsuper)(&sup_st, @selector(idret::::::::::::::::::::::::::::::::::::), VEC1, VEC2, VEC3, VEC4, VEC5, VEC6, VEC7, VEC8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
testassert(state == 1);
testassert(idval == ID_RESULT);
sup_st.super_class = object_getClass(sub);
stretmsgsuper = (typeof(stretmsgsuper))objc_msgSendSuper2_stret_debug;
stretval = zero;
- stretval = (*stretmsgsuper)(&sup_st, @selector(stret::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ stretval = (*stretmsgsuper)(&sup_st, @selector(stret::::::::::::::::::::::::::::::::::::), VEC1, VEC2, VEC3, VEC4, VEC5, VEC6, VEC7, VEC8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
testassert(state == 3);
testassert(stret_equal(stretval, STRET_RESULT));
state = 0;
fpmsg = (typeof(fpmsg))objc_msgSend_fpret_debug;
fpval = 0;
- fpval = (*fpmsg)(sub, @selector(fpret::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ fpval = (*fpmsg)(sub, @selector(fpret::::::::::::::::::::::::::::::::::::), VEC1, VEC2, VEC3, VEC4, VEC5, VEC6, VEC7, VEC8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
testassert(state == 104);
testassert(fpval == FP_RESULT);
#endif
state = 0;
lfpmsg = (typeof(lfpmsg))objc_msgSend_fpret_debug;
lfpval = 0;
- lfpval = (*lfpmsg)(sub, @selector(lfpret::::::::::::::::::::::::::::), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
+ lfpval = (*lfpmsg)(sub, @selector(lfpret::::::::::::::::::::::::::::::::::::), VEC1, VEC2, VEC3, VEC4, VEC5, VEC6, VEC7, VEC8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0);
testassert(state == 105);
testassert(lfpval == LFP_RESULT);
objc_setForwardHandler((void*)test_dw_forward, (void*)test_dw_forward_stret);
# if __x86_64__
- test_dw("objc_msgSend", dw, tagged, false, 2);
- test_dw("objc_msgSend_stret", dw, tagged, true, 4);
- test_dw("objc_msgSend_fpret", dw, tagged, false, 2);
- test_dw("objc_msgSend_fp2ret", dw, tagged, false, 2);
- test_dw("objc_msgSendSuper", dw, tagged, false, 2);
- test_dw("objc_msgSendSuper2", dw, tagged, false, 2);
- test_dw("objc_msgSendSuper_stret", dw, tagged, true, 4);
- test_dw("objc_msgSendSuper2_stret", dw, tagged, true, 4);
+ test_dw("objc_msgSend", dw, tagged, false, 0);
+ test_dw("objc_msgSend_stret", dw, tagged, true, 0);
+ test_dw("objc_msgSend_fpret", dw, tagged, false, 0);
+ test_dw("objc_msgSend_fp2ret", dw, tagged, false, 0);
+ test_dw("objc_msgSendSuper", dw, tagged, false, 0);
+ test_dw("objc_msgSendSuper2", dw, tagged, false, 0);
+ test_dw("objc_msgSendSuper_stret", dw, tagged, true, 0);
+ test_dw("objc_msgSendSuper2_stret", dw, tagged, true, 0);
# elif __i386__
- test_dw("objc_msgSend", dw, dw, false, 10);
- test_dw("objc_msgSend_stret", dw, dw, true, 10);
- test_dw("objc_msgSend_fpret", dw, dw, false, 10);
- test_dw("objc_msgSendSuper", dw, dw, false, 10);
- test_dw("objc_msgSendSuper2", dw, dw, false, 10);
- test_dw("objc_msgSendSuper_stret", dw, dw, true, 10);
- test_dw("objc_msgSendSuper2_stret", dw, dw, true, 10);
+ test_dw("objc_msgSend", dw, dw, false, 0);
+ test_dw("objc_msgSend_stret", dw, dw, true, 0);
+ test_dw("objc_msgSend_fpret", dw, dw, false, 0);
+ test_dw("objc_msgSendSuper", dw, dw, false, 0);
+ test_dw("objc_msgSendSuper2", dw, dw, false, 0);
+ test_dw("objc_msgSendSuper_stret", dw, dw, true, 0);
+ test_dw("objc_msgSendSuper2_stret", dw, dw, true, 0);
# elif __arm64__
- test_dw("objc_msgSend", dw, tagged, false, 2);
- test_dw("objc_msgSendSuper", dw, tagged, false, 2);
- test_dw("objc_msgSendSuper2", dw, tagged, false, 2);
+ test_dw("objc_msgSend", dw, tagged, false, 1);
+ test_dw("objc_msgSendSuper", dw, tagged, false, 1);
+ test_dw("objc_msgSendSuper2", dw, tagged, false, 1);
# else
# error unknown architecture
# endif
#if SUPPORT_NONPOINTER_ISA
# if __x86_64__
-# define RC_ONE (1ULL<<50)
+# define RC_ONE (1ULL<<56)
# elif __arm64__
# define RC_ONE (1ULL<<45)
# else
#include <objc/NSObject.h>
@interface Test : NSObject {
- char bytes[16-sizeof(void*)];
+@public
+ char bytes[32-sizeof(void*)];
}
@end
@implementation Test
int main()
{
- id o1 = [Test new];
- id o2 = object_copy(o1, 16);
- testassert(malloc_size(o1) == 16);
+ Test *o0 = [Test new];
+ [o0 retain];
+ Test *o1 = class_createInstance([Test class], 32);
+ [o1 retain];
+ id o2 = object_copy(o0, 0);
+ id o3 = object_copy(o1, 0);
+ id o4 = object_copy(o1, 32);
+ testassert(malloc_size(o0) == 32);
+ testassert(malloc_size(o1) == 64);
testassert(malloc_size(o2) == 32);
+ testassert(malloc_size(o3) == 32);
+ testassert(malloc_size(o4) == 64);
+ if (!objc_collecting_enabled()) {
+ testassert([o0 retainCount] == 2);
+ testassert([o1 retainCount] == 2);
+ testassert([o2 retainCount] == 1);
+ testassert([o3 retainCount] == 1);
+ testassert([o4 retainCount] == 1);
+ }
succeed(__FILE__);
}
--- /dev/null
+/*
+TEST_CFLAGS -Xlinker -sectcreate -Xlinker __DATA -Xlinker __objc_rawisa -Xlinker /dev/null
+TEST_ENV OBJC_PRINT_RAW_ISA=YES
+
+TEST_RUN_OUTPUT
+objc\[\d+\]: RAW ISA: disabling non-pointer isa because the app has a __DATA,__objc_rawisa section
+(.* RAW ISA: .*\n)*
+OK: rawisa.m
+OR
+(.* RAW ISA: .*\n)*
+no __DATA,__rawisa support
+OK: rawisa.m
+END
+*/
+
+#include "test.h"
+
+int main()
+{
+ fprintf(stderr, "\n");
+#if ! (SUPPORT_NONPOINTER_ISA && TARGET_OS_MAC && !TARGET_OS_IPHONE)
+ // only 64-bit Mac supports this
+ fprintf(stderr, "no __DATA,__rawisa support\n");
+#endif
+ succeed(__FILE__);
+}
+
int main()
{
- succeed(__FILE__)
+ succeed(__FILE__);
}
#else
// TEST_CFLAGS -Os
#include "test.h"
+#include "testroot.i"
#if __i386__
#include <objc/objc-abi.h>
#include <Foundation/Foundation.h>
-static int did_dealloc;
+@interface TestObject : TestRoot @end
+@implementation TestObject @end
-@interface TestObject : NSObject
-@end
-@implementation TestObject
--(void)dealloc
-{
- did_dealloc = 1;
- [super dealloc];
-}
-@end
-// rdar://9319305 clang transforms objc_retainAutoreleasedReturnValue()
-// into objc_retain() sometimes
-extern id objc_retainAutoreleasedReturnValue(id obj) __asm__("_objc_retainAutoreleasedReturnValue");
+#ifdef __arm__
+# define MAGIC asm volatile("mov r7, r7")
+# define NOT_MAGIC asm volatile("mov r6, r6")
+#elif __arm64__
+# define MAGIC asm volatile("mov x29, x29")
+# define NOT_MAGIC asm volatile("mov x28, x28")
+#elif __x86_64__
+# define MAGIC asm volatile("")
+# define NOT_MAGIC asm volatile("nop")
+#else
+# error unknown architecture
+#endif
+
int
main()
PUSH_POOL {
TestObject *warm_up = [[TestObject alloc] init];
testassert(warm_up);
- warm_up = objc_retainAutoreleasedReturnValue(_objc_rootAutorelease(warm_up));
+ warm_up = objc_retainAutoreleasedReturnValue(warm_up);
+ warm_up = objc_unsafeClaimAutoreleasedReturnValue(warm_up);
[warm_up release];
warm_up = nil;
} POP_POOL;
#endif
- testprintf("Successful return autorelease handshake\n");
+ testprintf(" Successful +1 -> +1 handshake\n");
PUSH_POOL {
obj = [[TestObject alloc] init];
testassert(obj);
- did_dealloc = 0;
- tmp = _objc_rootAutorelease(obj);
-#ifdef __arm__
- asm volatile("mov r7, r7");
-#elif __arm64__
- asm volatile("mov fp, fp");
-#elif __x86_64__
- // nothing to do
-#else
-#error unknown architecture
-#endif
+ TestRootRetain = 0;
+ TestRootRelease = 0;
+ TestRootAutorelease = 0;
+ TestRootDealloc = 0;
+
+ tmp = objc_autoreleaseReturnValue(obj);
+ MAGIC;
tmp = objc_retainAutoreleasedReturnValue(tmp);
- testassert(!did_dealloc);
+
+ testassert(TestRootDealloc == 0);
+ testassert(TestRootRetain == 0);
+ testassert(TestRootRelease == 0);
+ testassert(TestRootAutorelease == 0);
- did_dealloc = 0;
[tmp release];
- testassert(did_dealloc);
+ testassert(TestRootDealloc == 1);
+ testassert(TestRootRetain == 0);
+ testassert(TestRootRelease == 1);
+ testassert(TestRootAutorelease == 0);
+
+ } POP_POOL;
+
+ testprintf("Unsuccessful +1 -> +1 handshake\n");
+
+ PUSH_POOL {
+ obj = [[TestObject alloc] init];
+ testassert(obj);
+
+ TestRootRetain = 0;
+ TestRootRelease = 0;
+ TestRootAutorelease = 0;
+ TestRootDealloc = 0;
+
+ tmp = objc_autoreleaseReturnValue(obj);
+ NOT_MAGIC;
+ tmp = objc_retainAutoreleasedReturnValue(tmp);
+
+ testassert(TestRootDealloc == 0);
+ testassert(TestRootRetain == 1);
+ testassert(TestRootRelease == 0);
+ testassert(TestRootAutorelease == 1);
- did_dealloc = 0;
+ [tmp release];
+ testassert(TestRootDealloc == 0);
+ testassert(TestRootRetain == 1);
+ testassert(TestRootRelease == 1);
+ testassert(TestRootAutorelease == 1);
+
} POP_POOL;
- testassert(!did_dealloc);
+ testassert(TestRootDealloc == 1);
+ testassert(TestRootRetain == 1);
+ testassert(TestRootRelease == 2);
+ testassert(TestRootAutorelease == 1);
+
+
+ testprintf(" Successful +0 -> +1 handshake\n");
+ PUSH_POOL {
+ obj = [[TestObject alloc] init];
+ testassert(obj);
+
+ TestRootRetain = 0;
+ TestRootRelease = 0;
+ TestRootAutorelease = 0;
+ TestRootDealloc = 0;
+
+ tmp = objc_retainAutoreleaseReturnValue(obj);
+ MAGIC;
+ tmp = objc_retainAutoreleasedReturnValue(tmp);
+
+ testassert(TestRootDealloc == 0);
+ testassert(TestRootRetain == 1);
+ testassert(TestRootRelease == 0);
+ testassert(TestRootAutorelease == 0);
+
+ [tmp release];
+ testassert(TestRootDealloc == 0);
+ testassert(TestRootRetain == 1);
+ testassert(TestRootRelease == 1);
+ testassert(TestRootAutorelease == 0);
+
+ [tmp release];
+ testassert(TestRootDealloc == 1);
+ testassert(TestRootRetain == 1);
+ testassert(TestRootRelease == 2);
+ testassert(TestRootAutorelease == 0);
+
+ } POP_POOL;
- testprintf("Failed return autorelease handshake\n");
+ testprintf("Unsuccessful +0 -> +1 handshake\n");
PUSH_POOL {
obj = [[TestObject alloc] init];
testassert(obj);
- did_dealloc = 0;
- tmp = _objc_rootAutorelease(obj);
-#ifdef __arm__
- asm volatile("mov r6, r6");
-#elif __arm64__
- asm volatile("mov x6, x6");
-#elif __x86_64__
- asm volatile("mov %rdi, %rdi");
-#else
-#error unknown architecture
-#endif
+ TestRootRetain = 0;
+ TestRootRelease = 0;
+ TestRootAutorelease = 0;
+ TestRootDealloc = 0;
+
+ tmp = objc_retainAutoreleaseReturnValue(obj);
+ NOT_MAGIC;
tmp = objc_retainAutoreleasedReturnValue(tmp);
- testassert(!did_dealloc);
+
+ testassert(TestRootDealloc == 0);
+ testassert(TestRootRetain == 2);
+ testassert(TestRootRelease == 0);
+ testassert(TestRootAutorelease == 1);
- did_dealloc = 0;
[tmp release];
- testassert(!did_dealloc);
+ testassert(TestRootDealloc == 0);
+ testassert(TestRootRetain == 2);
+ testassert(TestRootRelease == 1);
+ testassert(TestRootAutorelease == 1);
+
+ [tmp release];
+ testassert(TestRootDealloc == 0);
+ testassert(TestRootRetain == 2);
+ testassert(TestRootRelease == 2);
+ testassert(TestRootAutorelease == 1);
+
+ } POP_POOL;
+ testassert(TestRootDealloc == 1);
+ testassert(TestRootRetain == 2);
+ testassert(TestRootRelease == 3);
+ testassert(TestRootAutorelease == 1);
+
+
+ testprintf(" Successful +1 -> +0 handshake\n");
+
+ PUSH_POOL {
+ obj = [[[TestObject alloc] init] retain];
+ testassert(obj);
+
+ TestRootRetain = 0;
+ TestRootRelease = 0;
+ TestRootAutorelease = 0;
+ TestRootDealloc = 0;
+
+ tmp = objc_autoreleaseReturnValue(obj);
+ MAGIC;
+ tmp = objc_unsafeClaimAutoreleasedReturnValue(tmp);
+
+ testassert(TestRootDealloc == 0);
+ testassert(TestRootRetain == 0);
+ testassert(TestRootRelease == 1);
+ testassert(TestRootAutorelease == 0);
- did_dealloc = 0;
+ [tmp release];
+ testassert(TestRootDealloc == 1);
+ testassert(TestRootRetain == 0);
+ testassert(TestRootRelease == 2);
+ testassert(TestRootAutorelease == 0);
+
+ } POP_POOL;
+
+ testprintf("Unsuccessful +1 -> +0 handshake\n");
+
+ PUSH_POOL {
+ obj = [[[TestObject alloc] init] retain];
+ testassert(obj);
+
+ TestRootRetain = 0;
+ TestRootRelease = 0;
+ TestRootAutorelease = 0;
+ TestRootDealloc = 0;
+
+ tmp = objc_autoreleaseReturnValue(obj);
+ NOT_MAGIC;
+ tmp = objc_unsafeClaimAutoreleasedReturnValue(tmp);
+
+ testassert(TestRootDealloc == 0);
+ testassert(TestRootRetain == 0);
+ testassert(TestRootRelease == 0);
+ testassert(TestRootAutorelease == 1);
+
+ [tmp release];
+ testassert(TestRootDealloc == 0);
+ testassert(TestRootRetain == 0);
+ testassert(TestRootRelease == 1);
+ testassert(TestRootAutorelease == 1);
+
} POP_POOL;
- testassert(did_dealloc);
+ testassert(TestRootDealloc == 1);
+ testassert(TestRootRetain == 0);
+ testassert(TestRootRelease == 2);
+ testassert(TestRootAutorelease == 1);
+ testprintf(" Successful +0 -> +0 handshake\n");
+
+ PUSH_POOL {
+ obj = [[TestObject alloc] init];
+ testassert(obj);
+
+ TestRootRetain = 0;
+ TestRootRelease = 0;
+ TestRootAutorelease = 0;
+ TestRootDealloc = 0;
+
+ tmp = objc_retainAutoreleaseReturnValue(obj);
+ MAGIC;
+ tmp = objc_unsafeClaimAutoreleasedReturnValue(tmp);
+
+ testassert(TestRootDealloc == 0);
+ testassert(TestRootRetain == 0);
+ testassert(TestRootRelease == 0);
+ testassert(TestRootAutorelease == 0);
+
+ [tmp release];
+ testassert(TestRootDealloc == 1);
+ testassert(TestRootRetain == 0);
+ testassert(TestRootRelease == 1);
+ testassert(TestRootAutorelease == 0);
+
+ } POP_POOL;
+
+ testprintf("Unsuccessful +0 -> +0 handshake\n");
+
+ PUSH_POOL {
+ obj = [[TestObject alloc] init];
+ testassert(obj);
+
+ TestRootRetain = 0;
+ TestRootRelease = 0;
+ TestRootAutorelease = 0;
+ TestRootDealloc = 0;
+
+ tmp = objc_retainAutoreleaseReturnValue(obj);
+ NOT_MAGIC;
+ tmp = objc_unsafeClaimAutoreleasedReturnValue(tmp);
+
+ testassert(TestRootDealloc == 0);
+ testassert(TestRootRetain == 1);
+ testassert(TestRootRelease == 0);
+ testassert(TestRootAutorelease == 1);
+
+ [tmp release];
+ testassert(TestRootDealloc == 0);
+ testassert(TestRootRetain == 1);
+ testassert(TestRootRelease == 1);
+ testassert(TestRootAutorelease == 1);
+
+ } POP_POOL;
+ testassert(TestRootDealloc == 1);
+ testassert(TestRootRetain == 1);
+ testassert(TestRootRelease == 2);
+ testassert(TestRootAutorelease == 1);
+
succeed(__FILE__);
return 0;
--- /dev/null
+// TEST_CFLAGS -Os -framework Foundation
+// TEST_DISABLED pending clang support for rdar://20530049
+
+#include "test.h"
+#include "testroot.i"
+
+#if __i386__
+
+int main()
+{
+ // no optimization on i386 (neither Mac nor Simulator)
+ succeed(__FILE__);
+}
+
+#else
+
+#include <objc/objc-internal.h>
+#include <objc/objc-abi.h>
+#include <Foundation/Foundation.h>
+
+@interface TestObject : TestRoot @end
+@implementation TestObject @end
+
+
+#ifdef __arm__
+# define MAGIC asm volatile("mov r7, r7")
+# define NOT_MAGIC asm volatile("mov r6, r6")
+#elif __arm64__
+# define MAGIC asm volatile("mov x29, x29")
+# define NOT_MAGIC asm volatile("mov x28, x28")
+#elif __x86_64__
+# define MAGIC asm volatile("")
+# define NOT_MAGIC asm volatile("nop")
+#else
+# error unknown architecture
+#endif
+
+
+@interface Tester : NSObject @end
+@implementation Tester {
+@public
+ id ivar;
+}
+
+-(id) return0 {
+ return ivar;
+}
+-(id) return1 {
+ id x = ivar;
+ [x self];
+ return x;
+}
+
+@end
+
+OBJC_EXPORT
+id
+objc_retainAutoreleasedReturnValue(id obj)
+ __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_5_0);
+
+// Accept a value returned through a +0 autoreleasing convention for use at +0.
+OBJC_EXPORT
+id
+objc_unsafeClaimAutoreleasedReturnValue(id obj)
+ __OSX_AVAILABLE_STARTING(__MAC_10_11, __IPHONE_9_0);
+
+
+int
+main()
+{
+ TestObject *obj;
+ Tester *tt = [Tester new];
+
+#ifdef __x86_64__
+ // need to get DYLD to resolve the stubs on x86
+ PUSH_POOL {
+ TestObject *warm_up = [[TestObject alloc] init];
+ testassert(warm_up);
+ warm_up = objc_retainAutoreleasedReturnValue(warm_up);
+ warm_up = objc_unsafeClaimAutoreleasedReturnValue(warm_up);
+ warm_up = nil;
+ } POP_POOL;
+#endif
+
+ testprintf(" Successful +1 -> +1 handshake\n");
+
+ PUSH_POOL {
+ obj = [[TestObject alloc] init];
+ testassert(obj);
+ tt->ivar = obj;
+ obj = nil;
+
+ TestRootRetain = 0;
+ TestRootRelease = 0;
+ TestRootAutorelease = 0;
+ TestRootDealloc = 0;
+
+ TestObject *tmp = [tt return1];
+
+ testassert(TestRootDealloc == 0);
+ testassert(TestRootRetain == 1);
+ testassert(TestRootRelease == 0);
+ testassert(TestRootAutorelease == 0);
+
+ tt->ivar = nil;
+ testassert(TestRootDealloc == 0);
+ testassert(TestRootRetain == 1);
+ testassert(TestRootRelease == 1);
+ testassert(TestRootAutorelease == 0);
+
+ tmp = nil;
+ testassert(TestRootDealloc == 1);
+ testassert(TestRootRetain == 1);
+ testassert(TestRootRelease == 2);
+ testassert(TestRootAutorelease == 0);
+
+ } POP_POOL;
+
+ testprintf(" Successful +0 -> +0 handshake\n");
+
+ PUSH_POOL {
+ obj = [[TestObject alloc] init];
+ testassert(obj);
+ tt->ivar = obj;
+ obj = nil;
+
+ TestRootRetain = 0;
+ TestRootRelease = 0;
+ TestRootAutorelease = 0;
+ TestRootDealloc = 0;
+
+ __unsafe_unretained TestObject *tmp = [tt return0];
+
+ testassert(TestRootDealloc == 0);
+ testassert(TestRootRetain == 0);
+ testassert(TestRootRelease == 0);
+ testassert(TestRootAutorelease == 0);
+
+ tmp = nil;
+ testassert(TestRootDealloc == 0);
+ testassert(TestRootRetain == 0);
+ testassert(TestRootRelease == 0);
+ testassert(TestRootAutorelease == 0);
+
+ tt->ivar = nil;
+ testassert(TestRootDealloc == 1);
+ testassert(TestRootRetain == 0);
+ testassert(TestRootRelease == 1);
+ testassert(TestRootAutorelease == 0);
+
+ } POP_POOL;
+
+
+ testprintf(" Successful +1 -> +0 handshake\n");
+
+ PUSH_POOL {
+ obj = [[TestObject alloc] init];
+ testassert(obj);
+ tt->ivar = obj;
+ obj = nil;
+
+ TestRootRetain = 0;
+ TestRootRelease = 0;
+ TestRootAutorelease = 0;
+ TestRootDealloc = 0;
+
+ __unsafe_unretained TestObject *tmp = [tt return1];
+
+ testassert(TestRootDealloc == 0);
+ testassert(TestRootRetain == 1);
+ testassert(TestRootRelease == 1);
+ testassert(TestRootAutorelease == 0);
+
+ tmp = nil;
+ testassert(TestRootDealloc == 0);
+ testassert(TestRootRetain == 1);
+ testassert(TestRootRelease == 1);
+ testassert(TestRootAutorelease == 0);
+
+ tt->ivar = nil;
+ testassert(TestRootDealloc == 1);
+ testassert(TestRootRetain == 1);
+ testassert(TestRootRelease == 2);
+ testassert(TestRootAutorelease == 0);
+
+ } POP_POOL;
+
+
+ testprintf(" Successful +0 -> +1 handshake\n");
+
+ PUSH_POOL {
+ obj = [[TestObject alloc] init];
+ testassert(obj);
+ tt->ivar = obj;
+ obj = nil;
+
+ TestRootRetain = 0;
+ TestRootRelease = 0;
+ TestRootAutorelease = 0;
+ TestRootDealloc = 0;
+
+ TestObject *tmp = [tt return0];
+
+ testassert(TestRootDealloc == 0);
+ testassert(TestRootRetain == 1);
+ testassert(TestRootRelease == 0);
+ testassert(TestRootAutorelease == 0);
+
+ tmp = nil;
+ testassert(TestRootDealloc == 0);
+ testassert(TestRootRetain == 1);
+ testassert(TestRootRelease == 1);
+ testassert(TestRootAutorelease == 0);
+
+ tt->ivar = nil;
+ testassert(TestRootDealloc == 1);
+ testassert(TestRootRetain == 1);
+ testassert(TestRootRelease == 2);
+ testassert(TestRootAutorelease == 0);
+
+ } POP_POOL;
+
+
+
+ succeed(__FILE__);
+
+ return 0;
+}
+
+
+#endif
--- /dev/null
+// Test OBJC_DEBUG_POOL_ALLOCATION (which is also enabled by MallocStackLogging)
+
+// TEST_ENV OBJC_DEBUG_POOL_ALLOCATION=YES
+// TEST_CFLAGS -framework Foundation
+// TEST_CONFIG MEM=mrc
+
+#include "test.h"
+
+#define FOUNDATION 0
+#define NAME "rr-autorelease-stacklogging"
+
+#include "rr-autorelease2.m"
testassert(state == 1);
}
+
+ // Autorelease with no pool.
+ testprintf("-- Autorelease with no pool.\n");
+ {
+ state = 0;
+ testonthread(^{
+ RR_AUTORELEASE([[Deallocator alloc] init]);
+ });
+ testassert(state == 1);
+ }
+
+ // Autorelease with no pool after popping the top-level pool.
+ testprintf("-- Autorelease with no pool after popping the last pool.\n");
+ {
+ state = 0;
+ testonthread(^{
+ void *pool = RR_PUSH();
+ RR_AUTORELEASE([[Deallocator alloc] init]);
+ RR_POP(pool);
+ RR_AUTORELEASE([[Deallocator alloc] init]);
+ });
+ testassert(state == 2);
+ }
+
// Top-level thread pool not popped.
// The runtime should clean it up.
#if FOUNDATION
testassert(state == 1);
}
#endif
-
-
-#if !FOUNDATION
- // NSThread calls NSPopAutoreleasePool(0)
- // rdar://9167170 but that currently breaks CF
- {
- static bool warned;
- if (!warned) testwarn("rdar://9167170 ignore NSPopAutoreleasePool(0)");
- warned = true;
- }
- /*
- testprintf("-- pop(0).\n");
- {
- RR_PUSH();
- state = 0;
- RR_AUTORELEASE([[AutoreleaseDuringDealloc alloc] init]);
- testassert(state == 0);
- RR_POP(0);
- testassert(state == 2);
- }
- */
-#endif
}
}
#endif
-
- for (int i = 0; i < 100; i++) {
- cycle();
+ // preheat
+ {
+ for (int i = 0; i < 100; i++) {
+ cycle();
+ }
+
+ slow_cycle();
}
-
- slow_cycle();
- leak_mark();
-
- for (int i = 0; i < 1000; i++) {
- cycle();
+ // check for leaks using top-level pools
+ {
+ leak_mark();
+
+ for (int i = 0; i < 1000; i++) {
+ cycle();
+ }
+
+ leak_check(0);
+
+ slow_cycle();
+
+ leak_check(0);
}
-
- leak_check(0);
-
- slow_cycle();
-
- leak_check(0);
-
+
+ // check for leaks using pools not at top level
+ void *pool = RR_PUSH();
+ {
+ leak_mark();
+
+ for (int i = 0; i < 1000; i++) {
+ cycle();
+ }
+
+ leak_check(0);
+
+ slow_cycle();
+
+ leak_check(0);
+ }
+ RR_POP(pool);
// NSThread.
// Can't leak check this because it's too noisy.
--- /dev/null
+// TEST_CFLAGS -framework Foundation
+// TEST_CONFIG MEM=mrc ARCH=x86_64
+
+// Stress-test nonpointer isa's side table retain count transfers.
+
+// x86_64 only. arm64's side table limit is high enough that bugs
+// are harder to reproduce.
+
+#include "test.h"
+#import <Foundation/Foundation.h>
+
+#define OBJECTS 1
+#define LOOPS 256
+#define THREADS 16
+#if __x86_64__
+# define RC_HALF (1ULL<<7)
+#else
+# error sorry
+#endif
+#define RC_DELTA RC_HALF
+
+static bool Deallocated = false;
+@interface Deallocator : NSObject @end
+@implementation Deallocator
+-(void)dealloc {
+ Deallocated = true;
+ [super dealloc];
+}
+@end
+
+// This is global to avoid extra retains by the dispatch block objects.
+static Deallocator *obj;
+
+int main() {
+ dispatch_queue_t queue =
+ dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
+
+ for (size_t i = 0; i < OBJECTS; i++) {
+ obj = [Deallocator new];
+
+ dispatch_apply(THREADS, queue, ^(size_t i __unused) {
+ for (size_t a = 0; a < LOOPS; a++) {
+ for (size_t b = 0; b < RC_DELTA; b++) {
+ [obj retain];
+ }
+ for (size_t b = 0; b < RC_DELTA; b++) {
+ [obj release];
+ }
+ }
+ });
+
+ testassert(!Deallocated);
+ [obj release];
+ testassert(Deallocated);
+ Deallocated = false;
+ }
+
+ succeed(__FILE__);
+}
#endif
#define SwiftV1MangledName "_TtC6Module12SwiftV1Class"
+#define SwiftV1MangledName2 "_TtC2Sw13SwiftV1Class2"
+#define SwiftV1MangledName3 "_TtCSs13SwiftV1Class3"
+#define SwiftV1MangledName4 "_TtC6Swiftt13SwiftV1Class4"
#if TEST_SWIFT
__attribute__((objc_runtime_name(SwiftV1MangledName)))
@interface SwiftV1Class : TestRoot @end
@implementation SwiftV1Class @end
+
+__attribute__((objc_runtime_name(SwiftV1MangledName2)))
+@interface SwiftV1Class2 : TestRoot @end
+@implementation SwiftV1Class2 @end
+
+__attribute__((objc_runtime_name(SwiftV1MangledName3)))
+@interface SwiftV1Class3 : TestRoot @end
+@implementation SwiftV1Class3 @end
+
+__attribute__((objc_runtime_name(SwiftV1MangledName4)))
+@interface SwiftV1Class4 : TestRoot @end
+@implementation SwiftV1Class4 @end
#endif
int foundTestRoot;
int foundSub;
int foundSwiftV1;
+ int foundSwiftV1class2;
+ int foundSwiftV1class3;
+ int foundSwiftV1class4;
const char **names;
Dl_info info;
names = objc_copyClassNamesForImage(info.dli_fname, &count);
testassert(names);
#if TEST_SWIFT
- testassert(count == 3);
+ testassert(count == 6);
#else
testassert(count == 2);
#endif
foundTestRoot = 0;
foundSub = 0;
foundSwiftV1 = 0;
+ foundSwiftV1class2 = 0;
+ foundSwiftV1class3 = 0;
+ foundSwiftV1class4 = 0;
for (i = 0; i < count; i++) {
if (0 == strcmp(names[i], "TestRoot")) foundTestRoot++;
if (0 == strcmp(names[i], "Sub")) foundSub++;
if (0 == strcmp(names[i], "Module.SwiftV1Class")) foundSwiftV1++;
+ if (0 == strcmp(names[i], "Sw.SwiftV1Class2")) foundSwiftV1class2++;
+ if (0 == strcmp(names[i], "Swift.SwiftV1Class3")) foundSwiftV1class3++;
+ if (0 == strcmp(names[i], "Swiftt.SwiftV1Class4")) foundSwiftV1class4++;
}
testassert(foundTestRoot == 1);
testassert(foundSub == 1);
#if TEST_SWIFT
testassert(foundSwiftV1 == 1);
+ testassert(foundSwiftV1class2 == 1);
+ testassert(foundSwiftV1class3 == 1);
+ testassert(foundSwiftV1class4 == 1);
#endif
foundTestRoot = 0;
foundSub = 0;
foundSwiftV1 = 0;
+ foundSwiftV1class2 = 0;
+ foundSwiftV1class3 = 0;
+ foundSwiftV1class4 = 0;
for (i = 0; i < count; i++) {
if (0 == strcmp(class_getName(list[i]), "TestRoot")) foundTestRoot++;
if (0 == strcmp(class_getName(list[i]), "Sub")) foundSub++;
if (0 == strcmp(class_getName(list[i]), "Module.SwiftV1Class")) foundSwiftV1++;
+ if (0 == strcmp(class_getName(list[i]), "Sw.SwiftV1Class2")) foundSwiftV1class2++;
+ if (0 == strcmp(class_getName(list[i]), "Swift.SwiftV1Class3")) foundSwiftV1class3++;
+ if (0 == strcmp(class_getName(list[i]), "Swiftt.SwiftV1Class4")) foundSwiftV1class4++;
// list should be non-meta classes only
testassert(!class_isMetaClass(list[i]));
}
testassert(foundSub == 1);
#if TEST_SWIFT
testassert(foundSwiftV1 == 1);
+ testassert(foundSwiftV1class2 == 1);
+ testassert(foundSwiftV1class3 == 1);
+ testassert(foundSwiftV1class4 == 1);
#endif
// fixme check class handler
#if TEST_SWIFT
testassert(objc_getClass("Module.SwiftV1Class") == [SwiftV1Class class]);
testassert(objc_getClass(SwiftV1MangledName) == [SwiftV1Class class]);
+ testassert(objc_getClass("Sw.SwiftV1Class2") == [SwiftV1Class2 class]);
+ testassert(objc_getClass(SwiftV1MangledName2) == [SwiftV1Class2 class]);
+ testassert(objc_getClass("Swift.SwiftV1Class3") == [SwiftV1Class3 class]);
+ testassert(objc_getClass(SwiftV1MangledName3) == [SwiftV1Class3 class]);
+ testassert(objc_getClass("Swiftt.SwiftV1Class4") == [SwiftV1Class4 class]);
+ testassert(objc_getClass(SwiftV1MangledName4) == [SwiftV1Class4 class]);
#endif
testassert(objc_getClass("SwiftV1Class") == nil);
testassert(objc_getClass("DoesNotExist") == nil);
testassert([taggedNS isKindOfClass: [NSNumber class]]);
testassert([taggedNS respondsToSelector: @selector(intValue)]);
- [taggedNS description];
+ (void)[taggedNS description];
}
int main()
int main()
{
+ if (objc_collecting_enabled()) {
+ // GC's block objects crash without this
+ dlopen("/System/Library/Frameworks/Foundation.framework/Foundation", RTLD_LAZY);
+ }
+
testassert(objc_debug_taggedpointer_mask != 0);
testassert(_objc_taggedPointersEnabled());
-// TEST_CONFIG SDK=iphoneos ARCH=arm64
+// TEST_CONFIG OS=iphoneos ARCH=arm64
#include "test.h"
#define TEST_H
#include <stdio.h>
+#include <dlfcn.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <objc/objc-internal.h>
#include <TargetConditionals.h>
+#if TARGET_OS_EMBEDDED || TARGET_IPHONE_SIMULATOR
+static OBJC_INLINE malloc_zone_t *objc_collectableZone(void) { return nil; }
+#endif
+
+
// Configuration macros
-#if !__LP64__ || TARGET_OS_WIN32 || __OBJC_GC__ || TARGET_IPHONE_SIMULATOR || (TARGET_OS_MAC && !TARGET_OS_IPHONE)
+#if !__LP64__ || TARGET_OS_WIN32 || __OBJC_GC__ || TARGET_IPHONE_SIMULATOR
# define SUPPORT_NONPOINTER_ISA 0
#elif __x86_64__
# define SUPPORT_NONPOINTER_ISA 1
static inline void testprintf(const char *msg, ...)
{
- if (msg && getenv("VERBOSE")) {
+ static int verbose = -1;
+ if (verbose < 0) verbose = atoi(getenv("VERBOSE") ?: "0");
+
+ // VERBOSE=1 prints test harness info only
+ if (msg && verbose >= 2) {
char *msg2;
asprintf(&msg2, "VERBOSE: %s", msg);
va_list v;
}
static inline void testonthread(__unsafe_unretained testblock_t code)
{
+ // GC crashes without Foundation because the block object classes
+ // are insufficiently initialized.
+ if (objc_collectingEnabled()) {
+ static bool foundationified = false;
+ if (!foundationified) {
+ dlopen("/System/Library/Frameworks/Foundation.framework/Foundation", RTLD_LAZY);
+ foundationified = true;
+ }
+ }
+
pthread_t th;
testcodehack = code; // force GC not-thread-local, avoid ARC void* casts
pthread_create(&th, NULL, _testthread, NULL);
static struct stret STRET_RESULT __attribute__((used)) = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+
+#if TARGET_IPHONE_SIMULATOR
+// Force cwd to executable's directory during launch.
+// sim used to do this but simctl does not.
+#include <crt_externs.h>
+ __attribute__((constructor))
+static void hack_cwd(void)
+{
+ if (!getenv("HACKED_CWD")) {
+ chdir(dirname((*_NSGetArgv())[0]));
+ setenv("HACKED_CWD", "1", 1);
+ }
+}
+#endif
+
#endif
# xterm colors
my $red = "\e[41;37m";
-my $yellow = "\e[43;37m";
-my $def = "\e[0m";
+my $yellow = "\e[43;30m";
+my $nocolor = "\e[0m";
# clean, help
if (scalar(@ARGV) == 1) {
options:
ARCH=<arch>
- SDK=<sdk name>
+ OS=<sdk name>[sdk version][-<deployment target>[-<run target>]]
ROOT=/path/to/project.roots/
CC=<compiler name>
LANGUAGE=c,c++,objective-c,objective-c++,swift
MEM=mrc,arc,gc
STDLIB=libc++,libstdc++
- GUARDMALLOC=0|1
+ GUARDMALLOC=0|1|before|after
BUILD=0|1
RUN=0|1
- VERBOSE=0|1
+ VERBOSE=0|1|2
examples:
test buildit-built root, i386 and x86_64, MRC and ARC and GC, clang compiler
$0 ARCH=i386,x86_64 ROOT=/tmp/libclosure.roots MEM=mrc,arc,gc CC=clang
- test buildit-built root with iOS simulator
- $0 ARCH=i386 ROOT=/tmp/libclosure.roots SDK=iphonesimulator
+ test buildit-built root with iOS simulator, deploy to iOS 7, run on iOS 8
+ $0 ARCH=i386 ROOT=/tmp/libclosure.roots OS=iphonesimulator-7.0-8.0
test buildit-built root on attached iOS device
- $0 ARCH=armv7 ROOT=/tmp/libclosure.roots SDK=iphoneos
+ $0 ARCH=armv7 ROOT=/tmp/libclosure.roots OS=iphoneos
END
exit 0;
}
# things you can multiplex on the command line
# ARCH=i386,x86_64,armv6,armv7
-# SDK=macosx,iphoneos,iphonesimulator
+# OS=macosx,iphoneos,iphonesimulator (plus sdk/deployment/run versions)
# LANGUAGE=c,c++,objective-c,objective-c++,swift
# CC=clang,gcc-4.2,llvm-gcc-4.2
# MEM=mrc,arc,gc
# STDLIB=libc++,libstdc++
-# GUARDMALLOC=0,1
+# GUARDMALLOC=0,1,before,after
# things you can set once on the command line
# ROOT=/path/to/project.roots
# BUILD=0|1
# RUN=0|1
-# VERBOSE=0|1
+# VERBOSE=0|1|2
sub chdir_verbose {
my $dir = shift;
- chdir $dir || die;
print "cd $dir\n" if $VERBOSE;
+ chdir $dir || die;
}
open(my $in, "< $file") || die "$file";
my $contents = join "", <$in>;
if (defined $ALL_TESTS{$name}) {
- print "${yellow}SKIP: multiple tests named '$name'; skipping file '$file'.${def}\n";
+ print "${yellow}SKIP: multiple tests named '$name'; skipping file '$file'.${nocolor}\n";
} else {
$ALL_TESTS{$name} = $ext if ($contents =~ m#^[/*\s]*TEST_#m);
}
return @sdks_memo;
}
+my %sdk_path_memo = ();
+sub getsdkpath {
+ my ($sdk) = @_;
+ if (!defined $sdk_path_memo{$sdk}) {
+ ($sdk_path_memo{$sdk}) = (`xcodebuild -version -sdk '$sdk' Path` =~ /^\s*(.+?)\s*$/);
+ }
+ return $sdk_path_memo{$sdk};
+}
+
+# Extract a version number from a string.
+# Ignore trailing "internal".
+sub versionsuffix {
+ my ($str) = @_;
+ my ($vers) = ($str =~ /([0-9]+\.[0-9]+)(?:\.?internal)?$/);
+ return $vers;
+}
+sub majorversionsuffix {
+ my ($str) = @_;
+ my ($vers) = ($str =~ /([0-9]+)\.[0-9]+(?:\.?internal)?$/);
+ return $vers;
+}
+sub minorversionsuffix {
+ my ($str) = @_;
+ my ($vers) = ($str =~ /[0-9]+\.([0-9]+)(?:\.?internal)?$/);
+ return $vers;
+}
+
+# Compares two SDK names and returns the newer one.
+# Assumes the two SDKs are the same OS.
+sub newersdk {
+ my ($lhs, $rhs) = @_;
+
+ # Major version wins.
+ my $lhsMajor = majorversionsuffix($lhs);
+ my $rhsMajor = majorversionsuffix($rhs);
+ if ($lhsMajor > $rhsMajor) { return $lhs; }
+ if ($lhsMajor < $rhsMajor) { return $rhs; }
+
+ # Minor version wins.
+ my $lhsMinor = minorversionsuffix($lhs);
+ my $rhsMinor = minorversionsuffix($rhs);
+ if ($lhsMinor > $rhsMinor) { return $lhs; }
+ if ($lhsMinor < $rhsMinor) { return $rhs; }
+
+ # Lexically-last wins (i.e. internal is better than not internal)
+ if ($lhs gt $rhs) { return $lhs; }
+ return $rhs;
+}
+
# Returns whether the given sdk supports -lauto
sub supportslibauto {
my ($sdk) = @_;
return 1 if $sdk =~ /^macosx/;
- return 0 if $sdk =~ /^iphone/;
- die;
+ return 0;
}
# print text with a colored prefix on each line
while (my @lines = split("\n", shift)) {
for my $line (@lines) {
chomp $line;
- print "$color $def$line\n";
+ print "$color $nocolor$line\n";
}
}
}
return %results;
}
-# Get the name of the system SDK from sw_vers
-sub systemsdkname {
- my @lines = `/usr/bin/sw_vers`;
- my $name;
- my $vers;
- for my $line (@lines) {
- ($name) = ($line =~ /^ProductName:\s+(.*)/) if !$name;
- ($vers) = ($line =~ /^ProductVersion:\s+(.*)/) if !$vers;
- }
-
- $name =~ s/ //g;
- $name = lc($name);
- my $internal = "";
- if (-d "/usr/local/include/objc") {
- if ($name eq "macosx") {
- $internal = "internal";
- } else {
- $internal = ".internal";
- }
- }
- return $name . $vers . $internal;
-}
-
sub check_output {
my %C = %{shift()};
my $name = shift;
my $runerror = $T{TEST_RUN_OUTPUT};
filter_hax(\@output);
filter_verbose(\@output);
+ filter_simulator(\@output);
$warn = filter_warn(\@output);
$bad |= filter_guardmalloc(\@output) if ($C{GUARDMALLOC});
$bad |= filter_valgrind(\@output) if ($C{VALGRIND});
$bad = "(output not 'OK: $name')" if ($bad eq "" && (scalar(@output) != 1 || $output[0] !~ /^OK: $name/));
if ($bad ne "") {
- print "${red}FAIL: /// test '$name' \\\\\\$def\n";
+ print "${red}FAIL: /// test '$name' \\\\\\$nocolor\n";
colorprint($red, @original_output);
- print "${red}FAIL: \\\\\\ test '$name' ///$def\n";
- print "${red}FAIL: $name: $bad$def\n";
+ print "${red}FAIL: \\\\\\ test '$name' ///$nocolor\n";
+ print "${red}FAIL: $name: $bad$nocolor\n";
$xit = 0;
}
elsif ($warn ne "") {
- print "${yellow}PASS: /// test '$name' \\\\\\$def\n";
+ print "${yellow}PASS: /// test '$name' \\\\\\$nocolor\n";
colorprint($yellow, @original_output);
- print "${yellow}PASS: \\\\\\ test '$name' ///$def\n";
+ print "${yellow}PASS: \\\\\\ test '$name' ///$nocolor\n";
print "PASS: $name (with warnings)\n";
}
else {
@$outputref = @new_output;
}
+sub filter_simulator
+{
+ my $outputref = shift;
+
+ my @new_output;
+ for my $line (@$outputref) {
+ if ($line !~ /No simulator devices appear to be running/) {
+ push @new_output, $line;
+ }
+ }
+
+ @$outputref = @new_output;
+}
+
+sub filter_simulator
+{
+ my $outputref = shift;
+
+ my @new_output;
+ for my $line (@$outputref) {
+ if ($line !~ /No simulator devices appear to be running/) {
+ push @new_output, $line;
+ }
+ }
+
+ @$outputref = @new_output;
+}
+
sub filter_hax
{
my $outputref = shift;
if ($line =~ /malloc: enabling scribbling to detect mods to free/ ||
$line =~ /Deleted objects will be dirtied by the collector/ ||
$line =~ /malloc: stack logs being written into/ ||
+ $line =~ /malloc: stack logs deleted from/ ||
+ $line =~ /malloc: process \d+ no longer exists/ ||
$line =~ /malloc: recording malloc and VM allocation stacks/)
{
next;
# search file for 'TEST_CONFIG' or '#include "test.h"'
# also collect other values:
+ # TEST_DISABLED disable test with an optional message
+ # TEST_CRASHES test is expected to crash
# TEST_CONFIG test conditions
# TEST_ENV environment prefix
# TEST_CFLAGS compile flags
my $contents = join "", <$in>;
my $test_h = ($contents =~ /^\s*#\s*(include|import)\s*"test\.h"/m);
- my $disabled = ($contents =~ /\bTEST_DISABLED\b/m);
+ my ($disabled) = ($contents =~ /\b(TEST_DISABLED\b.*)$/m);
my $crashes = ($contents =~ /\bTEST_CRASHES\b/m);
my ($conditionstring) = ($contents =~ /\bTEST_CONFIG\b(.*)$/m);
my ($envstring) = ($contents =~ /\bTEST_ENV\b(.*)$/m);
return 0 if !$test_h && !$disabled && !$crashes && !defined($conditionstring) && !defined($envstring) && !defined($cflags) && !defined($buildcmd) && !defined($builderror) && !defined($runerror);
if ($disabled) {
- print "${yellow}SKIP: $name (disabled by TEST_DISABLED)$def\n";
+ print "${yellow}SKIP: $name (disabled by $disabled)$nocolor\n";
return 0;
}
$ok = 1 if ($testvalue eq $condvalue);
- # special case: SDK allows prefixes
- if ($condkey eq "SDK") {
- $ok = 1 if ($testvalue =~ /^$condvalue/);
- }
-
# special case: CC and CXX allow substring matches
if ($condkey eq "CC" || $condkey eq "CXX") {
$ok = 1 if ($testvalue =~ /$condvalue/);
if ($output =~ /$builderror/) {
$ok = 1;
} else {
- print "${red}FAIL: /// test '$name' \\\\\\$def\n";
+ print "${red}FAIL: /// test '$name' \\\\\\$nocolor\n";
colorprint $red, $output;
- print "${red}FAIL: \\\\\\ test '$name' ///$def\n";
- print "${red}FAIL: $name (build output does not match TEST_BUILD_OUTPUT)$def\n";
+ print "${red}FAIL: \\\\\\ test '$name' ///$nocolor\n";
+ print "${red}FAIL: $name (build output does not match TEST_BUILD_OUTPUT)$nocolor\n";
$ok = 0;
}
} elsif ($?) {
- print "${red}FAIL: /// test '$name' \\\\\\$def\n";
+ print "${red}FAIL: /// test '$name' \\\\\\$nocolor\n";
colorprint $red, $output;
- print "${red}FAIL: \\\\\\ test '$name' ///$def\n";
- print "${red}FAIL: $name (build failed)$def\n";
+ print "${red}FAIL: \\\\\\ test '$name' ///$nocolor\n";
+ print "${red}FAIL: $name (build failed)$nocolor\n";
$ok = 0;
} elsif ($output ne "") {
- print "${red}FAIL: /// test '$name' \\\\\\$def\n";
+ print "${red}FAIL: /// test '$name' \\\\\\$nocolor\n";
colorprint $red, $output;
- print "${red}FAIL: \\\\\\ test '$name' ///$def\n";
- print "${red}FAIL: $name (unexpected build output)$def\n";
+ print "${red}FAIL: \\\\\\ test '$name' ///$nocolor\n";
+ print "${red}FAIL: $name (unexpected build output)$nocolor\n";
$ok = 0;
} else {
$ok = 1;
print "PASS: $name (build only)\n";
return 1;
}
- else {
- chdir_verbose "$C{DIR}/$name.build";
- }
+
+ my $testdir = "$C{DIR}/$name.build";
+ chdir_verbose $testdir;
my $env = "$C{ENV} $T{TEST_ENV}";
- if ($T{TEST_CRASHES}) {
- $env .= " DYLD_INSERT_LIBRARIES=libcrashcatch.dylib";
- }
my $output;
if ($C{ARCH} =~ /^arm/ && `unamep -p` !~ /^arm/) {
- # run on iOS device
+ # run on iOS or watchos device
- my $remotedir = "/var/root/test/" . basename($C{DIR}) . "/$name.build";
- my $remotedyld = "";
- $remotedyld .= " DYLD_LIBRARY_PATH=$remotedir";
- $remotedyld .= ":/var/root/test/" if ($C{TESTLIB} ne $TESTLIBPATH);
+ my $remotedir = "/var/root/objctest/" . basename($C{DIR}) . "/$name.build";
- # elide host-specific paths
- $env =~ s/DYLD_LIBRARY_PATH=\S+//;
- $env =~ s/DYLD_ROOT_PATH=\S+//;
+ # Add test dir and libobjc's dir to DYLD_LIBRARY_PATH.
+ # Insert libcrashcatch.dylib if necessary.
+ $env .= " DYLD_LIBRARY_PATH=$remotedir";
+ $env .= ":/var/root/objctest/" if ($C{TESTLIB} ne $TESTLIBPATH);
+ if ($T{TEST_CRASHES}) {
+ $env .= " DYLD_INSERT_LIBRARIES=$remotedir/libcrashcatch.dylib";
+ }
- my $cmd = "ssh iphone 'cd $remotedir && env $env $remotedyld ./$name.out'";
+ my $cmd = "ssh iphone 'cd $remotedir && env $env ./$name.out'";
$output = make("$cmd");
}
+ elsif ($C{OS} =~ /simulator/) {
+ # run locally in an iOS simulator
+ # fixme appletvsimulator and watchsimulator
+ # fixme SDK
+ my $sim = "xcrun -sdk iphonesimulator simctl spawn 'iPhone 6'";
+
+ # Add test dir and libobjc's dir to DYLD_LIBRARY_PATH.
+ # Insert libcrashcatch.dylib if necessary.
+ $env .= " DYLD_LIBRARY_PATH=$testdir";
+ $env .= ":" . dirname($C{TESTLIB}) if ($C{TESTLIB} ne $TESTLIBPATH);
+ if ($T{TEST_CRASHES}) {
+ $env .= " DYLD_INSERT_LIBRARIES=$testdir/libcrashcatch.dylib";
+ }
+
+ my $simenv = "";
+ foreach my $keyvalue (split(' ', $env)) {
+ $simenv .= "SIMCTL_CHILD_$keyvalue ";
+ }
+ # Use the full path here so hack_cwd in test.h works.
+ $output = make("env $simenv $sim $testdir/$name.out");
+ }
else {
# run locally
- my $cmd = "env $env ./$name.out";
- $output = make("sh -c '$cmd 2>&1' 2>&1");
- # need extra sh level to capture "sh: Illegal instruction" after crash
- # fixme fail if $? except tests that expect to crash
+ # Add test dir and libobjc's dir to DYLD_LIBRARY_PATH.
+ # Insert libcrashcatch.dylib if necessary.
+ $env .= " DYLD_LIBRARY_PATH=$testdir";
+ $env .= ":" . dirname($C{TESTLIB}) if ($C{TESTLIB} ne $TESTLIBPATH);
+ if ($T{TEST_CRASHES}) {
+ $env .= " DYLD_INSERT_LIBRARIES=$testdir/libcrashcatch.dylib";
+ }
+
+ $output = make("sh -c '$env ./$name.out'");
}
return check_output(\%C, $name, split("\n", $output));
my %compiler_memo;
sub find_compiler {
- my ($cc, $sdk, $sdk_path) = @_;
+ my ($cc, $toolchain, $sdk_path) = @_;
# memoize
- my $key = $cc . ':' . $sdk;
+ my $key = $cc . ':' . $toolchain;
my $result = $compiler_memo{$key};
return $result if defined $result;
- $result = `xcrun -sdk $sdk -find $cc 2>/dev/null`;
+ $result = make("xcrun -toolchain $toolchain -find $cc 2>/dev/null");
chomp $result;
$compiler_memo{$key} = $result;
$C{LANGUAGE} = "objective-c" if $C{LANGUAGE} eq "objc";
$C{LANGUAGE} = "objective-c++" if $C{LANGUAGE} eq "objc++";
+ # Interpret OS version string from command line.
+ my ($sdk_arg, $deployment_arg, $run_arg, undef) = split('-', $C{OSVERSION});
+ delete $C{OSVERSION};
+ my ($os_arg) = ($sdk_arg =~ /^([^\.0-9]+)/);
+ $deployment_arg = "default" if !defined($deployment_arg);
+ $run_arg = "default" if !defined($run_arg);
+
+
+ die "unknown OS '$os_arg' (expected iphoneos or iphonesimulator or watchos or watchsimulator or macosx)\n" if ($os_arg ne "iphoneos" && $os_arg ne "iphonesimulator" && $os_arg ne "watchos" && $os_arg ne "watchsimulator" && $os_arg ne "macosx");
+
+ $C{OS} = $os_arg;
+
+ if ($os_arg eq "iphoneos" || $os_arg eq "iphonesimulator") {
+ $C{TOOLCHAIN} = "ios";
+ } elsif ($os_arg eq "watchos" || $os_arg eq "watchsimulator") {
+ $C{TOOLCHAIN} = "watchos";
+ } elsif ($os_arg eq "macosx") {
+ $C{TOOLCHAIN} = "osx";
+ } else {
+ print "${yellow}WARN: don't know toolchain for OS $C{OS}${nocolor}\n";
+ $C{TOOLCHAIN} = "default";
+ }
+
# Look up SDK
# Try exact match first.
- # Then try lexically-last prefix match (so "macosx" => "macosx10.7internal").
+ # Then try lexically-last prefix match (so "macosx" => "macosx10.7internal")
my @sdks = getsdks();
if ($VERBOSE) {
- print "Installed SDKs: @sdks\n";
+ print "note: Installed SDKs: @sdks\n";
}
my $exactsdk = undef;
my $prefixsdk = undef;
foreach my $sdk (@sdks) {
- my $SDK = $C{SDK};
- $exactsdk = $sdk if ($sdk eq $SDK);
- # check for digits to prevent e.g. "iphone" => "iphonesimulator4.2"
- $prefixsdk = $sdk if ($sdk =~ /^$SDK[0-9]/ && $sdk gt $prefixsdk);
+ $exactsdk = $sdk if ($sdk eq $sdk_arg);
+ $prefixsdk = newersdk($sdk, $prefixsdk) if ($sdk =~ /^$sdk_arg/);
}
+
+ my $sdk;
if ($exactsdk) {
- $C{SDK} = $exactsdk;
+ $sdk = $exactsdk;
} elsif ($prefixsdk) {
- $C{SDK} = $prefixsdk;
+ $sdk = $prefixsdk;
} else {
- die "unknown SDK '$C{SDK}'\nInstalled SDKs: @sdks\n";
+ die "unknown SDK '$sdk_arg'\nInstalled SDKs: @sdks\n";
+ }
+
+ # Set deployment target and run target.
+ # fixme can't enforce version when run_arg eq "default"
+ # because we don't know it yet
+ $deployment_arg = versionsuffix($sdk) if $deployment_arg eq "default";
+ if ($run_arg ne "default") {
+ die "Deployment target '$deployment_arg' is newer than run target '$run_arg'\n" if $deployment_arg > $run_arg;
}
+ $C{DEPLOYMENT_TARGET} = $deployment_arg;
+ $C{RUN_TARGET} = $run_arg;
- # set the config name now, after massaging the language and sdk,
+ # set the config name now, after massaging the language and OS versions,
# but before adding other settings
my $configname = config_name(%C);
die if ($configname =~ /'/);
(my $configdir = $configname) =~ s#/##g;
$C{DIR} = "$BUILDDIR/$configdir";
- ($C{SDK_PATH}) = (`xcodebuild -version -sdk $C{SDK} Path` =~ /^\s*(.+?)\s*$/);
+ $C{SDK_PATH} = getsdkpath($sdk);
# Look up test library (possible in root or SDK_PATH)
$C{CXX} = $cxx;
$C{SWIFT} = $swift
} else {
- $C{CC} = find_compiler($cc, $C{SDK}, $C{SDK_PATH});
- $C{CXX} = find_compiler($cxx, $C{SDK}, $C{SDK_PATH});
- $C{SWIFT} = find_compiler($swift, $C{SDK}, $C{SDK_PATH});
+ $C{CC} = find_compiler($cc, $C{TOOLCHAIN}, $C{SDK_PATH});
+ $C{CXX} = find_compiler($cxx, $C{TOOLCHAIN}, $C{SDK_PATH});
+ $C{SWIFT} = find_compiler($swift, $C{TOOLCHAIN}, $C{SDK_PATH});
- die "No compiler '$cc' ('$C{CC}') in SDK '$C{SDK}'\n" if !-e $C{CC};
- die "No compiler '$cxx' ('$C{CXX}') in SDK '$C{SDK}'\n" if !-e $C{CXX};
- die "No compiler '$swift' ('$C{SWIFT}') in SDK '$C{SDK}'\n" if !-e $C{SWIFT};
+ die "No compiler '$cc' ('$C{CC}') in toolchain '$C{TOOLCHAIN}'\n" if !-e $C{CC};
+ die "No compiler '$cxx' ('$C{CXX}') in toolchain '$C{TOOLCHAIN}'\n" if !-e $C{CXX};
+ die "No compiler '$swift' ('$C{SWIFT}') in toolchain '$C{TOOLCHAIN}'\n" if !-e $C{SWIFT};
}
# Populate cflags
$cflags .= " '-Wl,-syslibroot,$C{SDK_PATH}'";
$swiftflags .= " -sdk '$C{SDK_PATH}'";
- my $target = "";
- if ($C{SDK} =~ /^iphoneos[0-9]/ && $cflags !~ /-mios-version-min/) {
- my ($vers) = ($C{SDK} =~ /^iphoneos([0-9]+\.[0-9]+)/);
- $cflags .= " -mios-version-min=$vers";
- $target = "$C{ARCH}-apple-ios$vers";
+ # Set deployment target cflags
+ my $target = undef;
+ die "No deployment target" if $C{DEPLOYMENT_TARGET} eq "";
+ if ($C{OS} eq "iphoneos") {
+ $cflags .= " -mios-version-min=$C{DEPLOYMENT_TARGET}";
+ $target = "$C{ARCH}-apple-ios$C{DEPLOYMENT_TARGET}";
+ }
+ elsif ($C{OS} eq "iphonesimulator") {
+ $cflags .= " -mios-simulator-version-min=$C{DEPLOYMENT_TARGET}";
+ $target = "$C{ARCH}-apple-ios$C{DEPLOYMENT_TARGET}";
+ }
+ elsif ($C{OS} eq "watchos") {
+ $cflags .= " -mwatchos-version-min=$C{DEPLOYMENT_TARGET}";
+ $target = "$C{ARCH}-apple-watchos$C{DEPLOYMENT_TARGET}";
}
- elsif ($C{SDK} =~ /^iphonesimulator[0-9]/ && $cflags !~ /-mios-simulator-version-min/) {
- my ($vers) = ($C{SDK} =~ /^iphonesimulator([0-9]+\.[0-9]+)/);
- $cflags .= " -mios-simulator-version-min=$vers";
- $target = "$C{ARCH}-apple-ios$vers";
+ elsif ($C{OS} eq "watchsimulator") {
+ $cflags .= " -mwatch-simulator-version-min=$C{DEPLOYMENT_TARGET}";
+ $target = "$C{ARCH}-apple-watchos$C{DEPLOYMENT_TARGET}";
}
else {
- my ($vers) = ($C{SDK} =~ /^macosx([0-9]+\.[0-9]+)/);
- $vers = "" if !defined($vers);
- $target = "$C{ARCH}-apple-macosx$vers";
+ $cflags .= " -mmacosx-version-min=$C{DEPLOYMENT_TARGET}";
+ $target = "$C{ARCH}-apple-macosx$C{DEPLOYMENT_TARGET}";
}
$swiftflags .= " -target $target";
- if ($C{SDK} =~ /^iphonesimulator/ && $C{ARCH} eq "i386") {
+ # fixme still necessary?
+ if ($C{OS} eq "iphonesimulator" && $C{ARCH} eq "i386") {
$objcflags .= " -fobjc-abi-version=2 -fobjc-legacy-dispatch";
}
die "unrecognized MEM '$C{MEM}'\n";
}
- if (supportslibauto($C{SDK})) {
+ if (supportslibauto($C{OS})) {
# do this even for non-GC tests
$objcflags .= " -lauto";
}
# Populate ENV_PREFIX
$C{ENV} = "LANG=C MallocScribble=1";
- $C{ENV} .= " VERBOSE=1" if $VERBOSE;
+ $C{ENV} .= " VERBOSE=$VERBOSE" if $VERBOSE;
if ($root ne "") {
- my $library_path = dirname($C{TESTLIB});
- die "no spaces allowed in root" if $library_path =~ /\s+/;
- $C{ENV} .= " DYLD_LIBRARY_PATH=$library_path" if ($library_path ne "/usr/lib");
- }
- if ($C{SDK_PATH} ne "/") {
- die "no spaces allowed in sdk" if $C{SDK_PATH} =~ /\s+/;
- $C{ENV} .= " DYLD_ROOT_PATH=$C{SDK_PATH}";
+ die "no spaces allowed in root" if dirname($C{TESTLIB}) =~ /\s+/;
}
if ($C{GUARDMALLOC}) {
$ENV{GUARDMALLOC} = "1"; # checked by tests and errcheck.pl
$C{ENV} .= " DYLD_INSERT_LIBRARIES=/usr/lib/libgmalloc.dylib";
- }
- if ($C{SDK} =~ /^iphonesimulator[0-9]/) {
- my ($vers) = ($C{SDK} =~ /^iphonesimulator([0-9]+\.[0-9+])/);
- $C{ENV} .=
- " CFFIXED_USER_HOME=$ENV{HOME}/Library/Application\\ Support/iPhone\\ Simulator/$vers" .
- " IPHONE_SIMULATOR_ROOT=$C{SDK_PATH}" .
- " IPHONE_SHARED_RESOURCES_DIRECTORY=$ENV{HOME}/Library/Application\\ Support/iPhone\\ Simulator/$vers";
+ if ($C{GUARDMALLOC} eq "before") {
+ $C{ENV} .= " MALLOC_PROTECT_BEFORE=1";
+ } elsif ($C{GUARDMALLOC} eq "after") {
+ # protect after is the default
+ } else {
+ die "Unknown guard malloc mode '$C{GUARDMALLOC}'\n";
+ }
}
# Populate compiler commands
die "unknown MEM=$C{MEM} (expected one of mrc arc gc)\n";
}
- if ($C{MEM} eq "gc" && $C{SDK} =~ /^iphone/) {
+ if ($C{MEM} eq "gc" && $C{OS} !~ /^macosx/) {
+ print "note: skipping configuration $C{NAME}\n";
+ print "note: because OS=$C{OS} does not support MEM=$C{MEM}\n";
+ return 0;
+ }
+ if ($C{MEM} eq "gc" && $C{ARCH} eq "x86_64h") {
print "note: skipping configuration $C{NAME}\n";
- print "note: because SDK=$C{SDK} does not support MEM=$C{MEM}\n";
+ print "note: because ARCH=$C{ARCH} does not support MEM=$C{MEM}\n";
return 0;
}
- if ($C{MEM} eq "arc" && $C{SDK} !~ /^iphone/ && $C{ARCH} eq "i386") {
+ if ($C{MEM} eq "arc" && $C{OS} =~ /^macosx/ && $C{ARCH} eq "i386") {
print "note: skipping configuration $C{NAME}\n";
print "note: because 32-bit Mac does not support MEM=$C{MEM}\n";
return 0;
# fixme
if ($C{LANGUAGE} eq "swift" && $C{ARCH} =~ /^arm/) {
print "note: skipping configuration $C{NAME}\n";
- print "note: because ARCH=$C{ARCH} does not support LANGAUGE=SWIFT\n";
+ print "note: because ARCH=$C{ARCH} does not support LANGUAGE=SWIFT\n";
return 0;
}
+ # fixme unimplemented run targets
+ if ($C{RUN_TARGET} ne "default" && $C{OS} !~ /simulator/) {
+ print "${yellow}WARN: skipping configuration $C{NAME}${nocolor}\n";
+ print "${yellow}WARN: because OS=$C{OS} does not yet implement RUN_TARGET=$C{RUN_TARGET}${nocolor}\n";
+ }
+
%$configref = %C;
}
else {
if ($C{ARCH} =~ /^arm/ && `unamep -p` !~ /^arm/) {
# upload all tests to iOS device
- make("RSYNC_PASSWORD=alpine rsync -av $C{DIR} rsync://root\@localhost:10873/root/var/root/test/");
+ make("RSYNC_PASSWORD=alpine rsync -av $C{DIR} rsync://root\@localhost:10873/root/var/root/objctest/");
die "Couldn't rsync tests to device\n" if ($?);
# upload library to iOS device
if ($C{TESTLIB} ne $TESTLIBPATH) {
- make("RSYNC_PASSWORD=alpine rsync -av $C{TESTLIB} rsync://root\@localhost:10873/root/var/root/test/");
+ make("RSYNC_PASSWORD=alpine rsync -av $C{TESTLIB} rsync://root\@localhost:10873/root/var/root/objctest/");
die "Couldn't rsync $C{TESTLIB} to device\n" if ($?);
- make("RSYNC_PASSWORD=alpine rsync -av $C{TESTDSYM} rsync://root\@localhost:10873/root/var/root/test/");
+ make("RSYNC_PASSWORD=alpine rsync -av $C{TESTDSYM} rsync://root\@localhost:10873/root/var/root/objctest/");
}
}
return [( map { ($_ eq "0") ? 0 : 1 } @values )];
}
+# Return an integer if set by "$argname=value" on the
+# command line. Return $default if not set.
+sub getints {
+ my ($argname, $default) = @_;
+
+ my @values = @{getargs($argname, $default)};
+ return [( map { int($_) } @values )];
+}
+
sub getarg {
my ($argname, $default) = @_;
my @values = @{getargs($argname, $default)};
return $values[0];
}
+sub getint {
+ my ($argname, $default) = @_;
+ my @values = @{getints($argname, $default)};
+ die "Only one value allowed for $argname\n" if @values > 1;
+ return $values[0];
+}
+
# main
my %args;
$args{ARCH} = getargs("ARCH", 0);
$args{ARCH} = getargs("ARCHS", $default_arch) if !@{$args{ARCH}}[0];
-$args{SDK} = getargs("SDK", "macosx");
+$args{OSVERSION} = getargs("OS", "macosx-default-default");
$args{MEM} = getargs("MEM", "mrc");
$args{LANGUAGE} = [ map { lc($_) } @{getargs("LANGUAGE", "objective-c,swift")} ];
-$args{STDLIB} = getargs("STDLIB", "libstdc++");
+$args{STDLIB} = getargs("STDLIB", "libc++");
$args{CC} = getargs("CC", "clang");
-$args{GUARDMALLOC} = getbools("GUARDMALLOC", 0);
+{
+ my $guardmalloc = getargs("GUARDMALLOC", 0);
+ # GUARDMALLOC=1 is the same as GUARDMALLOC=before,after
+ my @guardmalloc2 = ();
+ for my $arg (@$guardmalloc) {
+ if ($arg == 1) { push @guardmalloc2, "before";
+ push @guardmalloc2, "after"; }
+ else { push @guardmalloc2, $arg }
+ }
+ $args{GUARDMALLOC} = \@guardmalloc2;
+}
$BUILD = getbool("BUILD", 1);
$RUN = getbool("RUN", 1);
-$VERBOSE = getbool("VERBOSE", 0);
+$VERBOSE = getint("VERBOSE", 0);
my $root = getarg("ROOT", "");
$root =~ s#/*$##;
(my $t, my $f) = eval { run_one_config($configref, @tests); };
if ($@) {
chomp $@;
- print "${red}FAIL: $configname${def}\n";
- print "${red}FAIL: $@${def}\n";
+ print "${red}FAIL: $configname${nocolor}\n";
+ print "${red}FAIL: $@${nocolor}\n";
$failconfigs++;
} else {
my $color = ($f ? $red : "");
print "note:\n";
- print "${color}note: $configname$def\n";
- print "${color}note: $t tests, $f failures$def\n";
+ print "${color}note: $configname$nocolor\n";
+ print "${color}note: $t tests, $f failures$nocolor\n";
$testcount += $t;
$failcount += $f;
$failconfigs++ if ($f);
print "note: -----\n";
my $color = ($failconfigs ? $red : "");
-print "${color}note: $testconfigs configurations, $failconfigs with failures$def\n";
-print "${color}note: $testcount tests, $failcount failures$def\n";
+print "${color}note: $testconfigs configurations, $failconfigs with failures$nocolor\n";
+print "${color}note: $testcount tests, $failcount failures$nocolor\n";
$failed = ($failconfigs ? 1 : 0);
+// xpc leaks memory in dlopen(). Disable it.
+// TEST_ENV XPC_SERVICES_UNAVAILABLE=1
/*
TEST_BUILD
$C{COMPILE} $DIR/unload4.m -o unload4.dylib -dynamiclib
testassert(o2);
// give BigClass and BigClass->isa large method caches (4692641)
- for (i = 0; i < 10000; i++) {
+ // Flush caches part way through to test large empty caches.
+ for (i = 0; i < 3000; i++) {
+ sprintf(buf, "method_%d", i);
+ SEL sel = sel_registerName(buf);
+ ((void(*)(id, SEL))objc_msgSend)(o2, sel);
+ ((void(*)(id, SEL))objc_msgSend)(object_getClass(o2), sel);
+ }
+ _objc_flush_caches(object_getClass(o2));
+ for (i = 0; i < 17000; i++) {
sprintf(buf, "method_%d", i);
SEL sel = sel_registerName(buf);
((void(*)(id, SEL))objc_msgSend)(o2, sel);
// these selectors came from the bundle
testassert(0 == strcmp("unload2_instance_method", sel_getName(sel_registerName("unload2_instance_method"))));
testassert(0 == strcmp("unload2_category_method", sel_getName(sel_registerName("unload2_category_method"))));
+
+ // This protocol came from the bundle.
+ // It isn't unloaded cleanly (rdar://20664713), but neither
+ // may it cause the protocol table to crash after unloading.
+ testassert(objc_getProtocol("SmallProtocol"));
}
+
int main()
{
// fixme object_dispose() not aggressive enough?
@implementation UnusedClass @end
-@implementation SmallClass (Category)
+@protocol SmallProtocol
+-(void)unload2_category_method;
+@end
+
+@interface SmallClass (Category) <SmallProtocol> @end
+
+@implementation SmallClass (Category)
-(void)unload2_category_method { }
@end
// TEST_CONFIG
#include "test.h"
+
+#if __OBJC_GC__ && __cplusplus && __i386__
+
+int main()
+{
+ testwarn("rdar://19042235 test disabled for 32-bit objc++ GC because of unknown bit rot");
+ succeed(__FILE__);
+}
+
+#else
+
#include "testroot.i"
#include <stdint.h>
#include <string.h>
succeed(__FILE__);
return 0;
}
+
+#endif