"OTHER_LDFLAGS[sdk=iphonesimulator*][arch=*]" = "-lc++abi";
"OTHER_LDFLAGS[sdk=macosx*]" = (
"-lCrashReporterClient",
- "-lauto",
"-lc++abi",
"-Xlinker",
"-sectalign",
"OTHER_LDFLAGS[sdk=iphonesimulator*][arch=*]" = "-lc++abi";
"OTHER_LDFLAGS[sdk=macosx*]" = (
"-lCrashReporterClient",
- "-lauto",
"-lc++abi",
"-Xlinker",
"-sectalign",
.quad 0
.quad 0
-
- // Workaround for Skype evil (rdar://19715989)
-
- .text
- .align 4
- .private_extern _map_images
- .private_extern _map_2_images
- .private_extern _hax
-_hax:
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
-_map_images:
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- jmp _map_2_images
-
#endif
@property (readonly) NSUInteger hash;
@property (readonly) Class superclass;
-- (Class)class OBJC_SWIFT_UNAVAILABLE("use 'anObject.dynamicType' instead");
+- (Class)class OBJC_SWIFT_UNAVAILABLE("use 'type(of: anObject)' instead");
- (instancetype)self;
- (id)performSelector:(SEL)aSelector;
// don't want the table to act as a root for `leaks`.
typedef objc::DenseMap<DisguisedPtr<objc_object>,size_t,true> RefcountMap;
+// Template parameters.
+enum HaveOld { DontHaveOld = false, DoHaveOld = true };
+enum HaveNew { DontHaveNew = false, DoHaveNew = true };
+
struct SideTable {
spinlock_t slock;
RefcountMap refcnts;
void lock() { slock.lock(); }
void unlock() { slock.unlock(); }
+ void forceReset() { slock.forceReset(); }
// Address-ordered lock discipline for a pair of side tables.
- template<bool HaveOld, bool HaveNew>
+ template<HaveOld, HaveNew>
static void lockTwo(SideTable *lock1, SideTable *lock2);
- template<bool HaveOld, bool HaveNew>
+ template<HaveOld, HaveNew>
static void unlockTwo(SideTable *lock1, SideTable *lock2);
};
template<>
-void SideTable::lockTwo<true, true>(SideTable *lock1, SideTable *lock2) {
+void SideTable::lockTwo<DoHaveOld, DoHaveNew>
+ (SideTable *lock1, SideTable *lock2)
+{
spinlock_t::lockTwo(&lock1->slock, &lock2->slock);
}
template<>
-void SideTable::lockTwo<true, false>(SideTable *lock1, SideTable *) {
+void SideTable::lockTwo<DoHaveOld, DontHaveNew>
+ (SideTable *lock1, SideTable *)
+{
lock1->lock();
}
template<>
-void SideTable::lockTwo<false, true>(SideTable *, SideTable *lock2) {
+void SideTable::lockTwo<DontHaveOld, DoHaveNew>
+ (SideTable *, SideTable *lock2)
+{
lock2->lock();
}
template<>
-void SideTable::unlockTwo<true, true>(SideTable *lock1, SideTable *lock2) {
+void SideTable::unlockTwo<DoHaveOld, DoHaveNew>
+ (SideTable *lock1, SideTable *lock2)
+{
spinlock_t::unlockTwo(&lock1->slock, &lock2->slock);
}
template<>
-void SideTable::unlockTwo<true, false>(SideTable *lock1, SideTable *) {
+void SideTable::unlockTwo<DoHaveOld, DontHaveNew>
+ (SideTable *lock1, SideTable *)
+{
lock1->unlock();
}
template<>
-void SideTable::unlockTwo<false, true>(SideTable *, SideTable *lock2) {
+void SideTable::unlockTwo<DontHaveOld, DoHaveNew>
+ (SideTable *, SideTable *lock2)
+{
lock2->unlock();
}
-
// We cannot use a C++ static initializer to initialize SideTables because
// anonymous namespace
};
+void SideTableLockAll() {
+ SideTables().lockAll();
+}
+
+void SideTableUnlockAll() {
+ SideTables().unlockAll();
+}
+
+void SideTableForceResetAll() {
+ SideTables().forceResetAll();
+}
+
+void SideTableDefineLockOrder() {
+ SideTables().defineLockOrder();
+}
+
+void SideTableLocksPrecedeLock(const void *newlock) {
+ SideTables().precedeLock(newlock);
+}
+
+void SideTableLocksSucceedLock(const void *oldlock) {
+ SideTables().succeedLock(oldlock);
+}
//
// The -fobjc-arc flag causes the compiler to issue calls to objc_{retain/release/autorelease/retain_block}
// If CrashIfDeallocating is true, the process is halted if newObj is
// deallocating or newObj's class does not support weak references.
// If CrashIfDeallocating is false, nil is stored instead.
-template <bool HaveOld, bool HaveNew, bool CrashIfDeallocating>
+enum CrashIfDeallocating {
+ DontCrashIfDeallocating = false, DoCrashIfDeallocating = true
+};
+template <HaveOld haveOld, HaveNew haveNew,
+ CrashIfDeallocating crashIfDeallocating>
static id
storeWeak(id *location, objc_object *newObj)
{
- assert(HaveOld || HaveNew);
- if (!HaveNew) assert(newObj == nil);
+ assert(haveOld || haveNew);
+ if (!haveNew) assert(newObj == nil);
Class previouslyInitializedClass = nil;
id oldObj;
// Order by lock address to prevent lock ordering problems.
// Retry if the old value changes underneath us.
retry:
- if (HaveOld) {
+ if (haveOld) {
oldObj = *location;
oldTable = &SideTables()[oldObj];
} else {
oldTable = nil;
}
- if (HaveNew) {
+ if (haveNew) {
newTable = &SideTables()[newObj];
} else {
newTable = nil;
}
- SideTable::lockTwo<HaveOld, HaveNew>(oldTable, newTable);
+ SideTable::lockTwo<haveOld, haveNew>(oldTable, newTable);
- if (HaveOld && *location != oldObj) {
- SideTable::unlockTwo<HaveOld, HaveNew>(oldTable, newTable);
+ if (haveOld && *location != oldObj) {
+ SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
goto retry;
}
// Prevent a deadlock between the weak reference machinery
// and the +initialize machinery by ensuring that no
// weakly-referenced object has an un-+initialized isa.
- if (HaveNew && newObj) {
+ if (haveNew && newObj) {
Class cls = newObj->getIsa();
if (cls != previouslyInitializedClass &&
!((objc_class *)cls)->isInitialized())
{
- SideTable::unlockTwo<HaveOld, HaveNew>(oldTable, newTable);
+ SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
_class_initialize(_class_getNonMetaClass(cls, (id)newObj));
// If this class is finished with +initialize then we're good.
}
// Clean up old value, if any.
- if (HaveOld) {
+ if (haveOld) {
weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
}
// Assign new value, if any.
- if (HaveNew) {
- newObj = (objc_object *)weak_register_no_lock(&newTable->weak_table,
- (id)newObj, location,
- CrashIfDeallocating);
+ if (haveNew) {
+ newObj = (objc_object *)
+ weak_register_no_lock(&newTable->weak_table, (id)newObj, location,
+ crashIfDeallocating);
// weak_register_no_lock returns nil if weak store should be rejected
// Set is-weakly-referenced bit in refcount table.
// No new value. The storage is not changed.
}
- SideTable::unlockTwo<HaveOld, HaveNew>(oldTable, newTable);
+ SideTable::unlockTwo<haveOld, haveNew>(oldTable, newTable);
return (id)newObj;
}
id
objc_storeWeak(id *location, id newObj)
{
- return storeWeak<true/*old*/, true/*new*/, true/*crash*/>
+ return storeWeak<DoHaveOld, DoHaveNew, DoCrashIfDeallocating>
(location, (objc_object *)newObj);
}
id
objc_storeWeakOrNil(id *location, id newObj)
{
- return storeWeak<true/*old*/, true/*new*/, false/*crash*/>
+ return storeWeak<DoHaveOld, DoHaveNew, DontCrashIfDeallocating>
(location, (objc_object *)newObj);
}
return nil;
}
- return storeWeak<false/*old*/, true/*new*/, true/*crash*/>
+ return storeWeak<DontHaveOld, DoHaveNew, DoCrashIfDeallocating>
(location, (objc_object*)newObj);
}
return nil;
}
- return storeWeak<false/*old*/, true/*new*/, false/*crash*/>
+ return storeWeak<DontHaveOld, DoHaveNew, DontCrashIfDeallocating>
(location, (objc_object*)newObj);
}
void
objc_destroyWeak(id *location)
{
- (void)storeWeak<true/*old*/, false/*new*/, false/*crash*/>
+ (void)storeWeak<DoHaveOld, DontHaveNew, DontCrashIfDeallocating>
(location, nil);
}
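
For orientation, here is a sketch of how these entry points pair up over the lifetime of a single __weak slot. It is illustrative only and not part of this patch; ARC normally emits the calls itself, and the wrapper function and variable names below are hypothetical.

    // Illustrative only (not added by this patch).
    extern "C" id objc_storeWeak(id *location, id newObj);
    extern "C" void objc_destroyWeak(id *location);

    static void weakSlotExample(id obj)
    {
        id weakSlot = nil;               // stands in for a __weak variable's storage
        objc_storeWeak(&weakSlot, obj);  // storeWeak<DoHaveOld, DoHaveNew, DoCrashIfDeallocating>
        // ... reads go through objc_loadWeakRetained(&weakSlot) ...
        objc_destroyWeak(&weakSlot);     // storeWeak<DoHaveOld, DontHaveNew, DontCrashIfDeallocating>
    }
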
static char *z = NULL;
static size_t zSize = 0;
-static mutex_t uniquerLock;
+mutex_t NXUniqueStringLock;
static const char *CopyIntoReadOnly (const char *str) {
size_t len = strlen (str) + 1;
return result;
}
- mutex_locker_t lock(uniquerLock);
+ mutex_locker_t lock(NXUniqueStringLock);
if (zSize < len) {
zSize = CHUNK_SIZE *((len + CHUNK_SIZE - 1) / CHUNK_SIZE);
/* not enough room, we try to allocate. If no room left, too bad */
- (id)mutableCopyWithZone:(void *)zone;
@end
-// These locks must not be at function scope.
-static StripedMap<spinlock_t> PropertyLocks;
-static StripedMap<spinlock_t> StructLocks;
-static StripedMap<spinlock_t> CppObjectLocks;
+StripedMap<spinlock_t> PropertyLocks;
+StripedMap<spinlock_t> StructLocks;
+StripedMap<spinlock_t> CppObjectLocks;
#define MUTABLE_COPY 2
}
-static spinlock_t impLock;
+spinlock_t impLock;
IMP method_setImplementation(Method m_gen, IMP imp)
{
/***********************************************************************
* instrumentObjcMessageSends
**********************************************************************/
+// Define this everywhere even if it isn't used, to simplify fork() safety code.
+spinlock_t objcMsgLogLock;
+
#if !SUPPORT_MESSAGE_LOGGING
void instrumentObjcMessageSends(BOOL flag)
bool objcMsgLogEnabled = false;
static int objcMsgLogFD = -1;
-static spinlock_t objcMsgLogLock;
bool logMessageSend(bool isClassMethod,
const char *objectsClass,
}
// Add "message" to any forthcoming crash log.
-static mutex_t crashlog_lock;
+mutex_t crashlog_lock;
static void _objc_crashlog(const char *message)
{
char *newmsg;
struct alt_handler_list *next_DEBUGONLY;
};
-static mutex_t DebugLock;
static struct alt_handler_list *DebugLists;
static uintptr_t DebugCounter;
if (DebugAltHandlers) {
// Save this list so the debug code can find it from other threads
- mutex_locker_t lock(DebugLock);
+ mutex_locker_t lock(AltHandlerDebugLock);
list->next_DEBUGONLY = DebugLists;
DebugLists = list;
}
if (list) {
if (DebugAltHandlers) {
// Detach from the list-of-lists.
- mutex_locker_t lock(DebugLock);
+ mutex_locker_t lock(AltHandlerDebugLock);
struct alt_handler_list **listp = &DebugLists;
while (*listp && *listp != list) listp = &(*listp)->next_DEBUGONLY;
if (*listp) *listp = (*listp)->next_DEBUGONLY;
if (DebugAltHandlers) {
// Record backtrace in case this handler is misused later.
- mutex_locker_t lock(DebugLock);
+ mutex_locker_t lock(AltHandlerDebugLock);
token = DebugCounter++;
if (token == 0) token = DebugCounter++;
"or break in objc_alt_handler_error() to debug.");
if (DebugAltHandlers) {
- DebugLock.lock();
+ AltHandlerDebugLock.lock();
// Search other threads' alt handler lists for this handler.
struct alt_handler_list *list;
}
}
done:
- DebugLock.unlock();
+ AltHandlerDebugLock.unlock();
}
// __OBJC2__
#endif
+
+// Define this everywhere even if it isn't used, to simplify fork() safety code
+mutex_t AltHandlerDebugLock;
/* classInitLock protects CLS_INITIALIZED and CLS_INITIALIZING, and
* is signalled when any class is done initializing.
* Threads that are waiting for a class to finish initializing wait on this. */
-static monitor_t classInitLock;
+monitor_t classInitLock;
/***********************************************************************
OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0);
#endif
+// fork() safety called by libSystem
+OBJC_EXPORT void _objc_atfork_prepare(void)
+ OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0);
+OBJC_EXPORT void _objc_atfork_parent(void)
+ OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0);
+OBJC_EXPORT void _objc_atfork_child(void)
+ OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0);
+
// Return YES if GC is on and `object` is a GC allocation.
OBJC_EXPORT BOOL objc_isAuto(id object)
__OSX_DEPRECATED(10.4, 10.8, "it always returns NO")
* @APPLE_LICENSE_HEADER_END@
*/
+#if DEBUG
+extern void lockdebug_assert_all_locks_locked();
+extern void lockdebug_assert_no_locks_locked();
+extern void lockdebug_setInForkPrepare(bool);
+extern void lockdebug_lock_precedes_lock(const void *oldlock, const void *newlock);
+#else
+static inline void lockdebug_assert_all_locks_locked() { }
+static inline void lockdebug_assert_no_locks_locked() { }
+static inline void lockdebug_setInForkPrepare(bool) { }
+static inline void lockdebug_lock_precedes_lock(const void *, const void *) { }
+#endif
+
+extern void lockdebug_remember_mutex(mutex_tt<true> *lock);
extern void lockdebug_mutex_lock(mutex_tt<true> *lock);
extern void lockdebug_mutex_try_lock(mutex_tt<true> *lock);
extern void lockdebug_mutex_unlock(mutex_tt<true> *lock);
extern void lockdebug_mutex_assert_locked(mutex_tt<true> *lock);
extern void lockdebug_mutex_assert_unlocked(mutex_tt<true> *lock);
+static inline void lockdebug_remember_mutex(mutex_tt<false> *lock) { }
static inline void lockdebug_mutex_lock(mutex_tt<false> *lock) { }
static inline void lockdebug_mutex_try_lock(mutex_tt<false> *lock) { }
static inline void lockdebug_mutex_unlock(mutex_tt<false> *lock) { }
static inline void lockdebug_mutex_assert_unlocked(mutex_tt<false> *lock) { }
+extern void lockdebug_remember_monitor(monitor_tt<true> *lock);
extern void lockdebug_monitor_enter(monitor_tt<true> *lock);
extern void lockdebug_monitor_leave(monitor_tt<true> *lock);
extern void lockdebug_monitor_wait(monitor_tt<true> *lock);
extern void lockdebug_monitor_assert_locked(monitor_tt<true> *lock);
extern void lockdebug_monitor_assert_unlocked(monitor_tt<true> *lock);
+static inline void lockdebug_remember_monitor(monitor_tt<false> *lock) { }
static inline void lockdebug_monitor_enter(monitor_tt<false> *lock) { }
static inline void lockdebug_monitor_leave(monitor_tt<false> *lock) { }
static inline void lockdebug_monitor_wait(monitor_tt<false> *lock) { }
static inline void lockdebug_monitor_assert_unlocked(monitor_tt<false> *lock) {}
+extern void
+lockdebug_remember_recursive_mutex(recursive_mutex_tt<true> *lock);
extern void
lockdebug_recursive_mutex_lock(recursive_mutex_tt<true> *lock);
extern void
extern void
lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_tt<true> *lock);
+static inline void
+lockdebug_remember_recursive_mutex(recursive_mutex_tt<false> *lock) { }
static inline void
lockdebug_recursive_mutex_lock(recursive_mutex_tt<false> *lock) { }
static inline void
lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_tt<false> *lock) { }
+extern void lockdebug_remember_rwlock(rwlock_tt<true> *lock);
extern void lockdebug_rwlock_read(rwlock_tt<true> *lock);
extern void lockdebug_rwlock_try_read_success(rwlock_tt<true> *lock);
extern void lockdebug_rwlock_unlock_read(rwlock_tt<true> *lock);
extern void lockdebug_rwlock_assert_locked(rwlock_tt<true> *lock);
extern void lockdebug_rwlock_assert_unlocked(rwlock_tt<true> *lock);
+static inline void lockdebug_remember_rwlock(rwlock_tt<false> *) { }
static inline void lockdebug_rwlock_read(rwlock_tt<false> *) { }
static inline void lockdebug_rwlock_try_read_success(rwlock_tt<false> *) { }
static inline void lockdebug_rwlock_unlock_read(rwlock_tt<false> *) { }
#if DEBUG && !TARGET_OS_WIN32
+#include <unordered_map>
+
+
+/***********************************************************************
+* Thread-local bool set during _objc_atfork_prepare().
+* That function is allowed to break some lock ordering rules.
+**********************************************************************/
+
+static tls_key_t fork_prepare_tls;
+
+void
+lockdebug_setInForkPrepare(bool inForkPrepare)
+{
+ INIT_ONCE_PTR(fork_prepare_tls, tls_create(nil), (void)0);
+ tls_set(fork_prepare_tls, (void*)inForkPrepare);
+}
+
+static bool
+inForkPrepare()
+{
+ INIT_ONCE_PTR(fork_prepare_tls, tls_create(nil), (void)0);
+ return (bool)tls_get(fork_prepare_tls);
+}
+
+
+
+/***********************************************************************
+* Lock order graph.
+* "lock X precedes lock Y" means that X must be acquired first.
+* This property is transitive.
+**********************************************************************/
+
+struct lockorder {
+ const void *l;
+ std::vector<const lockorder *> predecessors;
+};
+
+static std::unordered_map<const void*, lockorder> lockOrderList;
+
+static bool
+lockPrecedesLock(const lockorder& oldlock, const lockorder& newlock)
+{
+ for (const auto *pre : newlock.predecessors) {
+ if (&oldlock == pre) return true;
+ if (lockPrecedesLock(oldlock, *pre)) return true;
+ }
+ return false;
+}
+
+static bool
+lockPrecedesLock(const void *oldlock, const void *newlock)
+{
+ auto oldorder = lockOrderList.find(oldlock);
+ auto neworder = lockOrderList.find(newlock);
+ if (neworder == lockOrderList.end() || oldorder == lockOrderList.end()) {
+ return false;
+ }
+ return lockPrecedesLock(oldorder->second, neworder->second);
+}
+
+static bool
+lockUnorderedWithLock(const void *oldlock, const void *newlock)
+{
+ auto oldorder = lockOrderList.find(oldlock);
+ auto neworder = lockOrderList.find(newlock);
+ if (neworder == lockOrderList.end() || oldorder == lockOrderList.end()) {
+ return true;
+ }
+
+ if (lockPrecedesLock(oldorder->second, neworder->second) ||
+ lockPrecedesLock(neworder->second, oldorder->second))
+ {
+ return false;
+ }
+
+ return true;
+}
+
+void lockdebug_lock_precedes_lock(const void *oldlock, const void *newlock)
+{
+ if (lockPrecedesLock(newlock, oldlock)) {
+ _objc_fatal("contradiction in lock order declaration");
+ }
+
+ auto oldorder = lockOrderList.find(oldlock);
+ auto neworder = lockOrderList.find(newlock);
+ if (oldorder == lockOrderList.end()) {
+ lockOrderList[oldlock] = lockorder{oldlock, {}};
+ oldorder = lockOrderList.find(oldlock);
+ }
+ if (neworder == lockOrderList.end()) {
+ lockOrderList[newlock] = lockorder{newlock, {}};
+ neworder = lockOrderList.find(newlock);
+ }
+
+ neworder->second.predecessors.push_back(&oldorder->second);
+}
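
As a quick illustration of the transitivity (hypothetical lock addresses, not part of the patch): declaring A-before-B and B-before-C makes lockPrecedesLock(A, C) succeed through the recursive predecessor walk, and a later attempt to declare C-before-A is rejected as a contradiction.

    // Hypothetical sketch; A, B, C stand in for lock addresses.
    static int A, B, C;

    static void lockOrderExample()
    {
        lockdebug_lock_precedes_lock(&A, &B);   // A precedes B
        lockdebug_lock_precedes_lock(&B, &C);   // B precedes C
        // lockPrecedesLock(&A, &C) is now true via A -> B -> C.
        // lockdebug_lock_precedes_lock(&C, &A) would be a fatal contradiction.
    }
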
+
+
/***********************************************************************
* Recording - per-thread list of mutexes and monitors held
**********************************************************************/
-typedef struct {
- void *l; // the lock itself
- int k; // the kind of lock it is (MUTEX, MONITOR, etc)
- int i; // the lock's nest count
-} lockcount;
+enum class lockkind {
+ MUTEX = 1, MONITOR = 2, RDLOCK = 3, WRLOCK = 4, RECURSIVE = 5
+};
+
+#define MUTEX lockkind::MUTEX
+#define MONITOR lockkind::MONITOR
+#define RDLOCK lockkind::RDLOCK
+#define WRLOCK lockkind::WRLOCK
+#define RECURSIVE lockkind::RECURSIVE
+
+struct lockcount {
+ lockkind k; // the kind of lock it is (MUTEX, MONITOR, etc)
+ int i; // the lock's nest count
+};
-#define MUTEX 1
-#define MONITOR 2
-#define RDLOCK 3
-#define WRLOCK 4
-#define RECURSIVE 5
+using objc_lock_list = std::unordered_map<const void *, lockcount>;
-typedef struct _objc_lock_list {
- int allocated;
- int used;
- lockcount list[0];
-} _objc_lock_list;
+// Thread-local list of locks owned by a thread.
+// Used by lock ownership checks.
static tls_key_t lock_tls;
+// Global list of all locks.
+// Used by fork() safety check.
+// This can't be a static struct because of C++ initialization order problems.
+static objc_lock_list& AllLocks() {
+ static objc_lock_list *locks;
+ INIT_ONCE_PTR(locks, new objc_lock_list, (void)0);
+ return *locks;
+}
+
+
static void
destroyLocks(void *value)
{
- _objc_lock_list *locks = (_objc_lock_list *)value;
+ auto locks = (objc_lock_list *)value;
// fixme complain about any still-held locks?
- if (locks) free(locks);
+ if (locks) delete locks;
}
-static struct _objc_lock_list *
-getLocks(BOOL create)
+static objc_lock_list&
+ownedLocks()
{
- _objc_lock_list *locks;
-
// Use a dedicated tls key to prevent differences vs non-debug in
// usage of objc's other tls keys (required for some unit tests).
INIT_ONCE_PTR(lock_tls, tls_create(&destroyLocks), (void)0);
- locks = (_objc_lock_list *)tls_get(lock_tls);
+ auto locks = (objc_lock_list *)tls_get(lock_tls);
if (!locks) {
- if (!create) {
- return NULL;
- } else {
- locks = (_objc_lock_list *)calloc(1, sizeof(_objc_lock_list) + sizeof(lockcount) * 16);
- locks->allocated = 16;
- locks->used = 0;
- tls_set(lock_tls, locks);
- }
- }
-
- if (locks->allocated == locks->used) {
- if (!create) {
- return locks;
- } else {
- _objc_lock_list *oldlocks = locks;
- locks = (_objc_lock_list *)calloc(1, sizeof(_objc_lock_list) + 2 * oldlocks->used * sizeof(lockcount));
- locks->used = oldlocks->used;
- locks->allocated = oldlocks->used * 2;
- memcpy(locks->list, oldlocks->list, locks->used * sizeof(lockcount));
- tls_set(lock_tls, locks);
- free(oldlocks);
- }
+ locks = new objc_lock_list;
+ tls_set(lock_tls, locks);
}
- return locks;
+ return *locks;
}
-static BOOL
-hasLock(_objc_lock_list *locks, void *lock, int kind)
+static bool
+hasLock(objc_lock_list& locks, const void *lock, lockkind kind)
{
- int i;
- if (!locks) return NO;
-
- for (i = 0; i < locks->used; i++) {
- if (locks->list[i].l == lock && locks->list[i].k == kind) return YES;
- }
- return NO;
+ auto iter = locks.find(lock);
+ if (iter != locks.end() && iter->second.k == kind) return true;
+ return false;
}
+static const char *sym(const void *lock)
+{
+ Dl_info info;
+ int ok = dladdr(lock, &info);
+ if (ok && info.dli_sname && info.dli_sname[0]) return info.dli_sname;
+ else return "??";
+}
+
static void
-setLock(_objc_lock_list *locks, void *lock, int kind)
+setLock(objc_lock_list& locks, const void *lock, lockkind kind)
{
- int i;
- for (i = 0; i < locks->used; i++) {
- if (locks->list[i].l == lock && locks->list[i].k == kind) {
- locks->list[i].i++;
- return;
+ // Check if we already own this lock.
+ auto iter = locks.find(lock);
+ if (iter != locks.end() && iter->second.k == kind) {
+ iter->second.i++;
+ return;
+ }
+
+ // Newly-acquired lock. Verify lock ordering.
+ // Locks not in AllLocks are exempt (e.g. @synchronized locks)
+ if (&locks != &AllLocks() && AllLocks().find(lock) != AllLocks().end()) {
+ for (auto& oldlock : locks) {
+ if (lockPrecedesLock(lock, oldlock.first)) {
+ _objc_fatal("lock %p (%s) incorrectly acquired before %p (%s)",
+ oldlock.first, sym(oldlock.first), lock, sym(lock));
+ }
+ if (!inForkPrepare() &&
+ lockUnorderedWithLock(lock, oldlock.first))
+ {
+ // _objc_atfork_prepare is allowed to acquire
+ // otherwise-unordered locks, but nothing else may.
+ _objc_fatal("lock %p (%s) acquired before %p (%s) "
+ "with no defined lock order",
+ oldlock.first, sym(oldlock.first), lock, sym(lock));
+ }
}
}
- locks->list[locks->used].l = lock;
- locks->list[locks->used].i = 1;
- locks->list[locks->used].k = kind;
- locks->used++;
+ locks[lock] = lockcount{kind, 1};
}
static void
-clearLock(_objc_lock_list *locks, void *lock, int kind)
-{
- int i;
- for (i = 0; i < locks->used; i++) {
- if (locks->list[i].l == lock && locks->list[i].k == kind) {
- if (--locks->list[i].i == 0) {
- locks->list[i].l = NULL;
- locks->list[i] = locks->list[--locks->used];
+clearLock(objc_lock_list& locks, const void *lock, lockkind kind)
+{
+ auto iter = locks.find(lock);
+ if (iter != locks.end()) {
+ auto& l = iter->second;
+ if (l.k == kind) {
+ if (--l.i == 0) {
+ locks.erase(iter);
}
return;
}
/***********************************************************************
-* Mutex checking
+* fork() safety checking
**********************************************************************/
-#if !TARGET_OS_SIMULATOR
-// Non-simulator platforms have lock debugging built into os_unfair_lock.
-
+void
+lockdebug_remember_mutex(mutex_t *lock)
+{
+ setLock(AllLocks(), lock, MUTEX);
+}
void
-lockdebug_mutex_lock(mutex_t *lock)
+lockdebug_remember_recursive_mutex(recursive_mutex_t *lock)
{
- // empty
+ setLock(AllLocks(), lock, RECURSIVE);
}
void
-lockdebug_mutex_unlock(mutex_t *lock)
+lockdebug_remember_monitor(monitor_t *lock)
{
- // empty
+ setLock(AllLocks(), lock, MONITOR);
}
void
-lockdebug_mutex_assert_locked(mutex_t *lock)
+lockdebug_remember_rwlock(rwlock_t *lock)
{
- os_unfair_lock_assert_owner((os_unfair_lock *)lock);
+ setLock(AllLocks(), lock, WRLOCK);
}
void
-lockdebug_mutex_assert_unlocked(mutex_t *lock)
+lockdebug_assert_all_locks_locked()
{
- os_unfair_lock_assert_not_owner((os_unfair_lock *)lock);
+ auto& owned = ownedLocks();
+
+ for (const auto& l : AllLocks()) {
+ if (!hasLock(owned, l.first, l.second.k)) {
+ _objc_fatal("lock %p:%d is incorrectly not owned",
+ l.first, l.second.k);
+ }
+ }
}
+void
+lockdebug_assert_no_locks_locked()
+{
+ auto& owned = ownedLocks();
-// !TARGET_OS_SIMULATOR
-#else
-// TARGET_OS_SIMULATOR
+ for (const auto& l : AllLocks()) {
+ if (hasLock(owned, l.first, l.second.k)) {
+ _objc_fatal("lock %p:%d is incorrectly owned", l.first, l.second.k);
+ }
+ }
+}
-// Simulator platforms have no built-in lock debugging in os_unfair_lock.
+/***********************************************************************
+* Mutex checking
+**********************************************************************/
void
lockdebug_mutex_lock(mutex_t *lock)
{
- _objc_lock_list *locks = getLocks(YES);
+ auto& locks = ownedLocks();
if (hasLock(locks, lock, MUTEX)) {
_objc_fatal("deadlock: relocking mutex");
void
lockdebug_mutex_try_lock_success(mutex_t *lock)
{
- _objc_lock_list *locks = getLocks(YES);
+ auto& locks = ownedLocks();
setLock(locks, lock, MUTEX);
}
void
lockdebug_mutex_unlock(mutex_t *lock)
{
- _objc_lock_list *locks = getLocks(NO);
+ auto& locks = ownedLocks();
if (!hasLock(locks, lock, MUTEX)) {
_objc_fatal("unlocking unowned mutex");
void
lockdebug_mutex_assert_locked(mutex_t *lock)
{
- _objc_lock_list *locks = getLocks(NO);
+ auto& locks = ownedLocks();
if (!hasLock(locks, lock, MUTEX)) {
_objc_fatal("mutex incorrectly not locked");
void
lockdebug_mutex_assert_unlocked(mutex_t *lock)
{
- _objc_lock_list *locks = getLocks(NO);
+ auto& locks = ownedLocks();
if (hasLock(locks, lock, MUTEX)) {
_objc_fatal("mutex incorrectly locked");
}
-// TARGET_OS_SIMULATOR
-#endif
-
/***********************************************************************
* Recursive mutex checking
**********************************************************************/
void
-lockdebug_recursive_mutex_lock(recursive_mutex_tt<true> *lock)
+lockdebug_recursive_mutex_lock(recursive_mutex_t *lock)
{
- _objc_lock_list *locks = getLocks(YES);
+ auto& locks = ownedLocks();
setLock(locks, lock, RECURSIVE);
}
void
-lockdebug_recursive_mutex_unlock(recursive_mutex_tt<true> *lock)
+lockdebug_recursive_mutex_unlock(recursive_mutex_t *lock)
{
- _objc_lock_list *locks = getLocks(NO);
+ auto& locks = ownedLocks();
if (!hasLock(locks, lock, RECURSIVE)) {
_objc_fatal("unlocking unowned recursive mutex");
void
-lockdebug_recursive_mutex_assert_locked(recursive_mutex_tt<true> *lock)
+lockdebug_recursive_mutex_assert_locked(recursive_mutex_t *lock)
{
- _objc_lock_list *locks = getLocks(NO);
+ auto& locks = ownedLocks();
if (!hasLock(locks, lock, RECURSIVE)) {
_objc_fatal("recursive mutex incorrectly not locked");
}
void
-lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_tt<true> *lock)
+lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_t *lock)
{
- _objc_lock_list *locks = getLocks(NO);
+ auto& locks = ownedLocks();
if (hasLock(locks, lock, RECURSIVE)) {
_objc_fatal("recursive mutex incorrectly locked");
void
lockdebug_monitor_enter(monitor_t *lock)
{
- _objc_lock_list *locks = getLocks(YES);
+ auto& locks = ownedLocks();
if (hasLock(locks, lock, MONITOR)) {
_objc_fatal("deadlock: relocking monitor");
void
lockdebug_monitor_leave(monitor_t *lock)
{
- _objc_lock_list *locks = getLocks(NO);
+ auto& locks = ownedLocks();
if (!hasLock(locks, lock, MONITOR)) {
_objc_fatal("unlocking unowned monitor");
void
lockdebug_monitor_wait(monitor_t *lock)
{
- _objc_lock_list *locks = getLocks(NO);
+ auto& locks = ownedLocks();
if (!hasLock(locks, lock, MONITOR)) {
_objc_fatal("waiting in unowned monitor");
void
lockdebug_monitor_assert_locked(monitor_t *lock)
{
- _objc_lock_list *locks = getLocks(NO);
+ auto& locks = ownedLocks();
if (!hasLock(locks, lock, MONITOR)) {
_objc_fatal("monitor incorrectly not locked");
void
lockdebug_monitor_assert_unlocked(monitor_t *lock)
{
- _objc_lock_list *locks = getLocks(NO);
+ auto& locks = ownedLocks();
if (hasLock(locks, lock, MONITOR)) {
_objc_fatal("monitor incorrectly held");
**********************************************************************/
void
-lockdebug_rwlock_read(rwlock_tt<true> *lock)
+lockdebug_rwlock_read(rwlock_t *lock)
{
- _objc_lock_list *locks = getLocks(YES);
+ auto& locks = ownedLocks();
if (hasLock(locks, lock, RDLOCK)) {
// Recursive rwlock read is bad (may deadlock vs pending writer)
// try-read when already writing is OK (will fail)
// try-read failure does nothing.
void
-lockdebug_rwlock_try_read_success(rwlock_tt<true> *lock)
+lockdebug_rwlock_try_read_success(rwlock_t *lock)
{
- _objc_lock_list *locks = getLocks(YES);
+ auto& locks = ownedLocks();
setLock(locks, lock, RDLOCK);
}
void
-lockdebug_rwlock_unlock_read(rwlock_tt<true> *lock)
+lockdebug_rwlock_unlock_read(rwlock_t *lock)
{
- _objc_lock_list *locks = getLocks(NO);
+ auto& locks = ownedLocks();
if (!hasLock(locks, lock, RDLOCK)) {
_objc_fatal("un-reading unowned rwlock");
void
-lockdebug_rwlock_write(rwlock_tt<true> *lock)
+lockdebug_rwlock_write(rwlock_t *lock)
{
- _objc_lock_list *locks = getLocks(YES);
+ auto& locks = ownedLocks();
if (hasLock(locks, lock, RDLOCK)) {
// Lock promotion not allowed (may deadlock)
// try-write when already writing is OK (will fail)
// try-write failure does nothing.
void
-lockdebug_rwlock_try_write_success(rwlock_tt<true> *lock)
+lockdebug_rwlock_try_write_success(rwlock_t *lock)
{
- _objc_lock_list *locks = getLocks(YES);
+ auto& locks = ownedLocks();
setLock(locks, lock, WRLOCK);
}
void
-lockdebug_rwlock_unlock_write(rwlock_tt<true> *lock)
+lockdebug_rwlock_unlock_write(rwlock_t *lock)
{
- _objc_lock_list *locks = getLocks(NO);
+ auto& locks = ownedLocks();
if (!hasLock(locks, lock, WRLOCK)) {
_objc_fatal("un-writing unowned rwlock");
void
-lockdebug_rwlock_assert_reading(rwlock_tt<true> *lock)
+lockdebug_rwlock_assert_reading(rwlock_t *lock)
{
- _objc_lock_list *locks = getLocks(NO);
+ auto& locks = ownedLocks();
if (!hasLock(locks, lock, RDLOCK)) {
_objc_fatal("rwlock incorrectly not reading");
}
void
-lockdebug_rwlock_assert_writing(rwlock_tt<true> *lock)
+lockdebug_rwlock_assert_writing(rwlock_t *lock)
{
- _objc_lock_list *locks = getLocks(NO);
+ auto& locks = ownedLocks();
if (!hasLock(locks, lock, WRLOCK)) {
_objc_fatal("rwlock incorrectly not writing");
}
void
-lockdebug_rwlock_assert_locked(rwlock_tt<true> *lock)
+lockdebug_rwlock_assert_locked(rwlock_t *lock)
{
- _objc_lock_list *locks = getLocks(NO);
+ auto& locks = ownedLocks();
if (!hasLock(locks, lock, RDLOCK) && !hasLock(locks, lock, WRLOCK)) {
_objc_fatal("rwlock incorrectly neither reading nor writing");
}
void
-lockdebug_rwlock_assert_unlocked(rwlock_tt<true> *lock)
+lockdebug_rwlock_assert_unlocked(rwlock_t *lock)
{
- _objc_lock_list *locks = getLocks(NO);
+ auto& locks = ownedLocks();
if (hasLock(locks, lock, RDLOCK) || hasLock(locks, lock, WRLOCK)) {
_objc_fatal("rwlock incorrectly not unlocked");
--- /dev/null
+/*
+ * Copyright (c) 2017 Apple Inc. All Rights Reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+/***********************************************************************
+* objc-locks-new.h
+* Declarations of all locks used in the runtime.
+**********************************************************************/
+
+#ifndef _OBJC_LOCKS_NEW_H
+#define _OBJC_LOCKS_NEW_H
+
+// fork() safety requires careful tracking of all locks used in the runtime.
+// Thou shalt not declare any locks outside this file.
+
+extern rwlock_t runtimeLock;
+extern mutex_t DemangleCacheLock;
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2017 Apple Inc. All Rights Reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+/***********************************************************************
+* objc-locks-old.h
+* Declarations of all locks used in the runtime.
+**********************************************************************/
+
+#ifndef _OBJC_LOCKS_OLD_H
+#define _OBJC_LOCKS_OLD_H
+
+// fork() safety requires careful tracking of all locks used in the runtime.
+// Thou shalt not declare any locks outside this file.
+
+extern mutex_t classLock;
+extern mutex_t methodListLock;
+extern mutex_t NXUniqueStringLock;
+extern spinlock_t impLock;
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2017 Apple Inc. All Rights Reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+/***********************************************************************
+* objc-locks.h
+* Declarations of all locks used in the runtime.
+**********************************************************************/
+
+#ifndef _OBJC_LOCKS_H
+#define _OBJC_LOCKS_H
+
+// fork() safety requires careful tracking of all locks used in the runtime.
+// Thou shalt not declare any locks outside this file.
+
+// Lock ordering is declared in defineLockOrder(), alongside _objc_atfork_prepare(),
+// and is enforced by lockdebug.
+
+extern monitor_t classInitLock;
+extern rwlock_t selLock;
+extern mutex_t cacheUpdateLock;
+extern recursive_mutex_t loadMethodLock;
+extern mutex_t crashlog_lock;
+extern spinlock_t objcMsgLogLock;
+extern mutex_t AltHandlerDebugLock;
+extern mutex_t AssociationsManagerLock;
+extern StripedMap<spinlock_t> PropertyLocks;
+extern StripedMap<spinlock_t> StructLocks;
+extern StripedMap<spinlock_t> CppObjectLocks;
+
+// The SideTable locks are buried awkwardly. Call functions to manipulate them.
+extern void SideTableLockAll();
+extern void SideTableUnlockAll();
+extern void SideTableForceResetAll();
+extern void SideTableDefineLockOrder();
+extern void SideTableLocksPrecedeLock(const void *newlock);
+extern void SideTableLocksSucceedLock(const void *oldlock);
+
+#if __OBJC2__
+#include "objc-locks-new.h"
+#else
+#include "objc-locks-old.h"
+#endif
+
+#endif
return nil;
}
+unsigned int getPreoptimizedClassUnreasonableCount()
+{
+ return 0;
+}
+
Class getPreoptimizedClass(const char *name)
{
return nil;
}
+unsigned int getPreoptimizedClassUnreasonableCount()
+{
+ objc_clsopt_t *classes = opt ? opt->clsopt() : nil;
+ if (!classes) return 0;
+
+ // This is an overestimate: each set of duplicates
+ // gets double-counted in `capacity` as well.
+ return classes->capacity + classes->duplicateCount();
+}
+
+
Class getPreoptimizedClass(const char *name)
{
objc_clsopt_t *classes = opt ? opt->clsopt() : nil;
using rwlock_t = rwlock_tt<DEBUG>;
using recursive_mutex_t = recursive_mutex_tt<DEBUG>;
+// Use fork_unsafe_lock to get a lock that isn't
+// acquired and released around fork().
+// All fork-safe locks are checked in debug builds.
+struct fork_unsafe_lock_t { };
+extern const fork_unsafe_lock_t fork_unsafe_lock;
+
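
For example (the same pattern this patch applies to @synchronized's SyncList further down), passing fork_unsafe_lock selects a constructor that skips lockdebug registration, so the lock never enters AllLocks() and _objc_atfork_prepare() leaves it alone. The declaration below is illustrative, not one added by this patch.

    // Hypothetical declaration: a lock deliberately ignored by fork() handling.
    static spinlock_t someForkUnsafeLock(fork_unsafe_lock);
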
#include "objc-lockdebug.h"
template <bool Debug>
class mutex_tt : nocopy_t {
os_unfair_lock mLock;
public:
- mutex_tt() : mLock(OS_UNFAIR_LOCK_INIT) { }
+ mutex_tt() : mLock(OS_UNFAIR_LOCK_INIT) {
+ lockdebug_remember_mutex(this);
+ }
+
+ mutex_tt(const fork_unsafe_lock_t unsafe) : mLock(OS_UNFAIR_LOCK_INIT) { }
void lock() {
lockdebug_mutex_lock(this);
os_unfair_lock_unlock_inline(&mLock);
}
+ void forceReset() {
+ lockdebug_mutex_unlock(this);
+
+ bzero(&mLock, sizeof(mLock));
+ mLock = os_unfair_lock OS_UNFAIR_LOCK_INIT;
+ }
+
void assertLocked() {
lockdebug_mutex_assert_locked(this);
}
// Address-ordered lock discipline for a pair of locks.
static void lockTwo(mutex_tt *lock1, mutex_tt *lock2) {
- if (lock1 > lock2) {
+ if (lock1 < lock2) {
lock1->lock();
lock2->lock();
} else {
pthread_mutex_t mLock;
public:
- recursive_mutex_tt() : mLock(PTHREAD_RECURSIVE_MUTEX_INITIALIZER) { }
+ recursive_mutex_tt() : mLock(PTHREAD_RECURSIVE_MUTEX_INITIALIZER) {
+ lockdebug_remember_recursive_mutex(this);
+ }
+
+ recursive_mutex_tt(const fork_unsafe_lock_t unsafe)
+ : mLock(PTHREAD_RECURSIVE_MUTEX_INITIALIZER)
+ { }
void lock()
{
if (err) _objc_fatal("pthread_mutex_unlock failed (%d)", err);
}
+ void forceReset()
+ {
+ lockdebug_recursive_mutex_unlock(this);
+
+ bzero(&mLock, sizeof(mLock));
+ mLock = pthread_mutex_t PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
+ }
+
bool tryUnlock()
{
int err = pthread_mutex_unlock(&mLock);
public:
monitor_tt()
- : mutex(PTHREAD_MUTEX_INITIALIZER), cond(PTHREAD_COND_INITIALIZER) { }
+ : mutex(PTHREAD_MUTEX_INITIALIZER), cond(PTHREAD_COND_INITIALIZER)
+ {
+ lockdebug_remember_monitor(this);
+ }
+
+ monitor_tt(const fork_unsafe_lock_t unsafe)
+ : mutex(PTHREAD_MUTEX_INITIALIZER), cond(PTHREAD_COND_INITIALIZER)
+ { }
void enter()
{
if (err) _objc_fatal("pthread_cond_broadcast failed (%d)", err);
}
+ void forceReset()
+ {
+ lockdebug_monitor_leave(this);
+
+ bzero(&mutex, sizeof(mutex));
+ bzero(&cond, sizeof(cond));
+ mutex = pthread_mutex_t PTHREAD_MUTEX_INITIALIZER;
+ cond = pthread_cond_t PTHREAD_COND_INITIALIZER;
+ }
+
void assertLocked()
{
lockdebug_monitor_assert_locked(this);
pthread_rwlock_t mLock;
public:
- rwlock_tt() : mLock(PTHREAD_RWLOCK_INITIALIZER) { }
+ rwlock_tt() : mLock(PTHREAD_RWLOCK_INITIALIZER) {
+ lockdebug_remember_rwlock(this);
+ }
+
+ rwlock_tt(const fork_unsafe_lock_t unsafe)
+ : mLock(PTHREAD_RWLOCK_INITIALIZER)
+ { }
void read()
{
}
}
+ void forceReset()
+ {
+ lockdebug_rwlock_unlock_write(this);
+
+ bzero(&mLock, sizeof(mLock));
+ mLock = pthread_rwlock_t PTHREAD_RWLOCK_INITIALIZER;
+ }
+
void assertReading() {
lockdebug_rwlock_assert_reading(this);
(unsigned char)(((uint32_t)(v))>>8), \
(unsigned char)(((uint32_t)(v))>>0)
+// fork() safety requires careful tracking of all locks.
+// Our custom lock types check this in debug builds.
+// Disallow direct use of all other lock types.
+typedef __darwin_pthread_mutex_t pthread_mutex_t UNAVAILABLE_ATTRIBUTE;
+typedef __darwin_pthread_rwlock_t pthread_rwlock_t UNAVAILABLE_ATTRIBUTE;
+typedef int32_t OSSpinLock UNAVAILABLE_ATTRIBUTE;
+typedef struct os_unfair_lock_s os_unfair_lock UNAVAILABLE_ATTRIBUTE;
+
+
#endif
#include "objc-runtime-old.h"
#include "objcrt.h"
+const fork_unsafe_lock_t fork_unsafe_lock;
+
int monitor_init(monitor_t *c)
{
// fixme error checking
}
+/***********************************************************************
+* _objc_atfork_prepare
+* _objc_atfork_parent
+* _objc_atfork_child
+* Allow ObjC to be used between fork() and exec().
+* libc requires this because it has fork-safe functions that use os_objects.
+*
+* _objc_atfork_prepare() acquires all locks.
+* _objc_atfork_parent() releases the locks again.
+* _objc_atfork_child() forcibly resets the locks.
+**********************************************************************/
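
The registration itself is not part of this patch; libSystem calls these three functions around fork(). A minimal sketch of equivalent wiring, for orientation only, with a hypothetical helper name:

    // Hypothetical sketch. The real hookup is done by libSystem, not by objc.
    #include <pthread.h>

    extern "C" void _objc_atfork_prepare(void);
    extern "C" void _objc_atfork_parent(void);
    extern "C" void _objc_atfork_child(void);

    static void wireUpForkHandlers(void)
    {
        pthread_atfork(_objc_atfork_prepare,   // before fork: acquire every runtime lock
                       _objc_atfork_parent,    // after fork, in the parent: release them
                       _objc_atfork_child);    // after fork, in the child: force-reset them
    }
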
+
+// Declare lock ordering.
+#if DEBUG
+__attribute__((constructor))
+static void defineLockOrder()
+{
+ // Every lock precedes crashlog_lock
+ // on the assumption that fatal errors could be anywhere.
+ lockdebug_lock_precedes_lock(&loadMethodLock, &crashlog_lock);
+ lockdebug_lock_precedes_lock(&classInitLock, &crashlog_lock);
+#if __OBJC2__
+ lockdebug_lock_precedes_lock(&runtimeLock, &crashlog_lock);
+ lockdebug_lock_precedes_lock(&DemangleCacheLock, &crashlog_lock);
+#else
+ lockdebug_lock_precedes_lock(&classLock, &crashlog_lock);
+ lockdebug_lock_precedes_lock(&methodListLock, &crashlog_lock);
+ lockdebug_lock_precedes_lock(&NXUniqueStringLock, &crashlog_lock);
+ lockdebug_lock_precedes_lock(&impLock, &crashlog_lock);
+#endif
+ lockdebug_lock_precedes_lock(&selLock, &crashlog_lock);
+ lockdebug_lock_precedes_lock(&cacheUpdateLock, &crashlog_lock);
+ lockdebug_lock_precedes_lock(&objcMsgLogLock, &crashlog_lock);
+ lockdebug_lock_precedes_lock(&AltHandlerDebugLock, &crashlog_lock);
+ lockdebug_lock_precedes_lock(&AssociationsManagerLock, &crashlog_lock);
+ SideTableLocksPrecedeLock(&crashlog_lock);
+ PropertyLocks.precedeLock(&crashlog_lock);
+ StructLocks.precedeLock(&crashlog_lock);
+ CppObjectLocks.precedeLock(&crashlog_lock);
+
+ // loadMethodLock precedes everything
+ // because it is held while +load methods run
+ lockdebug_lock_precedes_lock(&loadMethodLock, &classInitLock);
+#if __OBJC2__
+ lockdebug_lock_precedes_lock(&loadMethodLock, &runtimeLock);
+ lockdebug_lock_precedes_lock(&loadMethodLock, &DemangleCacheLock);
+#else
+ lockdebug_lock_precedes_lock(&loadMethodLock, &methodListLock);
+ lockdebug_lock_precedes_lock(&loadMethodLock, &classLock);
+ lockdebug_lock_precedes_lock(&loadMethodLock, &NXUniqueStringLock);
+ lockdebug_lock_precedes_lock(&loadMethodLock, &impLock);
+#endif
+ lockdebug_lock_precedes_lock(&loadMethodLock, &selLock);
+ lockdebug_lock_precedes_lock(&loadMethodLock, &cacheUpdateLock);
+ lockdebug_lock_precedes_lock(&loadMethodLock, &objcMsgLogLock);
+ lockdebug_lock_precedes_lock(&loadMethodLock, &AltHandlerDebugLock);
+ lockdebug_lock_precedes_lock(&loadMethodLock, &AssociationsManagerLock);
+ SideTableLocksSucceedLock(&loadMethodLock);
+ PropertyLocks.succeedLock(&loadMethodLock);
+ StructLocks.succeedLock(&loadMethodLock);
+ CppObjectLocks.succeedLock(&loadMethodLock);
+
+ // PropertyLocks and CppObjectLocks precede everything
+ // because they are held while objc_retain() or a C++ copy constructor is called.
+ // (StructLocks do not precede everything because they only call memmove.)
+ PropertyLocks.precedeLock(&classInitLock);
+ CppObjectLocks.precedeLock(&classInitLock);
+#if __OBJC2__
+ PropertyLocks.precedeLock(&runtimeLock);
+ CppObjectLocks.precedeLock(&runtimeLock);
+ PropertyLocks.precedeLock(&DemangleCacheLock);
+ CppObjectLocks.precedeLock(&DemangleCacheLock);
+#else
+ PropertyLocks.precedeLock(&methodListLock);
+ CppObjectLocks.precedeLock(&methodListLock);
+ PropertyLocks.precedeLock(&classLock);
+ CppObjectLocks.precedeLock(&classLock);
+ PropertyLocks.precedeLock(&NXUniqueStringLock);
+ CppObjectLocks.precedeLock(&NXUniqueStringLock);
+ PropertyLocks.precedeLock(&impLock);
+ CppObjectLocks.precedeLock(&impLock);
+#endif
+ PropertyLocks.precedeLock(&selLock);
+ CppObjectLocks.precedeLock(&selLock);
+ PropertyLocks.precedeLock(&cacheUpdateLock);
+ CppObjectLocks.precedeLock(&cacheUpdateLock);
+ PropertyLocks.precedeLock(&objcMsgLogLock);
+ CppObjectLocks.precedeLock(&objcMsgLogLock);
+ PropertyLocks.precedeLock(&AltHandlerDebugLock);
+ CppObjectLocks.precedeLock(&AltHandlerDebugLock);
+ PropertyLocks.precedeLock(&AssociationsManagerLock);
+ CppObjectLocks.precedeLock(&AssociationsManagerLock);
+ // fixme side table
+
+#if __OBJC2__
+ lockdebug_lock_precedes_lock(&classInitLock, &runtimeLock);
+#endif
+
+#if __OBJC2__
+ // Runtime operations may occur inside SideTable locks
+ // (such as storeWeak calling getMethodImplementation)
+ SideTableLocksPrecedeLock(&runtimeLock);
+ // Some operations may occur inside runtimeLock.
+ lockdebug_lock_precedes_lock(&runtimeLock, &selLock);
+ lockdebug_lock_precedes_lock(&runtimeLock, &cacheUpdateLock);
+ lockdebug_lock_precedes_lock(&runtimeLock, &DemangleCacheLock);
+#else
+ // Runtime operations may occur inside SideTable locks
+ // (such as storeWeak calling getMethodImplementation)
+ SideTableLocksPrecedeLock(&methodListLock);
+ // Method lookup and fixup.
+ lockdebug_lock_precedes_lock(&methodListLock, &classLock);
+ lockdebug_lock_precedes_lock(&methodListLock, &selLock);
+ lockdebug_lock_precedes_lock(&methodListLock, &cacheUpdateLock);
+ lockdebug_lock_precedes_lock(&methodListLock, &impLock);
+ lockdebug_lock_precedes_lock(&classLock, &selLock);
+ lockdebug_lock_precedes_lock(&classLock, &cacheUpdateLock);
+#endif
+
+ // Striped locks use address order internally.
+ SideTableDefineLockOrder();
+ PropertyLocks.defineLockOrder();
+ StructLocks.defineLockOrder();
+ CppObjectLocks.defineLockOrder();
+}
+// DEBUG
+#endif
+
+void _objc_atfork_prepare()
+{
+ lockdebug_assert_no_locks_locked();
+ lockdebug_setInForkPrepare(true);
+
+ loadMethodLock.lock();
+ PropertyLocks.lockAll();
+ CppObjectLocks.lockAll();
+ classInitLock.enter();
+ SideTableLockAll();
+#if __OBJC2__
+ runtimeLock.write();
+ DemangleCacheLock.lock();
+#else
+ methodListLock.lock();
+ classLock.lock();
+ NXUniqueStringLock.lock();
+ impLock.lock();
+#endif
+ selLock.write();
+ cacheUpdateLock.lock();
+ objcMsgLogLock.lock();
+ AltHandlerDebugLock.lock();
+ AssociationsManagerLock.lock();
+ StructLocks.lockAll();
+ crashlog_lock.lock();
+
+ lockdebug_assert_all_locks_locked();
+ lockdebug_setInForkPrepare(false);
+}
+
+void _objc_atfork_parent()
+{
+ lockdebug_assert_all_locks_locked();
+
+ CppObjectLocks.unlockAll();
+ StructLocks.unlockAll();
+ PropertyLocks.unlockAll();
+ AssociationsManagerLock.unlock();
+ AltHandlerDebugLock.unlock();
+ objcMsgLogLock.unlock();
+ crashlog_lock.unlock();
+ loadMethodLock.unlock();
+ cacheUpdateLock.unlock();
+ selLock.unlockWrite();
+ SideTableUnlockAll();
+#if __OBJC2__
+ DemangleCacheLock.unlock();
+ runtimeLock.unlockWrite();
+#else
+ impLock.unlock();
+ NXUniqueStringLock.unlock();
+ methodListLock.unlock();
+ classLock.unlock();
+#endif
+ classInitLock.leave();
+
+ lockdebug_assert_no_locks_locked();
+}
+
+void _objc_atfork_child()
+{
+ lockdebug_assert_all_locks_locked();
+
+ CppObjectLocks.forceResetAll();
+ StructLocks.forceResetAll();
+ PropertyLocks.forceResetAll();
+ AssociationsManagerLock.forceReset();
+ AltHandlerDebugLock.forceReset();
+ objcMsgLogLock.forceReset();
+ crashlog_lock.forceReset();
+ loadMethodLock.forceReset();
+ cacheUpdateLock.forceReset();
+ selLock.forceReset();
+ SideTableForceResetAll();
+#if __OBJC2__
+ DemangleCacheLock.forceReset();
+ runtimeLock.forceReset();
+#else
+ impLock.forceReset();
+ NXUniqueStringLock.forceReset();
+ methodListLock.forceReset();
+ classLock.forceReset();
+#endif
+ classInitLock.forceReset();
+
+ lockdebug_assert_no_locks_locked();
+}
+
+
/***********************************************************************
* _objc_init
* Bootstrap initialization. Registers our image notifier with dyld.
lock_init();
exception_init();
- _dyld_objc_notify_register(&map_2_images, load_images, unmap_image);
+ _dyld_objc_notify_register(&map_images, load_images, unmap_image);
}
extern Protocol *getPreoptimizedProtocol(const char *name);
+extern unsigned getPreoptimizedClassUnreasonableCount();
extern Class getPreoptimizedClass(const char *name);
extern Class* copyPreoptimizedClasses(const char *name, int *outCount);
/* locking */
extern void lock_init(void);
-extern rwlock_t selLock;
-extern mutex_t cacheUpdateLock;
-extern recursive_mutex_t loadMethodLock;
-#if __OBJC2__
-extern rwlock_t runtimeLock;
-#else
-extern mutex_t classLock;
-extern mutex_t methodListLock;
-#endif
class monitor_locker_t : nocopy_t {
monitor_t& lock;
// fixme runtime
extern Class look_up_class(const char *aClassName, bool includeUnconnected, bool includeClassHandler);
-extern "C" void map_2_images(unsigned count, const char * const paths[],
- const struct mach_header * const mhdrs[]);
+extern "C" void map_images(unsigned count, const char * const paths[],
+ const struct mach_header * const mhdrs[]);
extern void map_images_nolock(unsigned count, const char * const paths[],
const struct mach_header * const mhdrs[]);
extern void load_images(const char *path, const struct mach_header *mh);
return const_cast<StripedMap<T>>(this)[p];
}
+ // Shortcuts for StripedMaps of locks.
+ void lockAll() {
+ for (unsigned int i = 0; i < StripeCount; i++) {
+ array[i].value.lock();
+ }
+ }
+
+ void unlockAll() {
+ for (unsigned int i = 0; i < StripeCount; i++) {
+ array[i].value.unlock();
+ }
+ }
+
+ void forceResetAll() {
+ for (unsigned int i = 0; i < StripeCount; i++) {
+ array[i].value.forceReset();
+ }
+ }
+
+ void defineLockOrder() {
+ for (unsigned int i = 1; i < StripeCount; i++) {
+ lockdebug_lock_precedes_lock(&array[i-1].value, &array[i].value);
+ }
+ }
+
+ void precedeLock(const void *newlock) {
+ // assumes defineLockOrder is also called
+ lockdebug_lock_precedes_lock(&array[StripeCount-1].value, newlock);
+ }
+
+ void succeedLock(const void *oldlock) {
+ // assumes defineLockOrder is also called
+ lockdebug_lock_precedes_lock(oldlock, &array[0].value);
+ }
+
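
Putting the three helpers together (an illustrative sketch; ExampleLocks and ExampleLock are hypothetical names): defineLockOrder() chains the stripes in address order, precedeLock() anchors the last stripe before an external lock, and succeedLock() anchors the first stripe after one, so the ordering extends to every stripe transitively.

    // Hypothetical usage, mirroring what this patch's defineLockOrder()
    // constructor does for PropertyLocks and friends.
    static StripedMap<spinlock_t> ExampleLocks;
    static mutex_t ExampleLock;

    static void exampleLockOrder()
    {
        ExampleLocks.defineLockOrder();            // stripe[i-1] precedes stripe[i]
        ExampleLocks.precedeLock(&ExampleLock);    // every stripe precedes ExampleLock
        ExampleLocks.succeedLock(&loadMethodLock); // loadMethodLock precedes every stripe
    }
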
#if DEBUG
StripedMap() {
// Verify alignment expectations.
*/
+
+// Lock declarations
+#include "objc-locks.h"
+
// Inlined parts of objc_object's implementation
#include "objc-object.h"
using namespace objc_references_support;
// class AssociationsManager manages a lock / hash table singleton pair.
-// Allocating an instance acquires the lock, and calling its assocations() method
-// lazily allocates it.
+// Allocating an instance acquires the lock, and calling its associations()
+// method lazily allocates the hash table.
+
+spinlock_t AssociationsManagerLock;
class AssociationsManager {
- static spinlock_t _lock;
- static AssociationsHashMap *_map; // associative references: object pointer -> PtrPtrHashMap.
+ // associative references: object pointer -> PtrPtrHashMap.
+ static AssociationsHashMap *_map;
public:
- AssociationsManager() { _lock.lock(); }
- ~AssociationsManager() { _lock.unlock(); }
+ AssociationsManager() { AssociationsManagerLock.lock(); }
+ ~AssociationsManager() { AssociationsManagerLock.unlock(); }
AssociationsHashMap &associations() {
if (_map == NULL)
}
};
-spinlock_t AssociationsManager::_lock;
AssociationsHashMap *AssociationsManager::_map = NULL;
// expanded policy bits.
{
assert(!data() || (newData->flags & (RW_REALIZING | RW_FUTURE)));
// Set during realization or construction only. No locking needed.
- bits = (bits & ~FAST_DATA_MASK) | (uintptr_t)newData;
+ // Use a store-release fence because there may be concurrent
+ // readers of data and data's contents.
+ uintptr_t newBits = (bits & ~FAST_DATA_MASK) | (uintptr_t)newData;
+ atomic_thread_fence(memory_order_release);
+ bits = newBits;
}
bool hasDefaultRR() {
extern Method protocol_getMethod(protocol_t *p, SEL sel, bool isRequiredMethod, bool isInstanceMethod, bool recursive);
static inline void
-foreach_realized_class_and_subclass_2(Class top, bool (^code)(Class))
+foreach_realized_class_and_subclass_2(Class top, unsigned& count,
+ std::function<bool (Class)> code)
{
// runtimeLock.assertWriting();
assert(top);
Class cls = top;
while (1) {
+ if (--count == 0) {
+ _objc_fatal("Memory corruption in class list.");
+ }
if (!code(cls)) break;
if (cls->data()->firstSubclass) {
} else {
while (!cls->data()->nextSiblingClass && cls != top) {
cls = cls->superclass;
+ if (--count == 0) {
+ _objc_fatal("Memory corruption in class list.");
+ }
}
if (cls == top) break;
cls = cls->data()->nextSiblingClass;
}
}
+extern Class firstRealizedClass();
+extern unsigned int unreasonableClassCount();
+
// Enumerates a class and all of its realized subclasses.
static inline void
-foreach_realized_class_and_subclass(Class top, void (^code)(Class))
+foreach_realized_class_and_subclass(Class top,
+ std::function<void (Class)> code)
{
- foreach_realized_class_and_subclass_2(top, ^bool(Class cls) {
- code(cls); return true;
+ unsigned int count = unreasonableClassCount();
+
+ foreach_realized_class_and_subclass_2(top, count,
+ [&code](Class cls) -> bool
+ {
+ code(cls);
+ return true;
});
}
// Enumerates all realized classes and metaclasses.
-extern Class firstRealizedClass();
static inline void
-foreach_realized_class_and_metaclass(void (^code)(Class))
+foreach_realized_class_and_metaclass(std::function<void (Class)> code)
{
+ unsigned int count = unreasonableClassCount();
+
for (Class top = firstRealizedClass();
top != nil;
top = top->data()->nextSiblingClass)
{
- foreach_realized_class_and_subclass_2(top, ^bool(Class cls) {
- code(cls); return true;
+ foreach_realized_class_and_subclass_2(top, count,
+ [&code](Class cls) -> bool
+ {
+ code(cls);
+ return true;
});
}
}
+/***********************************************************************
+* unreasonableClassCount
+* Provides an upper bound for any iteration of classes,
+* to prevent spins when runtime metadata is corrupted.
+**********************************************************************/
+unsigned unreasonableClassCount()
+{
+ runtimeLock.assertLocked();
+
+ int base = NXCountMapTable(gdb_objc_realized_classes) +
+ getPreoptimizedClassUnreasonableCount();
+
+ // Provide lots of slack here. Some iterations touch metaclasses too.
+ // Some iterations backtrack (like realized class iteration).
+ // We don't need an efficient bound, merely one that prevents spins.
+ return (base + 1) * 16;
+}
+
+
/***********************************************************************
* futureNamedClasses
* Returns the classname => future class map for unrealized future classes.
* Locking: write-locks runtimeLock
**********************************************************************/
void
-map_2_images(unsigned count, const char * const paths[],
- const struct mach_header * const mhdrs[])
+map_images(unsigned count, const char * const paths[],
+ const struct mach_header * const mhdrs[])
{
rwlock_writer_t lock(runtimeLock);
return map_images_nolock(count, paths, mhdrs);
* If realize=false, the class must already be realized or future.
* Locking: If realize=true, runtimeLock must be held for writing by the caller.
**********************************************************************/
-static mutex_t DemangleCacheLock;
+mutex_t DemangleCacheLock;
static NXHashTable *DemangleCache;
const char *
objc_class::demangledName(bool realize)
IMP lookUpImpOrForward(Class cls, SEL sel, id inst,
bool initialize, bool cache, bool resolver)
{
- Class curClass;
IMP imp = nil;
- Method meth;
bool triedResolver = NO;
runtimeLock.assertUnlocked();
if (imp) return imp;
}
+ // runtimeLock is held during isRealized and isInitialized checking
+ // to prevent races against concurrent realization.
+
+ // runtimeLock is held during method search to make
+ // method-lookup + cache-fill atomic with respect to method addition.
+ // Otherwise, a category could be added but ignored indefinitely because
+ // the cache was re-filled with the old value after the cache flush on
+ // behalf of the category.
+
+ runtimeLock.read();
+
if (!cls->isRealized()) {
- rwlock_writer_t lock(runtimeLock);
+ // Drop the read-lock and acquire the write-lock.
+ // realizeClass() checks isRealized() again to prevent
+ // a race while the lock is down.
+ runtimeLock.unlockRead();
+ runtimeLock.write();
+
realizeClass(cls);
+
+ runtimeLock.unlockWrite();
+ runtimeLock.read();
}
if (initialize && !cls->isInitialized()) {
+ runtimeLock.unlockRead();
_class_initialize (_class_getNonMetaClass(cls, inst));
+ runtimeLock.read();
// If sel == initialize, _class_initialize will send +initialize and
// then the messenger will send +initialize again after this
// procedure finishes. Of course, if this is not being called
// from the messenger then it won't happen. 2778172
}
- // The lock is held to make method-lookup + cache-fill atomic
- // with respect to method addition. Otherwise, a category could
- // be added but ignored indefinitely because the cache was re-filled
- // with the old value after the cache flush on behalf of the category.
- retry:
- runtimeLock.read();
+
+ retry:
+ runtimeLock.assertReading();
// Try this class's cache.
if (imp) goto done;
// Try this class's method lists.
-
- meth = getMethodNoSuper_nolock(cls, sel);
- if (meth) {
- log_and_fill_cache(cls, meth->imp, sel, inst, cls);
- imp = meth->imp;
- goto done;
+ {
+ Method meth = getMethodNoSuper_nolock(cls, sel);
+ if (meth) {
+ log_and_fill_cache(cls, meth->imp, sel, inst, cls);
+ imp = meth->imp;
+ goto done;
+ }
}
// Try superclass caches and method lists.
-
- curClass = cls;
- while ((curClass = curClass->superclass)) {
- // Superclass cache.
- imp = cache_getImp(curClass, sel);
- if (imp) {
- if (imp != (IMP)_objc_msgForward_impcache) {
- // Found the method in a superclass. Cache it in this class.
- log_and_fill_cache(cls, imp, sel, inst, curClass);
- goto done;
+ {
+ unsigned attempts = unreasonableClassCount();
+ for (Class curClass = cls;
+ curClass != nil;
+ curClass = curClass->superclass)
+ {
+ // Halt if there is a cycle in the superclass chain.
+ if (--attempts == 0) {
+ _objc_fatal("Memory corruption in class list.");
}
- else {
- // Found a forward:: entry in a superclass.
- // Stop searching, but don't cache yet; call method
- // resolver for this class first.
- break;
+
+ // Superclass cache.
+ imp = cache_getImp(curClass, sel);
+ if (imp) {
+ if (imp != (IMP)_objc_msgForward_impcache) {
+ // Found the method in a superclass. Cache it in this class.
+ log_and_fill_cache(cls, imp, sel, inst, curClass);
+ goto done;
+ }
+ else {
+ // Found a forward:: entry in a superclass.
+ // Stop searching, but don't cache yet; call method
+ // resolver for this class first.
+ break;
+ }
+ }
+
+ // Superclass method list.
+ Method meth = getMethodNoSuper_nolock(curClass, sel);
+ if (meth) {
+ log_and_fill_cache(cls, meth->imp, sel, inst, curClass);
+ imp = meth->imp;
+ goto done;
}
- }
-
- // Superclass method list.
- meth = getMethodNoSuper_nolock(curClass, sel);
- if (meth) {
- log_and_fill_cache(cls, meth->imp, sel, inst, curClass);
- imp = meth->imp;
- goto done;
}
}
if (resolver && !triedResolver) {
runtimeLock.unlockRead();
_class_resolveMethod(cls, sel, inst);
+ runtimeLock.read();
// Don't cache the result; we don't hold the lock so it may have
// changed already. Re-do the search from scratch instead.
triedResolver = YES;
* Calls ABI-agnostic code after taking ABI-specific locks.
**********************************************************************/
void
-map_2_images(unsigned count, const char * const paths[],
- const struct mach_header * const mhdrs[])
+map_images(unsigned count, const char * const paths[],
+ const struct mach_header * const mhdrs[])
{
recursive_mutex_locker_t lock(loadMethodLock);
map_images_nolock(count, paths, mhdrs);
SyncData *data;
spinlock_t lock;
- SyncList() : data(nil) { }
+ SyncList() : data(nil), lock(fork_unsafe_lock) { }
};
// Use multiple parallel lists to decrease contention among unrelated objects.
result = (SyncData*)calloc(sizeof(SyncData), 1);
result->object = (objc_object *)object;
result->threadCount = 1;
- new (&result->mutex) recursive_mutex_t();
+ new (&result->mutex) recursive_mutex_t(fork_unsafe_lock);
result->nextData = *listp;
*listp = result;
typedef id (*IMP)(id, SEL, ...);
#endif
-#define OBJC_BOOL_DEFINED
-
/// Type to represent a boolean value.
-#if (TARGET_OS_IPHONE && __LP64__) || TARGET_OS_WATCH
-#define OBJC_BOOL_IS_BOOL 1
-typedef bool BOOL;
+
+#if defined(__OBJC_BOOL_IS_BOOL)
+ // Honor __OBJC_BOOL_IS_BOOL when available.
+# if __OBJC_BOOL_IS_BOOL
+# define OBJC_BOOL_IS_BOOL 1
+# else
+# define OBJC_BOOL_IS_BOOL 0
+# endif
#else
-#define OBJC_BOOL_IS_CHAR 1
-typedef signed char BOOL;
-// BOOL is explicitly signed so @encode(BOOL) == "c" rather than "C"
-// even if -funsigned-char is used.
+ // __OBJC_BOOL_IS_BOOL not set.
+# if TARGET_OS_OSX || (TARGET_OS_IOS && !__LP64__ && !__ARM_ARCH_7K)
+# define OBJC_BOOL_IS_BOOL 0
+# else
+# define OBJC_BOOL_IS_BOOL 1
+# endif
#endif
+#if OBJC_BOOL_IS_BOOL
+ typedef bool BOOL;
+#else
+# define OBJC_BOOL_IS_CHAR 1
+ typedef signed char BOOL;
+ // BOOL is explicitly signed so @encode(BOOL) == "c" rather than "C"
+ // even if -funsigned-char is used.
+#endif
+
+#define OBJC_BOOL_DEFINED
+
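
One observable consequence of the choice (an illustrative check, not part of this header): the ObjC type encoding of BOOL differs between the two definitions, which is why the signedness note above matters.

    // @encode(BOOL) is "c" (signed char) when OBJC_BOOL_IS_CHAR,
    // and "B" (C99 _Bool / C++ bool) when OBJC_BOOL_IS_BOOL.
    #include <objc/objc.h>
    #include <stdio.h>

    static void printBoolEncoding(void)
    {
        printf("@encode(BOOL) = %s\n", @encode(BOOL));
    }
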
#if __has_feature(objc_bool)
#define YES __objc_yes
#define NO __objc_no
* @note In a garbage-collected environment, the memory is scanned conservatively.
*/
OBJC_EXPORT void *object_getIndexedIvars(id obj)
- OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0);
+ OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0)
+ OBJC_ARC_UNAVAILABLE;
/**
* Identifies a selector as being valid or invalid.
OBJC_AVAILABLE(10.10, 8.0, 9.0, 1.0);
-/**
- * Returns the class name of a given object.
- *
- * @param obj An Objective-C object.
- *
- * @return The name of the class of which \e obj is an instance.
- */
-OBJC_EXPORT const char *object_getClassName(id obj)
- OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0);
-
-/**
- * Returns a pointer to any extra bytes allocated with an instance given object.
- *
- * @param obj An Objective-C object.
- *
- * @return A pointer to any extra bytes allocated with \e obj. If \e obj was
- * not allocated with any extra bytes, then dereferencing the returned pointer is undefined.
- *
- * @note This function returns a pointer to any extra bytes allocated with the instance
- * (as specified by \c class_createInstance with extraBytes>0). This memory follows the
- * object's ordinary ivars, but may not be adjacent to the last ivar.
- * @note The returned pointer is guaranteed to be pointer-size aligned, even if the area following
- * the object's last ivar is less aligned than that. Alignment greater than pointer-size is never
- * guaranteed, even if the area following the object's last ivar is more aligned than that.
- * @note In a garbage-collected environment, the memory is scanned conservatively.
- */
-OBJC_EXPORT void *object_getIndexedIvars(id obj)
- OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0)
- OBJC_ARC_UNAVAILABLE;
-
/**
* Reads the value of an instance variable in an object.
*
OBJC_EXPORT const char *sel_getName(SEL sel)
OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0);
-/**
- * Registers a method name with the Objective-C runtime system.
- *
- * @param str A pointer to a C string. Pass the name of the method you wish to register.
- *
- * @return A pointer of type SEL specifying the selector for the named method.
- *
- * @note The implementation of this method is identical to the implementation of \c sel_registerName.
- * @note Prior to OS X version 10.0, this method tried to find the selector mapped to the given name
- * and returned \c NULL if the selector was not found. This was changed for safety, because it was
- * observed that many of the callers of this function did not check the return value for \c NULL.
- */
-OBJC_EXPORT SEL sel_getUid(const char *str)
- OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0);
/**
* Registers a method with the Objective-C runtime system, maps the method