#define _OBJC_OS_H
#include <TargetConditionals.h>
+#include "objc-config.h"
+
+#ifdef __LP64__
+# define WORD_SHIFT 3UL
+# define WORD_MASK 7UL
+# define WORD_BITS 64
+#else
+# define WORD_SHIFT 2UL
+# define WORD_MASK 3UL
+# define WORD_BITS 32
+#endif
+
+// Round x up to the next multiple of the pointer word size
+// (4 bytes on 32-bit, 8 bytes on 64-bit; see WORD_MASK above).
+// Overloaded for uint32_t and size_t (requires C++ linkage).
+static inline uint32_t word_align(uint32_t x) {
+ return (x + WORD_MASK) & ~WORD_MASK;
+}
+static inline size_t word_align(size_t x) {
+ return (x + WORD_MASK) & ~WORD_MASK;
+}
+
+
+// Mix-in for classes that must not be copied: the copy constructor and
+// copy-assignment operator are deleted, and the default ctor/dtor are
+// protected so nocopy_t is usable only as a base class.
+class nocopy_t {
+  private:
+    nocopy_t(const nocopy_t&) = delete;
+    const nocopy_t& operator=(const nocopy_t&) = delete;
+  protected:
+    nocopy_t() { }
+    ~nocopy_t() { }
+};
+
#if TARGET_OS_MAC
+# ifndef __STDC_LIMIT_MACROS
+# define __STDC_LIMIT_MACROS
+# endif
+
# include <stdio.h>
# include <stdlib.h>
# include <stdint.h>
# include <unistd.h>
# include <pthread.h>
# include <crt_externs.h>
-# include <AssertMacros.h>
-# include <Block_private.h>
-# include <AvailabilityMacros.h>
+# undef check
+# include <Availability.h>
# include <TargetConditionals.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <sys/stat.h>
# include <sys/param.h>
# include <mach/mach.h>
+# include <mach/vm_param.h>
+# include <mach/mach_time.h>
# include <mach-o/dyld.h>
# include <mach-o/ldsyms.h>
# include <mach-o/loader.h>
# include <mach-o/getsect.h>
# include <mach-o/dyld_priv.h>
# include <malloc/malloc.h>
+# include <os/lock_private.h>
# include <libkern/OSAtomic.h>
# include <libkern/OSCacheControl.h>
# include <System/pthread_machdep.h>
# include "objc-probes.h" // generated dtrace probe definitions.
+// Some libc functions call objc_msgSend()
+// so we can't use them without deadlocks.
+void syslog(int, const char *, ...) UNAVAILABLE_ATTRIBUTE;
+void vsyslog(int, const char *, va_list) UNAVAILABLE_ATTRIBUTE;
+
+
+#define ALWAYS_INLINE inline __attribute__((always_inline))
+#define NEVER_INLINE inline __attribute__((noinline))
+
+
+
+// Word-sized add with carry: returns lhs + rhs + carryin and stores the
+// carry-out through *carryout. Thin wrapper over the clang builtin.
+static ALWAYS_INLINE uintptr_t
+addc(uintptr_t lhs, uintptr_t rhs, uintptr_t carryin, uintptr_t *carryout)
+{
+ return __builtin_addcl(lhs, rhs, carryin, carryout);
+}
+
+// Word-sized subtract with borrow: returns lhs - rhs - carryin and
+// stores the borrow-out through *carryout. Wrapper over the clang builtin.
+static ALWAYS_INLINE uintptr_t
+subc(uintptr_t lhs, uintptr_t rhs, uintptr_t carryin, uintptr_t *carryout)
+{
+ return __builtin_subcl(lhs, rhs, carryin, carryout);
+}
+
+
+#if __arm64__
+
+// arm64: real load-exclusive. A LoadExclusive/StoreExclusive pair forms
+// an atomic read-modify-write; StoreExclusive fails (returns false) if
+// the exclusive monitor was lost and the caller must retry.
+static ALWAYS_INLINE
+uintptr_t
+LoadExclusive(uintptr_t *src)
+{
+ uintptr_t result;
+ asm("ldxr %x0, [%x1]"
+ : "=r" (result)
+ : "r" (src), "m" (*src));
+ return result;
+}
+
+// Store value to *dst only if the exclusive monitor from the preceding
+// LoadExclusive is still held. oldvalue is unused on this architecture
+// (the hardware monitor replaces the compare). stxr writes 0 to its
+// status register on success, hence the !result.
+static ALWAYS_INLINE
+bool
+StoreExclusive(uintptr_t *dst, uintptr_t oldvalue __unused, uintptr_t value)
+{
+ uint32_t result;
+ asm("stxr %w0, %x2, [%x3]"
+ : "=r" (result), "=m" (*dst)
+ : "r" (value), "r" (dst));
+ return !result;
+}
+
+
+// Same as StoreExclusive but with release ordering (stlxr).
+static ALWAYS_INLINE
+bool
+StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue __unused, uintptr_t value)
+{
+ uint32_t result;
+ asm("stlxr %w0, %x2, [%x3]"
+ : "=r" (result), "=m" (*dst)
+ : "r" (value), "r" (dst));
+ return !result;
+}
+
+
+#elif __arm__
+
+// arm32: emulate the exclusive pair with a plain load plus a
+// compare-and-swap against the previously loaded value.
+static ALWAYS_INLINE
+uintptr_t
+LoadExclusive(uintptr_t *src)
+{
+ return *src;
+}
+
+static ALWAYS_INLINE
+bool
+StoreExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
+{
+ return OSAtomicCompareAndSwapPtr((void *)oldvalue, (void *)value,
+ (void **)dst);
+}
+
+// Release-ordered variant: the Barrier CAS supplies the memory barrier.
+static ALWAYS_INLINE
+bool
+StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
+{
+ return OSAtomicCompareAndSwapPtrBarrier((void *)oldvalue, (void *)value,
+ (void **)dst);
+}
+
+
+#elif __x86_64__ || __i386__
+
+// x86: plain load plus a full-barrier __sync CAS.
+static ALWAYS_INLINE
+uintptr_t
+LoadExclusive(uintptr_t *src)
+{
+ return *src;
+}
+
+static ALWAYS_INLINE
+bool
+StoreExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
+{
+
+ return __sync_bool_compare_and_swap((void **)dst, (void *)oldvalue, (void *)value);
+}
+
+// No separate release form needed: the CAS above is already a full
+// barrier, so this simply delegates to StoreExclusive.
+static ALWAYS_INLINE
+bool
+StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
+{
+ return StoreExclusive(dst, oldvalue, value);
+}
+
+#else
+# error unknown architecture
+#endif
+
+
+// Spin lock built on the private os_lock "handoff" variant.
+// NOTE(review): handoff (priority-donating) semantics are inferred from
+// the lock type's name — confirm against os/lock_private.h.
+class spinlock_t {
+ os_lock_handoff_s mLock;
+ public:
+ spinlock_t() : mLock(OS_LOCK_HANDOFF_INIT) { }
+
+ void lock() { os_lock_lock(&mLock); }
+ void unlock() { os_lock_unlock(&mLock); }
+ bool trylock() { return os_lock_trylock(&mLock); }
+
+
+ // Address-ordered lock discipline for a pair of locks.
+ // Both branches acquire the higher-addressed lock first, so two
+ // threads locking the same pair in either order cannot deadlock.
+ // The lock1 == lock2 case is locked only once.
+ static void lockTwo(spinlock_t *lock1, spinlock_t *lock2) {
+ if (lock1 > lock2) {
+ lock1->lock();
+ lock2->lock();
+ } else {
+ lock2->lock();
+ if (lock2 != lock1) lock1->lock();
+ }
+ }
+
+ // Unlock a pair previously taken with lockTwo; skips the second
+ // unlock when both pointers name the same lock.
+ static void unlockTwo(spinlock_t *lock1, spinlock_t *lock2) {
+ lock1->unlock();
+ if (lock2 != lock1) lock2->unlock();
+ }
+};
+
+
+#if !TARGET_OS_IPHONE
+# include <CrashReporterClient.h>
+#else
+ // CrashReporterClient not yet available on iOS
+ __BEGIN_DECLS
+ extern const char *CRSetCrashLogMessage(const char *msg);
+ extern const char *CRGetCrashLogMessage(void);
+ extern const char *CRSetCrashLogMessage2(const char *msg);
+ __END_DECLS
+#endif
+
# if __cplusplus
# include <vector>
# include <algorithm>
+# include <functional>
using namespace std;
-# include <ext/hash_map>
- using namespace __gnu_cxx;
# endif
+# define PRIVATE_EXTERN __attribute__((visibility("hidden")))
+# undef __private_extern__
+# define __private_extern__ use_PRIVATE_EXTERN_instead
+# undef private_extern
+# define private_extern use_PRIVATE_EXTERN_instead
+
+/* Use this for functions that are intended to be breakpoint hooks.
+ If you do not, the compiler may optimize them away.
+ BREAKPOINT_FUNCTION( void stop_on_error(void) ); */
+# define BREAKPOINT_FUNCTION(prototype) \
+ OBJC_EXTERN __attribute__((noinline, used, visibility("hidden"))) \
+ prototype { asm(""); }
+
#elif TARGET_OS_WIN32
# define WINVER 0x0501 // target Windows XP and later
# include <string.h>
# include <assert.h>
# include <malloc.h>
-# include <AvailabilityMacros.h>
+# include <Availability.h>
# if __cplusplus
# include <vector>
# include <algorithm>
+# include <functional>
using namespace std;
-# include <hash_map>
- using namespace stdext;
# define __BEGIN_DECLS extern "C" {
# define __END_DECLS }
# else
# define __END_DECLS /*empty*/
# endif
-# define __private_extern__
+# define PRIVATE_EXTERN
# define __attribute__(x)
# define inline __inline
+/* Use this for functions that are intended to be breakpoint hooks.
+ If you do not, the compiler may optimize them away.
+ BREAKPOINT_FUNCTION( void MyBreakpointFunction(void) ); */
+# define BREAKPOINT_FUNCTION(prototype) \
+ __declspec(noinline) prototype { __asm { } }
+
/* stub out dtrace probes */
# define OBJC_RUNTIME_OBJC_EXCEPTION_RETHROW() do {} while(0)
# define OBJC_RUNTIME_OBJC_EXCEPTION_THROW(arg0) do {} while(0)
#include <objc/objc.h>
#include <objc/objc-api.h>
-__BEGIN_DECLS
-
extern void _objc_fatal(const char *fmt, ...) __attribute__((noreturn, format (printf, 1, 2)));
#define INIT_ONCE_PTR(var, create, delete) \
if (var) break; \
typeof(var) v = create; \
while (!var) { \
- if (OSAtomicCompareAndSwapPtrBarrier(0, v, (void**)&var)) { \
+ if (OSAtomicCompareAndSwapPtrBarrier(0, (void*)v, (void**)&var)){ \
goto done; \
} \
} \
// Thread keys reserved by libc for our use.
-// Keys [0..4] are used by autozone.
-#if defined(__PTK_FRAMEWORK_OBJC_KEY5)
-# define TLS_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY5)
-# define SYNC_DATA_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY6)
-# define SYNC_COUNT_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY7)
-// define DIRECT_4_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY8)
-// define DIRECT_5_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY9)
+#if defined(__PTK_FRAMEWORK_OBJC_KEY0)
+# define SUPPORT_DIRECT_THREAD_KEYS 1
+# define TLS_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY0)
+# define SYNC_DATA_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY1)
+# define SYNC_COUNT_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY2)
+# define AUTORELEASE_POOL_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY3)
+# if SUPPORT_RETURN_AUTORELEASE
+# define RETURN_DISPOSITION_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY4)
+# endif
+# if SUPPORT_QOS_HACK
+# define QOS_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY5)
+# endif
#else
-# define NO_DIRECT_THREAD_KEYS 1
+# define SUPPORT_DIRECT_THREAD_KEYS 0
#endif
static __inline size_t malloc_size(const void *p) { return _msize((void*)p); /* fixme invalid pointer check? */ }
-// AssertMacros
-
-#define require_action_string(cond, dest, act, msg) do { if (!(cond)) { { act; } goto dest; } } while (0)
-#define require_noerr_string(err, dest, msg) do { if (err) goto dest; } while (0)
-#define require_string(cond, dest, msg) do { if (!(cond)) goto dest; } while (0)
-
-
// OSAtomic
static __inline BOOL OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
DWORD key;
void (*dtor)(void *);
} tls_key_t;
-static __inline void tls_create(tls_key_t *k, void (*dtor)(void*)) {
+static __inline tls_key_t tls_create(void (*dtor)(void*)) {
// fixme need dtor registry for DllMain to call on thread detach
- k->key = TlsAlloc();
- k->dtor = dtor;
+ tls_key_t k;
+ k.key = TlsAlloc();
+ k.dtor = dtor;
+ return k;
}
static __inline void *tls_get(tls_key_t k) {
return TlsGetValue(k.key);
EnterCriticalSection(m->lock);
return 0;
}
-static __inline int _mutex_try_lock_nodebug(mutex_t *m) {
+static __inline bool _mutex_try_lock_nodebug(mutex_t *m) {
// fixme error check
if (!m->lock) {
mutex_init(m);
}
-typedef mutex_t OSSpinLock;
-#define OSSpinLockLock(l) mutex_lock(l)
-#define OSSpinLockUnlock(l) mutex_unlock(l)
-#define OS_SPINLOCK_INIT MUTEX_INITIALIZER
+typedef mutex_t spinlock_t;
+#define spinlock_lock(l) mutex_lock(l)
+#define spinlock_unlock(l) mutex_unlock(l)
+#define SPINLOCK_INITIALIZER MUTEX_INITIALIZER
typedef struct {
assert(m->mutex);
return WaitForSingleObject(m->mutex, INFINITE);
}
-static __inline int _recursive_mutex_try_lock_nodebug(recursive_mutex_t *m) {
+static __inline bool _recursive_mutex_try_lock_nodebug(recursive_mutex_t *m) {
assert(m->mutex);
return (WAIT_OBJECT_0 == WaitForSingleObject(m->mutex, 0));
}
typedef HANDLE mutex_t;
static inline void mutex_init(HANDLE *m) { *m = CreateMutex(NULL, FALSE, NULL); }
static inline void _mutex_lock(mutex_t *m) { WaitForSingleObject(*m, INFINITE); }
-static inline int mutex_try_lock(mutex_t *m) { return WaitForSingleObject(*m, 0) == WAIT_OBJECT_0; }
+static inline bool mutex_try_lock(mutex_t *m) { return WaitForSingleObject(*m, 0) == WAIT_OBJECT_0; }
static inline void _mutex_unlock(mutex_t *m) { ReleaseMutex(*m); }
*/
}
return WaitForSingleObject(c->mutex, INFINITE);
}
-static inline int _monitor_exit_nodebug(monitor_t *c) {
+static inline int _monitor_leave_nodebug(monitor_t *c) {
if (!ReleaseMutex(c->mutex)) return MONITOR_NOT_ENTERED;
else return 0;
}
// fixme no rwlock yet
-#define rwlock_t mutex_t
-#define rwlock_init(r) mutex_init(r)
-#define _rwlock_read_nodebug(m) _mutex_lock_nodebug(m)
-#define _rwlock_write_nodebug(m) _mutex_lock_nodebug(m)
-#define _rwlock_try_read_nodebug(m) _mutex_try_lock_nodebug(m)
-#define _rwlock_try_write_nodebug(m) _mutex_try_lock_nodebug(m)
-#define _rwlock_unlock_read_nodebug(m) _mutex_unlock_nodebug(m)
-#define _rwlock_unlock_write_nodebug(m) _mutex_unlock_nodebug(m)
-
-
-typedef struct {
- struct objc_module **modules;
- size_t moduleCount;
- struct old_protocol **protocols;
- size_t protocolCount;
- void *imageinfo;
- size_t imageinfoBytes;
- SEL *selrefs;
- size_t selrefCount;
- struct objc_class **clsrefs;
- size_t clsrefCount;
-} os_header_info;
typedef IMAGE_DOS_HEADER headerType;
// fixme YES bundle? NO bundle? sometimes?
// OS headers
+#include <mach-o/loader.h>
+#ifndef __LP64__
+# define SEGMENT_CMD LC_SEGMENT
+#else
+# define SEGMENT_CMD LC_SEGMENT_64
+#endif
+
+#ifndef VM_MEMORY_OBJC_DISPATCHERS
+# define VM_MEMORY_OBJC_DISPATCHERS 0
+#endif
// Compiler compatibility
// OS compatibility
+// NOTE(review): despite the name, this returns raw mach_absolute_time()
+// ticks, which are in timebase units and not guaranteed to be
+// nanoseconds on all hardware — confirm callers convert via
+// mach_timebase_info or only compare relative values.
+static inline uint64_t nanoseconds() {
+ return mach_absolute_time();
+}
+
// Internal data types
typedef pthread_t objc_thread_t;
typedef pthread_key_t tls_key_t;
-static inline void tls_create(tls_key_t *k, void (*dtor)(void*)) {
- pthread_key_create(k, dtor);
+// Create a pthread TLS key with the given per-thread destructor and
+// return the key by value (replaces the old out-parameter form).
+// NOTE(review): pthread_key_create's return code is ignored; on failure
+// k would be returned uninitialized — confirm this is acceptable here.
+static inline tls_key_t tls_create(void (*dtor)(void*)) {
+ tls_key_t k;
+ pthread_key_create(&k, dtor);
+ return k;
}
static inline void *tls_get(tls_key_t k) {
return pthread_getspecific(k);
pthread_setspecific(k, value);
}
-#ifndef NO_DIRECT_THREAD_KEYS
+#if SUPPORT_DIRECT_THREAD_KEYS
+
+#if DEBUG
+// Debug-only check that k is one of the direct TSD keys reserved above
+// (sync data/count, autorelease pool, and — when configured — the
+// return-disposition and QoS keys).
+// NOTE(review): TLS_DIRECT_KEY is absent from this list; confirm it is
+// never passed to the direct tls_get/set helpers.
+static bool is_valid_direct_key(tls_key_t k) {
+ return ( k == SYNC_DATA_DIRECT_KEY
+ || k == SYNC_COUNT_DIRECT_KEY
+ || k == AUTORELEASE_POOL_KEY
+# if SUPPORT_RETURN_AUTORELEASE
+ || k == RETURN_DISPOSITION_KEY
+# endif
+# if SUPPORT_QOS_HACK
+ || k == QOS_KEY
+# endif
+ );
+}
+#endif
+
+#if __arm__
+
+// rdar://9162780 _pthread_get/setspecific_direct are inefficient
+// copied from libdispatch
+
+// Return the calling thread's TSD array base: reads the CP15 thread-ID
+// register (c13, c0, 3) and masks off the low two bits.
+// Marked const — the result is fixed for a given thread.
+__attribute__((const))
+static ALWAYS_INLINE void**
+tls_base(void)
+{
+ uintptr_t p;
+#if defined(__arm__) && defined(_ARM_ARCH_6)
+ __asm__("mrc p15, 0, %[p], c13, c0, 3" : [p] "=&r" (p));
+ return (void**)(p & ~0x3ul);
+#else
+#error tls_base not implemented
+#endif
+}
+
+
+// Store v into direct TSD slot k by indexing the TSD array directly.
+static ALWAYS_INLINE void
+tls_set_direct(void **tsdb, tls_key_t k, void *v)
+{
+ assert(is_valid_direct_key(k));
+
+ tsdb[k] = v;
+}
+// Rewrite two-argument tls_set_direct(k, v) call sites into the
+// three-argument inline above, supplying tls_base() automatically.
+#define tls_set_direct(k, v) \
+ tls_set_direct(tls_base(), (k), (v))
+
+
+// Read direct TSD slot k straight out of the TSD array.
+static ALWAYS_INLINE void *
+tls_get_direct(void **tsdb, tls_key_t k)
+{
+ assert(is_valid_direct_key(k));
+
+ return tsdb[k];
+}
+// Same macro trick as tls_set_direct: supply tls_base() implicitly.
+#define tls_get_direct(k) \
+ tls_get_direct(tls_base(), (k))
+
+// arm
+#else
+// not arm
+
static inline void *tls_get_direct(tls_key_t k)
{
- assert(k == SYNC_DATA_DIRECT_KEY ||
- k == SYNC_COUNT_DIRECT_KEY);
+ assert(is_valid_direct_key(k));
if (_pthread_has_direct_tsd()) {
return _pthread_getspecific_direct(k);
}
static inline void tls_set_direct(tls_key_t k, void *value)
{
- assert(k == SYNC_DATA_DIRECT_KEY ||
- k == SYNC_COUNT_DIRECT_KEY);
+ assert(is_valid_direct_key(k));
if (_pthread_has_direct_tsd()) {
_pthread_setspecific_direct(k, value);
pthread_setspecific(k, value);
}
}
+
+// not arm
#endif
+// SUPPORT_DIRECT_THREAD_KEYS
+#endif
-typedef pthread_mutex_t mutex_t;
-#define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER;
-extern int DebuggerMode;
-extern void gdb_objc_debuggerModeFailure(void);
-extern BOOL isManagedDuringDebugger(void *lock);
-extern BOOL isLockedDuringDebugger(mutex_t *lock);
+// Fast pthread_self(): reads the cached self pointer from the direct
+// TSD slot instead of calling into libpthread.
+static inline pthread_t pthread_self_direct()
+{
+ return (pthread_t)
+ _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_SELF);
+}
-static inline int _mutex_lock_nodebug(mutex_t *m) {
- if (DebuggerMode && isManagedDuringDebugger(m)) {
- if (! isLockedDuringDebugger(m)) {
- gdb_objc_debuggerModeFailure();
- }
- return 0;
- }
- return pthread_mutex_lock(m);
+// Fast mach_thread_self(): reads the cached thread port from the direct
+// TSD slot. NOTE(review): unlike mach_thread_self(), this presumably
+// adds no port reference, so callers must not deallocate the port —
+// confirm against the pthread TSD headers.
+static inline mach_port_t mach_thread_self_direct()
+{
+ return (mach_port_t)(uintptr_t)
+ _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
+}
+
+#if SUPPORT_QOS_HACK
+// Current thread's QoS priority from the direct TSD slot, with the
+// flag bits masked off so only the priority value remains.
+static inline pthread_priority_t pthread_self_priority_direct()
+{
+ pthread_priority_t pri = (pthread_priority_t)
+ _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS);
+ return pri & ~_PTHREAD_PRIORITY_FLAGS_MASK;
}
-static inline int _mutex_try_lock_nodebug(mutex_t *m) {
- if (DebuggerMode && isManagedDuringDebugger(m)) {
- if (! isLockedDuringDebugger(m)) {
- gdb_objc_debuggerModeFailure();
+#endif
+
+
+template <bool Debug> class mutex_tt;
+template <bool Debug> class monitor_tt;
+template <bool Debug> class rwlock_tt;
+template <bool Debug> class recursive_mutex_tt;
+
+#include "objc-lockdebug.h"
+
+template <bool Debug>
+class mutex_tt : nocopy_t {
+ pthread_mutex_t mLock;
+
+ public:
+ mutex_tt() : mLock(PTHREAD_MUTEX_INITIALIZER) { }
+
+ void lock()
+ {
+ lockdebug_mutex_lock(this);
+
+ int err = pthread_mutex_lock(&mLock);
+ if (err) _objc_fatal("pthread_mutex_lock failed (%d)", err);
+ }
+
+ bool tryLock()
+ {
+ int err = pthread_mutex_trylock(&mLock);
+ if (err == 0) {
+ lockdebug_mutex_try_lock_success(this);
+ return true;
+ } else if (err == EBUSY) {
+ return false;
+ } else {
+ _objc_fatal("pthread_mutex_trylock failed (%d)", err);
}
- return 1;
}
- return !pthread_mutex_trylock(m);
-}
-static inline int _mutex_unlock_nodebug(mutex_t *m) {
- if (DebuggerMode && isManagedDuringDebugger(m)) {
- return 0;
+
+ void unlock()
+ {
+ lockdebug_mutex_unlock(this);
+
+ int err = pthread_mutex_unlock(&mLock);
+ if (err) _objc_fatal("pthread_mutex_unlock failed (%d)", err);
}
- return pthread_mutex_unlock(m);
-}
-typedef struct {
- pthread_mutex_t *mutex;
-} recursive_mutex_t;
-#define RECURSIVE_MUTEX_INITIALIZER {0};
-#define RECURSIVE_MUTEX_NOT_LOCKED EPERM
-extern void recursive_mutex_init(recursive_mutex_t *m);
+ void assertLocked() {
+ lockdebug_mutex_assert_locked(this);
+ }
-static inline int _recursive_mutex_lock_nodebug(recursive_mutex_t *m) {
- assert(m->mutex);
- if (DebuggerMode && isManagedDuringDebugger(m)) {
- if (! isLockedDuringDebugger((mutex_t *)m)) {
- gdb_objc_debuggerModeFailure();
+ void assertUnlocked() {
+ lockdebug_mutex_assert_unlocked(this);
+ }
+};
+
+using mutex_t = mutex_tt<DEBUG>;
+
+
+template <bool Debug>
+class recursive_mutex_tt : nocopy_t {
+ pthread_mutex_t mLock;
+
+ public:
+ recursive_mutex_tt() : mLock(PTHREAD_RECURSIVE_MUTEX_INITIALIZER) { }
+
+ void lock()
+ {
+ lockdebug_recursive_mutex_lock(this);
+
+ int err = pthread_mutex_lock(&mLock);
+ if (err) _objc_fatal("pthread_mutex_lock failed (%d)", err);
+ }
+
+ bool tryLock()
+ {
+ int err = pthread_mutex_trylock(&mLock);
+ if (err == 0) {
+ lockdebug_recursive_mutex_lock(this);
+ return true;
+ } else if (err == EBUSY) {
+ return false;
+ } else {
+ _objc_fatal("pthread_mutex_trylock failed (%d)", err);
}
- return 0;
}
- return pthread_mutex_lock(m->mutex);
-}
-static inline int _recursive_mutex_try_lock_nodebug(recursive_mutex_t *m) {
- assert(m->mutex);
- if (DebuggerMode && isManagedDuringDebugger(m)) {
- if (! isLockedDuringDebugger((mutex_t *)m)) {
- gdb_objc_debuggerModeFailure();
+
+
+ void unlock()
+ {
+ lockdebug_recursive_mutex_unlock(this);
+
+ int err = pthread_mutex_unlock(&mLock);
+ if (err) _objc_fatal("pthread_mutex_unlock failed (%d)", err);
+ }
+
+ bool tryUnlock()
+ {
+ int err = pthread_mutex_unlock(&mLock);
+ if (err == 0) {
+ lockdebug_recursive_mutex_unlock(this);
+ return true;
+ } else if (err == EPERM) {
+ return false;
+ } else {
+ _objc_fatal("pthread_mutex_unlock failed (%d)", err);
}
- return 1;
}
- return !pthread_mutex_trylock(m->mutex);
-}
-static inline int _recursive_mutex_unlock_nodebug(recursive_mutex_t *m) {
- assert(m->mutex);
- if (DebuggerMode && isManagedDuringDebugger(m)) {
- return 0;
+
+
+ void assertLocked() {
+ lockdebug_recursive_mutex_assert_locked(this);
}
- return pthread_mutex_unlock(m->mutex);
-}
+ void assertUnlocked() {
+ lockdebug_recursive_mutex_assert_unlocked(this);
+ }
+};
-typedef struct {
+using recursive_mutex_t = recursive_mutex_tt<DEBUG>;
+
+
+template <bool Debug>
+class monitor_tt {
pthread_mutex_t mutex;
pthread_cond_t cond;
-} monitor_t;
-#define MONITOR_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER }
-#define MONITOR_NOT_ENTERED EPERM
-static inline int monitor_init(monitor_t *c) {
- int err = pthread_mutex_init(&c->mutex, NULL);
- if (err) return err;
- err = pthread_cond_init(&c->cond, NULL);
- if (err) {
- pthread_mutex_destroy(&c->mutex);
- return err;
+ public:
+ monitor_tt()
+ : mutex(PTHREAD_MUTEX_INITIALIZER), cond(PTHREAD_COND_INITIALIZER) { }
+
+ void enter()
+ {
+ lockdebug_monitor_enter(this);
+
+ int err = pthread_mutex_lock(&mutex);
+ if (err) _objc_fatal("pthread_mutex_lock failed (%d)", err);
+ }
+
+ void leave()
+ {
+ lockdebug_monitor_leave(this);
+
+ int err = pthread_mutex_unlock(&mutex);
+ if (err) _objc_fatal("pthread_mutex_unlock failed (%d)", err);
+ }
+
+ void wait()
+ {
+ lockdebug_monitor_wait(this);
+
+ int err = pthread_cond_wait(&cond, &mutex);
+ if (err) _objc_fatal("pthread_cond_wait failed (%d)", err);
+ }
+
+ void notify()
+ {
+ int err = pthread_cond_signal(&cond);
+ if (err) _objc_fatal("pthread_cond_signal failed (%d)", err);
+ }
+
+ void notifyAll()
+ {
+ int err = pthread_cond_broadcast(&cond);
+ if (err) _objc_fatal("pthread_cond_broadcast failed (%d)", err);
}
- return 0;
-}
-static inline int _monitor_enter_nodebug(monitor_t *c) {
- assert(!isManagedDuringDebugger(c));
- return pthread_mutex_lock(&c->mutex);
-}
-static inline int _monitor_exit_nodebug(monitor_t *c) {
- return pthread_mutex_unlock(&c->mutex);
-}
-static inline int _monitor_wait_nodebug(monitor_t *c) {
- return pthread_cond_wait(&c->cond, &c->mutex);
-}
-static inline int monitor_notify(monitor_t *c) {
- return pthread_cond_signal(&c->cond);
-}
-static inline int monitor_notifyAll(monitor_t *c) {
- return pthread_cond_broadcast(&c->cond);
-}
+
+ void assertLocked()
+ {
+ lockdebug_monitor_assert_locked(this);
+ }
+
+ void assertUnlocked()
+ {
+ lockdebug_monitor_assert_unlocked(this);
+ }
+};
+
+using monitor_t = monitor_tt<DEBUG>;
// semaphore_create formatted for INIT_ONCE use
}
-/* Custom read-write lock
- - reader is atomic add/subtract
- - writer is pthread mutex plus atomic add/subtract
- - fairness: new readers wait if a writer wants in
- - fairness: when writer completes, readers (probably) precede new writer
+#if SUPPORT_QOS_HACK
+// Override QOS class to avoid priority inversion in rwlocks
+// <rdar://17697862> do a qos override before taking rw lock in objc
- state: xxxxxxxx xxxxxxxx yyyyyyyy yyyyyyyz
- x: blocked reader count
- y: active reader count
- z: readers allowed flag
-*/
-typedef struct {
- volatile int32_t state;
- semaphore_t readersDone;
- semaphore_t writerDone;
- pthread_mutex_t writerMutex;
-} rwlock_t;
+#include <pthread/workqueue_private.h>
+extern pthread_priority_t BackgroundPriority;
+extern pthread_priority_t MainPriority;
-extern BOOL isReadingDuringDebugger(rwlock_t *lock);
-extern BOOL isWritingDuringDebugger(rwlock_t *lock);
-
-static inline void rwlock_init(rwlock_t *l)
+static inline void qosStartOverride()
{
- l->state = 1;
- l->readersDone = create_semaphore();
- l->writerDone = create_semaphore();
- l->writerMutex = (mutex_t)MUTEX_INITIALIZER;
-}
-
-static inline void _rwlock_read_nodebug(rwlock_t *l)
-{
- if (DebuggerMode && isManagedDuringDebugger(l)) {
- if (! isReadingDuringDebugger(l)) {
- gdb_objc_debuggerModeFailure();
- }
- return;
- }
- while (1) {
- // Increment "blocked readers" or "active readers" count.
- int32_t old = l->state;
- if (old % 2 == 1) {
- // Readers OK. Increment active reader count.
- if (OSAtomicCompareAndSwap32Barrier(old, old + 2, &l->state)) {
- // Success. Read lock acquired.
- return;
- } else {
- // CAS failed (writer or another reader). Redo from start.
- }
- }
- else {
- // Readers not OK. Increment blocked reader count.
- if (OSAtomicCompareAndSwap32(old, old + 0x10000, &l->state)) {
- // Success. Wait for writer to complete, then retry.
- semaphore_wait(l->writerDone);
- } else {
- // CAS failed (writer or another reader). Redo from start.
- }
+ uintptr_t overrideRefCount = (uintptr_t)tls_get_direct(QOS_KEY);
+ if (overrideRefCount > 0) {
+ // If there is a qos override, increment the refcount and continue
+ tls_set_direct(QOS_KEY, (void *)(overrideRefCount + 1));
+ }
+ else {
+ pthread_priority_t currentPriority = pthread_self_priority_direct();
+ // Check if override is needed. Only override if we are background qos
+ if (currentPriority != 0 && currentPriority <= BackgroundPriority) {
+ int res __unused = _pthread_override_qos_class_start_direct(mach_thread_self_direct(), MainPriority);
+ assert(res == 0);
+ // Once we override, we set the reference count in the tsd
+ // to know when to end the override
+ tls_set_direct(QOS_KEY, (void *)1);
}
}
}
-static inline void _rwlock_unlock_read_nodebug(rwlock_t *l)
+static inline void qosEndOverride()
{
- if (DebuggerMode && isManagedDuringDebugger(l)) {
- return;
- }
- // Decrement "active readers" count.
- int32_t newState = OSAtomicAdd32Barrier(-2, &l->state);
- if ((newState & 0xffff) == 0) {
- // No active readers, and readers OK flag is clear.
- // We're the last reader out and there's a writer waiting. Wake it.
- semaphore_signal(l->readersDone);
+ uintptr_t overrideRefCount = (uintptr_t)tls_get_direct(QOS_KEY);
+ if (overrideRefCount == 0) return;
+
+ if (overrideRefCount == 1) {
+ // end the override
+ int res __unused = _pthread_override_qos_class_end_direct(mach_thread_self_direct());
+ assert(res == 0);
}
+
+ // decrement refcount
+ tls_set_direct(QOS_KEY, (void *)(overrideRefCount - 1));
}
+// SUPPORT_QOS_HACK
+#else
+// not SUPPORT_QOS_HACK
-static inline int _rwlock_try_read_nodebug(rwlock_t *l)
-{
- int i;
- if (DebuggerMode && isManagedDuringDebugger(l)) {
- if (! isReadingDuringDebugger(l)) {
- gdb_objc_debuggerModeFailure();
- }
- return 1;
- }
- for (i = 0; i < 16; i++) {
- int32_t old = l->state;
- if (old % 2 != 1) {
- // Readers not OK. Fail.
- return 0;
- } else {
- // Readers OK.
- if (OSAtomicCompareAndSwap32Barrier(old, old + 2, &l->state)) {
- // Success. Read lock acquired.
- return 1;
- } else {
- // CAS failed (writer or another reader). Redo from start.
- // trylock will fail against writer,
- // but retry a few times against reader.
- }
- }
- }
+static inline void qosStartOverride() { }
+static inline void qosEndOverride() { }
- // Too many retries. Give up.
- return 0;
-}
+// not SUPPORT_QOS_HACK
+#endif
-static inline void _rwlock_write_nodebug(rwlock_t *l)
-{
- if (DebuggerMode && isManagedDuringDebugger(l)) {
- if (! isWritingDuringDebugger(l)) {
- gdb_objc_debuggerModeFailure();
- }
- return;
+template <bool Debug>
+class rwlock_tt : nocopy_t {
+ pthread_rwlock_t mLock;
+
+ public:
+ rwlock_tt() : mLock(PTHREAD_RWLOCK_INITIALIZER) { }
+
+ void read()
+ {
+ lockdebug_rwlock_read(this);
+
+ qosStartOverride();
+ int err = pthread_rwlock_rdlock(&mLock);
+ if (err) _objc_fatal("pthread_rwlock_rdlock failed (%d)", err);
}
- // Only one writer allowed at a time.
- pthread_mutex_lock(&l->writerMutex);
+ void unlockRead()
+ {
+ lockdebug_rwlock_unlock_read(this);
- // Clear "readers OK" bit and "blocked readers" count.
- int32_t newState = OSAtomicAnd32(0x0000fffe, (uint32_t *)&l->state);
-
- if (newState == 0) {
- // No "active readers". Success.
- OSMemoryBarrier();
- } else {
- // Wait for "active readers" to complete.
- semaphore_wait(l->readersDone);
+ int err = pthread_rwlock_unlock(&mLock);
+ if (err) _objc_fatal("pthread_rwlock_unlock failed (%d)", err);
+ qosEndOverride();
}
-}
-static inline void _rwlock_unlock_write_nodebug(rwlock_t *l)
-{
- if (DebuggerMode && isManagedDuringDebugger(l)) {
- return;
+ bool tryRead()
+ {
+ qosStartOverride();
+ int err = pthread_rwlock_tryrdlock(&mLock);
+ if (err == 0) {
+ lockdebug_rwlock_try_read_success(this);
+ return true;
+ } else if (err == EBUSY) {
+ qosEndOverride();
+ return false;
+ } else {
+ _objc_fatal("pthread_rwlock_tryrdlock failed (%d)", err);
+ }
}
- // Reinstate "readers OK" bit and clear reader counts.
- int32_t oldState;
- do {
- oldState = l->state;
- } while (!OSAtomicCompareAndSwap32Barrier(oldState, 0x1, &l->state));
-
- // Unblock any "blocked readers" that arrived while we held the lock
- oldState = oldState >> 16;
- while (oldState--) {
- semaphore_signal(l->writerDone);
+ void write()
+ {
+ lockdebug_rwlock_write(this);
+
+ qosStartOverride();
+ int err = pthread_rwlock_wrlock(&mLock);
+ if (err) _objc_fatal("pthread_rwlock_wrlock failed (%d)", err);
}
- // Allow a new writer.
- pthread_mutex_unlock(&l->writerMutex);
-}
+ void unlockWrite()
+ {
+ lockdebug_rwlock_unlock_write(this);
-static inline int _rwlock_try_write_nodebug(rwlock_t *l)
-{
- if (DebuggerMode && isManagedDuringDebugger(l)) {
- if (! isWritingDuringDebugger(l)) {
- gdb_objc_debuggerModeFailure();
+ int err = pthread_rwlock_unlock(&mLock);
+ if (err) _objc_fatal("pthread_rwlock_unlock failed (%d)", err);
+ qosEndOverride();
+ }
+
+ bool tryWrite()
+ {
+ qosStartOverride();
+ int err = pthread_rwlock_trywrlock(&mLock);
+ if (err == 0) {
+ lockdebug_rwlock_try_write_success(this);
+ return true;
+ } else if (err == EBUSY) {
+ qosEndOverride();
+ return false;
+ } else {
+ _objc_fatal("pthread_rwlock_trywrlock failed (%d)", err);
}
- return 1;
}
- if (pthread_mutex_trylock(&l->writerMutex)) {
- // Some other writer is in the way - fail
- return 0;
+
+ void assertReading() {
+ lockdebug_rwlock_assert_reading(this);
}
- // Similar to _rwlock_write_nodebug, but less intrusive with readers active
+ void assertWriting() {
+ lockdebug_rwlock_assert_writing(this);
+ }
- int32_t oldState, newState;
- oldState = l->state;
- newState = oldState & 0x0000fffe;
- if (newState != 0) {
- // Readers active. Give up.
- pthread_mutex_unlock(&l->writerMutex);
- return 0;
+ void assertLocked() {
+ lockdebug_rwlock_assert_locked(this);
}
- if (!OSAtomicCompareAndSwap32Barrier(oldState, newState, &l->state)) {
- // CAS failed (reader interupted). Give up.
- pthread_mutex_unlock(&l->writerMutex);
- return 0;
+
+ void assertUnlocked() {
+ lockdebug_rwlock_assert_unlocked(this);
}
+};
- return 1;
-}
+using rwlock_t = rwlock_tt<DEBUG>;
#ifndef __LP64__
#define headerIsBundle(hi) (hi->mhdr->filetype == MH_BUNDLE)
#define libobjc_header ((headerType *)&_mh_dylib_header)
-typedef struct {
- Dl_info dl_info;
- const segmentType * objcSegmentHeader;
- const segmentType * dataSegmentHeader;
- ptrdiff_t image_slide;
-#if !__OBJC2__
- struct old_protocol **proto_refs;
-#endif
-} os_header_info;
-
// Prototypes
/* Secure /tmp usage */
#endif
-__END_DECLS
+
+// malloc'd copy of an arbitrary byte range; caller owns and frees it.
+// NOTE(review): the malloc result is unchecked — a NULL return would
+// crash in memcpy. Confirm callers treat allocation failure as fatal.
+static inline void *
+memdup(const void *mem, size_t len)
+{
+ void *dup = malloc(len);
+ memcpy(dup, mem, len);
+ return dup;
+}
+
+// unsigned strdup
+// strdup() for unsigned (uint8_t) strings; result is malloc'd.
+static inline uint8_t *
+ustrdup(const uint8_t *str)
+{
+ return (uint8_t *)strdup((char *)str);
+}
+
+// nil-checking strdup
+// Like strdup() but returns nil for nil input instead of crashing.
+// NOTE(review): despite the signed-sounding name this takes uint8_t*
+// and is byte-identical to ustrdupMaybeNil below.
+static inline uint8_t *
+strdupMaybeNil(const uint8_t *str)
+{
+ if (!str) return nil;
+ return (uint8_t *)strdup((char *)str);
+}
+
+// nil-checking unsigned strdup
+// Unsigned-string variant of strdupMaybeNil (same implementation).
+static inline uint8_t *
+ustrdupMaybeNil(const uint8_t *str)
+{
+ if (!str) return nil;
+ return (uint8_t *)strdup((char *)str);
+}
#endif