#define _OBJC_OS_H
#include <TargetConditionals.h>
+#include "objc-config.h"
+
+#ifdef __LP64__
+# define WORD_SHIFT 3UL
+# define WORD_MASK 7UL
+# define WORD_BITS 64
+#else
+# define WORD_SHIFT 2UL
+# define WORD_MASK 3UL
+# define WORD_BITS 32
+#endif
+
+// Round x up to the next word boundary (8 bytes on LP64, 4 bytes
+// otherwise, per WORD_MASK above).
+static inline uint32_t word_align(uint32_t x) {
+ return (x + WORD_MASK) & ~WORD_MASK;
+}
+// size_t overload of word_align; same word-boundary rounding.
+static inline size_t word_align(size_t x) {
+ return (x + WORD_MASK) & ~WORD_MASK;
+}
+
+
+// Mix-in for classes that must not be copied.
+// Deleted copy constructor and copy assignment make any derived class
+// non-copyable; the protected ctor/dtor restrict use to base-class role.
+class nocopy_t {
+ private:
+ nocopy_t(const nocopy_t&) = delete;
+ const nocopy_t& operator=(const nocopy_t&) = delete;
+ protected:
+ nocopy_t() { }
+ ~nocopy_t() { }
+};
+
#if TARGET_OS_MAC
# include <unistd.h>
# include <pthread.h>
# include <crt_externs.h>
-# include <AssertMacros.h>
# undef check
-# include <AvailabilityMacros.h>
+# include <Availability.h>
# include <TargetConditionals.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <sys/stat.h>
# include <sys/param.h>
# include <mach/mach.h>
+# include <mach/vm_param.h>
+# include <mach/mach_time.h>
# include <mach-o/dyld.h>
# include <mach-o/ldsyms.h>
# include <mach-o/loader.h>
# include <mach-o/getsect.h>
# include <mach-o/dyld_priv.h>
# include <malloc/malloc.h>
+# include <os/lock_private.h>
# include <libkern/OSAtomic.h>
# include <libkern/OSCacheControl.h>
# include <System/pthread_machdep.h>
# include "objc-probes.h" // generated dtrace probe definitions.
+// Some libc functions call objc_msgSend()
+// so we can't use them without deadlocks.
+void syslog(int, const char *, ...) UNAVAILABLE_ATTRIBUTE;
+void vsyslog(int, const char *, va_list) UNAVAILABLE_ATTRIBUTE;
+
-#if defined(__i386__) || defined(__x86_64__)
+#define ALWAYS_INLINE inline __attribute__((always_inline))
+#define NEVER_INLINE inline __attribute__((noinline))
-// Inlined spinlock.
-// Not for arm on iOS because it hurts uniprocessor performance.
-#define ARR_SPINLOCK_INIT 0
-// XXX -- Careful: OSSpinLock isn't volatile, but should be
-typedef volatile int ARRSpinLock;
-__attribute__((always_inline))
-static inline void ARRSpinLockLock(ARRSpinLock *l)
+
+// Add with carry, wrapping Clang's __builtin_addcl intrinsic:
+// returns lhs + rhs + carryin; *carryout receives the outgoing carry.
+static ALWAYS_INLINE uintptr_t
+addc(uintptr_t lhs, uintptr_t rhs, uintptr_t carryin, uintptr_t *carryout)
{
- unsigned y;
-again:
- if (__builtin_expect(__sync_lock_test_and_set(l, 1), 0) == 0) {
- return;
- }
- for (y = 1000; y; y--) {
-#if defined(__i386__) || defined(__x86_64__)
- asm("pause");
-#endif
- if (*l == 0) goto again;
- }
- thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1);
- goto again;
+ return __builtin_addcl(lhs, rhs, carryin, carryout);
+}
+
+// Subtract with borrow (__builtin_subcl); *carryout receives the
+// outgoing borrow.
+static ALWAYS_INLINE uintptr_t
+subc(uintptr_t lhs, uintptr_t rhs, uintptr_t carryin, uintptr_t *carryout)
+{
+ return __builtin_subcl(lhs, rhs, carryin, carryout);
+}
+
+
+#if __arm64__
+
+// arm64: load-exclusive (ldxr). Opens the exclusive monitor on src;
+// pair with StoreExclusive / StoreReleaseExclusive below.
+static ALWAYS_INLINE
+uintptr_t
+LoadExclusive(uintptr_t *src)
+{
+ uintptr_t result;
+ asm("ldxr %x0, [%x1]"
+ : "=r" (result)
+ : "r" (src), "m" (*src));
+ return result;
+}
+
+// arm64: store-exclusive (stxr). The status register is 0 on success,
+// hence the !result. oldvalue is unused here: interference is detected
+// by the exclusive monitor, not by a compare.
+static ALWAYS_INLINE
+bool
+StoreExclusive(uintptr_t *dst, uintptr_t oldvalue __unused, uintptr_t value)
+{
+ uint32_t result;
+ asm("stxr %w0, %x2, [%x3]"
+ : "=r" (result), "=m" (*dst)
+ : "r" (value), "r" (dst));
+ return !result;
+}
+
+
+// arm64: store-release-exclusive (stlxr) — like StoreExclusive but
+// with release memory ordering on the store.
+static ALWAYS_INLINE
+bool
+StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue __unused, uintptr_t value)
+{
+ uint32_t result;
+ asm("stlxr %w0, %x2, [%x3]"
+ : "=r" (result), "=m" (*dst)
+ : "r" (value), "r" (dst));
+ return !result;
+}
+
+
+#elif __arm__
+
+// arm32: plain load; "exclusivity" is emulated by the compare-and-swap
+// in StoreExclusive below.
+static ALWAYS_INLINE
+uintptr_t
+LoadExclusive(uintptr_t *src)
+{
+ return *src;
+}
+
+// arm32: CAS against the value previously returned by LoadExclusive
+// (no memory barrier).
+static ALWAYS_INLINE
+bool
+StoreExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
+{
+ return OSAtomicCompareAndSwapPtr((void *)oldvalue, (void *)value,
+ (void **)dst);
+}
+
+// arm32: barrier variant of the CAS for release-ordering stores.
+static ALWAYS_INLINE
+bool
+StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
+{
+ return OSAtomicCompareAndSwapPtrBarrier((void *)oldvalue, (void *)value,
+ (void **)dst);
}
-__attribute__((always_inline))
-static inline void ARRSpinLockUnlock(ARRSpinLock *l)
+
+
+#elif __x86_64__ || __i386__
+
+// x86: there is no load/store-exclusive; a plain load plus
+// compare-and-swap provides the same protocol.
+static ALWAYS_INLINE
+uintptr_t
+LoadExclusive(uintptr_t *src)
{
- __sync_lock_release(l);
+ return *src;
}
+
+static ALWAYS_INLINE
+bool
+StoreExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
{
- return __sync_bool_compare_and_swap(l, 0, 1);
+
+ return __sync_bool_compare_and_swap((void **)dst, (void *)oldvalue, (void *)value);
}
-#define OSSpinLock ARRSpinLock
-#define OSSpinLockTry(l) ARRSpinLockTry(l)
-#define OSSpinLockLock(l) ARRSpinLockLock(l)
-#define OSSpinLockUnlock(l) ARRSpinLockUnlock(l)
-#undef OS_SPINLOCK_INIT
-#define OS_SPINLOCK_INIT ARR_SPINLOCK_INIT
+// x86: reuse StoreExclusive; __sync CAS already provides full ordering.
+static ALWAYS_INLINE
+bool
+StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value)
+{
+ return StoreExclusive(dst, oldvalue, value);
+}
+#else
+# error unknown architecture
#endif
+// Mutual-exclusion lock built on the os_lock "handoff" variant.
+class spinlock_t {
+ os_lock_handoff_s mLock;
+ public:
+ spinlock_t() : mLock(OS_LOCK_HANDOFF_INIT) { }
+
+ void lock() { os_lock_lock(&mLock); }
+ void unlock() { os_lock_unlock(&mLock); }
+ bool trylock() { return os_lock_trylock(&mLock); }
+
+
+ // Address-ordered lock discipline for a pair of locks.
+ // Acquiring in a consistent (address-based) order prevents deadlock
+ // when two threads take the same pair; lock1 == lock2 is allowed
+ // and acquires only once.
+
+ static void lockTwo(spinlock_t *lock1, spinlock_t *lock2) {
+ if (lock1 > lock2) {
+ lock1->lock();
+ lock2->lock();
+ } else {
+ lock2->lock();
+ if (lock2 != lock1) lock1->lock();
+ }
+ }
+
+ static void unlockTwo(spinlock_t *lock1, spinlock_t *lock2) {
+ lock1->unlock();
+ if (lock2 != lock1) lock2->unlock();
+ }
+};
+
+
#if !TARGET_OS_IPHONE
# include <CrashReporterClient.h>
#else
__END_DECLS
#endif
-#if TARGET_IPHONE_SIMULATOR
- // getsectiondata() and getsegmentdata() are unavailable
- __BEGIN_DECLS
-# define getsectiondata(m, s, n, c) objc_getsectiondata(m, s, n, c)
-# define getsegmentdata(m, s, c) objc_getsegmentdata(m, s, c)
- extern uint8_t *objc_getsectiondata(const struct mach_header *mh, const char *segname, const char *sectname, unsigned long *outSize);
- extern uint8_t * objc_getsegmentdata(const struct mach_header *mh, const char *segname, unsigned long *outSize);
- __END_DECLS
-#endif
-
# if __cplusplus
# include <vector>
# include <algorithm>
/* Use this for functions that are intended to be breakpoint hooks.
If you do not, the compiler may optimize them away.
BREAKPOINT_FUNCTION( void stop_on_error(void) ); */
-# define BREAKPOINT_FUNCTION(prototype) \
- OBJC_EXTERN __attribute__((noinline, visibility("hidden"))) \
+# define BREAKPOINT_FUNCTION(prototype) \
+ OBJC_EXTERN __attribute__((noinline, used, visibility("hidden"))) \
prototype { asm(""); }
#elif TARGET_OS_WIN32
# include <string.h>
# include <assert.h>
# include <malloc.h>
-# include <AvailabilityMacros.h>
+# include <Availability.h>
# if __cplusplus
# include <vector>
#include <objc/objc.h>
#include <objc/objc-api.h>
-__BEGIN_DECLS
-
extern void _objc_fatal(const char *fmt, ...) __attribute__((noreturn, format (printf, 1, 2)));
#define INIT_ONCE_PTR(var, create, delete) \
// Thread keys reserved by libc for our use.
-// Keys [0..4] are used by autozone.
-#if defined(__PTK_FRAMEWORK_OBJC_KEY5)
+#if defined(__PTK_FRAMEWORK_OBJC_KEY0)
# define SUPPORT_DIRECT_THREAD_KEYS 1
-# define TLS_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY5)
-# define SYNC_DATA_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY6)
-# define SYNC_COUNT_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY7)
-# define AUTORELEASE_POOL_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY8)
+# define TLS_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY0)
+# define SYNC_DATA_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY1)
+# define SYNC_COUNT_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY2)
+# define AUTORELEASE_POOL_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY3)
# if SUPPORT_RETURN_AUTORELEASE
-# define AUTORELEASE_POOL_RECLAIM_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY9)
+# define RETURN_DISPOSITION_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY4)
+# endif
+# if SUPPORT_QOS_HACK
+# define QOS_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY5)
# endif
#else
# define SUPPORT_DIRECT_THREAD_KEYS 0
static __inline size_t malloc_size(const void *p) { return _msize((void*)p); /* fixme invalid pointer check? */ }
-// AssertMacros
-
-#define require_action_string(cond, dest, act, msg) do { if (!(cond)) { { act; } goto dest; } } while (0)
-#define require_noerr_string(err, dest, msg) do { if (err) goto dest; } while (0)
-#define require_string(cond, dest, msg) do { if (!(cond)) goto dest; } while (0)
-
-
// OSAtomic
static __inline BOOL OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
}
-typedef mutex_t OSSpinLock;
-#define OSSpinLockLock(l) mutex_lock(l)
-#define OSSpinLockUnlock(l) mutex_unlock(l)
-#define OS_SPINLOCK_INIT MUTEX_INITIALIZER
+typedef mutex_t spinlock_t;
+#define spinlock_lock(l) mutex_lock(l)
+#define spinlock_unlock(l) mutex_unlock(l)
+#define SPINLOCK_INITIALIZER MUTEX_INITIALIZER
typedef struct {
}
return WaitForSingleObject(c->mutex, INFINITE);
}
-static inline int _monitor_exit_nodebug(monitor_t *c) {
+static inline int _monitor_leave_nodebug(monitor_t *c) {
if (!ReleaseMutex(c->mutex)) return MONITOR_NOT_ENTERED;
else return 0;
}
// fixme no rwlock yet
-#define rwlock_t mutex_t
-#define rwlock_init(r) mutex_init(r)
-#define _rwlock_read_nodebug(m) _mutex_lock_nodebug(m)
-#define _rwlock_write_nodebug(m) _mutex_lock_nodebug(m)
-#define _rwlock_try_read_nodebug(m) _mutex_try_lock_nodebug(m)
-#define _rwlock_try_write_nodebug(m) _mutex_try_lock_nodebug(m)
-#define _rwlock_unlock_read_nodebug(m) _mutex_unlock_nodebug(m)
-#define _rwlock_unlock_write_nodebug(m) _mutex_unlock_nodebug(m)
-
typedef IMAGE_DOS_HEADER headerType;
// fixme YES bundle? NO bundle? sometimes?
// OS compatibility
+// Monotonic timestamp from mach_absolute_time().
+// NOTE(review): mach_absolute_time() returns abstract ticks; they equal
+// nanoseconds only where the timebase is 1:1 — confirm whether a
+// mach_timebase_info conversion is needed on other hardware.
+static inline uint64_t nanoseconds() {
+ return mach_absolute_time();
+}
+
// Internal data types
typedef pthread_t objc_thread_t;
#if SUPPORT_DIRECT_THREAD_KEYS
-#if !NDEBUG
+#if DEBUG
+// Debug-only sanity check: true if k is one of the reserved
+// direct-TSD keys defined above.
static bool is_valid_direct_key(tls_key_t k) {
return ( k == SYNC_DATA_DIRECT_KEY
|| k == SYNC_COUNT_DIRECT_KEY
|| k == AUTORELEASE_POOL_KEY
# if SUPPORT_RETURN_AUTORELEASE
- || k == AUTORELEASE_POOL_RECLAIM_KEY
+ || k == RETURN_DISPOSITION_KEY
+# endif
+# if SUPPORT_QOS_HACK
+ || k == QOS_KEY
# endif
);
}
// rdar://9162780 _pthread_get/setspecific_direct are inefficient
// copied from libdispatch
-__attribute__((always_inline)) __attribute__((const))
-static inline void**
+__attribute__((const))
+static ALWAYS_INLINE void**
tls_base(void)
{
uintptr_t p;
#endif
}
-__attribute__((always_inline))
-static inline void
+
+static ALWAYS_INLINE void
tls_set_direct(void **tsdb, tls_key_t k, void *v)
{
assert(is_valid_direct_key(k));
#define tls_set_direct(k, v) \
tls_set_direct(tls_base(), (k), (v))
-__attribute__((always_inline))
-static inline void *
+
+static ALWAYS_INLINE void *
tls_get_direct(void **tsdb, tls_key_t k)
{
assert(is_valid_direct_key(k));
#endif
-typedef pthread_mutex_t mutex_t;
-#define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER;
+// Fast pthread_self(): read the cached pthread_t straight out of the
+// thread's direct TSD slot.
+static inline pthread_t pthread_self_direct()
+{
+ return (pthread_t)
+ _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_SELF);
+}
-extern int DebuggerMode;
-extern void gdb_objc_debuggerModeFailure(void);
-extern BOOL isManagedDuringDebugger(void *lock);
-extern BOOL isLockedDuringDebugger(void *lock);
+// Read the cached mach thread port from the thread's direct TSD slot
+// (a plain memory read; no call into mach).
+static inline mach_port_t mach_thread_self_direct()
+{
+ return (mach_port_t)(uintptr_t)
+ _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
+}
-static inline int _mutex_lock_nodebug(mutex_t *m) {
- if (DebuggerMode && isManagedDuringDebugger(m)) {
- if (! isLockedDuringDebugger(m)) {
- gdb_objc_debuggerModeFailure();
- }
- return 0;
- }
- return pthread_mutex_lock(m);
+#if SUPPORT_QOS_HACK
+// Current thread's QoS priority from its TSD slot, with the
+// _PTHREAD_PRIORITY flag bits masked off.
+static inline pthread_priority_t pthread_self_priority_direct()
+{
+ pthread_priority_t pri = (pthread_priority_t)
+ _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS);
+ return pri & ~_PTHREAD_PRIORITY_FLAGS_MASK;
}
-static inline bool _mutex_try_lock_nodebug(mutex_t *m) {
- if (DebuggerMode && isManagedDuringDebugger(m)) {
- if (! isLockedDuringDebugger(m)) {
- gdb_objc_debuggerModeFailure();
+#endif
+
+
+template <bool Debug> class mutex_tt;
+template <bool Debug> class monitor_tt;
+template <bool Debug> class rwlock_tt;
+template <bool Debug> class recursive_mutex_tt;
+
+#include "objc-lockdebug.h"
+
+// Non-recursive pthread mutex wrapper. The Debug template parameter
+// selects lockdebug bookkeeping (see objc-lockdebug.h); any pthread
+// error other than trylock's EBUSY is fatal.
+template <bool Debug>
+class mutex_tt : nocopy_t {
+ pthread_mutex_t mLock;
+
+ public:
+ mutex_tt() : mLock(PTHREAD_MUTEX_INITIALIZER) { }
+
+ void lock()
+ {
+ lockdebug_mutex_lock(this);
+
+ int err = pthread_mutex_lock(&mLock);
+ if (err) _objc_fatal("pthread_mutex_lock failed (%d)", err);
+ }
+
+ bool tryLock()
+ {
+ int err = pthread_mutex_trylock(&mLock);
+ if (err == 0) {
+ lockdebug_mutex_try_lock_success(this);
+ return true;
+ } else if (err == EBUSY) {
+ return false;
+ } else {
+ _objc_fatal("pthread_mutex_trylock failed (%d)", err);
}
- return true;
}
- return !pthread_mutex_trylock(m);
-}
-static inline int _mutex_unlock_nodebug(mutex_t *m) {
- if (DebuggerMode && isManagedDuringDebugger(m)) {
- return 0;
+
+ void unlock()
+ {
+ lockdebug_mutex_unlock(this);
+
+ int err = pthread_mutex_unlock(&mLock);
+ if (err) _objc_fatal("pthread_mutex_unlock failed (%d)", err);
}
- return pthread_mutex_unlock(m);
-}
-typedef struct {
- pthread_mutex_t *mutex;
-} recursive_mutex_t;
-#define RECURSIVE_MUTEX_INITIALIZER {0};
-#define RECURSIVE_MUTEX_NOT_LOCKED EPERM
-extern void recursive_mutex_init(recursive_mutex_t *m);
+ void assertLocked() {
+ lockdebug_mutex_assert_locked(this);
+ }
-static inline int _recursive_mutex_lock_nodebug(recursive_mutex_t *m) {
- assert(m->mutex);
- if (DebuggerMode && isManagedDuringDebugger(m)) {
- if (! isLockedDuringDebugger((mutex_t *)m)) {
- gdb_objc_debuggerModeFailure();
+ void assertUnlocked() {
+ lockdebug_mutex_assert_unlocked(this);
+ }
+};
+
+using mutex_t = mutex_tt<DEBUG>;
+
+
+// Recursive pthread mutex wrapper; same lockdebug/fatal-error
+// conventions as mutex_tt. tryUnlock() treats EPERM (not owner or not
+// locked) as false rather than fatal.
+template <bool Debug>
+class recursive_mutex_tt : nocopy_t {
+ pthread_mutex_t mLock;
+
+ public:
+ recursive_mutex_tt() : mLock(PTHREAD_RECURSIVE_MUTEX_INITIALIZER) { }
+
+ void lock()
+ {
+ lockdebug_recursive_mutex_lock(this);
+
+ int err = pthread_mutex_lock(&mLock);
+ if (err) _objc_fatal("pthread_mutex_lock failed (%d)", err);
+ }
+
+ bool tryLock()
+ {
+ int err = pthread_mutex_trylock(&mLock);
+ if (err == 0) {
+ lockdebug_recursive_mutex_lock(this);
+ return true;
+ } else if (err == EBUSY) {
+ return false;
+ } else {
+ _objc_fatal("pthread_mutex_trylock failed (%d)", err);
}
- return 0;
}
- return pthread_mutex_lock(m->mutex);
-}
-static inline bool _recursive_mutex_try_lock_nodebug(recursive_mutex_t *m) {
- assert(m->mutex);
- if (DebuggerMode && isManagedDuringDebugger(m)) {
- if (! isLockedDuringDebugger((mutex_t *)m)) {
- gdb_objc_debuggerModeFailure();
+
+
+ void unlock()
+ {
+ lockdebug_recursive_mutex_unlock(this);
+
+ int err = pthread_mutex_unlock(&mLock);
+ if (err) _objc_fatal("pthread_mutex_unlock failed (%d)", err);
+ }
+
+ bool tryUnlock()
+ {
+ int err = pthread_mutex_unlock(&mLock);
+ if (err == 0) {
+ lockdebug_recursive_mutex_unlock(this);
+ return true;
+ } else if (err == EPERM) {
+ return false;
+ } else {
+ _objc_fatal("pthread_mutex_unlock failed (%d)", err);
}
- return true;
}
- return !pthread_mutex_trylock(m->mutex);
-}
-static inline int _recursive_mutex_unlock_nodebug(recursive_mutex_t *m) {
- assert(m->mutex);
- if (DebuggerMode && isManagedDuringDebugger(m)) {
- return 0;
+
+
+ void assertLocked() {
+ lockdebug_recursive_mutex_assert_locked(this);
}
- return pthread_mutex_unlock(m->mutex);
-}
+ void assertUnlocked() {
+ lockdebug_recursive_mutex_assert_unlocked(this);
+ }
+};
-typedef struct {
+using recursive_mutex_t = recursive_mutex_tt<DEBUG>;
+
+
+// Mutex + condition variable pair (a classic monitor).
+// wait() must be called with the monitor entered, per pthread_cond_wait.
+template <bool Debug>
+class monitor_tt {
 pthread_mutex_t mutex;
 pthread_cond_t cond;
-} monitor_t;
-#define MONITOR_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER }
-#define MONITOR_NOT_ENTERED EPERM
-static inline int monitor_init(monitor_t *c) {
- int err = pthread_mutex_init(&c->mutex, NULL);
- if (err) return err;
- err = pthread_cond_init(&c->cond, NULL);
- if (err) {
- pthread_mutex_destroy(&c->mutex);
- return err;
+ public:
+ monitor_tt()
+ : mutex(PTHREAD_MUTEX_INITIALIZER), cond(PTHREAD_COND_INITIALIZER) { }
+
+ void enter()
+ {
+ lockdebug_monitor_enter(this);
+
+ int err = pthread_mutex_lock(&mutex);
+ if (err) _objc_fatal("pthread_mutex_lock failed (%d)", err);
}
- return 0;
-}
-static inline int _monitor_enter_nodebug(monitor_t *c) {
- assert(!isManagedDuringDebugger(c));
- return pthread_mutex_lock(&c->mutex);
-}
-static inline int _monitor_exit_nodebug(monitor_t *c) {
- return pthread_mutex_unlock(&c->mutex);
-}
-static inline int _monitor_wait_nodebug(monitor_t *c) {
- return pthread_cond_wait(&c->cond, &c->mutex);
-}
-static inline int monitor_notify(monitor_t *c) {
- return pthread_cond_signal(&c->cond);
-}
-static inline int monitor_notifyAll(monitor_t *c) {
- return pthread_cond_broadcast(&c->cond);
-}
+
+ void leave()
+ {
+ lockdebug_monitor_leave(this);
+
+ int err = pthread_mutex_unlock(&mutex);
+ if (err) _objc_fatal("pthread_mutex_unlock failed (%d)", err);
+ }
+
+ void wait()
+ {
+ lockdebug_monitor_wait(this);
+
+ int err = pthread_cond_wait(&cond, &mutex);
+ if (err) _objc_fatal("pthread_cond_wait failed (%d)", err);
+ }
+
+ void notify()
+ {
+ int err = pthread_cond_signal(&cond);
+ if (err) _objc_fatal("pthread_cond_signal failed (%d)", err);
+ }
+
+ void notifyAll()
+ {
+ int err = pthread_cond_broadcast(&cond);
+ if (err) _objc_fatal("pthread_cond_broadcast failed (%d)", err);
+ }
+
+ void assertLocked()
+ {
+ lockdebug_monitor_assert_locked(this);
+ }
+
+ void assertUnlocked()
+ {
+ lockdebug_monitor_assert_unlocked(this);
+ }
+};
+
+using monitor_t = monitor_tt<DEBUG>;
// semaphore_create formatted for INIT_ONCE use
}
-/* Custom read-write lock
- - reader is atomic add/subtract
- - writer is pthread mutex plus atomic add/subtract
- - fairness: new readers wait if a writer wants in
- - fairness: when writer completes, readers (probably) precede new writer
-
- state: xxxxxxxx xxxxxxxx yyyyyyyy yyyyyyyz
- x: blocked reader count
- y: active reader count
- z: readers allowed flag
-*/
-typedef struct {
- pthread_rwlock_t rwl;
-} rwlock_t;
-
-extern BOOL isReadingDuringDebugger(rwlock_t *lock);
-extern BOOL isWritingDuringDebugger(rwlock_t *lock);
+#if SUPPORT_QOS_HACK
+// Override QOS class to avoid priority inversion in rwlocks
+// <rdar://17697862> do a qos override before taking rw lock in objc
-static inline void rwlock_init(rwlock_t *l)
-{
- int err __unused = pthread_rwlock_init(&l->rwl, NULL);
- assert(err == 0);
-}
+#include <pthread/workqueue_private.h>
+extern pthread_priority_t BackgroundPriority;
+extern pthread_priority_t MainPriority;
-static inline void _rwlock_read_nodebug(rwlock_t *l)
+// Begin (or nest) a QoS override, refcounted in the QOS_KEY TSD slot.
+// Only actually overrides when the current thread runs at background QoS.
+static inline void qosStartOverride()
{
- if (DebuggerMode && isManagedDuringDebugger(l)) {
- if (! isReadingDuringDebugger(l)) {
- gdb_objc_debuggerModeFailure();
+ uintptr_t overrideRefCount = (uintptr_t)tls_get_direct(QOS_KEY);
+ if (overrideRefCount > 0) {
+ // If there is a qos override, increment the refcount and continue
+ tls_set_direct(QOS_KEY, (void *)(overrideRefCount + 1));
+ }
+ else {
+ pthread_priority_t currentPriority = pthread_self_priority_direct();
+ // Check if override is needed. Only override if we are background qos
+ if (currentPriority != 0 && currentPriority <= BackgroundPriority) {
+ int res __unused = _pthread_override_qos_class_start_direct(mach_thread_self_direct(), MainPriority);
+ assert(res == 0);
+ // Once we override, we set the reference count in the tsd
+ // to know when to end the override
+ tls_set_direct(QOS_KEY, (void *)1);
}
- return;
}
- int err __unused = pthread_rwlock_rdlock(&l->rwl);
- assert(err == 0);
}
-static inline void _rwlock_unlock_read_nodebug(rwlock_t *l)
+// Balance qosStartOverride(): decrement the TSD refcount and end the
+// kernel override when it drops to zero. No-op if no override is active.
+static inline void qosEndOverride()
{
- if (DebuggerMode && isManagedDuringDebugger(l)) {
- return;
+ uintptr_t overrideRefCount = (uintptr_t)tls_get_direct(QOS_KEY);
+ if (overrideRefCount == 0) return;
+
+ if (overrideRefCount == 1) {
+ // end the override
+ int res __unused = _pthread_override_qos_class_end_direct(mach_thread_self_direct());
+ assert(res == 0);
}
- int err __unused = pthread_rwlock_unlock(&l->rwl);
- assert(err == 0);
+
+ // decrement refcount
+ tls_set_direct(QOS_KEY, (void *)(overrideRefCount - 1));
}
+// SUPPORT_QOS_HACK
+#else
+// not SUPPORT_QOS_HACK
-static inline bool _rwlock_try_read_nodebug(rwlock_t *l)
-{
- if (DebuggerMode && isManagedDuringDebugger(l)) {
- if (! isReadingDuringDebugger(l)) {
- gdb_objc_debuggerModeFailure();
- }
- return true;
+// SUPPORT_QOS_HACK disabled: the overrides compile to no-ops.
+static inline void qosStartOverride() { }
+static inline void qosEndOverride() { }
+
+// not SUPPORT_QOS_HACK
+#endif
+
+
+// pthread rwlock wrapper with lockdebug checks and a refcounted QoS
+// override held for the duration of each acquisition (qosStartOverride
+// on acquire, qosEndOverride on release or failed try).
+template <bool Debug>
+class rwlock_tt : nocopy_t {
+ pthread_rwlock_t mLock;
+
+ public:
+ rwlock_tt() : mLock(PTHREAD_RWLOCK_INITIALIZER) { }
+
+ void read()
+ {
+ lockdebug_rwlock_read(this);
+
+ qosStartOverride();
+ int err = pthread_rwlock_rdlock(&mLock);
+ if (err) _objc_fatal("pthread_rwlock_rdlock failed (%d)", err);
}
- int err = pthread_rwlock_tryrdlock(&l->rwl);
- assert(err == 0 || err == EBUSY);
- return (err == 0);
-}
+ void unlockRead()
+ {
+ lockdebug_rwlock_unlock_read(this);
-static inline void _rwlock_write_nodebug(rwlock_t *l)
-{
- if (DebuggerMode && isManagedDuringDebugger(l)) {
- if (! isWritingDuringDebugger(l)) {
- gdb_objc_debuggerModeFailure();
+ int err = pthread_rwlock_unlock(&mLock);
+ if (err) _objc_fatal("pthread_rwlock_unlock failed (%d)", err);
+ qosEndOverride();
+ }
+
+ bool tryRead()
+ {
+ qosStartOverride();
+ int err = pthread_rwlock_tryrdlock(&mLock);
+ if (err == 0) {
+ lockdebug_rwlock_try_read_success(this);
+ return true;
+ } else if (err == EBUSY) {
+ qosEndOverride();
+ return false;
+ } else {
+ _objc_fatal("pthread_rwlock_tryrdlock failed (%d)", err);
}
- return;
}
- int err __unused = pthread_rwlock_wrlock(&l->rwl);
- assert(err == 0);
-}
-static inline void _rwlock_unlock_write_nodebug(rwlock_t *l)
-{
- if (DebuggerMode && isManagedDuringDebugger(l)) {
- return;
+ void write()
+ {
+ lockdebug_rwlock_write(this);
+
+ qosStartOverride();
+ int err = pthread_rwlock_wrlock(&mLock);
+ if (err) _objc_fatal("pthread_rwlock_wrlock failed (%d)", err);
}
- int err __unused = pthread_rwlock_unlock(&l->rwl);
- assert(err == 0);
-}
-static inline bool _rwlock_try_write_nodebug(rwlock_t *l)
-{
- if (DebuggerMode && isManagedDuringDebugger(l)) {
- if (! isWritingDuringDebugger(l)) {
- gdb_objc_debuggerModeFailure();
+ void unlockWrite()
+ {
+ lockdebug_rwlock_unlock_write(this);
+
+ int err = pthread_rwlock_unlock(&mLock);
+ if (err) _objc_fatal("pthread_rwlock_unlock failed (%d)", err);
+ qosEndOverride();
+ }
+
+ bool tryWrite()
+ {
+ qosStartOverride();
+ int err = pthread_rwlock_trywrlock(&mLock);
+ if (err == 0) {
+ lockdebug_rwlock_try_write_success(this);
+ return true;
+ } else if (err == EBUSY) {
+ qosEndOverride();
+ return false;
+ } else {
+ _objc_fatal("pthread_rwlock_trywrlock failed (%d)", err);
}
- return true;
}
- int err = pthread_rwlock_trywrlock(&l->rwl);
- assert(err == 0 || err == EBUSY);
- return (err == 0);
-}
+
+
+ void assertReading() {
+ lockdebug_rwlock_assert_reading(this);
+ }
+
+ void assertWriting() {
+ lockdebug_rwlock_assert_writing(this);
+ }
+
+ void assertLocked() {
+ lockdebug_rwlock_assert_locked(this);
+ }
+
+ void assertUnlocked() {
+ lockdebug_rwlock_assert_unlocked(this);
+ }
+};
+
+using rwlock_t = rwlock_tt<DEBUG>;
#ifndef __LP64__
#endif
-__END_DECLS
+
+// Return a malloc'd copy of the len-byte buffer at mem.
+// Caller owns the result and must free() it.
+static inline void *
+memdup(const void *mem, size_t len)
+{
+ void *dup = malloc(len);
+ // Fail hard on allocation failure rather than passing NULL to memcpy
+ // (undefined behavior).
+ if (!dup) _objc_fatal("memdup: malloc(%zu) failed", len);
+ memcpy(dup, mem, len);
+ return dup;
+}
+
+// unsigned strdup
+// strdup() over a uint8_t string; result is malloc'd, caller frees.
+static inline uint8_t *
+ustrdup(const uint8_t *str)
+{
+ return (uint8_t *)strdup((char *)str);
+}
+
+// nil-checking strdup
+// NOTE(review): this body is identical to ustrdupMaybeNil below despite
+// the non-"unsigned" name; upstream declares this overload over plain
+// char* — confirm the intended signature.
+static inline uint8_t *
+strdupMaybeNil(const uint8_t *str)
+{
+ if (!str) return nil;
+ return (uint8_t *)strdup((char *)str);
+}
+
+// nil-checking unsigned strdup
+// Returns nil for nil input; otherwise a malloc'd copy, caller frees.
+static inline uint8_t *
+ustrdupMaybeNil(const uint8_t *str)
+{
+ if (!str) return nil;
+ return (uint8_t *)strdup((char *)str);
+}
#endif