uint32_t tsd_offset; /* copy-in */
uint32_t return_to_kernel_offset; /* copy-in */
uint32_t mach_thread_self_offset; /* copy-in */
+ uint32_t mutex_default_policy; /* copy-out */
} __attribute__ ((packed));
#ifdef KERNEL
SYSCTL_INT(_kern, OID_AUTO, pthread_debug_tracing, CTLFLAG_RW | CTLFLAG_LOCKED,
&pthread_debug_tracing, 0, "")
+static uint32_t pthread_mutex_default_policy;
+
+SYSCTL_INT(_kern, OID_AUTO, pthread_mutex_default_policy, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &pthread_mutex_default_policy, 0, "");
+
/*
* +-----+-----+-----+-----+-----+-----+-----+
* | MT | BG | UT | DE | IN | UN | mgr |
data.main_qos = _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0);
}
+ data.mutex_default_policy = pthread_mutex_default_policy;
+
kr = copyout(&data, pthread_init_data, pthread_init_sz);
if (kr != KERN_SUCCESS) {
return EINVAL;
pthread_zone_threadreq = zinit(sizeof(struct threadreq),
1024 * sizeof(struct threadreq), 8192, "pthread.threadreq");
+ int policy_bootarg;
+ if (PE_parse_boot_argn("pthread_mutex_default_policy", &policy_bootarg, sizeof(policy_bootarg))) {
+ pthread_mutex_default_policy = policy_bootarg;
+ }
+
/*
* register sysctls
*/
sysctl_register_oid(&sysctl__kern_wq_max_threads);
sysctl_register_oid(&sysctl__kern_wq_max_constrained_threads);
sysctl_register_oid(&sysctl__kern_pthread_debug_tracing);
+ sysctl_register_oid(&sysctl__kern_pthread_mutex_default_policy);
#if DEBUG
sysctl_register_oid(&sysctl__debug_wq_kevent_test);
* /usr/share/misc/pthread.codes during build.
*/
+// userspace trace points force slow-paths, so must be compiled in
+#define ENABLE_USERSPACE_TRACE 0
+
// pthread tracing subclasses
# define _TRACE_SUB_DEFAULT 0
# define _TRACE_SUB_WORKQUEUE 1
# define PTHREAD_TRACE_WQ_REQ(x,a,b,c,d,e) \
{ if (pthread_debug_tracing) { KERNEL_DEBUG_CONSTANT(x, VM_UNSLIDE(a), VM_UNSLIDE(b), c, d, e); } }
-#endif
+#else // KERNEL
+
+#if ENABLE_USERSPACE_TRACE
+# include <sys/kdebug.h>
+# define PTHREAD_TRACE(x, a, b, c, d) kdebug_trace(TRACE_##x, a, b, c, d)
+#else // ENABLE_USERSPACE_TRACE
+# define PTHREAD_TRACE(x, a, b, c, d) do { } while(0)
+#endif // ENABLE_USERSPACE_TRACE
+
+#endif // KERNEL
# define TRACE_CODE(name, subclass, code) \
static const int TRACE_##name = KDBG_CODE(DBG_PTHREAD, subclass, code)
-#else
+#else // _PTHREAD_BUILDING_CODES_
/* When not included as a header, this file is pre-processed into perl source to generate
* the pthread.codes file during build.
*/
# define TRACE_CODE(name, subclass, code) \
printf("0x%x\t%s\n", ((DBG_PTHREAD << 24) | ((subclass & 0xff) << 16) | ((code & 0x3fff) << 2)), STR(name))
-#endif
+#endif // _PTHREAD_BUILDING_CODES_
/* These defines translate into TRACE_<name> when used in source code, and are
* pre-processed out to a codes file by the build system.
TRACE_CODE(psynch_mutex_utrylock_failed, _TRACE_SUB_MUTEX, 0x1);
TRACE_CODE(psynch_mutex_uunlock, _TRACE_SUB_MUTEX, 0x2);
TRACE_CODE(psynch_ksyn_incorrect_owner, _TRACE_SUB_MUTEX, 0x3);
+TRACE_CODE(psynch_mutex_lock_updatebits, _TRACE_SUB_MUTEX, 0x4);
+TRACE_CODE(psynch_mutex_unlock_updatebits, _TRACE_SUB_MUTEX, 0x5);
#endif // _KERN_TRACE_H_
<dict>
<key>com.apple.kpi.bsd</key>
<string>12.0</string>
+ <key>com.apple.kpi.iokit</key>
+ <string>13.0.0</string>
<key>com.apple.kpi.libkern</key>
<string>11.2</string>
<key>com.apple.kpi.mach</key>
#define _PTHREAD_MUTEX_POLICY_FAIRSHARE 1
#define _PTHREAD_MUTEX_POLICY_FIRSTFIT 2
-/* sets the mutex policy attributes */
+/* manipulate the mutex policy attributes */
__API_AVAILABLE(macos(10.7), ios(5.0))
int pthread_mutexattr_setpolicy_np(pthread_mutexattr_t *, int );
+__API_AVAILABLE(macos(10.13.4), ios(11.3))
+int pthread_mutexattr_getpolicy_np(const pthread_mutexattr_t *, int * );
+
#endif /* (!_POSIX_C_SOURCE && !_XOPEN_SOURCE) || _DARWIN_C_SOURCE */
__API_AVAILABLE(macos(10.11))
void
_pthread_key_global_init(const char *envp[]);
+PTHREAD_NOEXPORT
+void
+_pthread_mutex_global_init(const char *envp[], struct _pthread_registration_data *registration_data);
+
PTHREAD_EXPORT
void
_pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int flags);
PTHREAD_NOEXPORT
void
-_pthread_bsdthread_init(void);
+_pthread_bsdthread_init(struct _pthread_registration_data *data);
PTHREAD_NOEXPORT_VARIANT
void
// Calls _pthread_set_self() to prepare the main thread for execution.
_pthread_main_thread_init(thread);
+ struct _pthread_registration_data registration_data;
// Set up kernel entry points with __bsdthread_register.
- _pthread_bsdthread_init();
+ _pthread_bsdthread_init(&registration_data);
- // Have pthread_key do its init envvar checks.
+ // Have pthread_key and pthread_mutex do their init envvar checks.
_pthread_key_global_init(envp);
+ _pthread_mutex_global_init(envp, &registration_data);
#if PTHREAD_DEBUG_LOG
_SIMPLE_STRING path = _simple_salloc();
/***** pthread workqueue support routines *****/
PTHREAD_NOEXPORT void
-_pthread_bsdthread_init(void)
+_pthread_bsdthread_init(struct _pthread_registration_data *data)
{
- struct _pthread_registration_data data = {};
- data.version = sizeof(struct _pthread_registration_data);
- data.dispatch_queue_offset = __PTK_LIBDISPATCH_KEY0 * sizeof(void *);
- data.return_to_kernel_offset = __TSD_RETURN_TO_KERNEL * sizeof(void *);
- data.tsd_offset = offsetof(struct _pthread, tsd);
- data.mach_thread_self_offset = __TSD_MACH_THREAD_SELF * sizeof(void *);
+ bzero(data, sizeof(*data));
+ data->version = sizeof(struct _pthread_registration_data);
+ data->dispatch_queue_offset = __PTK_LIBDISPATCH_KEY0 * sizeof(void *);
+ data->return_to_kernel_offset = __TSD_RETURN_TO_KERNEL * sizeof(void *);
+ data->tsd_offset = offsetof(struct _pthread, tsd);
+ data->mach_thread_self_offset = __TSD_MACH_THREAD_SELF * sizeof(void *);
int rv = __bsdthread_register(thread_start,
start_wqthread, (int)PTHREAD_SIZE,
- (void*)&data, (uintptr_t)sizeof(data),
- data.dispatch_queue_offset);
+ (void*)data, (uintptr_t)sizeof(*data),
+ data->dispatch_queue_offset);
if (rv > 0) {
if ((rv & PTHREAD_FEATURE_QOS_DEFAULT) == 0) {
__pthread_supported_features = rv;
}
- pthread_priority_t main_qos = (pthread_priority_t)data.main_qos;
+ /*
+ * TODO: differentiate between (-1, EINVAL) after fork (which has the side
+ * effect of resetting the child's stack_addr_hint before bailing out) and
+ * (-1, EINVAL) because of invalid arguments. We'd probably like to treat
+ * the latter as fatal.
+ *
+ * <rdar://problem/36451838>
+ */
+
+ pthread_priority_t main_qos = (pthread_priority_t)data->main_qos;
if (_pthread_priority_get_qos_newest(main_qos) != QOS_CLASS_UNSPECIFIED) {
_pthread_set_main_qos(main_qos);
_PTHREAD_LOCK_INIT(globals->psaved_self_global_lock);
__is_threaded = 0;
_pthread_main_thread_init(globals->psaved_self);
- _pthread_bsdthread_init();
+
+ struct _pthread_registration_data registration_data;
+ _pthread_bsdthread_init(&registration_data);
}
// Iterate pthread_atfork child handlers.
PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_mutex_corruption_abort(_pthread_mutex *mutex);
+// Process-wide default mutex policy. Starts as fairshare; may be switched to
+// firstfit once at init time by _pthread_mutex_global_init() below.
+extern int __pthread_mutex_default_policy PTHREAD_NOEXPORT;
+
+
+int __pthread_mutex_default_policy PTHREAD_NOEXPORT =
+ _PTHREAD_MUTEX_POLICY_FAIRSHARE;
+
+// One-time init: select the process default mutex policy. Firstfit is chosen
+// when either the PTHREAD_MUTEX_DEFAULT_POLICY environment variable requests
+// it, or the kernel's registration copy-out does (driven by the
+// kern.pthread_mutex_default_policy sysctl / boot-arg).
+PTHREAD_NOEXPORT
+void
+_pthread_mutex_global_init(const char *envp[],
+ struct _pthread_registration_data *registration_data)
+{
+ const char *envvar = _simple_getenv(envp, "PTHREAD_MUTEX_DEFAULT_POLICY");
+ // NOTE(review): only envvar[0] is examined, so any value whose first
+ // character is '2' selects firstfit — confirm single-digit values are
+ // the intended contract.
+ if ((envvar && (envvar[0] - '0') == _PTHREAD_MUTEX_POLICY_FIRSTFIT) ||
+ (registration_data->mutex_default_policy ==
+ _PTHREAD_MUTEX_POLICY_FIRSTFIT)) {
+ __pthread_mutex_default_policy = _PTHREAD_MUTEX_POLICY_FIRSTFIT;
+ }
+}
+
+
PTHREAD_ALWAYS_INLINE
static inline int _pthread_mutex_init(_pthread_mutex *mutex,
const pthread_mutexattr_t *attr, uint32_t static_type);
-#define DEBUG_TRACE_POINTS 0
-
-#if DEBUG_TRACE_POINTS
-#include <sys/kdebug.h>
-#define DEBUG_TRACE(x, a, b, c, d) kdebug_trace(TRACE_##x, a, b, c, d)
-#else
-#define DEBUG_TRACE(x, a, b, c, d) do { } while(0)
-#endif
-
typedef union mutex_seq {
uint32_t seq[2];
struct { uint32_t lgenval; uint32_t ugenval; };
oldseqval->seq_LU = seqaddr->seq_LU;
}
-PTHREAD_ALWAYS_INLINE
-static inline void
-mutex_seq_atomic_load_relaxed(mutex_seq *seqaddr, mutex_seq *oldseqval)
-{
- oldseqval->seq_LU = os_atomic_load(&seqaddr->atomic_seq_LU, relaxed);
-}
-
#define mutex_seq_atomic_load(seqaddr, oldseqval, m) \
mutex_seq_atomic_load_##m(seqaddr, oldseqval)
return res;
}
+// Read back the policy attribute (counterpart to
+// pthread_mutexattr_setpolicy_np). Returns EINVAL if the attr was never
+// initialized (signature check fails); *policy is untouched in that case.
+int
+pthread_mutexattr_getpolicy_np(const pthread_mutexattr_t *attr, int *policy)
+{
+ int res = EINVAL;
+ if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
+ *policy = attr->policy;
+ res = 0;
+ }
+ return res;
+}
+
int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
{
{
attr->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
attr->protocol = _PTHREAD_DEFAULT_PROTOCOL;
- attr->policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
+ attr->policy = __pthread_mutex_default_policy;
attr->type = PTHREAD_MUTEX_DEFAULT;
attr->sig = _PTHREAD_MUTEX_ATTR_SIG;
attr->pshared = _PTHREAD_DEFAULT_PSHARED;
}
} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, release));
+ PTHREAD_TRACE(psynch_mutex_unlock_updatebits, mutex, oldseq.lgenval,
+ newseq.lgenval, oldtid);
+
if (clearprepost) {
__psynch_cvclrprepost(mutex, newseq.lgenval, newseq.ugenval, 0, 0,
newseq.lgenval, flags | _PTHREAD_MTX_OPT_MUTEX);
+// (review note: gotlock is only decided on the firstfit path; fairshare
+// either breaks out early with the bits already set or always takes the
+// K/E bits, leaving gotlock at its initial true.)
static inline int
_pthread_mutex_lock_updatebits(_pthread_mutex *mutex, uint64_t selfid)
{
- int res = 0;
bool firstfit = (mutex->mtxopts.options.policy ==
_PTHREAD_MUTEX_POLICY_FIRSTFIT);
- bool isebit = false, updated = false;
+ bool gotlock = true;
mutex_seq *seqaddr;
MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
uint64_t oldtid;
do {
- if (firstfit && isebit && updated) {
- mutex_seq_atomic_load(seqaddr, &oldseq, relaxed);
- }
newseq = oldseq;
oldtid = os_atomic_load(tidaddr, relaxed);
- if (isebit && !(oldseq.lgenval & PTH_RWL_EBIT)) {
- // E bit was set on first pass through the loop but is no longer
- // set. Apparently we spin until it arrives.
- // XXX: verify this is desired behavior.
- continue;
- }
-
- if (isebit) {
- // first fit mutex now has the E bit set. Return 1.
- res = 1;
- break;
- }
-
if (firstfit) {
- isebit = (oldseq.lgenval & PTH_RWL_EBIT);
- } else if ((oldseq.lgenval & (PTH_RWL_KBIT|PTH_RWL_EBIT)) ==
- (PTH_RWL_KBIT|PTH_RWL_EBIT)) {
- // fairshare mutex and the bits are already set, just update tid
+ // firstfit locks can have the lock stolen out from under a locker
+ // between the unlock from the kernel and this lock path. When this
+ // happens, we still want to set the K bit before leaving the loop
+ // (or notice if the lock unlocks while we try to update).
+ gotlock = !is_rwl_ebit_set(oldseq.lgenval);
+ } else if ((oldseq.lgenval & (PTH_RWL_KBIT | PTH_RWL_EBIT)) ==
+ (PTH_RWL_KBIT | PTH_RWL_EBIT)) {
+ // bits are already set, just update the owner tidaddr
break;
}
- // either first fit or no E bit set
- // update the bits
newseq.lgenval |= PTH_RWL_KBIT | PTH_RWL_EBIT;
+ } while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
+ relaxed));
- // Retry if CAS fails, or if it succeeds with firstfit and E bit
- // already set
- } while (!(updated = mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
- relaxed)) || (firstfit && isebit));
-
- if (res == 0) {
+ if (gotlock) {
if (!os_atomic_cmpxchg(tidaddr, oldtid, selfid, relaxed)) {
// we own this mutex, nobody should be updating it except us
return _pthread_mutex_corruption_abort(mutex);
}
}
- return res;
+ PTHREAD_TRACE(psynch_mutex_lock_updatebits, mutex, oldseq.lgenval,
+ newseq.lgenval, oldtid);
+
+ // failing to take the lock in firstfit returns 1 to force the caller
+ // to wait in the kernel
+ return gotlock ? 0 : 1;
}
PTHREAD_NOINLINE
}
} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, acquire));
+ PTHREAD_TRACE(psynch_mutex_lock_updatebits, omutex, oldseq.lgenval,
+ newseq.lgenval, 0);
+
if (gotlock) {
os_atomic_store(tidaddr, selfid, relaxed);
res = 0;
- DEBUG_TRACE(psynch_mutex_ulock, omutex, lgenval, ugenval, selfid);
+ PTHREAD_TRACE(psynch_mutex_ulock, omutex, newseq.lgenval,
+ newseq.ugenval, selfid);
} else if (trylock) {
res = EBUSY;
- DEBUG_TRACE(psynch_mutex_utrylock_failed, omutex, lgenval, ugenval,
- oldtid);
+ PTHREAD_TRACE(psynch_mutex_utrylock_failed, omutex, newseq.lgenval,
+ newseq.ugenval, oldtid);
} else {
+ PTHREAD_TRACE(psynch_mutex_ulock | DBG_FUNC_START, omutex,
+ newseq.lgenval, newseq.ugenval, oldtid);
res = _pthread_mutex_lock_wait(omutex, newseq, oldtid);
+ PTHREAD_TRACE(psynch_mutex_ulock | DBG_FUNC_END, omutex,
+ newseq.lgenval, newseq.ugenval, oldtid);
}
if (res == 0 && mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE) {
static inline int
_pthread_mutex_lock(pthread_mutex_t *omutex, bool trylock)
{
-#if PLOCKSTAT || DEBUG_TRACE_POINTS
- if (PLOCKSTAT_MUTEX_ACQUIRE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED() ||
- DEBUG_TRACE_POINTS) {
+#if ENABLE_USERSPACE_TRACE
+ return _pthread_mutex_lock_slow(omutex, trylock);
+#elif PLOCKSTAT
+ if (PLOCKSTAT_MUTEX_ACQUIRE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
return _pthread_mutex_lock_slow(omutex, trylock);
}
#endif
+
_pthread_mutex *mutex = (_pthread_mutex *)omutex;
if (os_unlikely(!_pthread_mutex_check_signature_fast(mutex))) {
return _pthread_mutex_lock_slow(omutex, trylock);
uint64_t *tidaddr;
MUTEX_GETTID_ADDR(mutex, &tidaddr);
+ PTHREAD_TRACE(psynch_mutex_uunlock | DBG_FUNC_START, omutex, newseq.lgenval,
+ newseq.ugenval, os_atomic_load(tidaddr, relaxed));
+
updateval = __psynch_mutexdrop(omutex, newseq.lgenval, newseq.ugenval,
os_atomic_load(tidaddr, relaxed), flags);
+ PTHREAD_TRACE(psynch_mutex_uunlock | DBG_FUNC_END, omutex, updateval, 0, 0);
+
if (updateval == (uint32_t)-1) {
res = errno;
} else {
uint64_t *tidaddr;
MUTEX_GETTID_ADDR(mutex, &tidaddr);
- DEBUG_TRACE(psynch_mutex_uunlock, omutex, mtxgen, mtxugen,
- os_atomic_load(tidaddr, relaxed));
+ PTHREAD_TRACE(psynch_mutex_uunlock, omutex, newseq.lgenval,
+ newseq.ugenval, os_atomic_load(tidaddr, relaxed));
}
return 0;
int
pthread_mutex_unlock(pthread_mutex_t *omutex)
{
-#if PLOCKSTAT || DEBUG_TRACE_POINTS
- if (PLOCKSTAT_MUTEX_RELEASE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED() ||
- DEBUG_TRACE_POINTS) {
+#if ENABLE_USERSPACE_TRACE
+ return _pthread_mutex_unlock_slow(omutex);
+#elif PLOCKSTAT
+ if (PLOCKSTAT_MUTEX_RELEASE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
return _pthread_mutex_unlock_slow(omutex);
}
#endif
mutex->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
mutex->mtxopts.options.protocol = _PTHREAD_DEFAULT_PROTOCOL;
if (static_type != 3) {
- mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
+ mutex->mtxopts.options.policy = __pthread_mutex_default_policy;
} else {
mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FIRSTFIT;
}
#include <stdbool.h>
#include <errno.h>
+#include <pthread/pthread_spis.h>
+
+#include <sys/sysctl.h>
+
#include "darwintest_defaults.h"
+#include <darwintest_multiprocess.h>
struct context {
pthread_mutex_t mutex;
T_ASSERT_POSIX_ZERO(res, "pthread_join()");
}
}
+
+// Shared assertion: a freshly-initialized mutexattr must report
+// expected_policy from pthread_mutexattr_getpolicy_np(), i.e. the
+// process-wide default policy in effect for this process.
+static void
+check_process_default_mutex_policy(int expected_policy)
+{
+ pthread_mutexattr_t mattr;
+ T_EXPECT_POSIX_ZERO(pthread_mutexattr_init(&mattr), "pthread_mutexattr_init()");
+
+ int policy;
+ T_EXPECT_POSIX_ZERO(pthread_mutexattr_getpolicy_np(&mattr, &policy),
+ "pthread_mutexattr_getpolicy_np()");
+ T_LOG("policy was %d", policy);
+ T_EXPECT_EQ(policy, expected_policy, "Saw the expected default policy");
+
+ T_EXPECT_POSIX_ZERO(pthread_mutexattr_destroy(&mattr), "pthread_mutexattr_destroy()");
+}
+
+// Baseline: with no sysctl or environment override, the default policy is
+// fairshare.
+T_DECL(mutex_default_policy,
+ "Tests that the default mutex policy is fairshare")
+{
+ check_process_default_mutex_policy(_PTHREAD_MUTEX_POLICY_FAIRSHARE);
+}
+
+// Flip kern.pthread_mutex_default_policy to firstfit, then spawn a helper
+// process to observe the new default (the setting only affects processes
+// initialized after the change; the helper also restores fairshare).
+T_DECL(mutex_default_policy_sysctl,
+ "Tests that setting the policy sysctl changes the default policy")
+{
+ int firstfit_default = _PTHREAD_MUTEX_POLICY_FIRSTFIT;
+ T_EXPECT_POSIX_ZERO(
+ sysctlbyname("kern.pthread_mutex_default_policy", NULL, NULL, &firstfit_default, sizeof(firstfit_default)),
+ "Changed the default policy sysctl to firstfit");
+
+ dt_helper_t helper = dt_child_helper("mutex_default_policy_sysctl_helper");
+ dt_run_helpers(&helper, 1, 5);
+}
+
+// Child of mutex_default_policy_sysctl: verifies the firstfit default set by
+// the parent, then restores fairshare so later tests are unaffected.
+T_HELPER_DECL(mutex_default_policy_sysctl_helper, "sysctl helper")
+{
+ check_process_default_mutex_policy(_PTHREAD_MUTEX_POLICY_FIRSTFIT);
+
+ int default_default = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
+ T_EXPECT_POSIX_ZERO(
+ sysctlbyname("kern.pthread_mutex_default_policy", NULL, NULL, &default_default, sizeof(default_default)),
+ "Restored the default policy to fairshare");
+
+ T_END;
+}
+
+// PTHREAD_MUTEX_DEFAULT_POLICY=2 (== _PTHREAD_MUTEX_POLICY_FIRSTFIT) must
+// switch the process default at libpthread init time.
+T_DECL(mutex_default_policy_envvar,
+ "Tests that setting the policy environment variable changes the default policy",
+ T_META_ENVVAR("PTHREAD_MUTEX_DEFAULT_POLICY=2"))
+{
+ check_process_default_mutex_policy(_PTHREAD_MUTEX_POLICY_FIRSTFIT);
+}
--- /dev/null
+#!/usr/local/bin/luatrace -s
+
+-- Register `callback` for the trace point named `codename`, resolving the
+-- name to its numeric debugid first; warn (rather than fail) if the name is
+-- unknown to the trace facility.
+trace_codename = function(codename, callback)
+ local debugid = trace.debugid(codename)
+ if debugid ~= 0 then
+ trace.single(debugid,callback)
+ else
+ printf("WARNING: Cannot locate debugid for '%s'\n", codename)
+ end
+end
+
+-- Timestamp of the first event seen; all printed times are relative to it.
+initial_timestamp = 0
+-- Build the common line prefix for one trace record: a direction arrow
+-- (→ interval start, ← interval end, ↔ standalone event), seconds since the
+-- first event, process name, [pid.threadid], and the decoded event name.
+get_prefix = function(buf)
+ if initial_timestamp == 0 then
+ initial_timestamp = buf.timestamp
+ end
+ local secs = trace.convert_timestamp_to_nanoseconds(buf.timestamp - initial_timestamp) / 1000000000
+
+ local prefix
+ if trace.debugid_is_start(buf.debugid) then
+ prefix = "→"
+ elseif trace.debugid_is_end(buf.debugid) then
+ prefix = "←"
+ else
+ prefix = "↔"
+ end
+
+ local proc
+ proc = buf.command
+
+ return string.format("%s %6.9f %-17s [%05d.%06x] %-24s",
+ prefix, secs, proc, buf.pid, buf.threadid, buf.debugname)
+end
+
+-- Decode a mutex lgenval into "[count, WEK]" form: bit 0 → K, bit 1 → E,
+-- bit 2 → W flags, with the sequence count taken from bits 8 and up.
+-- (Presumably mirrors the PTH_RWL_KBIT/EBIT/WBIT layout in libpthread —
+-- confirm against the kernel headers.)
+decode_lval = function(lval)
+ local kbit = " "
+ if lval & 0x1 ~= 0 then
+ kbit = "K"
+ end
+ local ebit = " "
+ if lval & 0x2 ~= 0 then
+ ebit = "E"
+ end
+ local wbit = " "
+ if lval & 0x4 ~= 0 then
+ wbit = "W"
+ end
+
+ local count = lval >> 8
+ return string.format("[0x%06x, %s%s%s]", count, wbit, ebit, kbit)
+end
+
+-- Decode a mutex sval into "[count, IS]" form: bit 0 → S, bit 1 → I flags
+-- (printed I then S), count from bits 8 and up. Flag semantics assumed to
+-- match the libpthread PTH_RWS_* bits — confirm against the headers.
+decode_sval = function(sval)
+ local sbit = " "
+ if sval & 0x1 ~= 0 then
+ sbit = "S"
+ end
+ local ibit = " "
+ if sval & 0x2 ~= 0 then
+ ibit = "I"
+ end
+
+ local count = sval >> 8
+ return string.format("[0x%06x, %s%s]", count, ibit, sbit)
+end
+
+-- lock_updatebits: one event per userspace lock-bit update. arg4 carries the
+-- observed owner tid; 0 distinguishes the pre-kernel fast path from the
+-- post-kernel update (matches the emitting sites in libpthread).
+trace_codename("psynch_mutex_lock_updatebits", function(buf)
+ local prefix = get_prefix(buf)
+ if buf[4] == 0 then
+ printf("%s\tupdated lock bits, pre-kernel (addr: 0x%016x, oldlval: %s, newlval: %s)\n", prefix, buf[1], decode_lval(buf[2]), decode_lval(buf[3]))
+ else
+ printf("%s\tupdated lock bits, post-kernel (addr: 0x%016x, oldlval: %s, newlval: %s)\n", prefix, buf[1], decode_lval(buf[2]), decode_lval(buf[3]))
+ end
+end)
+
+-- unlock_updatebits: single event per userspace unlock bit-update, showing
+-- the lgenval transition.
+trace_codename("psynch_mutex_unlock_updatebits", function(buf)
+ local prefix = get_prefix(buf)
+ printf("%s\tupdated unlock bits (addr: 0x%016x, oldlval: %s, newlval: %s)\n", prefix, buf[1], decode_lval(buf[2]), decode_lval(buf[3]))
+end)
+
+-- ulock: a standalone event is an uncontended take; a start/end pair
+-- brackets a wait in the kernel (emitted around the slow lock-wait path).
+trace_codename("psynch_mutex_ulock", function(buf)
+ local prefix = get_prefix(buf)
+
+ if trace.debugid_is_start(buf.debugid) then
+ printf("%s\tlock busy, waiting in kernel (addr: 0x%016x, lval: %s, sval: %s, owner_tid: 0x%x)\n",
+ prefix, buf[1], decode_lval(buf[2]), decode_sval(buf[3]), buf[4])
+ elseif trace.debugid_is_end(buf.debugid) then
+ printf("%s\tlock acquired from kernel (addr: 0x%016x, updated bits: %s)\n",
+ prefix, buf[1], decode_lval(buf[2]))
+ else
+ printf("%s\tlock taken, uncontended (addr: 0x%016x, lval: %s, sval: %s)\n",
+ prefix, buf[1], decode_lval(buf[2]), decode_sval(buf[3]))
+ end
+end)
+
+-- trylock failure: lock was busy; arg4 is the tid observed as owner.
+trace_codename("psynch_mutex_utrylock_failed", function(buf)
+ local prefix = get_prefix(buf)
+ printf("%s\tmutex trybusy addr: 0x%016x lval: %s sval: %s owner: 0x%x\n", prefix, buf[1], decode_lval(buf[2]), decode_sval(buf[3]), buf[4])
+end)
+
+-- uunlock: a standalone event is an unlock with no kernel waiters; a
+-- start/end pair brackets the kernel drop call that signals waiters.
+trace_codename("psynch_mutex_uunlock", function(buf)
+ local prefix = get_prefix(buf)
+
+ if trace.debugid_is_start(buf.debugid) then
+ printf("%s\tunlock, signalling kernel waiters (addr: 0x%016x, lval: %s, sval: %s, owner_tid: 0x%x)\n",
+ prefix, buf[1], decode_lval(buf[2]), decode_sval(buf[3]), buf[4])
+ elseif trace.debugid_is_end(buf.debugid) then
+ printf("%s\tunlock, waiters signalled (addr: 0x%016x, updated bits: %s)\n",
+ prefix, buf[1], decode_lval(buf[2]))
+ else
+ printf("%s\tunlock, no kernel waiters (addr: 0x%016x, lval: %s, sval: %s)\n",
+ prefix, buf[1], decode_lval(buf[2]), decode_sval(buf[3]))
+ end
+end)
+
+-- The trace codes we need aren't enabled by default: turn the
+-- kern.pthread_debug_tracing sysctl on for this session and restore it to 0
+-- when tracing completes.
+darwin.sysctlbyname("kern.pthread_debug_tracing", 1)
+completion_handler = function()
+ darwin.sysctlbyname("kern.pthread_debug_tracing", 0)
+end
+trace.set_completion_handler(completion_handler)