#include <kern/zalloc.h>
#include <kern/sched_prim.h>
#include <kern/processor.h>
-#include <kern/wait_queue.h>
+#include <kern/block_hint.h>
//#include <kern/mach_param.h>
#include <mach/mach_vm.h>
#include <mach/mach_param.h>
static int _wait_result_to_errno(wait_result_t result);
-static int ksyn_wait(ksyn_wait_queue_t, int, uint32_t, int, uint64_t, thread_continue_t);
+static int ksyn_wait(ksyn_wait_queue_t, int, uint32_t, int, uint64_t, thread_continue_t, block_hint_t);
static kern_return_t ksyn_signal(ksyn_wait_queue_t, int, ksyn_waitq_element_t, uint32_t);
static void ksyn_freeallkwe(ksyn_queue_t kq);
static kern_return_t ksyn_mtxsignal(ksyn_wait_queue_t, ksyn_waitq_element_t kwe, uint32_t);
static void ksyn_mtx_update_owner_qos_override(ksyn_wait_queue_t, uint64_t tid, boolean_t prepost);
-static void ksyn_mtx_transfer_qos_override(ksyn_wait_queue_t, ksyn_waitq_element_t);
static void ksyn_mtx_drop_qos_override(ksyn_wait_queue_t);
static int kwq_handle_unlock(ksyn_wait_queue_t, uint32_t mgen, uint32_t rw_wc, uint32_t *updatep, int flags, int *blockp, uint32_t premgen);
{
int res = 0;
if (kwq->kw_pre_intrcount != 0 &&
- kwq->kw_pre_intrtype == type &&
- is_seqlower_eq(lockseq, kwq->kw_pre_intrseq)) {
+ kwq->kw_pre_intrtype == type &&
+ (kwq->kw_pre_intrseq == 0 || is_seqlower_eq(lockseq, kwq->kw_pre_intrseq))) {
kwq->kw_pre_intrcount--;
*retval = kwq->kw_pre_intrretbits;
if (kwq->kw_pre_intrcount == 0) {
if (tid != 0) {
if ((tid == kwq->kw_owner) && (kwq->kw_kflags & KSYN_KWF_QOS_APPLIED)) {
// hint continues to be accurate, and a boost was already applied
- pthread_kern->proc_usynch_thread_qos_add_override(NULL, tid, kwq->kw_qos_override, FALSE);
+ pthread_kern->proc_usynch_thread_qos_add_override_for_resource(current_task(), NULL, tid, kwq->kw_qos_override, FALSE, kwq->kw_addr, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);
} else {
// either hint did not match previous owner, or hint was accurate but mutex was not contended enough for a boost previously
boolean_t boostsucceeded;
- boostsucceeded = pthread_kern->proc_usynch_thread_qos_add_override(NULL, tid, kwq->kw_qos_override, TRUE);
+ boostsucceeded = pthread_kern->proc_usynch_thread_qos_add_override_for_resource(current_task(), NULL, tid, kwq->kw_qos_override, TRUE, kwq->kw_addr, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);
if (boostsucceeded) {
kwq->kw_kflags |= KSYN_KWF_QOS_APPLIED;
if (wasboosted && (tid != kwq->kw_owner) && (kwq->kw_owner != 0)) {
// the hint did not match the previous owner, so drop overrides
PTHREAD_TRACE(TRACE_psynch_ksyn_incorrect_owner, kwq->kw_owner, 0, 0, 0, 0);
- pthread_kern->proc_usynch_thread_qos_remove_override(NULL, kwq->kw_owner);
+ pthread_kern->proc_usynch_thread_qos_remove_override_for_resource(current_task(), NULL, kwq->kw_owner, kwq->kw_addr, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);
}
}
} else {
if (wasboosted && (kwq->kw_owner != 0)) {
// the hint did not match the previous owner, so drop overrides
PTHREAD_TRACE(TRACE_psynch_ksyn_incorrect_owner, kwq->kw_owner, 0, 0, 0, 0);
- pthread_kern->proc_usynch_thread_qos_remove_override(NULL, kwq->kw_owner);
+ pthread_kern->proc_usynch_thread_qos_remove_override_for_resource(current_task(), NULL, kwq->kw_owner, kwq->kw_addr, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);
}
}
}
}
-static void ksyn_mtx_transfer_qos_override(ksyn_wait_queue_t kwq, ksyn_waitq_element_t kwe)
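+/*
+ * First phase of handing the mutex's QoS override over to the next waiter `kwe`.
+ * If an override has to be removed from the previous owner, it is only recorded
+ * here (via *kw_owner) and reported by returning TRUE; the caller performs the
+ * actual removal with ksyn_mtx_transfer_qos_override_commit() only after the
+ * wakeup in ksyn_signal() has succeeded.
+ */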
+static boolean_t
+ksyn_mtx_transfer_qos_override_begin(ksyn_wait_queue_t kwq,
+ ksyn_waitq_element_t kwe, uint64_t *kw_owner)
{
+ boolean_t needs_commit = FALSE;
if (!(kwq->kw_pflags & KSYN_WQ_SHARED)) {
boolean_t wasboosted = (kwq->kw_kflags & KSYN_KWF_QOS_APPLIED) ? TRUE : FALSE;
-
+
if (kwq->kw_inqueue > 1) {
boolean_t boostsucceeded;
-
+
// More than one waiter, so resource will still be contended after handing off ownership
- boostsucceeded = pthread_kern->proc_usynch_thread_qos_add_override(kwe->kwe_uth, 0, kwq->kw_qos_override, TRUE);
-
+ boostsucceeded = pthread_kern->proc_usynch_thread_qos_add_override_for_resource(current_task(), kwe->kwe_uth, 0, kwq->kw_qos_override, TRUE, kwq->kw_addr, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);
+
if (boostsucceeded) {
kwq->kw_kflags |= KSYN_KWF_QOS_APPLIED;
}
PTHREAD_TRACE(TRACE_psynch_ksyn_incorrect_owner, 0, 0, 0, 0, 0);
} else if (thread_tid(current_thread()) != kwq->kw_owner) {
PTHREAD_TRACE(TRACE_psynch_ksyn_incorrect_owner, kwq->kw_owner, 0, 0, 0, 0);
- pthread_kern->proc_usynch_thread_qos_remove_override(NULL, kwq->kw_owner);
+ *kw_owner = kwq->kw_owner;
+ needs_commit = TRUE;
} else {
- pthread_kern->proc_usynch_thread_qos_remove_override(current_uthread(), 0);
+ *kw_owner = 0;
+ needs_commit = TRUE;
}
}
}
+ return needs_commit;
+}
+
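+/* Second phase: drop the override recorded by the begin phase, either from
+ * the previous owner (kw_owner != 0) or from the current thread itself. */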
+static void
+ksyn_mtx_transfer_qos_override_commit(ksyn_wait_queue_t kwq, uint64_t kw_owner)
+{
+ struct uthread *uthread = kw_owner ? NULL : current_uthread();
+
+ pthread_kern->proc_usynch_thread_qos_remove_override_for_resource(
+ current_task(), uthread, kw_owner, kwq->kw_addr,
+ THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);
}
static void ksyn_mtx_drop_qos_override(ksyn_wait_queue_t kwq)
PTHREAD_TRACE(TRACE_psynch_ksyn_incorrect_owner, 0, 0, 0, 0, 0);
} else if (thread_tid(current_thread()) != kwq->kw_owner) {
PTHREAD_TRACE(TRACE_psynch_ksyn_incorrect_owner, kwq->kw_owner, 0, 0, 0, 0);
- pthread_kern->proc_usynch_thread_qos_remove_override(NULL, kwq->kw_owner);
+ pthread_kern->proc_usynch_thread_qos_remove_override_for_resource(current_task(), NULL, kwq->kw_owner, kwq->kw_addr, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);
} else {
- pthread_kern->proc_usynch_thread_qos_remove_override(current_uthread(), 0);
+ pthread_kern->proc_usynch_thread_qos_remove_override_for_resource(current_task(), current_uthread(), 0, kwq->kw_addr, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);
}
}
}
ksyn_mtx_update_owner_qos_override(kwq, tid, FALSE);
kwq->kw_owner = tid;
- error = ksyn_wait(kwq, KSYN_QUEUE_WRITER, mgen, ins_flags, 0, psynch_mtxcontinue);
+ error = ksyn_wait(kwq, KSYN_QUEUE_WRITER, mgen, ins_flags, 0, psynch_mtxcontinue, kThreadWaitPThreadMutex);
// ksyn_wait drops wait queue lock
out:
ksyn_wqrelease(kwq, 1, (KSYN_WQTYPE_INWAIT|KSYN_WQTYPE_MTX));
ksyn_mtxsignal(ksyn_wait_queue_t kwq, ksyn_waitq_element_t kwe, uint32_t updateval)
{
kern_return_t ret;
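+ // QoS override hand-off computed by ksyn_mtx_transfer_qos_override_begin(),
+ // committed below only if the wakeup succeeds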
+ boolean_t needs_commit;
+ uint64_t kw_owner;
if (!kwe) {
kwe = TAILQ_FIRST(&kwq->kw_ksynqueues[KSYN_QUEUE_WRITER].ksynq_kwelist);
}
}
- ksyn_mtx_transfer_qos_override(kwq, kwe);
+ needs_commit = ksyn_mtx_transfer_qos_override_begin(kwq, kwe, &kw_owner);
kwq->kw_owner = kwe->kwe_tid;
ret = ksyn_signal(kwq, KSYN_QUEUE_WRITER, kwe, updateval);
if (ret != KERN_SUCCESS) {
ksyn_mtx_drop_qos_override(kwq);
kwq->kw_owner = 0;
+ } else if (needs_commit) {
+ ksyn_mtx_transfer_qos_override_commit(kwq, kw_owner);
}
-
return ret;
}
clock_absolutetime_interval_to_deadline(abstime, &abstime);
}
- error = ksyn_wait(ckwq, KSYN_QUEUE_WRITER, cgen, SEQFIT, abstime, psynch_cvcontinue);
+ error = ksyn_wait(ckwq, KSYN_QUEUE_WRITER, cgen, SEQFIT, abstime, psynch_cvcontinue, kThreadWaitPThreadCondVar);
// ksyn_wait drops wait queue lock
}
_ksyn_handle_prepost(kwq, prepost_type, lockseq, retval)) {
ksyn_wqunlock(kwq);
} else {
- error = ksyn_wait(kwq, kqi, lgenval, SEQFIT, 0, THREAD_CONTINUE_NULL);
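+ // tell stackshot (via the pending block hint) whether this is a read or a write wait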
+ block_hint_t block_hint = type == PTH_RW_TYPE_READ ?
+ kThreadWaitPThreadRWLockRead : kThreadWaitPThreadRWLockWrite;
+ error = ksyn_wait(kwq, kqi, lgenval, SEQFIT, 0, THREAD_CONTINUE_NULL, block_hint);
// ksyn_wait drops wait queue lock
if (error == 0) {
uthread_t uth = current_uthread();
int diff;
uint32_t count = 0;
uint32_t curgen = lgenval & PTHRW_COUNT_MASK;
-
+ int clearedkflags = 0;
+
error = ksyn_wqfind(rwlock, lgenval, ugenval, rw_wc, flags, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK), &kwq);
if (error != 0) {
return(error);
/* no prepost and all threads are in place, reset the bit */
if ((isinit != 0) && ((kwq->kw_kflags & KSYN_KWF_INITCLEARED) != 0)){
kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
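+ // remember that we consumed the INITCLEARED state so it can be restored below if needed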
+ clearedkflags = 1;
}
/* can handle unlock now */
/* update bits?? */
*retval = updatebits;
}
-
+
+ // <rdar://problem/22244050> If any of the wakeups failed because the waiter
+ // had already returned to userspace due to a signal, then we need to ensure
+ // that the reset state is not cleared when that thread returns. Otherwise,
+ // _pthread_rwlock_lock will clear the interrupted state before it is read.
+ if (clearedkflags != 0 && kwq->kw_pre_intrcount > 0) {
+ kwq->kw_kflags |= KSYN_KWF_INITCLEARED;
+ }
ksyn_wqunlock(kwq);
ksyn_wqrelease(kwq, 0, (KSYN_WQTYPE_INDROP | KSYN_WQTYPE_RWLOCK));
pthread_list_lock();
res = ksyn_wq_hash_lookup(uaddr, current_proc(), flags, &kwq, &hashptr, &object, &offset);
if (res != 0) {
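+ // lookup failed: drop the list lock before breaking out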
+ pthread_list_unlock();
break;
}
if (kwq == NULL && nkwq == NULL) {
kwq->kw_dropcount++;
}
}
+ pthread_list_unlock();
break;
}
- pthread_list_unlock();
if (kwqp != NULL) {
*kwqp = kwq;
}
uint32_t lockseq,
int fit,
uint64_t abstime,
- thread_continue_t continuation)
+ thread_continue_t continuation,
+ block_hint_t block_hint)
{
int res;
return res;
}
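+ // record what kind of pthread object we are about to block on, so that
+ // stackshot can report it via _pthread_find_owner()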
+ thread_set_pending_block_hint(th, block_hint);
assert_wait_deadline_with_leeway(&kwe->kwe_psynchretval, THREAD_ABORTSAFE, TIMEOUT_URGENCY_USER_NORMAL, abstime, 0);
ksyn_wqunlock(kwq);
kwq_zone = (zone_t)pthread_kern->zinit(sizeof(struct ksyn_wait_queue), 8192 * sizeof(struct ksyn_wait_queue), 4096, "ksyn_wait_queue");
kwe_zone = (zone_t)pthread_kern->zinit(sizeof(struct ksyn_waitq_element), 8192 * sizeof(struct ksyn_waitq_element), 4096, "ksyn_waitq_element");
}
+
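+/* Return the ksyn wait queue (if any) that the given thread is currently
+ * enqueued on, for use by stackshot. */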
+void *
+_pthread_get_thread_kwq(thread_t thread)
+{
+ assert(thread);
+ struct uthread * uthread = pthread_kern->get_bsdthread_info(thread);
+ assert(uthread);
+ ksyn_waitq_element_t kwe = pthread_kern->uthread_get_uukwe(uthread);
+ assert(kwe);
+ ksyn_wait_queue_t kwq = kwe->kwe_kwqqueue;
+ return kwq;
+}
+
+/* This function is used by stackshot to determine why a thread is blocked, and report
+ * who owns the object that the thread is blocked on. It should *only* be called if the
+ * `block_hint' field in the relevant thread's struct is populated with something related
+ * to pthread sync objects.
+ */
+void
+_pthread_find_owner(thread_t thread, struct stackshot_thread_waitinfo * waitinfo)
+{
+ ksyn_wait_queue_t kwq = _pthread_get_thread_kwq(thread);
+ switch (waitinfo->wait_type) {
+ case kThreadWaitPThreadMutex:
+ assert((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_MTX);
+ waitinfo->owner = kwq->kw_owner;
+ waitinfo->context = kwq->kw_addr;
+ break;
+ /* Owner of rwlock not stored in kernel space due to races. Punt
+ * and hope that the userspace address is helpful enough. */
+ case kThreadWaitPThreadRWLockRead:
+ case kThreadWaitPThreadRWLockWrite:
+ assert((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_RWLOCK);
+ waitinfo->owner = 0;
+ waitinfo->context = kwq->kw_addr;
+ break;
+ /* Condvars don't have owners, so just give the userspace address. */
+ case kThreadWaitPThreadCondVar:
+ assert((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR);
+ waitinfo->owner = 0;
+ waitinfo->context = kwq->kw_addr;
+ break;
+ case kThreadWaitNone:
+ default:
+ waitinfo->owner = 0;
+ waitinfo->context = 0;
+ break;
+ }
+}