#define BSDTHREAD_CTL_SET_SELF 0x100 /* bsdthread_ctl(BSDTHREAD_CTL_SET_SELF, priority, voucher, flags) */
#define BSDTHREAD_CTL_QOS_OVERRIDE_RESET 0x200 /* bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_RESET, 0, 0, 0) */
#define BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH 0x400 /* bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH, thread_port, priority, 0) */
+#define BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD 0x401 /* bsdthread_ctl(BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD, thread_port, priority, resource) */
+#define BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET 0x402 /* bsdthread_ctl(BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET, 0|1 (?reset_all), resource, 0) */
/* qos_class_t is mapped into one of these bits in the bitfield, this mapping now exists here because
 * libdispatch requires the QoS class mask of the pthread_priority_t to be a bitfield. */
int _bsdthread_terminate(struct proc *p, user_addr_t stackaddr, size_t size, uint32_t kthport, uint32_t sem, int32_t *retval);
int _bsdthread_ctl_set_qos(struct proc *p, user_addr_t cmd, mach_port_name_t kport, user_addr_t tsd_priority_addr, user_addr_t arg3, int *retval);
int _bsdthread_ctl_set_self(struct proc *p, user_addr_t cmd, pthread_priority_t priority, mach_port_name_t voucher, _pthread_set_flags_t flags, int *retval);
-int _bsdthread_ctl_qos_override_start(struct proc *p, user_addr_t cmd, mach_port_name_t kport, pthread_priority_t priority, user_addr_t arg3, int *retval);
-int _bsdthread_ctl_qos_override_end(struct proc *p, user_addr_t cmd, mach_port_name_t kport, user_addr_t arg2, user_addr_t arg3, int *retval);
+int _bsdthread_ctl_qos_override_start(struct proc *p, user_addr_t cmd, mach_port_name_t kport, pthread_priority_t priority, user_addr_t resource, int *retval);
+int _bsdthread_ctl_qos_override_end(struct proc *p, user_addr_t cmd, mach_port_name_t kport, user_addr_t resource, user_addr_t arg3, int *retval);
int _bsdthread_ctl_qos_override_dispatch(struct proc __unused *p, user_addr_t __unused cmd, mach_port_name_t kport, pthread_priority_t priority, user_addr_t arg3, int __unused *retval);
int _bsdthread_ctl_qos_override_reset(struct proc __unused *p, user_addr_t __unused cmd, user_addr_t arg1, user_addr_t arg2, user_addr_t arg3, int __unused *retval);
+int _bsdthread_ctl_qos_dispatch_asynchronous_override_add(struct proc __unused *p, user_addr_t __unused cmd, mach_port_name_t kport, pthread_priority_t priority, user_addr_t resource, int __unused *retval);
+int _bsdthread_ctl_qos_dispatch_asynchronous_override_reset(struct proc __unused *p, user_addr_t __unused cmd, int reset_all, user_addr_t resource, user_addr_t arg3, int __unused *retval);
int _bsdthread_ctl(struct proc *p, user_addr_t cmd, user_addr_t arg1, user_addr_t arg2, user_addr_t arg3, int *retval);
int _thread_selfid(__unused struct proc *p, uint64_t *retval);
int _workq_kernreturn(struct proc *p, int options, user_addr_t item, int arg2, int arg3, int32_t *retval);
}
/*
 * NOTE(review): this span is an unapplied unified-diff fragment ('+'/'-'
 * prefixed lines); the comments below describe the post-patch ('+') version.
 *
 * wq_thread_override_reset:
 * Drop the dispatch-asynchronous QoS overrides held by workqueue thread `th`
 * for the given `resource` token (callers pass
 * THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD to drop them all). The patch replaces
 * the old per-thread atomic counter drain (CAS loop + one remove call per
 * count) with a single per-resource reset KPI call. No-op for threads with no
 * threadlist entry (i.e. non-workqueue threads).
 */
static inline void
-wq_thread_override_reset(thread_t th)
+wq_thread_override_reset(thread_t th, user_addr_t resource)
{
struct uthread *uth = pthread_kern->get_bsdthread_info(th);
struct threadlist *tl = pthread_kern->uthread_get_threadlist(uth);
if (tl) {
/*
* Drop all outstanding overrides on this thread, done outside the wq lock
- * because proc_usynch_thread_qos_remove_override takes a spinlock that
+ * because proc_usynch_thread_qos_remove_override_for_resource takes a spinlock that
* could cause us to panic.
*/
- uint32_t count = tl->th_dispatch_override_count;
- while (!OSCompareAndSwap(count, 0, &tl->th_dispatch_override_count)) {
- count = tl->th_dispatch_override_count;
- }
-
- PTHREAD_TRACE(TRACE_wq_override_reset | DBG_FUNC_NONE, tl->th_workq, count, 0, 0, 0);
+ PTHREAD_TRACE(TRACE_wq_override_reset | DBG_FUNC_NONE, tl->th_workq, 0, 0, 0, 0);
- for (int i=count; i>0; i--) {
- pthread_kern->proc_usynch_thread_qos_remove_override(uth, 0);
- }
+ /* Single KPI call resets every override registered under `resource`. */
+ pthread_kern->proc_usynch_thread_qos_reset_override_for_resource(current_task(), uth, 0, resource, THREAD_QOS_OVERRIDE_TYPE_DISPATCH_ASYNCHRONOUS_OVERRIDE);
}
}
}
/*
 * NOTE(review): unapplied diff fragment; comments describe the '+' version.
 *
 * BSDTHREAD_CTL_QOS_OVERRIDE_START handler: apply an explicit pthread QoS
 * override (THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE) to the thread
 * named by `kport`, tracked in the kernel under the userspace `resource`
 * token (previously arg3 was required to be 0; now it carries the token).
 * Returns ESRCH if the port does not name a thread, else 0.
 *
 * NOTE(review): `uth` and `override_qos` are declared in diff context elided
 * from this chunk — presumably uth comes from get_bsdthread_info(th) and
 * override_qos from `priority`; confirm against the full file.
 */
int
-_bsdthread_ctl_qos_override_start(struct proc __unused *p, user_addr_t __unused cmd, mach_port_name_t kport, pthread_priority_t priority, user_addr_t arg3, int __unused *retval)
+_bsdthread_ctl_qos_override_start(struct proc __unused *p, user_addr_t __unused cmd, mach_port_name_t kport, pthread_priority_t priority, user_addr_t resource, int __unused *retval)
{
thread_t th;
int rv = 0;
- if (arg3 != 0) {
- return EINVAL;
- }
-
if ((th = port_name_to_thread(kport)) == THREAD_NULL) {
return ESRCH;
}
struct threadlist *tl = util_get_thread_threadlist_entry(th);
if (tl) {
- /* Workqueue threads count their overrides, so they can forcibly balance any outstanding
- * overrides when they return to the kernel.
- */
- uint32_t o = OSAddAtomic(1, &tl->th_override_count);
- PTHREAD_TRACE(TRACE_wq_override_start | DBG_FUNC_NONE, tl->th_workq, thread_tid(th), o+1, priority, 0);
+ /* Counting moved into the kernel's per-resource table; trace arg is now a constant 1. */
+ PTHREAD_TRACE(TRACE_wq_override_start | DBG_FUNC_NONE, tl->th_workq, thread_tid(th), 1, priority, 0);
}
/* The only failure case here is if we pass a tid and have it lookup the thread, we pass the uthread, so this all always succeeds. */
- pthread_kern->proc_usynch_thread_qos_add_override(uth, 0, override_qos, TRUE);
+ pthread_kern->proc_usynch_thread_qos_add_override_for_resource(current_task(), uth, 0, override_qos, TRUE, resource, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE);
/* port_name_to_thread took a reference; drop it on every exit path below. */
thread_deallocate(th);
return rv;
}
/*
 * NOTE(review): unapplied diff fragment; comments describe the '+' version.
 *
 * BSDTHREAD_CTL_QOS_OVERRIDE_END handler: remove one explicit pthread QoS
 * override from the thread named by `kport`, matched by the same `resource`
 * token given to override_start (arg2 repurposed from must-be-zero to the
 * token; arg3 must still be 0). The old userspace underflow detection via
 * th_override_count (EFAULT on o == 0) is deleted — the kernel now tracks
 * per-resource override counts itself.
 *
 * NOTE(review): `th` is used before any visible assignment and `uth` is not
 * declared in this chunk — the port_name_to_thread lookup is in diff context
 * elided here; confirm against the full file.
 */
int
-_bsdthread_ctl_qos_override_end(struct proc __unused *p, user_addr_t __unused cmd, mach_port_name_t kport, user_addr_t arg2, user_addr_t arg3, int __unused *retval)
+_bsdthread_ctl_qos_override_end(struct proc __unused *p, user_addr_t __unused cmd, mach_port_name_t kport, user_addr_t resource, user_addr_t arg3, int __unused *retval)
{
thread_t th;
int rv = 0;
- if (arg2 != 0 || arg3 != 0) {
+ if (arg3 != 0) {
return EINVAL;
}
struct threadlist *tl = util_get_thread_threadlist_entry(th);
if (tl) {
- uint32_t o = OSAddAtomic(-1, &tl->th_override_count);
-
- PTHREAD_TRACE(TRACE_wq_override_end | DBG_FUNC_NONE, tl->th_workq, thread_tid(th), o-1, 0, 0);
-
- if (o == 0) {
- /* underflow! */
- thread_deallocate(th);
- return EFAULT;
- }
+ PTHREAD_TRACE(TRACE_wq_override_end | DBG_FUNC_NONE, tl->th_workq, thread_tid(th), 0, 0, 0);
}
- pthread_kern->proc_usynch_thread_qos_remove_override(uth, 0);
+ pthread_kern->proc_usynch_thread_qos_remove_override_for_resource(current_task(), uth, 0, resource, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE);
thread_deallocate(th);
return rv;
}
/*
 * NOTE(review): unapplied diff fragment; comments describe the '+' version.
 *
 * BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH handler, kept for binary compatibility:
 * after the patch it is a thin shim that validates arg3 == 0 and forwards to
 * the new per-resource entry point with USER_ADDR_NULL as the resource token.
 */
int
-_bsdthread_ctl_qos_override_dispatch(struct proc __unused *p, user_addr_t __unused cmd, mach_port_name_t kport, pthread_priority_t priority, user_addr_t arg3, int __unused *retval)
+_bsdthread_ctl_qos_override_dispatch(struct proc *p, user_addr_t cmd, mach_port_name_t kport, pthread_priority_t priority, user_addr_t arg3, int *retval)
{
- thread_t th;
- int rv = 0;
-
if (arg3 != 0) {
return EINVAL;
}
+ return _bsdthread_ctl_qos_dispatch_asynchronous_override_add(p, cmd, kport, priority, USER_ADDR_NULL, retval);
+}
+
+/*
+ * NOTE(review): unapplied diff fragment, and this chunk is visibly spliced —
+ * the `return EPERM; }` below is followed by more function body, so lines
+ * from the surrounding context were dropped by the chunking (presumably an
+ * `if (!tl) { ... }` style guard plus the declarations of `tl`, `uth` and
+ * `override_qos`). Comments describe intent only; confirm against the full file.
+ *
+ * BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD handler: apply a
+ * dispatch-asynchronous QoS override to the thread named by `kport`, tracked
+ * in the kernel under the `resource` token so libdispatch can later reset
+ * overrides per-queue. Returns ESRCH if the port does not name a thread.
+ */
+int
+_bsdthread_ctl_qos_dispatch_asynchronous_override_add(struct proc __unused *p, user_addr_t __unused cmd, mach_port_name_t kport, pthread_priority_t priority, user_addr_t resource, int __unused *retval)
+{
+ thread_t th;
+ int rv = 0;
+
if ((th = port_name_to_thread(kport)) == THREAD_NULL) {
return ESRCH;
}
return EPERM;
}
- /* Workqueue threads count their overrides, so they can forcibly balance any outstanding
- * overrides when they return to the kernel.
- */
- uint32_t o = OSAddAtomic(1, &tl->th_dispatch_override_count);
- PTHREAD_TRACE(TRACE_wq_override_dispatch | DBG_FUNC_NONE, tl->th_workq, thread_tid(th), o+1, priority, 0);
+ PTHREAD_TRACE(TRACE_wq_override_dispatch | DBG_FUNC_NONE, tl->th_workq, thread_tid(th), 1, priority, 0);
/* The only failure case here is if we pass a tid and have it lookup the thread, we pass the uthread, so this all always succeeds. */
- pthread_kern->proc_usynch_thread_qos_add_override(uth, 0, override_qos, TRUE);
+ pthread_kern->proc_usynch_thread_qos_add_override_for_resource(current_task(), uth, 0, override_qos, TRUE, resource, THREAD_QOS_OVERRIDE_TYPE_DISPATCH_ASYNCHRONOUS_OVERRIDE);
thread_deallocate(th);
return rv;
}
/*
 * NOTE(review): unapplied diff fragment; comments describe the '+' version.
 *
 * BSDTHREAD_CTL_QOS_OVERRIDE_RESET handler, kept for binary compatibility:
 * all args must be zero; forwards to the per-resource reset entry point with
 * reset_all = 1 so every dispatch-asynchronous override on the current
 * thread is dropped.
 */
int
-_bsdthread_ctl_qos_override_reset(struct proc __unused *p, user_addr_t __unused cmd, user_addr_t arg1, user_addr_t arg2, user_addr_t arg3, int __unused *retval)
+_bsdthread_ctl_qos_override_reset(struct proc *p, user_addr_t cmd, user_addr_t arg1, user_addr_t arg2, user_addr_t arg3, int *retval)
+{
+ if (arg1 != 0 || arg2 != 0 || arg3 != 0) {
+ return EINVAL;
+ }
+
+ return _bsdthread_ctl_qos_dispatch_asynchronous_override_reset(p, cmd, 1 /* reset_all */, 0, 0, retval);
+}
+
+/*
+ * NOTE(review): unapplied diff fragment, cut off by a chunk boundary — the
+ * function's tail (thread lookup for `th`, deallocate, return) is not visible
+ * here, and `th` is used before any visible assignment. Comments describe
+ * intent only; confirm against the full file.
+ *
+ * BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET handler: drop the
+ * current thread's dispatch-asynchronous overrides, either for one `resource`
+ * token or all of them when `reset_all` is set (in which case `resource` must
+ * be 0; the wildcard is substituted below). EPERM for non-workqueue threads.
+ */
+int
+_bsdthread_ctl_qos_dispatch_asynchronous_override_reset(struct proc __unused *p, user_addr_t __unused cmd, int reset_all, user_addr_t resource, user_addr_t arg3, int __unused *retval)
{
thread_t th;
struct threadlist *tl;
int rv = 0;
- if (arg1 != 0 || arg2 != 0 || arg3 != 0) {
+ if ((reset_all && (resource != 0)) || arg3 != 0) {
return EINVAL;
}
tl = util_get_thread_threadlist_entry(th);
if (tl) {
- wq_thread_override_reset(th);
+ wq_thread_override_reset(th, reset_all ? THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD : resource);
} else {
rv = EPERM;
}
return _bsdthread_ctl_qos_override_reset(p, cmd, arg1, arg2, arg3, retval);
case BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH:
return _bsdthread_ctl_qos_override_dispatch(p, cmd, (mach_port_name_t)arg1, (pthread_priority_t)arg2, arg3, retval);
+ case BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD:
+ return _bsdthread_ctl_qos_dispatch_asynchronous_override_add(p, cmd, (mach_port_name_t)arg1, (pthread_priority_t)arg2, arg3, retval);
+ case BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET:
+ return _bsdthread_ctl_qos_dispatch_asynchronous_override_reset(p, cmd, (int)arg1, arg2, arg3, retval);
case BSDTHREAD_CTL_SET_SELF:
return _bsdthread_ctl_set_self(p, cmd, (pthread_priority_t)arg1, (mach_port_name_t)arg2, (_pthread_set_flags_t)arg3, retval);
default:
}
/* dropping WQ override counts has to be done outside the wq lock. */
- wq_thread_override_reset(th);
+ wq_thread_override_reset(th, THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD);
workqueue_lock_spin(p);
if (tid != 0) {
if ((tid == kwq->kw_owner) && (kwq->kw_kflags & KSYN_KWF_QOS_APPLIED)) {
// hint continues to be accurate, and a boost was already applied
- pthread_kern->proc_usynch_thread_qos_add_override(NULL, tid, kwq->kw_qos_override, FALSE);
+ pthread_kern->proc_usynch_thread_qos_add_override_for_resource(current_task(), NULL, tid, kwq->kw_qos_override, FALSE, kwq->kw_addr, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);
} else {
// either hint did not match previous owner, or hint was accurate but mutex was not contended enough for a boost previously
boolean_t boostsucceded;
- boostsucceded = pthread_kern->proc_usynch_thread_qos_add_override(NULL, tid, kwq->kw_qos_override, TRUE);
+ boostsucceded = pthread_kern->proc_usynch_thread_qos_add_override_for_resource(current_task(), NULL, tid, kwq->kw_qos_override, TRUE, kwq->kw_addr, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);
if (boostsucceded) {
kwq->kw_kflags |= KSYN_KWF_QOS_APPLIED;
if (wasboosted && (tid != kwq->kw_owner) && (kwq->kw_owner != 0)) {
// the hint did not match the previous owner, so drop overrides
PTHREAD_TRACE(TRACE_psynch_ksyn_incorrect_owner, kwq->kw_owner, 0, 0, 0, 0);
- pthread_kern->proc_usynch_thread_qos_remove_override(NULL, kwq->kw_owner);
+ pthread_kern->proc_usynch_thread_qos_remove_override_for_resource(current_task(), NULL, kwq->kw_owner, kwq->kw_addr, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);
}
}
} else {
if (wasboosted && (kwq->kw_owner != 0)) {
// the hint did not match the previous owner, so drop overrides
PTHREAD_TRACE(TRACE_psynch_ksyn_incorrect_owner, kwq->kw_owner, 0, 0, 0, 0);
- pthread_kern->proc_usynch_thread_qos_remove_override(NULL, kwq->kw_owner);
+ pthread_kern->proc_usynch_thread_qos_remove_override_for_resource(current_task(), NULL, kwq->kw_owner, kwq->kw_addr, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);
}
}
}
boolean_t boostsucceeded;
// More than one waiter, so resource will still be contended after handing off ownership
- boostsucceeded = pthread_kern->proc_usynch_thread_qos_add_override(kwe->kwe_uth, 0, kwq->kw_qos_override, TRUE);
+ boostsucceeded = pthread_kern->proc_usynch_thread_qos_add_override_for_resource(current_task(), kwe->kwe_uth, 0, kwq->kw_qos_override, TRUE, kwq->kw_addr, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);
if (boostsucceeded) {
kwq->kw_kflags |= KSYN_KWF_QOS_APPLIED;
PTHREAD_TRACE(TRACE_psynch_ksyn_incorrect_owner, 0, 0, 0, 0, 0);
} else if (thread_tid(current_thread()) != kwq->kw_owner) {
PTHREAD_TRACE(TRACE_psynch_ksyn_incorrect_owner, kwq->kw_owner, 0, 0, 0, 0);
- pthread_kern->proc_usynch_thread_qos_remove_override(NULL, kwq->kw_owner);
+ pthread_kern->proc_usynch_thread_qos_remove_override_for_resource(current_task(), NULL, kwq->kw_owner, kwq->kw_addr, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);
} else {
- pthread_kern->proc_usynch_thread_qos_remove_override(current_uthread(), 0);
+ pthread_kern->proc_usynch_thread_qos_remove_override_for_resource(current_task(), current_uthread(), 0, kwq->kw_addr, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);
}
}
}
PTHREAD_TRACE(TRACE_psynch_ksyn_incorrect_owner, 0, 0, 0, 0, 0);
} else if (thread_tid(current_thread()) != kwq->kw_owner) {
PTHREAD_TRACE(TRACE_psynch_ksyn_incorrect_owner, kwq->kw_owner, 0, 0, 0, 0);
- pthread_kern->proc_usynch_thread_qos_remove_override(NULL, kwq->kw_owner);
+ pthread_kern->proc_usynch_thread_qos_remove_override_for_resource(current_task(), NULL, kwq->kw_owner, kwq->kw_addr, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);
} else {
- pthread_kern->proc_usynch_thread_qos_remove_override(current_uthread(), 0);
+ pthread_kern->proc_usynch_thread_qos_remove_override_for_resource(current_task(), current_uthread(), 0, kwq->kw_addr, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);
}
}
}
mach_vm_size_t th_allocsize;
mach_vm_offset_t th_stackaddr;
mach_port_name_t th_thport;
- uint32_t th_override_count;
- uint32_t th_dispatch_override_count;
};
#define TH_LIST_INITED 0x01
#define TH_LIST_RUNNING 0x02
int
_pthread_workqueue_override_reset(void);
+// Apply a QoS override on a given thread (can be non-workqueue as well) with a resource/queue token
+__OSX_AVAILABLE_STARTING(__MAC_10_10_2, __IPHONE_NA)
+int
+_pthread_workqueue_asynchronous_override_add(mach_port_t thread, pthread_priority_t priority, void *resource);
+
+// Reset overrides for the given resource for the current thread
+__OSX_AVAILABLE_STARTING(__MAC_10_10_2, __IPHONE_NA)
+int
+_pthread_workqueue_asynchronous_override_reset_self(void *resource);
+
+// Reset overrides for all resources for the current thread
+__OSX_AVAILABLE_STARTING(__MAC_10_10_2, __IPHONE_NA)
+int
+_pthread_workqueue_asynchronous_override_reset_all_self(void);
+
__END_DECLS
#endif // __PTHREAD_WORKQUEUE_H__
}
if (res == 0) {
- res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_START, rv->kthread, rv->priority, 0);
+ res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_START, rv->kthread, rv->priority, (uintptr_t)rv);
if (res != 0) {
mach_port_mod_refs(mach_task_self(), rv->kthread, MACH_PORT_RIGHT_SEND, -1);
override->sig = PTHREAD_OVERRIDE_SIG_DEAD;
/* Always consumes (and deallocates) the pthread_override_t object given. */
- res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_END, override->kthread, 0, 0);
+ res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_END, override->kthread, (uintptr_t)override, 0);
if (res == -1) { res = errno; }
/* EFAULT from the syscall means we underflowed. Crash here. */
/*
 * NOTE(review): unapplied diff fragment; comments describe the '+' version.
 *
 * Userspace wrapper: start an explicit QoS override on `thread`. The patch
 * passes pthread_self() as the per-thread resource token (previously 0) so
 * the kernel can track this override per-resource. Returns 0 or an errno.
 */
int
_pthread_override_qos_class_start_direct(mach_port_t thread, pthread_priority_t priority)
{
- int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_START, thread, priority, 0);
+ // use pthread_self as the default per-thread memory allocation to track the override in the kernel
+ int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_START, thread, priority, (uintptr_t)pthread_self());
if (res == -1) { res = errno; }
return res;
}
/*
 * NOTE(review): unapplied diff fragment; comments describe the '+' version.
 *
 * Userspace wrapper: end an explicit QoS override on `thread`. Must pass the
 * same resource token used at start — pthread_self() of the caller — which
 * assumes start and end run on the same thread (TODO(review): confirm that
 * invariant holds for all callers). Returns 0 or an errno.
 */
int
_pthread_override_qos_class_end_direct(mach_port_t thread)
{
- int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_END, thread, 0, 0);
+ // use pthread_self as the default per-thread memory allocation to track the override in the kernel
+ int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_END, thread, (uintptr_t)pthread_self(), 0);
if (res == -1) { res = errno; }
return res;
}
return res;
}
+/*
+ * New SPI (diff '+' lines): apply a dispatch-asynchronous QoS override to
+ * `thread`, tracked in the kernel under the caller-supplied `resource` token
+ * (libdispatch passes a queue pointer). Returns 0 or an errno.
+ */
+int
+_pthread_workqueue_asynchronous_override_add(mach_port_t thread, pthread_priority_t priority, void *resource)
+{
+ int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD, thread, priority, (uintptr_t)resource);
+ if (res == -1) { res = errno; }
+ return res;
+}
+
+/*
+ * New SPI (diff '+' lines): reset the current thread's dispatch-asynchronous
+ * overrides for a single `resource` token (reset_all = 0). Returns 0 or an
+ * errno.
+ */
+int
+_pthread_workqueue_asynchronous_override_reset_self(void *resource)
+{
+ int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET,
+ 0 /* !reset_all */,
+ (uintptr_t)resource,
+ 0);
+ if (res == -1) { res = errno; }
+ return res;
+}
+
+/*
+ * New SPI (diff '+' lines): reset ALL dispatch-asynchronous overrides on the
+ * current thread (reset_all = 1, so the kernel requires the resource arg to
+ * be 0 and substitutes the wildcard). Returns 0 or an errno.
+ */
+int
+_pthread_workqueue_asynchronous_override_reset_all_self(void)
+{
+ int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET,
+ 1 /* reset_all */,
+ 0,
+ 0);
+ if (res == -1) { res = errno; }
+ return res;
+}
+
int
posix_spawnattr_set_qos_class_np(posix_spawnattr_t * __restrict __attr, qos_class_t __qos_class)
{