/*
- * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/cdefs.h>
-// <rdar://problem/26158937> panic() should be marked noreturn
-extern void panic(const char *string, ...) __printflike(1,2) __dead2;
-
#include <kern/assert.h>
#include <kern/ast.h>
#include <kern/clock.h>
#include <kern/kern_types.h>
#include <kern/policy_internal.h>
#include <kern/processor.h>
-#include <kern/sched_prim.h> /* for thread_exception_return */
+#include <kern/sched_prim.h> /* for thread_exception_return */
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/param.h>
-#include <sys/proc_info.h> /* for fill_procworkqueue */
+#include <sys/proc_info.h> /* for fill_procworkqueue */
#include <sys/proc_internal.h>
#include <sys/pthread_shims.h>
#include <sys/resourcevar.h>
#include <os/log.h>
-extern thread_t port_name_to_thread(mach_port_name_t port_name); /* osfmk/kern/ipc_tt.h */
-
static void workq_unpark_continue(void *uth, wait_result_t wr) __dead2;
-static void workq_schedule_creator(proc_t p, struct workqueue *wq, int flags);
+static void workq_schedule_creator(proc_t p, struct workqueue *wq,
+ workq_kern_threadreq_flags_t flags);
static bool workq_threadreq_admissible(struct workqueue *wq, struct uthread *uth,
- workq_threadreq_t req);
+ workq_threadreq_t req);
static uint32_t workq_constrained_allowance(struct workqueue *wq,
- thread_qos_t at_qos, struct uthread *uth, bool may_start_timer);
+ thread_qos_t at_qos, struct uthread *uth, bool may_start_timer);
static bool workq_thread_is_busy(uint64_t cur_ts,
- _Atomic uint64_t *lastblocked_tsp);
+ _Atomic uint64_t *lastblocked_tsp);
static int workq_sysctl_handle_usecs SYSCTL_HANDLER_ARGS;
};
#define WORKQ_SYSCTL_USECS(var, init) \
- static struct workq_usec_var var = { .usecs = init }; \
- SYSCTL_OID(_kern, OID_AUTO, var##_usecs, \
- CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &var, 0, \
- workq_sysctl_handle_usecs, "I", "")
-
-static lck_grp_t *workq_lck_grp;
-static lck_attr_t *workq_lck_attr;
-static lck_grp_attr_t *workq_lck_grp_attr;
+ static struct workq_usec_var var = { .usecs = init }; \
+ SYSCTL_OID(_kern, OID_AUTO, var##_usecs, \
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &var, 0, \
+ workq_sysctl_handle_usecs, "I", "")
+
+static LCK_GRP_DECLARE(workq_lck_grp, "workq");
os_refgrp_decl(static, workq_refgrp, "workq", NULL);
-static zone_t workq_zone_workqueue;
-static zone_t workq_zone_threadreq;
+static ZONE_DECLARE(workq_zone_workqueue, "workq.wq",
+ sizeof(struct workqueue), ZC_NONE);
+static ZONE_DECLARE(workq_zone_threadreq, "workq.threadreq",
+ sizeof(struct workq_threadreq_s), ZC_CACHING);
-WORKQ_SYSCTL_USECS(wq_stalled_window, WQ_STALLED_WINDOW_USECS);
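+/*
+ * Workqueues whose last reference is dropped through workq_deallocate_safe()
+ * are torn down later from this daemon queue (see workq_deallocate_queue_invoke).
+ */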
+static struct mpsc_daemon_queue workq_deallocate_queue;
+
+WORKQ_SYSCTL_USECS(wq_stalled_window, WQ_STALLED_WINDOW_USECS);
WORKQ_SYSCTL_USECS(wq_reduce_pool_window, WQ_REDUCE_POOL_WINDOW_USECS);
WORKQ_SYSCTL_USECS(wq_max_timer_interval, WQ_MAX_TIMER_INTERVAL_USECS);
static uint32_t wq_max_threads = WORKQUEUE_MAXTHREADS;
#pragma unused(arg2)
struct workq_usec_var *v = arg1;
int error = sysctl_handle_int(oidp, &v->usecs, 0, req);
- if (error || !req->newptr)
+ if (error || !req->newptr) {
return error;
+ }
clock_interval_to_absolutetime_interval(v->usecs, NSEC_PER_USEC,
- &v->abstime);
+ &v->abstime);
return 0;
}
SYSCTL_INT(_kern, OID_AUTO, wq_max_threads, CTLFLAG_RW | CTLFLAG_LOCKED,
- &wq_max_threads, 0, "");
+ &wq_max_threads, 0, "");
SYSCTL_INT(_kern, OID_AUTO, wq_max_constrained_threads, CTLFLAG_RW | CTLFLAG_LOCKED,
- &wq_max_constrained_threads, 0, "");
+ &wq_max_constrained_threads, 0, "");
#pragma mark p_wqptr
struct workqueue *wq;
proc_lock(p);
- wq = p->p_wqptr;
+ wq = os_atomic_load(&p->p_wqptr, relaxed);
if (wq == NULL) {
- p->p_wqptr = WQPTR_IS_INITING_VALUE;
+ os_atomic_store(&p->p_wqptr, WQPTR_IS_INITING_VALUE, relaxed);
proc_unlock(p);
return true;
}
static inline void
workq_thread_wakeup(struct uthread *uth)
{
- if ((uth->uu_workq_flags & UT_WORKQ_IDLE_CLEANUP) == 0) {
- thread_wakeup_thread(workq_parked_wait_event(uth), uth->uu_thread);
- }
+ thread_wakeup_thread(workq_parked_wait_event(uth), uth->uu_thread);
}
#pragma mark wq_thactive
#define WQ_THACTIVE_BUCKET_HALF (1U << (WQ_THACTIVE_BUCKET_WIDTH - 1))
static_assert(sizeof(wq_thactive_t) * CHAR_BIT - WQ_THACTIVE_QOS_SHIFT >= 3,
- "Make sure we have space to encode a QoS");
+ "Make sure we have space to encode a QoS");
static inline wq_thactive_t
_wq_thactive(struct workqueue *wq)
{
- return os_atomic_load(&wq->wq_thactive, relaxed);
+ return os_atomic_load_wide(&wq->wq_thactive, relaxed);
}
static inline int
}
#define WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(tha) \
- ((tha) >> WQ_THACTIVE_QOS_SHIFT)
+ ((thread_qos_t)((tha) >> WQ_THACTIVE_QOS_SHIFT))
static inline thread_qos_t
_wq_thactive_best_constrained_req_qos(struct workqueue *wq)
workq_threadreq_t req;
req = priority_queue_max(&wq->wq_constrained_queue,
- struct workq_threadreq_s, tr_entry);
+ struct workq_threadreq_s, tr_entry);
new_qos = req ? req->tr_qos : THREAD_QOS_UNSPECIFIED;
old_qos = _wq_thactive_best_constrained_req_qos(wq);
if (old_qos != new_qos) {
v = os_atomic_add(&wq->wq_thactive, v, relaxed);
#ifdef __LP64__
WQ_TRACE_WQ(TRACE_wq_thactive_update, wq, (uint64_t)v,
- (uint64_t)(v >> 64), 0, 0);
+ (uint64_t)(v >> 64), 0, 0);
#else
WQ_TRACE_WQ(TRACE_wq_thactive_update, wq, v, 0, 0, 0);
#endif
static inline void
_wq_thactive_move(struct workqueue *wq,
- thread_qos_t old_qos, thread_qos_t new_qos)
+ thread_qos_t old_qos, thread_qos_t new_qos)
{
wq_thactive_t v = _wq_thactive_offset_for_qos(new_qos) -
- _wq_thactive_offset_for_qos(old_qos);
- os_atomic_add_orig(&wq->wq_thactive, v, relaxed);
+ _wq_thactive_offset_for_qos(old_qos);
+ os_atomic_add(&wq->wq_thactive, v, relaxed);
wq->wq_thscheduled_count[_wq_bucket(old_qos)]--;
wq->wq_thscheduled_count[_wq_bucket(new_qos)]++;
}
static inline uint32_t
_wq_thactive_aggregate_downto_qos(struct workqueue *wq, wq_thactive_t v,
- thread_qos_t qos, uint32_t *busycount, uint32_t *max_busycount)
+ thread_qos_t qos, uint32_t *busycount, uint32_t *max_busycount)
{
uint32_t count = 0, active;
uint64_t curtime;
return !wq || _wq_exiting(wq);
}
-struct turnstile *
-workq_turnstile(struct proc *p)
-{
- struct workqueue *wq = proc_get_wqptr(p);
- return wq ? wq->wq_turnstile : TURNSTILE_NULL;
-}
-
#pragma mark workqueue lock
static bool
static inline void
workq_lock_spin(struct workqueue *wq)
{
- lck_spin_lock(&wq->wq_lock);
+ lck_spin_lock_grp(&wq->wq_lock, &workq_lck_grp);
}
static inline void
static inline bool
workq_lock_try(struct workqueue *wq)
{
- return lck_spin_try_lock(&wq->wq_lock);
+ return lck_spin_try_lock_grp(&wq->wq_lock, &workq_lck_grp);
}
static inline void
#pragma mark idle thread lists
#define WORKQ_POLICY_INIT(qos) \
- (struct uu_workq_policy){ .qos_req = qos, .qos_bucket = qos }
+ (struct uu_workq_policy){ .qos_req = qos, .qos_bucket = qos }
static inline thread_qos_t
workq_pri_bucket(struct uu_workq_policy req)
workq_threadreq_param_t cur_trp, req_trp = { };
cur_trp.trp_value = uth->uu_save.uus_workq_park_data.workloop_params;
- if (req->tr_flags & TR_FLAG_WL_PARAMS) {
+ if (req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS) {
req_trp = kqueue_threadreq_workloop_param(req);
}
return true;
}
- if ((req_flags & TRP_POLICY) && cur_trp.trp_pol != cur_trp.trp_pol) {
+ if ((req_flags & TRP_POLICY) && req_trp.trp_pol != cur_trp.trp_pol) {
return true;
}
static void
workq_thread_update_bucket(proc_t p, struct workqueue *wq, struct uthread *uth,
- struct uu_workq_policy old_pri, struct uu_workq_policy new_pri,
- bool force_run)
+ struct uu_workq_policy old_pri, struct uu_workq_policy new_pri,
+ bool force_run)
{
thread_qos_t old_bucket = old_pri.qos_bucket;
thread_qos_t new_bucket = workq_pri_bucket(new_pri);
assert(uth == current_uthread());
workq_threadreq_param_t trp = { };
- if (req && (req->tr_flags & TR_FLAG_WL_PARAMS)) {
+ if (req && (req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS)) {
trp = kqueue_threadreq_workloop_param(req);
}
if (trp.trp_flags & TRP_CPUPERCENT) {
thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, trp.trp_cpupercent,
- (uint64_t)trp.trp_refillms * NSEC_PER_SEC);
+ (uint64_t)trp.trp_refillms * NSEC_PER_SEC);
uth->uu_workq_flags |= UT_WORKQ_CPUPERCENT;
}
}
static void
workq_thread_reset_pri(struct workqueue *wq, struct uthread *uth,
- workq_threadreq_t req)
+ workq_threadreq_t req, bool unpark)
{
thread_t th = uth->uu_thread;
thread_qos_t qos = req ? req->tr_qos : WORKQ_THREAD_QOS_CLEANUP;
int priority = 31;
int policy = POLICY_TIMESHARE;
- if (req && (req->tr_flags & TR_FLAG_WL_PARAMS)) {
+ if (req && (req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS)) {
trp = kqueue_threadreq_workloop_param(req);
}
uth->uu_workq_pri = WORKQ_POLICY_INIT(qos);
uth->uu_workq_flags &= ~UT_WORKQ_OUTSIDE_QOS;
- uth->uu_save.uus_workq_park_data.workloop_params = trp.trp_value;
- // qos sent out to userspace (may differ from uu_workq_pri on param threads)
- uth->uu_save.uus_workq_park_data.qos = qos;
+ if (unpark) {
+ uth->uu_save.uus_workq_park_data.workloop_params = trp.trp_value;
+ // qos sent out to userspace (may differ from uu_workq_pri on param threads)
+ uth->uu_save.uus_workq_park_data.qos = qos;
+ }
if (qos == WORKQ_THREAD_QOS_MANAGER) {
uint32_t mgr_pri = wq->wq_event_manager_priority;
if (mgr_pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) {
mgr_pri &= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
thread_set_workq_pri(th, THREAD_QOS_UNSPECIFIED, mgr_pri,
- POLICY_TIMESHARE);
+ POLICY_TIMESHARE);
return;
}
* every time a servicer is being told about a new max QoS.
*/
void
-workq_thread_set_max_qos(struct proc *p, struct kqrequest *kqr)
+workq_thread_set_max_qos(struct proc *p, workq_threadreq_t kqr)
{
struct uu_workq_policy old_pri, new_pri;
- struct uthread *uth = get_bsdthread_info(kqr->kqr_thread);
+ struct uthread *uth = current_uthread();
struct workqueue *wq = proc_get_wqptr_fast(p);
- thread_qos_t qos = kqr->kqr_qos_index;
+ thread_qos_t qos = kqr->tr_kq_qos_index;
- if (uth->uu_workq_pri.qos_max == qos)
+ if (uth->uu_workq_pri.qos_max == qos) {
return;
+ }
workq_lock_spin(wq);
old_pri = new_pri = uth->uu_workq_pri;
static inline bool
workq_should_kill_idle_thread(struct workqueue *wq, struct uthread *uth,
- uint64_t now)
+ uint64_t now)
{
uint64_t delay = workq_kill_delay_for_idle_thread(wq);
return now - uth->uu_save.uus_workq_park_data.idle_stamp > delay;
* fall into long-term timer list shenanigans.
*/
thread_call_enter_delayed_with_leeway(wq->wq_death_call, NULL, deadline,
- wq_reduce_pool_window.abstime / 10,
- THREAD_CALL_DELAY_LEEWAY | THREAD_CALL_DELAY_USER_BACKGROUND);
+ wq_reduce_pool_window.abstime / 10,
+ THREAD_CALL_DELAY_LEEWAY | THREAD_CALL_DELAY_USER_BACKGROUND);
}
/*
struct uthread *uth;
assert(wq->wq_thdying_count >= decrement);
- if ((wq->wq_thdying_count -= decrement) > 0)
+ if ((wq->wq_thdying_count -= decrement) > 0) {
return;
+ }
- if (wq->wq_thidlecount <= 1)
+ if (wq->wq_thidlecount <= 1) {
return;
+ }
- if ((uth = workq_oldest_killable_idle_thread(wq)) == NULL)
+ if ((uth = workq_oldest_killable_idle_thread(wq)) == NULL) {
return;
+ }
uint64_t now = mach_absolute_time();
uint64_t delay = workq_kill_delay_for_idle_thread(wq);
if (now - uth->uu_save.uus_workq_park_data.idle_stamp > delay) {
WQ_TRACE_WQ(TRACE_wq_thread_terminate | DBG_FUNC_START,
- wq, wq->wq_thidlecount, 0, 0, 0);
+ wq, wq->wq_thidlecount, 0, 0, 0);
wq->wq_thdying_count++;
uth->uu_workq_flags |= UT_WORKQ_DYING;
- workq_thread_wakeup(uth);
+ if ((uth->uu_workq_flags & UT_WORKQ_IDLE_CLEANUP) == 0) {
+ workq_thread_wakeup(uth);
+ }
return;
}
workq_death_call_schedule(wq,
- uth->uu_save.uus_workq_park_data.idle_stamp + delay);
+ uth->uu_save.uus_workq_park_data.idle_stamp + delay);
}
void
TAILQ_REMOVE(&wq->wq_thrunlist, uth, uu_workq_entry);
if (uth->uu_workq_flags & UT_WORKQ_DYING) {
WQ_TRACE_WQ(TRACE_wq_thread_terminate | DBG_FUNC_END,
- wq, wq->wq_thidlecount, 0, 0, 0);
+ wq, wq->wq_thidlecount, 0, 0, 0);
workq_death_policy_evaluate(wq, 1);
}
if (wq->wq_nthreads-- == wq_max_threads) {
workq_lock_spin(wq);
WQ_TRACE_WQ(TRACE_wq_death_call | DBG_FUNC_START, wq, 0, 0, 0, 0);
- os_atomic_and(&wq->wq_flags, ~WQ_DEATH_CALL_SCHEDULED, relaxed);
+ os_atomic_andnot(&wq->wq_flags, WQ_DEATH_CALL_SCHEDULED, relaxed);
workq_death_policy_evaluate(wq, 0);
WQ_TRACE_WQ(TRACE_wq_death_call | DBG_FUNC_END, wq, 0, 0, 0, 0);
workq_unlock(wq);
}
static struct uthread *
-workq_pop_idle_thread(struct workqueue *wq)
+workq_pop_idle_thread(struct workqueue *wq, uint8_t uu_flags,
+ bool *needs_wakeup)
{
struct uthread *uth;
TAILQ_INSERT_TAIL(&wq->wq_thrunlist, uth, uu_workq_entry);
assert((uth->uu_workq_flags & UT_WORKQ_RUNNING) == 0);
- uth->uu_workq_flags |= UT_WORKQ_RUNNING | UT_WORKQ_OVERCOMMIT;
+ uth->uu_workq_flags |= UT_WORKQ_RUNNING | uu_flags;
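+ /* non-overcommit threads count toward the constrained pool limit */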
+ if ((uu_flags & UT_WORKQ_OVERCOMMIT) == 0) {
+ wq->wq_constrained_threads_scheduled++;
+ }
wq->wq_threads_scheduled++;
wq->wq_thidlecount--;
if (__improbable(uth->uu_workq_flags & UT_WORKQ_DYING)) {
uth->uu_workq_flags ^= UT_WORKQ_DYING;
workq_death_policy_evaluate(wq, 1);
+ *needs_wakeup = false;
+ } else if (uth->uu_workq_flags & UT_WORKQ_IDLE_CLEANUP) {
+ *needs_wakeup = false;
+ } else {
+ *needs_wakeup = true;
}
return uth;
}
uth->uu_workq_pri = WORKQ_POLICY_INIT(THREAD_QOS_LEGACY);
uth->uu_workq_thport = MACH_PORT_NULL;
uth->uu_workq_stackaddr = 0;
+ uth->uu_workq_pthread_kill_allowed = 0;
thread_set_tag(th, THREAD_TAG_PTHREAD | THREAD_TAG_WORKQUEUE);
thread_reset_workq_qos(th, THREAD_QOS_LEGACY);
kret = pthread_functions->workq_create_threadstack(p, vmap, &th_stackaddr);
if (kret != KERN_SUCCESS) {
WQ_TRACE_WQ(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq,
- kret, 1, 0, 0);
+ kret, 1, 0, 0);
goto out;
}
kret = thread_create_workq_waiting(p->task, workq_unpark_continue, &th);
if (kret != KERN_SUCCESS) {
WQ_TRACE_WQ(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq,
- kret, 0, 0, 0);
+ kret, 0, 0, 0);
pthread_functions->workq_destroy_threadstack(p, vmap, th_stackaddr);
goto out;
}
wq->wq_creations++;
wq->wq_thidlecount++;
- uth->uu_workq_stackaddr = th_stackaddr;
+ uth->uu_workq_stackaddr = (user_addr_t)th_stackaddr;
TAILQ_INSERT_TAIL(&wq->wq_thnewlist, uth, uu_workq_entry);
WQ_TRACE_WQ(TRACE_wq_thread_create | DBG_FUNC_NONE, wq, 0, 0, 0, 0);
__attribute__((noreturn, noinline))
static void
workq_unpark_for_death_and_unlock(proc_t p, struct workqueue *wq,
- struct uthread *uth, uint32_t death_flags)
+ struct uthread *uth, uint32_t death_flags, uint32_t setup_flags)
{
thread_qos_t qos = workq_pri_override(uth->uu_workq_pri);
bool first_use = uth->uu_workq_flags & UT_WORKQ_NEW;
if (qos > WORKQ_THREAD_QOS_CLEANUP) {
- workq_thread_reset_pri(wq, uth, NULL);
+ workq_thread_reset_pri(wq, uth, NULL, /*unpark*/ true);
qos = WORKQ_THREAD_QOS_CLEANUP;
}
workq_unlock(wq);
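+ /* drop any voucher the exiting thread may still be carrying */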
+ if (setup_flags & WQ_SETUP_CLEAR_VOUCHER) {
+ __assert_only kern_return_t kr;
+ kr = thread_set_voucher_name(MACH_PORT_NULL);
+ assert(kr == KERN_SUCCESS);
+ }
+
uint32_t flags = WQ_FLAG_THREAD_NEWSPI | qos | WQ_FLAG_THREAD_PRIO_QOS;
- uint32_t setup_flags = WQ_SETUP_EXIT_THREAD;
thread_t th = uth->uu_thread;
vm_map_t vmap = get_task_map(p->task);
- if (!first_use) flags |= WQ_FLAG_THREAD_REUSE;
+ if (!first_use) {
+ flags |= WQ_FLAG_THREAD_REUSE;
+ }
pthread_functions->workq_setup_thread(p, th, vmap, uth->uu_workq_stackaddr,
- uth->uu_workq_thport, 0, setup_flags, flags);
+ uth->uu_workq_thport, 0, WQ_SETUP_EXIT_THREAD, flags);
__builtin_unreachable();
}
__attribute__((always_inline))
static inline void
workq_perform_turnstile_operation_locked(struct workqueue *wq,
- void (^operation)(void))
+ void (^operation)(void))
{
workq_lock_held(wq);
wq->wq_turnstile_updater = current_thread();
static void
workq_turnstile_update_inheritor(struct workqueue *wq,
- turnstile_inheritor_t inheritor,
- turnstile_update_flags_t flags)
+ turnstile_inheritor_t inheritor,
+ turnstile_update_flags_t flags)
{
+ if (wq->wq_inheritor == inheritor) {
+ return;
+ }
+ wq->wq_inheritor = inheritor;
workq_perform_turnstile_operation_locked(wq, ^{
turnstile_update_inheritor(wq->wq_turnstile, inheritor,
- flags | TURNSTILE_IMMEDIATE_UPDATE);
+ flags | TURNSTILE_IMMEDIATE_UPDATE);
turnstile_update_inheritor_complete(wq->wq_turnstile,
- TURNSTILE_INTERLOCK_HELD);
+ TURNSTILE_INTERLOCK_HELD);
});
}
static void
-workq_push_idle_thread(proc_t p, struct workqueue *wq, struct uthread *uth)
+workq_push_idle_thread(proc_t p, struct workqueue *wq, struct uthread *uth,
+ uint32_t setup_flags)
{
uint64_t now = mach_absolute_time();
+ bool is_creator = (uth == wq->wq_creator);
- uth->uu_workq_flags &= ~UT_WORKQ_RUNNING;
if ((uth->uu_workq_flags & UT_WORKQ_OVERCOMMIT) == 0) {
wq->wq_constrained_threads_scheduled--;
}
+ uth->uu_workq_flags &= ~(UT_WORKQ_RUNNING | UT_WORKQ_OVERCOMMIT);
TAILQ_REMOVE(&wq->wq_thrunlist, uth, uu_workq_entry);
wq->wq_threads_scheduled--;
- if (wq->wq_creator == uth) {
- WQ_TRACE_WQ(TRACE_wq_creator_select, wq, 3, 0,
- uth->uu_save.uus_workq_park_data.yields, 0);
+ if (is_creator) {
wq->wq_creator = NULL;
+ WQ_TRACE_WQ(TRACE_wq_creator_select, wq, 3, 0,
+ uth->uu_save.uus_workq_park_data.yields, 0);
+ }
+
+ if (wq->wq_inheritor == uth->uu_thread) {
+ assert(wq->wq_creator == NULL);
if (wq->wq_reqcount) {
workq_turnstile_update_inheritor(wq, wq, TURNSTILE_INHERITOR_WORKQ);
} else {
workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
}
- if (uth->uu_workq_flags & UT_WORKQ_NEW) {
- TAILQ_INSERT_TAIL(&wq->wq_thnewlist, uth, uu_workq_entry);
- wq->wq_thidlecount++;
- return;
- }
- } else {
+ }
+
+ if (uth->uu_workq_flags & UT_WORKQ_NEW) {
+ assert(is_creator || (_wq_flags(wq) & WQ_EXITING));
+ TAILQ_INSERT_TAIL(&wq->wq_thnewlist, uth, uu_workq_entry);
+ wq->wq_thidlecount++;
+ return;
+ }
+
+ if (!is_creator) {
_wq_thactive_dec(wq, uth->uu_workq_pri.qos_bucket);
wq->wq_thscheduled_count[_wq_bucket(uth->uu_workq_pri.qos_bucket)]--;
- assert(!(uth->uu_workq_flags & UT_WORKQ_NEW));
uth->uu_workq_flags |= UT_WORKQ_IDLE_CLEANUP;
}
uint16_t cur_idle = wq->wq_thidlecount;
if (cur_idle >= wq_max_constrained_threads ||
- (wq->wq_thdying_count == 0 && oldest &&
- workq_should_kill_idle_thread(wq, oldest, now))) {
+ (wq->wq_thdying_count == 0 && oldest &&
+ workq_should_kill_idle_thread(wq, oldest, now))) {
/*
* Immediately kill threads if we have too many of them.
*
}
WQ_TRACE_WQ(TRACE_wq_thread_terminate | DBG_FUNC_START,
- wq, cur_idle, 0, 0, 0);
+ wq, cur_idle, 0, 0, 0);
wq->wq_thdying_count++;
uth->uu_workq_flags |= UT_WORKQ_DYING;
uth->uu_workq_flags &= ~UT_WORKQ_IDLE_CLEANUP;
- workq_unpark_for_death_and_unlock(p, wq, uth, 0);
+ workq_unpark_for_death_and_unlock(p, wq, uth, 0, setup_flags);
__builtin_unreachable();
}
wq->wq_thidlecount = cur_idle;
if (cur_idle >= wq_death_max_load && tail &&
- tail->uu_save.uus_workq_park_data.has_stack) {
+ tail->uu_save.uus_workq_park_data.has_stack) {
uth->uu_save.uus_workq_park_data.has_stack = false;
TAILQ_INSERT_TAIL(&wq->wq_thidlelist, uth, uu_workq_entry);
} else {
{
thread_qos_t qos = req->tr_qos;
- if (req->tr_flags & TR_FLAG_WL_OUTSIDE_QOS) {
+ if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
workq_threadreq_param_t trp = kqueue_threadreq_workloop_param(req);
assert(trp.trp_flags & TRP_PRIORITY);
return trp.trp_pri;
return thread_workq_pri_for_qos(qos);
}
-static inline struct priority_queue *
+static inline struct priority_queue_sched_max *
workq_priority_queue_for_req(struct workqueue *wq, workq_threadreq_t req)
{
- if (req->tr_flags & TR_FLAG_WL_OUTSIDE_QOS) {
+ if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
return &wq->wq_special_queue;
- } else if (req->tr_flags & TR_FLAG_OVERCOMMIT) {
+ } else if (req->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) {
return &wq->wq_overcommit_queue;
} else {
return &wq->wq_constrained_queue;
static bool
workq_threadreq_enqueue(struct workqueue *wq, workq_threadreq_t req)
{
- assert(req->tr_state == TR_STATE_NEW);
+ assert(req->tr_state == WORKQ_TR_STATE_NEW);
- req->tr_state = TR_STATE_QUEUED;
+ req->tr_state = WORKQ_TR_STATE_QUEUED;
wq->wq_reqcount += req->tr_count;
if (req->tr_qos == WORKQ_THREAD_QOS_MANAGER) {
assert(wq->wq_event_manager_threadreq == NULL);
- assert(req->tr_flags & TR_FLAG_KEVENT);
+ assert(req->tr_flags & WORKQ_TR_FLAG_KEVENT);
assert(req->tr_count == 1);
wq->wq_event_manager_threadreq = req;
return true;
}
- if (priority_queue_insert(workq_priority_queue_for_req(wq, req),
- &req->tr_entry, workq_priority_for_req(req),
- PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) {
- if ((req->tr_flags & TR_FLAG_OVERCOMMIT) == 0) {
+
+ struct priority_queue_sched_max *q = workq_priority_queue_for_req(wq, req);
+ priority_queue_entry_set_sched_pri(q, &req->tr_entry,
+ workq_priority_for_req(req), false);
+
+ if (priority_queue_insert(q, &req->tr_entry)) {
+ if ((req->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) == 0) {
_wq_thactive_refresh_best_constrained_req_qos(wq);
}
return true;
return true;
}
if (priority_queue_remove(workq_priority_queue_for_req(wq, req),
- &req->tr_entry, PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) {
- if ((req->tr_flags & TR_FLAG_OVERCOMMIT) == 0) {
+ &req->tr_entry)) {
+ if ((req->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) == 0) {
_wq_thactive_refresh_best_constrained_req_qos(wq);
}
return true;
static void
workq_threadreq_destroy(proc_t p, workq_threadreq_t req)
{
- req->tr_state = TR_STATE_IDLE;
- if (req->tr_flags & (TR_FLAG_WORKLOOP | TR_FLAG_KEVENT)) {
+ req->tr_state = WORKQ_TR_STATE_CANCELED;
+ if (req->tr_flags & (WORKQ_TR_FLAG_WORKLOOP | WORKQ_TR_FLAG_KEVENT)) {
kqueue_threadreq_cancel(p, req);
} else {
zfree(workq_zone_threadreq, req);
}
}
-/*
- * Mark a thread request as complete. At this point, it is treated as owned by
- * the submitting subsystem and you should assume it could be freed.
- *
- * Called with the workqueue lock held.
- */
-static void
-workq_threadreq_bind_and_unlock(proc_t p, struct workqueue *wq,
- workq_threadreq_t req, struct uthread *uth)
-{
- uint8_t tr_flags = req->tr_flags;
- bool needs_commit = false;
- int creator_flags = 0;
-
- wq->wq_fulfilled++;
-
- if (req->tr_state == TR_STATE_QUEUED) {
- workq_threadreq_dequeue(wq, req);
- creator_flags = WORKQ_THREADREQ_CAN_CREATE_THREADS;
- }
-
- if (wq->wq_creator == uth) {
- WQ_TRACE_WQ(TRACE_wq_creator_select, wq, 4, 0,
- uth->uu_save.uus_workq_park_data.yields, 0);
- creator_flags = WORKQ_THREADREQ_CAN_CREATE_THREADS |
- WORKQ_THREADREQ_CREATOR_TRANSFER;
- wq->wq_creator = NULL;
- _wq_thactive_inc(wq, req->tr_qos);
- wq->wq_thscheduled_count[_wq_bucket(req->tr_qos)]++;
- } else if (uth->uu_workq_pri.qos_bucket != req->tr_qos) {
- _wq_thactive_move(wq, uth->uu_workq_pri.qos_bucket, req->tr_qos);
- }
- workq_thread_reset_pri(wq, uth, req);
-
- if (tr_flags & TR_FLAG_OVERCOMMIT) {
- if ((uth->uu_workq_flags & UT_WORKQ_OVERCOMMIT) == 0) {
- uth->uu_workq_flags |= UT_WORKQ_OVERCOMMIT;
- wq->wq_constrained_threads_scheduled--;
- }
- } else {
- if ((uth->uu_workq_flags & UT_WORKQ_OVERCOMMIT) != 0) {
- uth->uu_workq_flags &= ~UT_WORKQ_OVERCOMMIT;
- wq->wq_constrained_threads_scheduled++;
- }
- }
-
- if (tr_flags & (TR_FLAG_KEVENT | TR_FLAG_WORKLOOP)) {
- if (req->tr_state == TR_STATE_NEW) {
- /*
- * We're called from workq_kern_threadreq_initiate()
- * due to an unbind, with the kq req held.
- */
- assert(!creator_flags);
- req->tr_state = TR_STATE_IDLE;
- kqueue_threadreq_bind(p, req, uth->uu_thread, 0);
- } else {
- assert(req->tr_count == 0);
- workq_perform_turnstile_operation_locked(wq, ^{
- kqueue_threadreq_bind_prepost(p, req, uth->uu_thread);
- });
- needs_commit = true;
- }
- req = NULL;
- } else if (req->tr_count > 0) {
- req = NULL;
- }
-
- if (creator_flags) {
- /* This can drop the workqueue lock, and take it again */
- workq_schedule_creator(p, wq, creator_flags);
- }
-
- workq_unlock(wq);
-
- if (req) {
- zfree(workq_zone_threadreq, req);
- }
- if (needs_commit) {
- kqueue_threadreq_bind_commit(p, uth->uu_thread);
- }
-
- /*
- * Run Thread, Run!
- */
- uint32_t upcall_flags = WQ_FLAG_THREAD_NEWSPI;
- if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER) {
- upcall_flags |= WQ_FLAG_THREAD_EVENT_MANAGER;
- } else if (tr_flags & TR_FLAG_OVERCOMMIT) {
- upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
- }
- if (tr_flags & TR_FLAG_KEVENT) {
- upcall_flags |= WQ_FLAG_THREAD_KEVENT;
- }
- if (tr_flags & TR_FLAG_WORKLOOP) {
- upcall_flags |= WQ_FLAG_THREAD_WORKLOOP | WQ_FLAG_THREAD_KEVENT;
- }
- uth->uu_save.uus_workq_park_data.upcall_flags = upcall_flags;
-}
-
#pragma mark workqueue thread creation thread calls
static inline bool
workq_thread_call_prepost(struct workqueue *wq, uint32_t sched, uint32_t pend,
- uint32_t fail_mask)
+ uint32_t fail_mask)
{
uint32_t old_flags, new_flags;
os_atomic_rmw_loop(&wq->wq_flags, old_flags, new_flags, acquire, {
if (__improbable(old_flags & (WQ_EXITING | sched | pend | fail_mask))) {
- os_atomic_rmw_loop_give_up(return false);
+ os_atomic_rmw_loop_give_up(return false);
}
if (__improbable(old_flags & WQ_PROC_SUSPENDED)) {
- new_flags = old_flags | pend;
+ new_flags = old_flags | pend;
} else {
- new_flags = old_flags | sched;
+ new_flags = old_flags | sched;
}
});
assert(!preemption_enabled());
if (!workq_thread_call_prepost(wq, WQ_DELAYED_CALL_SCHEDULED,
- WQ_DELAYED_CALL_PENDED, WQ_IMMEDIATE_CALL_PENDED |
- WQ_IMMEDIATE_CALL_SCHEDULED)) {
+ WQ_DELAYED_CALL_PENDED, WQ_IMMEDIATE_CALL_PENDED |
+ WQ_IMMEDIATE_CALL_SCHEDULED)) {
return false;
}
} else if (now - wq->wq_thread_call_last_run <= wq->wq_timer_interval) {
wq->wq_timer_interval *= 2;
if (wq->wq_timer_interval > wq_max_timer_interval.abstime) {
- wq->wq_timer_interval = wq_max_timer_interval.abstime;
+ wq->wq_timer_interval = (uint32_t)wq_max_timer_interval.abstime;
}
} else if (now - wq->wq_thread_call_last_run > 2 * wq->wq_timer_interval) {
wq->wq_timer_interval /= 2;
if (wq->wq_timer_interval < wq_stalled_window.abstime) {
- wq->wq_timer_interval = wq_stalled_window.abstime;
+ wq->wq_timer_interval = (uint32_t)wq_stalled_window.abstime;
}
}
WQ_TRACE_WQ(TRACE_wq_start_add_timer, wq, wq->wq_reqcount,
- _wq_flags(wq), wq->wq_timer_interval, 0);
+ _wq_flags(wq), wq->wq_timer_interval, 0);
thread_call_t call = wq->wq_delayed_call;
uintptr_t arg = WQ_DELAYED_CALL_SCHEDULED;
assert(!preemption_enabled());
if (workq_thread_call_prepost(wq, WQ_IMMEDIATE_CALL_SCHEDULED,
- WQ_IMMEDIATE_CALL_PENDED, 0)) {
+ WQ_IMMEDIATE_CALL_PENDED, 0)) {
WQ_TRACE_WQ(TRACE_wq_start_add_timer, wq, wq->wq_reqcount,
- _wq_flags(wq), 0, 0);
+ _wq_flags(wq), 0, 0);
uintptr_t arg = WQ_IMMEDIATE_CALL_SCHEDULED;
if (thread_call_enter1(wq->wq_immediate_call, (void *)arg)) {
{
struct workqueue *wq = proc_get_wqptr(p);
- if (wq) os_atomic_or(&wq->wq_flags, WQ_PROC_SUSPENDED, relaxed);
+ if (wq) {
+ os_atomic_or(&wq->wq_flags, WQ_PROC_SUSPENDED, relaxed);
+ }
}
void
struct workqueue *wq = proc_get_wqptr(p);
uint32_t wq_flags;
- if (!wq) return;
+ if (!wq) {
+ return;
+ }
- wq_flags = os_atomic_and_orig(&wq->wq_flags, ~(WQ_PROC_SUSPENDED |
- WQ_DELAYED_CALL_PENDED | WQ_IMMEDIATE_CALL_PENDED), relaxed);
+ wq_flags = os_atomic_andnot_orig(&wq->wq_flags, WQ_PROC_SUSPENDED |
+ WQ_DELAYED_CALL_PENDED | WQ_IMMEDIATE_CALL_PENDED, relaxed);
if ((wq_flags & WQ_EXITING) == 0) {
disable_preemption();
if (wq_flags & WQ_IMMEDIATE_CALL_PENDED) {
workq_schedule_immediate_thread_creation(wq);
} else if (wq_flags & WQ_DELAYED_CALL_PENDED) {
workq_schedule_delayed_thread_creation(wq,
- WORKQ_SCHEDULE_DELAYED_THREAD_CREATION_RESTART);
+ WORKQ_SCHEDULE_DELAYED_THREAD_CREATION_RESTART);
}
enable_preemption();
}
static bool
workq_thread_is_busy(uint64_t now, _Atomic uint64_t *lastblocked_tsp)
{
- uint64_t lastblocked_ts = os_atomic_load(lastblocked_tsp, relaxed);
+ uint64_t lastblocked_ts = os_atomic_load_wide(lastblocked_tsp, relaxed);
if (now <= lastblocked_ts) {
/*
* Because the update of the timestamp when a thread blocks
* workq_exit() will set the workqueue to NULL before
* it cancels thread calls.
*/
- if (!wq) return;
+ if (!wq) {
+ return;
+ }
assert((my_flag == WQ_DELAYED_CALL_SCHEDULED) ||
- (my_flag == WQ_IMMEDIATE_CALL_SCHEDULED));
+ (my_flag == WQ_IMMEDIATE_CALL_SCHEDULED));
WQ_TRACE_WQ(TRACE_wq_add_timer | DBG_FUNC_START, wq, _wq_flags(wq),
- wq->wq_nthreads, wq->wq_thidlecount, 0);
+ wq->wq_nthreads, wq->wq_thidlecount, 0);
workq_lock_spin(wq);
wq->wq_thread_call_last_run = mach_absolute_time();
- os_atomic_and(&wq->wq_flags, ~my_flag, release);
+ os_atomic_andnot(&wq->wq_flags, my_flag, release);
/* This can drop the workqueue lock, and take it again */
workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
workq_unlock(wq);
WQ_TRACE_WQ(TRACE_wq_add_timer | DBG_FUNC_END, wq, 0,
- wq->wq_nthreads, wq->wq_thidlecount, 0);
+ wq->wq_nthreads, wq->wq_thidlecount, 0);
}
#pragma mark thread state tracking
* get scheduled and then block after we start down this path), it's
* not a problem. Either timestamp is adequate, so no need to retry
*/
- os_atomic_store(&wq->wq_lastblocked_ts[_wq_bucket(qos)],
- thread_last_run_time(thread), relaxed);
+ os_atomic_store_wide(&wq->wq_lastblocked_ts[_wq_bucket(qos)],
+ thread_last_run_time(thread), relaxed);
if (req_qos == THREAD_QOS_UNSPECIFIED) {
/*
} else {
uint32_t max_busycount, old_req_count;
old_req_count = _wq_thactive_aggregate_downto_qos(wq, old_thactive,
- req_qos, NULL, &max_busycount);
+ req_qos, NULL, &max_busycount);
/*
* If it is possible that may_start_constrained_thread had refused
* admission due to being over the max concurrency, we may need to
}
if (__improbable(kdebug_enable)) {
__unused uint32_t old = _wq_thactive_aggregate_downto_qos(wq,
- old_thactive, qos, NULL, NULL);
+ old_thactive, qos, NULL, NULL);
WQ_TRACE_WQ(TRACE_wq_thread_block | DBG_FUNC_START, wq,
- old - 1, qos | (req_qos << 8),
- wq->wq_reqcount << 1 | start_timer, 0);
+ old - 1, qos | (req_qos << 8),
+ wq->wq_reqcount << 1 | start_timer, 0);
}
break;
old_thactive = _wq_thactive_inc(wq, qos);
if (__improbable(kdebug_enable)) {
__unused uint32_t old = _wq_thactive_aggregate_downto_qos(wq,
- old_thactive, qos, NULL, NULL);
+ old_thactive, qos, NULL, NULL);
req_qos = WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(old_thactive);
WQ_TRACE_WQ(TRACE_wq_thread_block | DBG_FUNC_END, wq,
- old + 1, qos | (req_qos << 8),
- wq->wq_threads_scheduled, 0);
+ old + 1, qos | (req_qos << 8),
+ wq->wq_threads_scheduled, 0);
}
break;
}
os_ref_retain(&wq->wq_refcnt);
}
-void
-workq_destroy(struct workqueue *wq)
+static void
+workq_deallocate_queue_invoke(mpsc_queue_chain_t e,
+ __assert_only mpsc_daemon_queue_t dq)
{
+ struct workqueue *wq;
struct turnstile *ts;
- turnstile_complete((uintptr_t)wq, &wq->wq_turnstile, &ts);
+ wq = mpsc_queue_element(e, struct workqueue, wq_destroy_link);
+ assert(dq == &workq_deallocate_queue);
+
+ turnstile_complete((uintptr_t)wq, &wq->wq_turnstile, &ts, TURNSTILE_WORKQS);
assert(ts);
turnstile_cleanup();
turnstile_deallocate(ts);
- lck_spin_destroy(&wq->wq_lock, workq_lck_grp);
+ lck_spin_destroy(&wq->wq_lock, &workq_lck_grp);
zfree(workq_zone_workqueue, wq);
}
workq_deallocate(struct workqueue *wq)
{
if (os_ref_release_relaxed(&wq->wq_refcnt) == 0) {
- workq_destroy(wq);
+ workq_deallocate_queue_invoke(&wq->wq_destroy_link,
+ &workq_deallocate_queue);
}
}
workq_deallocate_safe(struct workqueue *wq)
{
if (__improbable(os_ref_release_relaxed(&wq->wq_refcnt) == 0)) {
- workq_deallocate_enqueue(wq);
+ mpsc_daemon_enqueue(&workq_deallocate_queue, &wq->wq_destroy_link,
+ MPSC_QUEUE_DISABLE_PREEMPTION);
}
}
*/
int
workq_open(struct proc *p, __unused struct workq_open_args *uap,
- __unused int32_t *retval)
+ __unused int32_t *retval)
{
struct workqueue *wq;
int error = 0;
}
if (wq_init_constrained_limit) {
- uint32_t limit, num_cpus = ml_get_max_cpus();
+ uint32_t limit, num_cpus = ml_wait_max_cpus();
/*
* set up the limit for the constrained pool
*/
limit = num_cpus * WORKQUEUE_CONSTRAINED_FACTOR;
- if (limit > wq_max_constrained_threads)
+ if (limit > wq_max_constrained_threads) {
wq_max_constrained_threads = limit;
+ }
if (wq_max_threads > WQ_THACTIVE_BUCKET_HALF) {
wq_max_threads = WQ_THACTIVE_BUCKET_HALF;
for (thread_qos_t qos = WORKQ_THREAD_QOS_MIN; qos <= WORKQ_THREAD_QOS_MAX; qos++) {
wq_max_parallelism[_wq_bucket(qos)] =
- qos_max_parallelism(qos, QOS_PARALLELISM_COUNT_LOGICAL);
+ qos_max_parallelism(qos, QOS_PARALLELISM_COUNT_LOGICAL);
}
wq_init_constrained_limit = 0;
thread_qos_t mgr_priority_hint = task_get_default_manager_qos(current_task());
pthread_priority_t pp = _pthread_priority_make_from_thread_qos(mgr_priority_hint, 0, 0);
wq->wq_event_manager_priority = (uint32_t)pp;
- wq->wq_timer_interval = wq_stalled_window.abstime;
+ wq->wq_timer_interval = (uint32_t)wq_stalled_window.abstime;
wq->wq_proc = p;
turnstile_prepare((uintptr_t)wq, &wq->wq_turnstile, turnstile_alloc(),
- TURNSTILE_WORKQS);
+ TURNSTILE_WORKQS);
TAILQ_INIT(&wq->wq_thrunlist);
TAILQ_INIT(&wq->wq_thnewlist);
TAILQ_INIT(&wq->wq_thidlelist);
- priority_queue_init(&wq->wq_overcommit_queue,
- PRIORITY_QUEUE_BUILTIN_MAX_HEAP);
- priority_queue_init(&wq->wq_constrained_queue,
- PRIORITY_QUEUE_BUILTIN_MAX_HEAP);
- priority_queue_init(&wq->wq_special_queue,
- PRIORITY_QUEUE_BUILTIN_MAX_HEAP);
+ priority_queue_init(&wq->wq_overcommit_queue);
+ priority_queue_init(&wq->wq_constrained_queue);
+ priority_queue_init(&wq->wq_special_queue);
wq->wq_delayed_call = thread_call_allocate_with_options(
- workq_add_new_threads_call, p, THREAD_CALL_PRIORITY_KERNEL,
- THREAD_CALL_OPTIONS_ONCE);
+ workq_add_new_threads_call, p, THREAD_CALL_PRIORITY_KERNEL,
+ THREAD_CALL_OPTIONS_ONCE);
wq->wq_immediate_call = thread_call_allocate_with_options(
- workq_add_new_threads_call, p, THREAD_CALL_PRIORITY_KERNEL,
- THREAD_CALL_OPTIONS_ONCE);
+ workq_add_new_threads_call, p, THREAD_CALL_PRIORITY_KERNEL,
+ THREAD_CALL_OPTIONS_ONCE);
wq->wq_death_call = thread_call_allocate_with_options(
- workq_kill_old_threads_call, wq,
- THREAD_CALL_PRIORITY_USER, THREAD_CALL_OPTIONS_ONCE);
+ workq_kill_old_threads_call, wq,
+ THREAD_CALL_PRIORITY_USER, THREAD_CALL_OPTIONS_ONCE);
- lck_spin_init(&wq->wq_lock, workq_lck_grp, workq_lck_attr);
+ lck_spin_init(&wq->wq_lock, &workq_lck_grp, LCK_ATTR_NULL);
WQ_TRACE_WQ(TRACE_wq_create | DBG_FUNC_NONE, wq,
- VM_KERNEL_ADDRHIDE(wq), 0, 0, 0);
+ VM_KERNEL_ADDRHIDE(wq), 0, 0, 0);
proc_set_wqptr(p, wq);
}
out:
uint32_t wq_flags;
workq_threadreq_t mgr_req;
- if (!wq) return;
+ if (!wq) {
+ return;
+ }
- WQ_TRACE_WQ(TRACE_wq_pthread_exit|DBG_FUNC_START, wq, 0, 0, 0, 0);
+ WQ_TRACE_WQ(TRACE_wq_pthread_exit | DBG_FUNC_START, wq, 0, 0, 0, 0);
workq_lock_spin(wq);
mgr_req = wq->wq_event_manager_threadreq;
wq->wq_event_manager_threadreq = NULL;
wq->wq_reqcount = 0; /* workq_schedule_creator must not look at queues */
- workq_turnstile_update_inheritor(wq, NULL, 0);
+ wq->wq_creator = NULL;
+ workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
workq_unlock(wq);
* It is hence safe to do the tear down without holding any lock.
*/
priority_queue_destroy(&wq->wq_overcommit_queue,
- struct workq_threadreq_s, tr_entry, ^(void *e){
+ struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){
workq_threadreq_destroy(p, e);
});
priority_queue_destroy(&wq->wq_constrained_queue,
- struct workq_threadreq_s, tr_entry, ^(void *e){
+ struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){
workq_threadreq_destroy(p, e);
});
priority_queue_destroy(&wq->wq_special_queue,
- struct workq_threadreq_s, tr_entry, ^(void *e){
+ struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){
workq_threadreq_destroy(p, e);
});
- WQ_TRACE(TRACE_wq_pthread_exit|DBG_FUNC_END, 0, 0, 0, 0, 0);
+ WQ_TRACE(TRACE_wq_pthread_exit | DBG_FUNC_END, 0, 0, 0, 0, 0);
}
/*
if (wq != NULL) {
thread_t th = current_thread();
- WQ_TRACE_WQ(TRACE_wq_workqueue_exit|DBG_FUNC_START, wq, 0, 0, 0, 0);
+ WQ_TRACE_WQ(TRACE_wq_workqueue_exit | DBG_FUNC_START, wq, 0, 0, 0, 0);
if (thread_get_tag(th) & THREAD_TAG_WORKQUEUE) {
/*
assert(TAILQ_EMPTY(&wq->wq_thidlelist));
WQ_TRACE_WQ(TRACE_wq_destroy | DBG_FUNC_END, wq,
- VM_KERNEL_ADDRHIDE(wq), 0, 0, 0);
+ VM_KERNEL_ADDRHIDE(wq), 0, 0, 0);
workq_deallocate(wq);
- WQ_TRACE(TRACE_wq_workqueue_exit|DBG_FUNC_END, 0, 0, 0, 0, 0);
+ WQ_TRACE(TRACE_wq_workqueue_exit | DBG_FUNC_END, 0, 0, 0, 0, 0);
}
}
static bool
_pthread_priority_to_policy(pthread_priority_t priority,
- thread_qos_policy_data_t *data)
+ thread_qos_policy_data_t *data)
{
data->qos_tier = _pthread_priority_thread_qos(priority);
data->tier_importance = _pthread_priority_relpri(priority);
if (data->qos_tier == THREAD_QOS_UNSPECIFIED || data->tier_importance > 0 ||
- data->tier_importance < THREAD_QOS_MIN_TIER_IMPORTANCE) {
+ data->tier_importance < THREAD_QOS_MIN_TIER_IMPORTANCE) {
return false;
}
return true;
static int
bsdthread_set_self(proc_t p, thread_t th, pthread_priority_t priority,
- mach_port_name_t voucher, enum workq_set_self_flags flags)
+ mach_port_name_t voucher, enum workq_set_self_flags flags)
{
struct uthread *uth = get_bsdthread_info(th);
struct workqueue *wq = proc_get_wqptr(p);
goto qos;
}
- struct kqrequest *kqr = uth->uu_kqr_bound;
+ workq_threadreq_t kqr = uth->uu_kqr_bound;
if (kqr == NULL) {
unbind_rv = EALREADY;
goto qos;
}
- if (kqr->kqr_state & KQR_WORKLOOP) {
+ if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
unbind_rv = EINVAL;
goto qos;
}
- kqueue_threadreq_unbind(p, uth->uu_kqr_bound);
+ kqueue_threadreq_unbind(p, kqr);
}
qos:
qos_rv = EPERM;
goto voucher;
}
- } else if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER) {
+ } else if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER ||
+ uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_ABOVEUI) {
/*
- * Workqueue manager threads can't change QoS
+ * Workqueue manager threads or threads above UI can't change QoS
*/
qos_rv = EINVAL;
goto voucher;
if (old_overcommit) {
wq->wq_constrained_threads_scheduled++;
} else if (wq->wq_constrained_threads_scheduled-- ==
- wq_max_constrained_threads) {
+ wq_max_constrained_threads) {
force_run = true;
}
}
old_pri = new_pri = uth->uu_workq_pri;
- new_pri.qos_req = new_policy.qos_tier;
+ new_pri.qos_req = (thread_qos_t)new_policy.qos_tier;
workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, force_run);
workq_unlock(wq);
}
kr = thread_policy_set_internal(th, THREAD_QOS_POLICY,
- (thread_policy_t)&new_policy, THREAD_QOS_POLICY_COUNT);
+ (thread_policy_t)&new_policy, THREAD_QOS_POLICY_COUNT);
if (kr != KERN_SUCCESS) {
qos_rv = EINVAL;
}
}
fixedpri:
- if (qos_rv) goto done;
+ if (qos_rv) {
+ goto done;
+ }
if (flags & WORKQ_SET_SELF_FIXEDPRIORITY_FLAG) {
thread_extended_policy_data_t extpol = {.timeshare = 0};
}
kr = thread_policy_set_internal(th, THREAD_EXTENDED_POLICY,
- (thread_policy_t)&extpol, THREAD_EXTENDED_POLICY_COUNT);
+ (thread_policy_t)&extpol, THREAD_EXTENDED_POLICY_COUNT);
if (kr != KERN_SUCCESS) {
fixedpri_rv = EINVAL;
goto done;
}
kr = thread_policy_set_internal(th, THREAD_EXTENDED_POLICY,
- (thread_policy_t)&extpol, THREAD_EXTENDED_POLICY_COUNT);
+ (thread_policy_t)&extpol, THREAD_EXTENDED_POLICY_COUNT);
if (kr != KERN_SUCCESS) {
fixedpri_rv = EINVAL;
goto done;
return fixedpri_rv;
}
+
return 0;
}
static int
bsdthread_add_explicit_override(proc_t p, mach_port_name_t kport,
- pthread_priority_t pp, user_addr_t resource)
+ pthread_priority_t pp, user_addr_t resource)
{
thread_qos_t qos = _pthread_priority_thread_qos(pp);
if (qos == THREAD_QOS_UNSPECIFIED) {
return EINVAL;
}
- thread_t th = port_name_to_thread(kport);
+ thread_t th = port_name_to_thread(kport,
+ PORT_TO_THREAD_IN_CURRENT_TASK);
if (th == THREAD_NULL) {
return ESRCH;
}
int rv = proc_thread_qos_add_override(p->task, th, 0, qos, TRUE,
- resource, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE);
+ resource, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE);
thread_deallocate(th);
return rv;
static int
bsdthread_remove_explicit_override(proc_t p, mach_port_name_t kport,
- user_addr_t resource)
+ user_addr_t resource)
{
- thread_t th = port_name_to_thread(kport);
+ thread_t th = port_name_to_thread(kport,
+ PORT_TO_THREAD_IN_CURRENT_TASK);
if (th == THREAD_NULL) {
return ESRCH;
}
int rv = proc_thread_qos_remove_override(p->task, th, 0, resource,
- THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE);
+ THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE);
thread_deallocate(th);
return rv;
static int
workq_thread_add_dispatch_override(proc_t p, mach_port_name_t kport,
- pthread_priority_t pp, user_addr_t ulock_addr)
+ pthread_priority_t pp, user_addr_t ulock_addr)
{
struct uu_workq_policy old_pri, new_pri;
struct workqueue *wq = proc_get_wqptr(p);
return EINVAL;
}
- thread_t thread = port_name_to_thread(kport);
+ thread_t thread = port_name_to_thread(kport,
+ PORT_TO_THREAD_IN_CURRENT_TASK);
if (thread == THREAD_NULL) {
return ESRCH;
}
}
WQ_TRACE_WQ(TRACE_wq_override_dispatch | DBG_FUNC_NONE,
- wq, thread_tid(thread), 1, pp, 0);
+ wq, thread_tid(thread), 1, pp, 0);
thread_mtx_lock(thread);
if (ulock_addr) {
- uint64_t val;
+ uint32_t val;
int rc;
/*
* Workaround lack of explicit support for 'no-fault copyin'
* <rdar://problem/24999882>, as disabling preemption prevents paging in
*/
disable_preemption();
- rc = copyin_word(ulock_addr, &val, sizeof(kport));
+ rc = copyin_atomic32(ulock_addr, &val);
enable_preemption();
- if (rc == 0 && ulock_owner_value_to_port_name((uint32_t)val) != kport) {
+ if (rc == 0 && ulock_owner_value_to_port_name(val) != kport) {
goto out;
}
}
return 0;
}
+static int
+workq_thread_allow_kill(__unused proc_t p, thread_t thread, bool enable)
+{
+ if (!(thread_get_tag(thread) & THREAD_TAG_WORKQUEUE)) {
+ // If the thread isn't a workqueue thread, don't set the
+ // kill_allowed bit; however, we still need to return 0
+ // instead of an error code since this code is executed
+ // on the abort path which needs to not depend on the
+ // pthread_t (returning an error depends on pthread_t via
+ // cerror_nocancel)
+ return 0;
+ }
+ struct uthread *uth = get_bsdthread_info(thread);
+ uth->uu_workq_pthread_kill_allowed = enable;
+ return 0;
+}
+
static int
bsdthread_get_max_parallelism(thread_qos_t qos, unsigned long flags,
- int *retval)
+ int *retval)
{
static_assert(QOS_PARALLELISM_COUNT_LOGICAL ==
- _PTHREAD_QOS_PARALLELISM_COUNT_LOGICAL, "logical");
+ _PTHREAD_QOS_PARALLELISM_COUNT_LOGICAL, "logical");
static_assert(QOS_PARALLELISM_REALTIME ==
- _PTHREAD_QOS_PARALLELISM_REALTIME, "realtime");
+ _PTHREAD_QOS_PARALLELISM_REALTIME, "realtime");
if (flags & ~(QOS_PARALLELISM_REALTIME | QOS_PARALLELISM_COUNT_LOGICAL)) {
return EINVAL;
}
#define ENSURE_UNUSED(arg) \
- ({ if ((arg) != 0) { return EINVAL; } })
+ ({ if ((arg) != 0) { return EINVAL; } })
int
bsdthread_ctl(struct proc *p, struct bsdthread_ctl_args *uap, int *retval)
switch (uap->cmd) {
case BSDTHREAD_CTL_QOS_OVERRIDE_START:
return bsdthread_add_explicit_override(p, (mach_port_name_t)uap->arg1,
- (pthread_priority_t)uap->arg2, uap->arg3);
+ (pthread_priority_t)uap->arg2, uap->arg3);
case BSDTHREAD_CTL_QOS_OVERRIDE_END:
ENSURE_UNUSED(uap->arg3);
return bsdthread_remove_explicit_override(p, (mach_port_name_t)uap->arg1,
- (user_addr_t)uap->arg2);
+ (user_addr_t)uap->arg2);
case BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH:
return workq_thread_add_dispatch_override(p, (mach_port_name_t)uap->arg1,
- (pthread_priority_t)uap->arg2, uap->arg3);
+ (pthread_priority_t)uap->arg2, uap->arg3);
case BSDTHREAD_CTL_QOS_OVERRIDE_RESET:
return workq_thread_reset_dispatch_override(p, current_thread());
case BSDTHREAD_CTL_SET_SELF:
return bsdthread_set_self(p, current_thread(),
- (pthread_priority_t)uap->arg1, (mach_port_name_t)uap->arg2,
- (enum workq_set_self_flags)uap->arg3);
+ (pthread_priority_t)uap->arg1, (mach_port_name_t)uap->arg2,
+ (enum workq_set_self_flags)uap->arg3);
case BSDTHREAD_CTL_QOS_MAX_PARALLELISM:
ENSURE_UNUSED(uap->arg3);
return bsdthread_get_max_parallelism((thread_qos_t)uap->arg1,
- (unsigned long)uap->arg2, retval);
+ (unsigned long)uap->arg2, retval);
+ case BSDTHREAD_CTL_WORKQ_ALLOW_KILL:
+ ENSURE_UNUSED(uap->arg2);
+ ENSURE_UNUSED(uap->arg3);
+ return workq_thread_allow_kill(p, current_thread(), (bool)uap->arg1);
case BSDTHREAD_CTL_SET_QOS:
case BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD:
#pragma mark workqueue thread manipulation
+static void __dead2
+workq_unpark_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
+ struct uthread *uth, uint32_t setup_flags);
+
static void __dead2
workq_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
- struct uthread *uth);
+ struct uthread *uth, uint32_t setup_flags);
static void workq_setup_and_run(proc_t p, struct uthread *uth, int flags) __dead2;
workq_trace_req_id(workq_threadreq_t req)
{
struct kqworkloop *kqwl;
- if (req->tr_flags & TR_FLAG_WORKLOOP) {
- kqwl = __container_of(req, struct kqworkloop, kqwl_request.kqr_req);
+ if (req->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
+ kqwl = __container_of(req, struct kqworkloop, kqwl_request);
return kqwl->kqwl_dynamicid;
}
uint32_t unpaced, upcall_flags = WQ_FLAG_THREAD_NEWSPI;
if (wq == NULL || reqcount <= 0 || reqcount > UINT16_MAX ||
- qos == THREAD_QOS_UNSPECIFIED) {
+ qos == THREAD_QOS_UNSPECIFIED) {
return EINVAL;
}
WQ_TRACE_WQ(TRACE_wq_wqops_reqthreads | DBG_FUNC_NONE,
- wq, reqcount, pp, 0, 0);
+ wq, reqcount, pp, 0, 0);
workq_threadreq_t req = zalloc(workq_zone_threadreq);
priority_queue_entry_init(&req->tr_entry);
- req->tr_state = TR_STATE_NEW;
+ req->tr_state = WORKQ_TR_STATE_NEW;
req->tr_flags = 0;
req->tr_qos = qos;
if (pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) {
- req->tr_flags |= TR_FLAG_OVERCOMMIT;
+ req->tr_flags |= WORKQ_TR_FLAG_OVERCOMMIT;
upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
}
WQ_TRACE_WQ(TRACE_wq_thread_request_initiate | DBG_FUNC_NONE,
- wq, workq_trace_req_id(req), req->tr_qos, reqcount, 0);
+ wq, workq_trace_req_id(req), req->tr_qos, reqcount, 0);
workq_lock_spin(wq);
do {
* If there aren't enough threads, add one, but re-evaluate everything
* as conditions may now have changed.
*/
- if (reqcount > 1 && (req->tr_flags & TR_FLAG_OVERCOMMIT) == 0) {
+ if (reqcount > 1 && (req->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) == 0) {
unpaced = workq_constrained_allowance(wq, qos, NULL, false);
if (unpaced >= reqcount - 1) {
unpaced = reqcount - 1;
* This path does not currently handle custom workloop parameters
* when creating threads for parallelism.
*/
- assert(!(req->tr_flags & TR_FLAG_WL_PARAMS));
+ assert(!(req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS));
/*
* This is a trimmed down version of workq_threadreq_bind_and_unlock()
*/
while (unpaced > 0 && wq->wq_thidlecount) {
- struct uthread *uth = workq_pop_idle_thread(wq);
+ struct uthread *uth;
+ bool needs_wakeup;
+ uint8_t uu_flags = UT_WORKQ_EARLY_BOUND;
+
+ if (req->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) {
+ uu_flags |= UT_WORKQ_OVERCOMMIT;
+ }
+
+ uth = workq_pop_idle_thread(wq, uu_flags, &needs_wakeup);
_wq_thactive_inc(wq, qos);
wq->wq_thscheduled_count[_wq_bucket(qos)]++;
- workq_thread_reset_pri(wq, uth, req);
+ workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
wq->wq_fulfilled++;
- uth->uu_workq_flags |= UT_WORKQ_EARLY_BOUND;
- if ((req->tr_flags & TR_FLAG_OVERCOMMIT) == 0) {
- uth->uu_workq_flags &= ~UT_WORKQ_OVERCOMMIT;
- wq->wq_constrained_threads_scheduled++;
- }
uth->uu_save.uus_workq_park_data.upcall_flags = upcall_flags;
uth->uu_save.uus_workq_park_data.thread_request = req;
- workq_thread_wakeup(uth);
+ if (needs_wakeup) {
+ workq_thread_wakeup(uth);
+ }
unpaced--;
reqcount--;
}
} while (unpaced && wq->wq_nthreads < wq_max_threads &&
- workq_add_new_idle_thread(p, wq));
+ workq_add_new_idle_thread(p, wq));
if (_wq_exiting(wq)) {
goto exiting;
}
- req->tr_count = reqcount;
+ req->tr_count = (uint16_t)reqcount;
if (workq_threadreq_enqueue(wq, req)) {
/* This can drop the workqueue lock, and take it again */
workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
}
bool
-workq_kern_threadreq_initiate(struct proc *p, struct kqrequest *kqr,
- struct turnstile *workloop_ts, thread_qos_t qos, int flags)
+workq_kern_threadreq_initiate(struct proc *p, workq_threadreq_t req,
+ struct turnstile *workloop_ts, thread_qos_t qos,
+ workq_kern_threadreq_flags_t flags)
{
struct workqueue *wq = proc_get_wqptr_fast(p);
- workq_threadreq_t req = &kqr->kqr_req;
struct uthread *uth = NULL;
- uint8_t tr_flags = 0;
- if (kqr->kqr_state & KQR_WORKLOOP) {
- tr_flags = TR_FLAG_WORKLOOP;
+ assert(req->tr_flags & (WORKQ_TR_FLAG_WORKLOOP | WORKQ_TR_FLAG_KEVENT));
+ if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
workq_threadreq_param_t trp = kqueue_threadreq_workloop_param(req);
- if (trp.trp_flags & TRP_PRIORITY) {
- tr_flags |= TR_FLAG_WL_OUTSIDE_QOS;
- qos = thread_workq_qos_for_pri(trp.trp_pri);
- if (qos == THREAD_QOS_UNSPECIFIED) {
- qos = WORKQ_THREAD_QOS_ABOVEUI;
- }
- }
- if (trp.trp_flags) {
- tr_flags |= TR_FLAG_WL_PARAMS;
+ qos = thread_workq_qos_for_pri(trp.trp_pri);
+ if (qos == THREAD_QOS_UNSPECIFIED) {
+ qos = WORKQ_THREAD_QOS_ABOVEUI;
}
- } else {
- tr_flags = TR_FLAG_KEVENT;
- }
- if (qos != WORKQ_THREAD_QOS_MANAGER &&
- (kqr->kqr_state & KQR_THOVERCOMMIT)) {
- tr_flags |= TR_FLAG_OVERCOMMIT;
}
- assert(req->tr_state == TR_STATE_IDLE);
+ assert(req->tr_state == WORKQ_TR_STATE_IDLE);
priority_queue_entry_init(&req->tr_entry);
req->tr_count = 1;
- req->tr_state = TR_STATE_NEW;
- req->tr_flags = tr_flags;
+ req->tr_state = WORKQ_TR_STATE_NEW;
req->tr_qos = qos;
WQ_TRACE_WQ(TRACE_wq_thread_request_initiate | DBG_FUNC_NONE, wq,
- workq_trace_req_id(req), qos, 1, 0);
+ workq_trace_req_id(req), qos, 1, 0);
if (flags & WORKQ_THREADREQ_ATTEMPT_REBIND) {
/*
workq_lock_spin(wq);
if (_wq_exiting(wq)) {
+ req->tr_state = WORKQ_TR_STATE_IDLE;
workq_unlock(wq);
return false;
}
if (uth && workq_threadreq_admissible(wq, uth, req)) {
assert(uth != wq->wq_creator);
- workq_threadreq_bind_and_unlock(p, wq, req, uth);
+ if (uth->uu_workq_pri.qos_bucket != req->tr_qos) {
+ _wq_thactive_move(wq, uth->uu_workq_pri.qos_bucket, req->tr_qos);
+ workq_thread_reset_pri(wq, uth, req, /*unpark*/ false);
+ }
+ /*
+ * We're called from workq_kern_threadreq_initiate()
+ * due to an unbind, with the kq req held.
+ */
+ WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_START, wq,
+ workq_trace_req_id(req), 0, 0, 0);
+ wq->wq_fulfilled++;
+ kqueue_threadreq_bind(p, req, uth->uu_thread, 0);
} else {
if (workloop_ts) {
workq_perform_turnstile_operation_locked(wq, ^{
turnstile_update_inheritor(workloop_ts, wq->wq_turnstile,
- TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_TURNSTILE);
+ TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_TURNSTILE);
turnstile_update_inheritor_complete(workloop_ts,
- TURNSTILE_INTERLOCK_HELD);
+ TURNSTILE_INTERLOCK_HELD);
});
}
if (workq_threadreq_enqueue(wq, req)) {
workq_schedule_creator(p, wq, flags);
}
- workq_unlock(wq);
}
+ workq_unlock(wq);
+
return true;
}
void
-workq_kern_threadreq_modify(struct proc *p, struct kqrequest *kqr,
- thread_qos_t qos, int flags)
+workq_kern_threadreq_modify(struct proc *p, workq_threadreq_t req,
+ thread_qos_t qos, workq_kern_threadreq_flags_t flags)
{
struct workqueue *wq = proc_get_wqptr_fast(p);
- workq_threadreq_t req = &kqr->kqr_req;
- bool change_overcommit = false;
+ bool make_overcommit = false;
- if (req->tr_flags & TR_FLAG_WL_OUTSIDE_QOS) {
+ if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
/* Requests outside-of-QoS shouldn't accept modify operations */
return;
}
workq_lock_spin(wq);
assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
- assert(req->tr_flags & (TR_FLAG_KEVENT | TR_FLAG_WORKLOOP));
+ assert(req->tr_flags & (WORKQ_TR_FLAG_KEVENT | WORKQ_TR_FLAG_WORKLOOP));
- if (req->tr_state == TR_STATE_BINDING) {
- kqueue_threadreq_bind(p, req, req->tr_binding_thread, 0);
+ if (req->tr_state == WORKQ_TR_STATE_BINDING) {
+ kqueue_threadreq_bind(p, req, req->tr_thread, 0);
workq_unlock(wq);
return;
}
- change_overcommit = (bool)(kqr->kqr_state & KQR_THOVERCOMMIT) !=
- (bool)(req->tr_flags & TR_FLAG_OVERCOMMIT);
+ if (flags & WORKQ_THREADREQ_MAKE_OVERCOMMIT) {
+ make_overcommit = (req->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) == 0;
+ }
- if (_wq_exiting(wq) || (req->tr_qos == qos && !change_overcommit)) {
+ if (_wq_exiting(wq) || (req->tr_qos == qos && !make_overcommit)) {
workq_unlock(wq);
return;
}
assert(req->tr_count == 1);
- if (req->tr_state != TR_STATE_QUEUED) {
+ if (req->tr_state != WORKQ_TR_STATE_QUEUED) {
panic("Invalid thread request (%p) state %d", req, req->tr_state);
}
WQ_TRACE_WQ(TRACE_wq_thread_request_modify | DBG_FUNC_NONE, wq,
- workq_trace_req_id(req), qos, 0, 0);
+ workq_trace_req_id(req), qos, 0, 0);
- struct priority_queue *pq = workq_priority_queue_for_req(wq, req);
+ struct priority_queue_sched_max *pq = workq_priority_queue_for_req(wq, req);
workq_threadreq_t req_max;
/*
* If we dequeue the root item of the constrained priority queue,
* maintain the best constrained request qos invariant.
*/
- if (priority_queue_remove(pq, &req->tr_entry,
- PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) {
- if ((req->tr_flags & TR_FLAG_OVERCOMMIT) == 0) {
+ if (priority_queue_remove(pq, &req->tr_entry)) {
+ if ((req->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) == 0) {
_wq_thactive_refresh_best_constrained_req_qos(wq);
}
}
* If the item will not become the root of the priority queue it belongs to,
* then we need to wait in line, just enqueue and return quickly.
*/
- if (__improbable(change_overcommit)) {
- req->tr_flags ^= TR_FLAG_OVERCOMMIT;
+ if (__improbable(make_overcommit)) {
+ req->tr_flags ^= WORKQ_TR_FLAG_OVERCOMMIT;
pq = workq_priority_queue_for_req(wq, req);
}
req->tr_qos = qos;
req_max = priority_queue_max(pq, struct workq_threadreq_s, tr_entry);
if (req_max && req_max->tr_qos >= qos) {
- priority_queue_insert(pq, &req->tr_entry, workq_priority_for_req(req),
- PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE);
+ priority_queue_entry_set_sched_pri(pq, &req->tr_entry,
+ workq_priority_for_req(req), false);
+ priority_queue_insert(pq, &req->tr_entry);
workq_unlock(wq);
return;
}
*
* Pretend the thread request is new again:
* - adjust wq_reqcount to not count it anymore.
- * - make its state TR_STATE_NEW (so that workq_threadreq_bind_and_unlock
+ * - make its state WORKQ_TR_STATE_NEW (so that workq_threadreq_bind_and_unlock
* properly attempts a synchronous bind)
*/
wq->wq_reqcount--;
- req->tr_state = TR_STATE_NEW;
+ req->tr_state = WORKQ_TR_STATE_NEW;
if (workq_threadreq_enqueue(wq, req)) {
workq_schedule_creator(p, wq, flags);
}
}
void
-workq_kern_threadreq_update_inheritor(struct proc *p, struct kqrequest *kqr,
- thread_t owner, struct turnstile *wl_ts,
- turnstile_update_flags_t flags)
+workq_kern_threadreq_update_inheritor(struct proc *p, workq_threadreq_t req,
+ thread_t owner, struct turnstile *wl_ts,
+ turnstile_update_flags_t flags)
{
struct workqueue *wq = proc_get_wqptr_fast(p);
- workq_threadreq_t req = &kqr->kqr_req;
turnstile_inheritor_t inheritor;
assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
- assert(req->tr_flags & TR_FLAG_WORKLOOP);
+ assert(req->tr_flags & WORKQ_TR_FLAG_WORKLOOP);
workq_lock_held(wq);
- if (req->tr_state == TR_STATE_BINDING) {
- kqueue_threadreq_bind(p, req, req->tr_binding_thread,
- KQUEUE_THREADERQ_BIND_NO_INHERITOR_UPDATE);
+ if (req->tr_state == WORKQ_TR_STATE_BINDING) {
+ kqueue_threadreq_bind(p, req, req->tr_thread,
+ KQUEUE_THREADERQ_BIND_NO_INHERITOR_UPDATE);
return;
}
if (_wq_exiting(wq)) {
inheritor = TURNSTILE_INHERITOR_NULL;
} else {
- if (req->tr_state != TR_STATE_QUEUED) {
+ if (req->tr_state != WORKQ_TR_STATE_QUEUED) {
panic("Invalid thread request (%p) state %d", req, req->tr_state);
}
}
void
-workq_kern_threadreq_redrive(struct proc *p, int flags)
+workq_kern_threadreq_redrive(struct proc *p, workq_kern_threadreq_flags_t flags)
{
struct workqueue *wq = proc_get_wqptr_fast(p);
void
workq_schedule_creator_turnstile_redrive(struct workqueue *wq, bool locked)
{
- if (!locked) workq_lock_spin(wq);
- workq_schedule_creator(NULL, wq, WORKQ_THREADREQ_CREATOR_SYNC_UPDATE);
- if (!locked) workq_unlock(wq);
+ if (locked) {
+ workq_schedule_creator(NULL, wq, WORKQ_THREADREQ_NONE);
+ } else {
+ workq_schedule_immediate_thread_creation(wq);
+ }
}
static int
workq_thread_return(struct proc *p, struct workq_kernreturn_args *uap,
- struct workqueue *wq)
+ struct workqueue *wq)
{
thread_t th = current_thread();
struct uthread *uth = get_bsdthread_info(th);
- struct kqrequest *kqr = uth->uu_kqr_bound;
+ workq_threadreq_t kqr = uth->uu_kqr_bound;
workq_threadreq_param_t trp = { };
int nevents = uap->affinity, error;
user_addr_t eventlist = uap->item;
if (((thread_get_tag(th) & THREAD_TAG_WORKQUEUE) == 0) ||
- (uth->uu_workq_flags & UT_WORKQ_DYING)) {
+ (uth->uu_workq_flags & UT_WORKQ_DYING)) {
return EINVAL;
}
proc_unlock(p);
}
- if (kqr && kqr->kqr_req.tr_flags & TR_FLAG_WL_PARAMS) {
+ if (kqr && kqr->tr_flags & WORKQ_TR_FLAG_WL_PARAMS) {
/*
* Ensure we store the threadreq param before unbinding
* the kqr from this thread.
*/
- trp = kqueue_threadreq_workloop_param(&kqr->kqr_req);
+ trp = kqueue_threadreq_workloop_param(kqr);
}
+ /*
+ * Freeze the base pri while we decide the fate of this thread.

+ *
+ * Either:
+ * - we return to user and kevent_cleanup will have unfrozen the base pri,
+ * - or we proceed to workq_select_threadreq_or_park_and_unlock() who will.
+ */
+ thread_freeze_base_pri(th);
+
if (kqr) {
uint32_t upcall_flags = WQ_FLAG_THREAD_NEWSPI | WQ_FLAG_THREAD_REUSE;
- if (kqr->kqr_state & KQR_WORKLOOP) {
+ if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
upcall_flags |= WQ_FLAG_THREAD_WORKLOOP | WQ_FLAG_THREAD_KEVENT;
} else {
upcall_flags |= WQ_FLAG_THREAD_KEVENT;
upcall_flags |= WQ_FLAG_THREAD_OUTSIDEQOS;
} else {
upcall_flags |= uth->uu_workq_pri.qos_req |
- WQ_FLAG_THREAD_PRIO_QOS;
+ WQ_FLAG_THREAD_PRIO_QOS;
}
}
error = pthread_functions->workq_handle_stack_events(p, th,
- get_task_map(p->task), uth->uu_workq_stackaddr,
- uth->uu_workq_thport, eventlist, nevents, upcall_flags);
- if (error) return error;
+ get_task_map(p->task), uth->uu_workq_stackaddr,
+ uth->uu_workq_thport, eventlist, nevents, upcall_flags);
+ if (error) {
+ assert(uth->uu_kqr_bound == kqr);
+ return error;
+ }
// pthread is supposed to pass KEVENT_FLAG_PARKING here
// which should cause the above call to either:
workq_lock_spin(wq);
WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_END, wq, 0, 0, 0, 0);
uth->uu_save.uus_workq_park_data.workloop_params = trp.trp_value;
- workq_select_threadreq_or_park_and_unlock(p, wq, uth);
+ workq_select_threadreq_or_park_and_unlock(p, wq, uth,
+ WQ_SETUP_CLEAR_VOUCHER);
__builtin_unreachable();
}
* arg3 = kevent support
*/
int offset = arg2;
- if (arg3 & 0x01){
+ if (arg3 & 0x01) {
// If we get here, then userspace has indicated support for kevent delivery.
}
*/
if (pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) {
pri &= (_PTHREAD_PRIORITY_SCHED_PRI_MASK |
- _PTHREAD_PRIORITY_SCHED_PRI_FLAG);
+ _PTHREAD_PRIORITY_SCHED_PRI_FLAG);
} else {
thread_qos_t qos = _pthread_priority_thread_qos(pri);
int relpri = _pthread_priority_relpri(pri);
if (relpri > 0 || relpri < THREAD_QOS_MIN_TIER_IMPORTANCE ||
- qos == THREAD_QOS_UNSPECIFIED) {
+ qos == THREAD_QOS_UNSPECIFIED) {
error = EINVAL;
break;
}
thread_t th = current_thread();
struct uthread *uth = get_bsdthread_info(th);
if (((thread_get_tag(th) & THREAD_TAG_WORKQUEUE) == 0) ||
- (uth->uu_workq_flags & (UT_WORKQ_DYING|UT_WORKQ_OVERCOMMIT))) {
+ (uth->uu_workq_flags & (UT_WORKQ_DYING | UT_WORKQ_OVERCOMMIT))) {
error = EINVAL;
break;
}
*retval = should_narrow;
break;
}
+ case WQOPS_SETUP_DISPATCH: {
+ /*
+ * item = pointer to workq_dispatch_config structure
+ * arg2 = sizeof(item)
+ */
+ struct workq_dispatch_config cfg;
+ bzero(&cfg, sizeof(cfg));
+
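+ /* Copy at most sizeof(cfg); older userspace may pass a smaller struct, the rest stays zeroed */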
+ error = copyin(uap->item, &cfg, MIN(sizeof(cfg), (unsigned long) arg2));
+ if (error) {
+ break;
+ }
+
+ if (cfg.wdc_flags & ~WORKQ_DISPATCH_SUPPORTED_FLAGS ||
+ cfg.wdc_version < WORKQ_DISPATCH_MIN_SUPPORTED_VERSION) {
+ error = ENOTSUP;
+ break;
+ }
+
+ /* Load fields from version 1 */
+ p->p_dispatchqueue_serialno_offset = cfg.wdc_queue_serialno_offs;
+
+ /* Load fields from version 2 */
+ if (cfg.wdc_version >= 2) {
+ p->p_dispatchqueue_label_offset = cfg.wdc_queue_label_offs;
+ }
+
+ break;
+ }
default:
error = EINVAL;
break;
}
- return (error);
+ return error;
}
/*
*/
__attribute__((noreturn, noinline))
static void
-workq_park_and_unlock(proc_t p, struct workqueue *wq, struct uthread *uth)
+workq_park_and_unlock(proc_t p, struct workqueue *wq, struct uthread *uth,
+ uint32_t setup_flags)
{
assert(uth == current_uthread());
assert(uth->uu_kqr_bound == NULL);
- workq_push_idle_thread(p, wq, uth); // may not return
+ workq_push_idle_thread(p, wq, uth, setup_flags); // may not return
workq_thread_reset_cpupercent(NULL, uth);
- if (uth->uu_workq_flags & UT_WORKQ_IDLE_CLEANUP) {
+ if ((uth->uu_workq_flags & UT_WORKQ_IDLE_CLEANUP) &&
+ !(uth->uu_workq_flags & UT_WORKQ_DYING)) {
workq_unlock(wq);
/*
*/
if (!uth->uu_save.uus_workq_park_data.has_stack) {
pthread_functions->workq_markfree_threadstack(p, uth->uu_thread,
- get_task_map(p->task), uth->uu_workq_stackaddr);
+ get_task_map(p->task), uth->uu_workq_stackaddr);
}
/*
workq_lock_spin(wq);
uth->uu_workq_flags &= ~UT_WORKQ_IDLE_CLEANUP;
+ setup_flags &= ~WQ_SETUP_CLEAR_VOUCHER;
}
+ WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_END, wq, 0, 0, 0, 0);
+
if (uth->uu_workq_flags & UT_WORKQ_RUNNING) {
/*
* While we'd dropped the lock to unset our voucher, someone came
* event their thread_wakeup() was ineffectual. To correct for that,
* we just run the continuation ourselves.
*/
- WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_END, wq, 0, 0, 0, 0);
- workq_select_threadreq_or_park_and_unlock(p, wq, uth);
+ workq_unpark_select_threadreq_or_park_and_unlock(p, wq, uth, setup_flags);
__builtin_unreachable();
}
if (uth->uu_workq_flags & UT_WORKQ_DYING) {
workq_unpark_for_death_and_unlock(p, wq, uth,
- WORKQ_UNPARK_FOR_DEATH_WAS_IDLE);
+ WORKQ_UNPARK_FOR_DEATH_WAS_IDLE, setup_flags);
__builtin_unreachable();
}
thread_set_pending_block_hint(uth->uu_thread, kThreadWaitParkedWorkQueue);
assert_wait(workq_parked_wait_event(uth), THREAD_INTERRUPTIBLE);
workq_unlock(wq);
- WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_END, wq, 0, 0, 0, 0);
thread_block(workq_unpark_continue);
__builtin_unreachable();
}
* - we are re-using the event manager
*/
return wq->wq_thscheduled_count[_wq_bucket(WORKQ_THREAD_QOS_MANAGER)] == 0 ||
- (uth && uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER);
+ (uth && uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER);
}
static uint32_t
workq_constrained_allowance(struct workqueue *wq, thread_qos_t at_qos,
- struct uthread *uth, bool may_start_timer)
+ struct uthread *uth, bool may_start_timer)
{
assert(at_qos != WORKQ_THREAD_QOS_MANAGER);
uint32_t count = 0;
}
if (max_count >= wq_max_constrained_threads) {
WQ_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 1,
- wq->wq_constrained_threads_scheduled,
- wq_max_constrained_threads, 0);
+ wq->wq_constrained_threads_scheduled,
+ wq_max_constrained_threads, 0);
/*
* we need 1 or more constrained threads to return to the kernel before
* we can dispatch additional work
uint32_t busycount, thactive_count;
thactive_count = _wq_thactive_aggregate_downto_qos(wq, _wq_thactive(wq),
- at_qos, &busycount, NULL);
+ at_qos, &busycount, NULL);
if (uth && uth->uu_workq_pri.qos_bucket != WORKQ_THREAD_QOS_MANAGER &&
- at_qos <= uth->uu_workq_pri.qos_bucket) {
+ at_qos <= uth->uu_workq_pri.qos_bucket) {
/*
* Don't count this thread as currently active, but only if it's not
* a manager thread, as _wq_thactive_aggregate_downto_qos ignores active
if (count > thactive_count + busycount) {
count -= thactive_count + busycount;
WQ_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 2,
- thactive_count, busycount, 0);
+ thactive_count, busycount, 0);
return MIN(count, max_count);
} else {
WQ_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 3,
- thactive_count, busycount, 0);
+ thactive_count, busycount, 0);
}
if (busycount && may_start_timer) {
static bool
workq_threadreq_admissible(struct workqueue *wq, struct uthread *uth,
- workq_threadreq_t req)
+ workq_threadreq_t req)
{
if (req->tr_qos == WORKQ_THREAD_QOS_MANAGER) {
return workq_may_start_event_mgr_thread(wq, uth);
}
- if ((req->tr_flags & TR_FLAG_OVERCOMMIT) == 0) {
+ if ((req->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) == 0) {
return workq_constrained_allowance(wq, req->tr_qos, uth, true);
}
return true;
static workq_threadreq_t
workq_threadreq_select_for_creator(struct workqueue *wq)
{
- workq_threadreq_t req_qos, req_pri, req_tmp;
+ workq_threadreq_t req_qos, req_pri, req_tmp, req_mgr;
thread_qos_t qos = THREAD_QOS_UNSPECIFIED;
uint8_t pri = 0;
- req_tmp = wq->wq_event_manager_threadreq;
- if (req_tmp && workq_may_start_event_mgr_thread(wq, NULL)) {
- return req_tmp;
- }
-
/*
* Compute the best priority request, and ignore the turnstile for now
*/
req_pri = priority_queue_max(&wq->wq_special_queue,
- struct workq_threadreq_s, tr_entry);
+ struct workq_threadreq_s, tr_entry);
if (req_pri) {
- pri = priority_queue_entry_key(&wq->wq_special_queue, &req_pri->tr_entry);
+ pri = (uint8_t)priority_queue_entry_sched_pri(&wq->wq_special_queue,
+ &req_pri->tr_entry);
+ }
+
+ /*
+ * Handle the manager thread request. The special queue might yield
+ * a higher priority, but the manager always beats the QoS world.
+ */
+
+ req_mgr = wq->wq_event_manager_threadreq;
+ if (req_mgr && workq_may_start_event_mgr_thread(wq, NULL)) {
+ uint32_t mgr_pri = wq->wq_event_manager_priority;
+
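+ /*
+ * The manager priority is stored either as a raw scheduler priority
+ * or as a QoS; normalize it to a scheduler priority for comparison.
+ */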
+ if (mgr_pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) {
+ mgr_pri &= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
+ } else {
+ mgr_pri = thread_workq_pri_for_qos(
+ _pthread_priority_thread_qos(mgr_pri));
+ }
+
+ return mgr_pri >= pri ? req_mgr : req_pri;
}
/*
*/
req_qos = priority_queue_max(&wq->wq_overcommit_queue,
- struct workq_threadreq_s, tr_entry);
+ struct workq_threadreq_s, tr_entry);
if (req_qos) {
qos = req_qos->tr_qos;
}
req_tmp = priority_queue_max(&wq->wq_constrained_queue,
- struct workq_threadreq_s, tr_entry);
+ struct workq_threadreq_s, tr_entry);
if (req_tmp && qos < req_tmp->tr_qos) {
if (pri && pri >= thread_workq_pri_for_qos(req_tmp->tr_qos)) {
static workq_threadreq_t
workq_threadreq_select(struct workqueue *wq, struct uthread *uth)
{
- workq_threadreq_t req_qos, req_pri, req_tmp;
+ workq_threadreq_t req_qos, req_pri, req_tmp, req_mgr;
uintptr_t proprietor;
thread_qos_t qos = THREAD_QOS_UNSPECIFIED;
uint8_t pri = 0;
- if (uth == wq->wq_creator) uth = NULL;
-
- req_tmp = wq->wq_event_manager_threadreq;
- if (req_tmp && workq_may_start_event_mgr_thread(wq, uth)) {
- return req_tmp;
+ if (uth == wq->wq_creator) {
+ uth = NULL;
}
/*
* Compute the best priority request (special or turnstile)
*/
- pri = turnstile_workq_proprietor_of_max_turnstile(wq->wq_turnstile,
- &proprietor);
+ pri = (uint8_t)turnstile_workq_proprietor_of_max_turnstile(wq->wq_turnstile,
+ &proprietor);
if (pri) {
struct kqworkloop *kqwl = (struct kqworkloop *)proprietor;
- req_pri = &kqwl->kqwl_request.kqr_req;
- if (req_pri->tr_state != TR_STATE_QUEUED) {
+ req_pri = &kqwl->kqwl_request;
+ if (req_pri->tr_state != WORKQ_TR_STATE_QUEUED) {
panic("Invalid thread request (%p) state %d",
- req_pri, req_pri->tr_state);
+ req_pri, req_pri->tr_state);
}
} else {
req_pri = NULL;
}
req_tmp = priority_queue_max(&wq->wq_special_queue,
- struct workq_threadreq_s, tr_entry);
- if (req_tmp && pri < priority_queue_entry_key(&wq->wq_special_queue,
- &req_tmp->tr_entry)) {
+ struct workq_threadreq_s, tr_entry);
+ if (req_tmp && pri < priority_queue_entry_sched_pri(&wq->wq_special_queue,
+ &req_tmp->tr_entry)) {
req_pri = req_tmp;
- pri = priority_queue_entry_key(&wq->wq_special_queue, &req_tmp->tr_entry);
+ pri = (uint8_t)priority_queue_entry_sched_pri(&wq->wq_special_queue,
+ &req_tmp->tr_entry);
+ }
+
+ /*
+ * Handle the manager thread request. The special queue might yield
+ * a higher priority, but the manager always beats the QoS world.
+ */
+
+ req_mgr = wq->wq_event_manager_threadreq;
+ if (req_mgr && workq_may_start_event_mgr_thread(wq, uth)) {
+ uint32_t mgr_pri = wq->wq_event_manager_priority;
+
+ if (mgr_pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) {
+ mgr_pri &= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
+ } else {
+ mgr_pri = thread_workq_pri_for_qos(
+ _pthread_priority_thread_qos(mgr_pri));
+ }
+
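+ /* On a tie, prefer the manager request */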
+ return mgr_pri >= pri ? req_mgr : req_pri;
}
/*
*/
req_qos = priority_queue_max(&wq->wq_overcommit_queue,
- struct workq_threadreq_s, tr_entry);
+ struct workq_threadreq_s, tr_entry);
if (req_qos) {
qos = req_qos->tr_qos;
}
req_tmp = priority_queue_max(&wq->wq_constrained_queue,
- struct workq_threadreq_s, tr_entry);
+ struct workq_threadreq_s, tr_entry);
if (req_tmp && qos < req_tmp->tr_qos) {
if (pri && pri >= thread_workq_pri_for_qos(req_tmp->tr_qos)) {
* efficient scheduling and reduced context switches.
*/
static void
-workq_schedule_creator(proc_t p, struct workqueue *wq, int flags)
+workq_schedule_creator(proc_t p, struct workqueue *wq,
+ workq_kern_threadreq_flags_t flags)
{
workq_threadreq_t req;
struct uthread *uth;
+ bool needs_wakeup;
workq_lock_held(wq);
assert(p || (flags & WORKQ_THREADREQ_CAN_CREATE_THREADS) == 0);
uth = wq->wq_creator;
if (!wq->wq_reqcount) {
+ /*
+ * There is no thread request left.
+ *
+ * If there is a creator, leave everything in place, so that it cleans
+ * itself up in workq_push_idle_thread().
+ *
+ * Else, make sure the turnstile state is reset to no inheritor.
+ */
if (uth == NULL) {
workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
}
req = workq_threadreq_select_for_creator(wq);
if (req == NULL) {
- if (flags & WORKQ_THREADREQ_CREATOR_SYNC_UPDATE) {
- assert((flags & WORKQ_THREADREQ_CREATOR_TRANSFER) == 0);
- /*
- * turnstile propagation code is reaching out to us,
- * and we still don't want to do anything, do not recurse.
- */
- } else {
+ /*
+ * There isn't a thread request that passes the admission check.
+ *
+ * If there is a creator, do not touch anything, the creator will sort
+ * it out when it runs.
+ *
+ * Else, set the inheritor to "WORKQ" so that the turnstile propagation
+ * code calls us if anything changes.
+ */
+ if (uth == NULL) {
workq_turnstile_update_inheritor(wq, wq, TURNSTILE_INHERITOR_WORKQ);
}
return;
*/
if (workq_thread_needs_priority_change(req, uth)) {
WQ_TRACE_WQ(TRACE_wq_creator_select | DBG_FUNC_NONE,
- wq, 1, thread_tid(uth->uu_thread), req->tr_qos, 0);
- workq_thread_reset_pri(wq, uth, req);
+ wq, 1, thread_tid(uth->uu_thread), req->tr_qos, 0);
+ workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
}
+ assert(wq->wq_inheritor == uth->uu_thread);
} else if (wq->wq_thidlecount) {
/*
* We need to unpark a creator thread
*/
- wq->wq_creator = uth = workq_pop_idle_thread(wq);
- if (workq_thread_needs_priority_change(req, uth)) {
- workq_thread_reset_pri(wq, uth, req);
- }
+ wq->wq_creator = uth = workq_pop_idle_thread(wq, UT_WORKQ_OVERCOMMIT,
+ &needs_wakeup);
+ /* Always reset the priorities on the newly chosen creator */
+ workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
workq_turnstile_update_inheritor(wq, uth->uu_thread,
- TURNSTILE_INHERITOR_THREAD);
+ TURNSTILE_INHERITOR_THREAD);
WQ_TRACE_WQ(TRACE_wq_creator_select | DBG_FUNC_NONE,
- wq, 2, thread_tid(uth->uu_thread), req->tr_qos, 0);
+ wq, 2, thread_tid(uth->uu_thread), req->tr_qos, 0);
uth->uu_save.uus_workq_park_data.fulfilled_snapshot = wq->wq_fulfilled;
uth->uu_save.uus_workq_park_data.yields = 0;
- workq_thread_wakeup(uth);
+ if (needs_wakeup) {
+ workq_thread_wakeup(uth);
+ }
} else {
/*
* We need to allocate a thread...
*/
if (__improbable(wq->wq_nthreads >= wq_max_threads)) {
/* out of threads, just go away */
+ flags = WORKQ_THREADREQ_NONE;
} else if (flags & WORKQ_THREADREQ_SET_AST_ON_FAILURE) {
act_set_astkevent(current_thread(), AST_KEVENT_REDRIVE_THREADREQ);
} else if (!(flags & WORKQ_THREADREQ_CAN_CREATE_THREADS)) {
workq_schedule_delayed_thread_creation(wq, 0);
}
- if (flags & WORKQ_THREADREQ_CREATOR_TRANSFER) {
- /*
- * workq_schedule_creator() failed at creating a thread,
- * and the responsibility of redriving is now with a thread-call.
- *
- * We still need to tell the turnstile the previous creator is gone.
- */
- workq_turnstile_update_inheritor(wq, NULL, 0);
+ /*
+ * If the current thread is the inheritor:
+ *
+ * If we set the AST, then the thread will stay the inheritor until
+ * either the AST calls workq_kern_threadreq_redrive(), or it parks
+ * and calls workq_push_idle_thread().
+ *
+ * Else, the responsibility of the thread creation is with a thread-call
+ * and we need to clear the inheritor.
+ */
+ if ((flags & WORKQ_THREADREQ_SET_AST_ON_FAILURE) == 0 &&
+ wq->wq_inheritor == current_thread()) {
+ workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
}
}
}
+/**
+ * Same as workq_unpark_select_threadreq_or_park_and_unlock,
+ * but do not allow early binds.
+ *
+ * Called with the base pri frozen, will unfreeze it.
+ */
+__attribute__((noreturn, noinline))
+static void
+workq_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
+ struct uthread *uth, uint32_t setup_flags)
+{
+ workq_threadreq_t req = NULL;
+ bool is_creator = (wq->wq_creator == uth);
+ bool schedule_creator = false;
+
+ if (__improbable(_wq_exiting(wq))) {
+ WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 0, 0, 0, 0);
+ goto park;
+ }
+
+ if (wq->wq_reqcount == 0) {
+ WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 1, 0, 0, 0);
+ goto park;
+ }
+
+ req = workq_threadreq_select(wq, uth);
+ if (__improbable(req == NULL)) {
+ WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 2, 0, 0, 0);
+ goto park;
+ }
+
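+ /*
+ * Snapshot the request flags: the request may be handed to the kqueue
+ * layer (and req set to NULL) before we are done using them below.
+ */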
+ uint8_t tr_flags = req->tr_flags;
+ struct turnstile *req_ts = kqueue_threadreq_get_turnstile(req);
+
+ /*
+ * Attempt to set ourselves up as the new thing to run, moving all priority
+ * pushes to ourselves.
+ *
+ * If the current thread is the creator, then the fact that we are presently
+ * running is proof that we'll do something useful, so keep going.
+ *
+ * For other cases, peek at the AST to know whether the scheduler wants
+ * to preempt us; if so, park instead, and move the thread request
+ * turnstile back to the workqueue.
+ */
+ if (req_ts) {
+ workq_perform_turnstile_operation_locked(wq, ^{
+ turnstile_update_inheritor(req_ts, uth->uu_thread,
+ TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD);
+ turnstile_update_inheritor_complete(req_ts,
+ TURNSTILE_INTERLOCK_HELD);
+ });
+ }
+
+ if (is_creator) {
+ WQ_TRACE_WQ(TRACE_wq_creator_select, wq, 4, 0,
+ uth->uu_save.uus_workq_park_data.yields, 0);
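+ /* The creator becomes a regular worker: account for it in the QoS buckets */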
+ wq->wq_creator = NULL;
+ _wq_thactive_inc(wq, req->tr_qos);
+ wq->wq_thscheduled_count[_wq_bucket(req->tr_qos)]++;
+ } else if (uth->uu_workq_pri.qos_bucket != req->tr_qos) {
+ _wq_thactive_move(wq, uth->uu_workq_pri.qos_bucket, req->tr_qos);
+ }
+
+ workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
+
+ thread_unfreeze_base_pri(uth->uu_thread);
+#if 0 // <rdar://problem/55259863> to turn this back on
+ if (__improbable(thread_unfreeze_base_pri(uth->uu_thread) && !is_creator)) {
+ if (req_ts) {
+ workq_perform_turnstile_operation_locked(wq, ^{
+ turnstile_update_inheritor(req_ts, wq->wq_turnstile,
+ TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_TURNSTILE);
+ turnstile_update_inheritor_complete(req_ts,
+ TURNSTILE_INTERLOCK_HELD);
+ });
+ }
+ WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 3, 0, 0, 0);
+ goto park_thawed;
+ }
+#endif
+
+ /*
+ * We passed all checks, dequeue the request, bind to it, and set it up
+ * to return to user.
+ */
+ WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_START, wq,
+ workq_trace_req_id(req), 0, 0, 0);
+ wq->wq_fulfilled++;
+ schedule_creator = workq_threadreq_dequeue(wq, req);
+
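+ /*
+ * Kevent and workloop requests are owned by the kqueue layer after the
+ * prepost; a request with a remaining tr_count is still in use. Either
+ * way, it must not be freed below.
+ */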
+ if (tr_flags & (WORKQ_TR_FLAG_KEVENT | WORKQ_TR_FLAG_WORKLOOP)) {
+ kqueue_threadreq_bind_prepost(p, req, uth);
+ req = NULL;
+ } else if (req->tr_count > 0) {
+ req = NULL;
+ }
+
+ workq_thread_reset_cpupercent(req, uth);
+ if (uth->uu_workq_flags & UT_WORKQ_NEW) {
+ uth->uu_workq_flags ^= UT_WORKQ_NEW;
+ setup_flags |= WQ_SETUP_FIRST_USE;
+ }
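+ /* Keep the constrained-thread accounting in sync with the thread's new mode */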
+ if (tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) {
+ if ((uth->uu_workq_flags & UT_WORKQ_OVERCOMMIT) == 0) {
+ uth->uu_workq_flags |= UT_WORKQ_OVERCOMMIT;
+ wq->wq_constrained_threads_scheduled--;
+ }
+ } else {
+ if ((uth->uu_workq_flags & UT_WORKQ_OVERCOMMIT) != 0) {
+ uth->uu_workq_flags &= ~UT_WORKQ_OVERCOMMIT;
+ wq->wq_constrained_threads_scheduled++;
+ }
+ }
+
+ if (is_creator || schedule_creator) {
+ /* This can drop the workqueue lock, and take it again */
+ workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
+ }
+
+ workq_unlock(wq);
+
+ if (req) {
+ zfree(workq_zone_threadreq, req);
+ }
+
+ /*
+ * Run Thread, Run!
+ */
+ uint32_t upcall_flags = WQ_FLAG_THREAD_NEWSPI;
+ if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER) {
+ upcall_flags |= WQ_FLAG_THREAD_EVENT_MANAGER;
+ } else if (tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) {
+ upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
+ }
+ if (tr_flags & WORKQ_TR_FLAG_KEVENT) {
+ upcall_flags |= WQ_FLAG_THREAD_KEVENT;
+ }
+ if (tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
+ upcall_flags |= WQ_FLAG_THREAD_WORKLOOP | WQ_FLAG_THREAD_KEVENT;
+ }
+ uth->uu_save.uus_workq_park_data.upcall_flags = upcall_flags;
+
+ if (tr_flags & (WORKQ_TR_FLAG_KEVENT | WORKQ_TR_FLAG_WORKLOOP)) {
+ kqueue_threadreq_bind_commit(p, uth->uu_thread);
+ }
+ workq_setup_and_run(p, uth, setup_flags);
+ __builtin_unreachable();
+
+park:
+ thread_unfreeze_base_pri(uth->uu_thread);
+#if 0 // <rdar://problem/55259863>
+park_thawed:
+#endif
+ workq_park_and_unlock(p, wq, uth, setup_flags);
+}
+
/**
* Runs a thread request on a thread
*
* Either way, the thread request object serviced will be moved to state
* BINDING and attached to the uthread.
*
- * Should be called with the workqueue lock held. Will drop it.
+ * Should be called with the workqueue lock held. Will drop it.
+ * Should be called with the base pri not frozen.
*/
__attribute__((noreturn, noinline))
static void
-workq_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
- struct uthread *uth)
+workq_unpark_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
+ struct uthread *uth, uint32_t setup_flags)
{
- uint32_t setup_flags = 0;
- workq_threadreq_t req;
-
if (uth->uu_workq_flags & UT_WORKQ_EARLY_BOUND) {
if (uth->uu_workq_flags & UT_WORKQ_NEW) {
setup_flags |= WQ_SETUP_FIRST_USE;
/*
* This pointer is possibly freed and only used for tracing purposes.
*/
- req = uth->uu_save.uus_workq_park_data.thread_request;
+ workq_threadreq_t req = uth->uu_save.uus_workq_park_data.thread_request;
workq_unlock(wq);
WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_START, wq,
- VM_KERNEL_ADDRHIDE(req), 0, 0, 0);
- goto run;
- } else if (_wq_exiting(wq)) {
- WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 0, 0, 0, 0);
- } else if (wq->wq_reqcount == 0) {
- WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 1, 0, 0, 0);
- } else if ((req = workq_threadreq_select(wq, uth)) == NULL) {
- WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 2, 0, 0, 0);
- } else {
- WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_START, wq,
- workq_trace_req_id(req), 0, 0, 0);
- if (uth->uu_workq_flags & UT_WORKQ_NEW) {
- uth->uu_workq_flags ^= UT_WORKQ_NEW;
- setup_flags |= WQ_SETUP_FIRST_USE;
- }
- workq_thread_reset_cpupercent(req, uth);
- workq_threadreq_bind_and_unlock(p, wq, req, uth);
-run:
+ VM_KERNEL_ADDRHIDE(req), 0, 0, 0);
+ (void)req;
workq_setup_and_run(p, uth, setup_flags);
__builtin_unreachable();
}
- workq_park_and_unlock(p, wq, uth);
- __builtin_unreachable();
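+ /* The callee expects the base pri frozen and will unfreeze it */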
+ thread_freeze_base_pri(uth->uu_thread);
+ workq_select_threadreq_or_park_and_unlock(p, wq, uth, setup_flags);
}
static bool
if (wq->wq_fulfilled - snapshot > conc) {
/* we fulfilled more than NCPU requests since being dispatched */
WQ_TRACE_WQ(TRACE_wq_creator_yield, wq, 1,
- wq->wq_fulfilled, snapshot, 0);
+ wq->wq_fulfilled, snapshot, 0);
return true;
}
if (conc <= cnt) {
/* We fulfilled requests and have more than NCPU scheduled threads */
WQ_TRACE_WQ(TRACE_wq_creator_yield, wq, 2,
- wq->wq_fulfilled, snapshot, 0);
+ wq->wq_fulfilled, snapshot, 0);
return true;
}
static void
workq_unpark_continue(void *parameter __unused, wait_result_t wr __unused)
{
- struct uthread *uth = current_uthread();
+ thread_t th = current_thread();
+ struct uthread *uth = get_bsdthread_info(th);
proc_t p = current_proc();
struct workqueue *wq = proc_get_wqptr_fast(p);
}
if (__probable(uth->uu_workq_flags & UT_WORKQ_RUNNING)) {
- workq_select_threadreq_or_park_and_unlock(p, wq, uth);
+ workq_unpark_select_threadreq_or_park_and_unlock(p, wq, uth, WQ_SETUP_NONE);
__builtin_unreachable();
}
}
workq_unpark_for_death_and_unlock(p, wq, uth,
- WORKQ_UNPARK_FOR_DEATH_WAS_IDLE);
+ WORKQ_UNPARK_FOR_DEATH_WAS_IDLE, WQ_SETUP_NONE);
__builtin_unreachable();
}
* anyway.
*/
upcall_flags |= uth->uu_save.uus_workq_park_data.qos |
- WQ_FLAG_THREAD_PRIO_QOS;
+ WQ_FLAG_THREAD_PRIO_QOS;
}
if (uth->uu_workq_thport == MACH_PORT_NULL) {
* onto the stack, sets up the thread state and then returns to userspace.
*/
WQ_TRACE_WQ(TRACE_wq_runthread | DBG_FUNC_START,
- proc_get_wqptr_fast(p), 0, 0, 0, 0);
+ proc_get_wqptr_fast(p), 0, 0, 0, 0);
thread_sched_call(th, workq_sched_callback);
pthread_functions->workq_setup_thread(p, th, vmap, uth->uu_workq_stackaddr,
- uth->uu_workq_thport, 0, setup_flags, upcall_flags);
+ uth->uu_workq_thport, 0, setup_flags, upcall_flags);
__builtin_unreachable();
}
{
struct workqueue *wq = proc_get_wqptr(p);
int error = 0;
- int activecount;
+ int activecount;
if (wq == NULL) {
return EINVAL;
wq_thactive_t act = _wq_thactive(wq);
activecount = _wq_thactive_aggregate_downto_qos(wq, act,
- WORKQ_THREAD_QOS_MIN, NULL, NULL);
+ WORKQ_THREAD_QOS_MIN, NULL, NULL);
if (act & _wq_thactive_offset_for_qos(WORKQ_THREAD_QOS_MANAGER)) {
activecount++;
}
boolean_t
workqueue_get_pwq_exceeded(void *v, boolean_t *exceeded_total,
- boolean_t *exceeded_constrained)
+ boolean_t *exceeded_constrained)
{
proc_t p = v;
struct proc_workqueueinfo pwqinfo;
workqueue_get_pwq_state_kdp(void * v)
{
static_assert((WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT << 17) ==
- kTaskWqExceededConstrainedThreadLimit);
+ kTaskWqExceededConstrainedThreadLimit);
static_assert((WQ_EXCEEDED_TOTAL_THREAD_LIMIT << 17) ==
- kTaskWqExceededTotalThreadLimit);
+ kTaskWqExceededTotalThreadLimit);
static_assert((WQ_FLAGS_AVAILABLE << 17) == kTaskWqFlagsAvailable);
static_assert((WQ_FLAGS_AVAILABLE | WQ_EXCEEDED_TOTAL_THREAD_LIMIT |
- WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT) == 0x7);
+ WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT) == 0x7);
if (v == NULL) {
return 0;
void
workq_init(void)
{
- workq_lck_grp_attr = lck_grp_attr_alloc_init();
- workq_lck_attr = lck_attr_alloc_init();
- workq_lck_grp = lck_grp_alloc_init("workq", workq_lck_grp_attr);
-
- workq_zone_workqueue = zinit(sizeof(struct workqueue),
- 1024 * sizeof(struct workqueue), 8192, "workq.wq");
- workq_zone_threadreq = zinit(sizeof(struct workq_threadreq_s),
- 1024 * sizeof(struct workq_threadreq_s), 8192, "workq.threadreq");
-
clock_interval_to_absolutetime_interval(wq_stalled_window.usecs,
- NSEC_PER_USEC, &wq_stalled_window.abstime);
+ NSEC_PER_USEC, &wq_stalled_window.abstime);
clock_interval_to_absolutetime_interval(wq_reduce_pool_window.usecs,
- NSEC_PER_USEC, &wq_reduce_pool_window.abstime);
+ NSEC_PER_USEC, &wq_reduce_pool_window.abstime);
clock_interval_to_absolutetime_interval(wq_max_timer_interval.usecs,
- NSEC_PER_USEC, &wq_max_timer_interval.abstime);
+ NSEC_PER_USEC, &wq_max_timer_interval.abstime);
+
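+ /* Drain deferred workqueue deallocations on the thread-deallocate daemon */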
+ thread_deallocate_daemon_register_queue(&workq_deallocate_queue,
+ workq_deallocate_queue_invoke);
}