/*
- * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &var, 0, \
workq_sysctl_handle_usecs, "I", "")
-static lck_grp_t *workq_lck_grp;
-static lck_attr_t *workq_lck_attr;
-static lck_grp_attr_t *workq_lck_grp_attr;
+static LCK_GRP_DECLARE(workq_lck_grp, "workq");
os_refgrp_decl(static, workq_refgrp, "workq", NULL);
+static ZONE_DECLARE(workq_zone_workqueue, "workq.wq",
+ sizeof(struct workqueue), ZC_NONE);
+static ZONE_DECLARE(workq_zone_threadreq, "workq.threadreq",
+ sizeof(struct workq_threadreq_s), ZC_CACHING);
+
static struct mpsc_daemon_queue workq_deallocate_queue;
-static zone_t workq_zone_workqueue;
-static zone_t workq_zone_threadreq;
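/*
 * Illustrative sketch of the pattern the hunk above adopts: the workqueue
 * lock group and zones move from boot-time allocation to the compile-time
 * LCK_GRP_DECLARE/ZONE_DECLARE macros.  "struct example", example_lck_grp
 * and example_zone are hypothetical names; the macros and the
 * lck_spin_init/lck_spin_destroy/zalloc/zfree calls are the ones this file
 * already uses.
 */
struct example {
	lck_spin_t e_lock;
};

static LCK_GRP_DECLARE(example_lck_grp, "example");
static ZONE_DECLARE(example_zone, "example.zone",
    sizeof(struct example), ZC_NONE);

static struct example *
example_alloc(void)
{
	/* the zone and lock group exist before any init routine runs */
	struct example *e = zalloc(example_zone);
	lck_spin_init(&e->e_lock, &example_lck_grp, LCK_ATTR_NULL);
	return e;
}

static void
example_free(struct example *e)
{
	lck_spin_destroy(&e->e_lock, &example_lck_grp);
	zfree(example_zone, e);
}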
WORKQ_SYSCTL_USECS(wq_stalled_window, WQ_STALLED_WINDOW_USECS);
WORKQ_SYSCTL_USECS(wq_reduce_pool_window, WQ_REDUCE_POOL_WINDOW_USECS);
}
#define WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(tha) \
- ((tha) >> WQ_THACTIVE_QOS_SHIFT)
+ ((thread_qos_t)((tha) >> WQ_THACTIVE_QOS_SHIFT))
static inline thread_qos_t
_wq_thactive_best_constrained_req_qos(struct workqueue *wq)
static inline void
workq_lock_spin(struct workqueue *wq)
{
- lck_spin_lock_grp(&wq->wq_lock, workq_lck_grp);
+ lck_spin_lock_grp(&wq->wq_lock, &workq_lck_grp);
}
static inline void
static inline bool
workq_lock_try(struct workqueue *wq)
{
- return lck_spin_try_lock_grp(&wq->wq_lock, workq_lck_grp);
+ return lck_spin_try_lock_grp(&wq->wq_lock, &workq_lck_grp);
}
static inline void
wq->wq_creations++;
wq->wq_thidlecount++;
- uth->uu_workq_stackaddr = th_stackaddr;
+ uth->uu_workq_stackaddr = (user_addr_t)th_stackaddr;
TAILQ_INSERT_TAIL(&wq->wq_thnewlist, uth, uu_workq_entry);
WQ_TRACE_WQ(TRACE_wq_thread_create | DBG_FUNC_NONE, wq, 0, 0, 0, 0);
return thread_workq_pri_for_qos(qos);
}
-static inline struct priority_queue *
+static inline struct priority_queue_sched_max *
workq_priority_queue_for_req(struct workqueue *wq, workq_threadreq_t req)
{
if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
wq->wq_event_manager_threadreq = req;
return true;
}
- if (priority_queue_insert(workq_priority_queue_for_req(wq, req),
- &req->tr_entry, workq_priority_for_req(req),
- PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) {
+
+ struct priority_queue_sched_max *q = workq_priority_queue_for_req(wq, req);
+ priority_queue_entry_set_sched_pri(q, &req->tr_entry,
+ workq_priority_for_req(req), false);
+
+ if (priority_queue_insert(q, &req->tr_entry)) {
if ((req->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) == 0) {
_wq_thactive_refresh_best_constrained_req_qos(wq);
}
return true;
}
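/*
 * Sketch of the priority_queue API change tracked above: the sched_pri
 * max-heap no longer takes a key and comparator at insert time.  The
 * priority is stamped on the entry first, then the entry is inserted, and
 * the insert reports whether the entry became the new maximum.  The helper
 * name below is hypothetical; the priority_queue_* calls are the ones used
 * in this hunk.
 */
static bool
example_enqueue_req(struct workqueue *wq, workq_threadreq_t req,
    uint8_t sched_pri)
{
	struct priority_queue_sched_max *q =
	    workq_priority_queue_for_req(wq, req);

	/* false: this entry carries no preemption modifier */
	priority_queue_entry_set_sched_pri(q, &req->tr_entry, sched_pri, false);

	/* true when req is now the root, i.e. the highest-priority request */
	return priority_queue_insert(q, &req->tr_entry);
}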
if (priority_queue_remove(workq_priority_queue_for_req(wq, req),
- &req->tr_entry, PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) {
+ &req->tr_entry)) {
if ((req->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) == 0) {
_wq_thactive_refresh_best_constrained_req_qos(wq);
}
} else if (now - wq->wq_thread_call_last_run <= wq->wq_timer_interval) {
wq->wq_timer_interval *= 2;
if (wq->wq_timer_interval > wq_max_timer_interval.abstime) {
- wq->wq_timer_interval = wq_max_timer_interval.abstime;
+ wq->wq_timer_interval = (uint32_t)wq_max_timer_interval.abstime;
}
} else if (now - wq->wq_thread_call_last_run > 2 * wq->wq_timer_interval) {
wq->wq_timer_interval /= 2;
if (wq->wq_timer_interval < wq_stalled_window.abstime) {
- wq->wq_timer_interval = wq_stalled_window.abstime;
+ wq->wq_timer_interval = (uint32_t)wq_stalled_window.abstime;
}
}
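/*
 * Restatement, as a hypothetical helper, of the adaptive backoff the casts
 * above clamp: the delayed thread-call interval doubles while the call keeps
 * firing within the current window and halves once it has been quiet for
 * more than two windows, bounded by the wq_stalled_window and
 * wq_max_timer_interval sysctls.
 */
static uint32_t
example_next_timer_interval(uint32_t cur, uint64_t now, uint64_t last_run,
    uint64_t min_abstime, uint64_t max_abstime)
{
	if (now - last_run <= cur) {
		/* fired back to back: slow down, up to the max window */
		cur *= 2;
		if (cur > max_abstime) {
			cur = (uint32_t)max_abstime;
		}
	} else if (now - last_run > 2 * (uint64_t)cur) {
		/* quiet for a while: speed back up, down to the stall window */
		cur /= 2;
		if (cur < min_abstime) {
			cur = (uint32_t)min_abstime;
		}
	}
	return cur;
}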
turnstile_cleanup();
turnstile_deallocate(ts);
- lck_spin_destroy(&wq->wq_lock, workq_lck_grp);
+ lck_spin_destroy(&wq->wq_lock, &workq_lck_grp);
zfree(workq_zone_workqueue, wq);
}
}
if (wq_init_constrained_limit) {
- uint32_t limit, num_cpus = ml_get_max_cpus();
+ uint32_t limit, num_cpus = ml_wait_max_cpus();
/*
* set up the limit for the constrained pool
thread_qos_t mgr_priority_hint = task_get_default_manager_qos(current_task());
pthread_priority_t pp = _pthread_priority_make_from_thread_qos(mgr_priority_hint, 0, 0);
wq->wq_event_manager_priority = (uint32_t)pp;
- wq->wq_timer_interval = wq_stalled_window.abstime;
+ wq->wq_timer_interval = (uint32_t)wq_stalled_window.abstime;
wq->wq_proc = p;
turnstile_prepare((uintptr_t)wq, &wq->wq_turnstile, turnstile_alloc(),
TURNSTILE_WORKQS);
TAILQ_INIT(&wq->wq_thrunlist);
TAILQ_INIT(&wq->wq_thnewlist);
TAILQ_INIT(&wq->wq_thidlelist);
- priority_queue_init(&wq->wq_overcommit_queue,
- PRIORITY_QUEUE_BUILTIN_MAX_HEAP);
- priority_queue_init(&wq->wq_constrained_queue,
- PRIORITY_QUEUE_BUILTIN_MAX_HEAP);
- priority_queue_init(&wq->wq_special_queue,
- PRIORITY_QUEUE_BUILTIN_MAX_HEAP);
+ priority_queue_init(&wq->wq_overcommit_queue);
+ priority_queue_init(&wq->wq_constrained_queue);
+ priority_queue_init(&wq->wq_special_queue);
wq->wq_delayed_call = thread_call_allocate_with_options(
workq_add_new_threads_call, p, THREAD_CALL_PRIORITY_KERNEL,
workq_kill_old_threads_call, wq,
THREAD_CALL_PRIORITY_USER, THREAD_CALL_OPTIONS_ONCE);
- lck_spin_init(&wq->wq_lock, workq_lck_grp, workq_lck_attr);
+ lck_spin_init(&wq->wq_lock, &workq_lck_grp, LCK_ATTR_NULL);
WQ_TRACE_WQ(TRACE_wq_create | DBG_FUNC_NONE, wq,
VM_KERNEL_ADDRHIDE(wq), 0, 0, 0);
* It is hence safe to do the tear down without holding any lock.
*/
priority_queue_destroy(&wq->wq_overcommit_queue,
- struct workq_threadreq_s, tr_entry, ^(void *e){
+ struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){
workq_threadreq_destroy(p, e);
});
priority_queue_destroy(&wq->wq_constrained_queue,
- struct workq_threadreq_s, tr_entry, ^(void *e){
+ struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){
workq_threadreq_destroy(p, e);
});
priority_queue_destroy(&wq->wq_special_queue,
- struct workq_threadreq_s, tr_entry, ^(void *e){
+ struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){
workq_threadreq_destroy(p, e);
});
}
old_pri = new_pri = uth->uu_workq_pri;
- new_pri.qos_req = new_policy.qos_tier;
+ new_pri.qos_req = (thread_qos_t)new_policy.qos_tier;
workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, force_run);
workq_unlock(wq);
}
goto exiting;
}
- req->tr_count = reqcount;
+ req->tr_count = (uint16_t)reqcount;
if (workq_threadreq_enqueue(wq, req)) {
/* This can drop the workqueue lock, and take it again */
workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
WQ_TRACE_WQ(TRACE_wq_thread_request_modify | DBG_FUNC_NONE, wq,
workq_trace_req_id(req), qos, 0, 0);
- struct priority_queue *pq = workq_priority_queue_for_req(wq, req);
+ struct priority_queue_sched_max *pq = workq_priority_queue_for_req(wq, req);
workq_threadreq_t req_max;
/*
* If we dequeue the root item of the constrained priority queue,
* maintain the best constrained request qos invariant.
*/
- if (priority_queue_remove(pq, &req->tr_entry,
- PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) {
+ if (priority_queue_remove(pq, &req->tr_entry)) {
if ((req->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) == 0) {
_wq_thactive_refresh_best_constrained_req_qos(wq);
}
req_max = priority_queue_max(pq, struct workq_threadreq_s, tr_entry);
if (req_max && req_max->tr_qos >= qos) {
- priority_queue_insert(pq, &req->tr_entry, workq_priority_for_req(req),
- PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE);
+ priority_queue_entry_set_sched_pri(pq, &req->tr_entry,
+ workq_priority_for_req(req), false);
+ priority_queue_insert(pq, &req->tr_entry);
workq_unlock(wq);
return;
}
setup_flags &= ~WQ_SETUP_CLEAR_VOUCHER;
}
+ WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_END, wq, 0, 0, 0, 0);
+
if (uth->uu_workq_flags & UT_WORKQ_RUNNING) {
/*
* While we'd dropped the lock to unset our voucher, someone came
* around and made us runnable. But because we weren't waiting on the
* event their thread_wakeup() was ineffectual. To correct for that,
* we just run the continuation ourselves.
*/
- WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_END, wq, 0, 0, 0, 0);
workq_unpark_select_threadreq_or_park_and_unlock(p, wq, uth, setup_flags);
__builtin_unreachable();
}
thread_set_pending_block_hint(uth->uu_thread, kThreadWaitParkedWorkQueue);
assert_wait(workq_parked_wait_event(uth), THREAD_INTERRUPTIBLE);
workq_unlock(wq);
- WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_END, wq, 0, 0, 0, 0);
thread_block(workq_unpark_continue);
__builtin_unreachable();
}
static workq_threadreq_t
workq_threadreq_select_for_creator(struct workqueue *wq)
{
- workq_threadreq_t req_qos, req_pri, req_tmp;
+ workq_threadreq_t req_qos, req_pri, req_tmp, req_mgr;
thread_qos_t qos = THREAD_QOS_UNSPECIFIED;
uint8_t pri = 0;
- req_tmp = wq->wq_event_manager_threadreq;
- if (req_tmp && workq_may_start_event_mgr_thread(wq, NULL)) {
- return req_tmp;
- }
-
/*
* Compute the best priority request, and ignore the turnstile for now
*/
req_pri = priority_queue_max(&wq->wq_special_queue,
struct workq_threadreq_s, tr_entry);
if (req_pri) {
- pri = priority_queue_entry_key(&wq->wq_special_queue, &req_pri->tr_entry);
+ pri = (uint8_t)priority_queue_entry_sched_pri(&wq->wq_special_queue,
+ &req_pri->tr_entry);
+ }
+
+ /*
+ * Handle the manager thread request. The special queue might yield
+ * a higher priority, but the manager always beats the QoS world.
+ */
+
+ req_mgr = wq->wq_event_manager_threadreq;
+ if (req_mgr && workq_may_start_event_mgr_thread(wq, NULL)) {
+ uint32_t mgr_pri = wq->wq_event_manager_priority;
+
+ if (mgr_pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) {
+ mgr_pri &= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
+ } else {
+ mgr_pri = thread_workq_pri_for_qos(
+ _pthread_priority_thread_qos(mgr_pri));
+ }
+
+ return mgr_pri >= pri ? req_mgr : req_pri;
}
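/*
 * Sketch of the manager-priority decode both selectors now share: the event
 * manager's stored pthread priority either carries a raw scheduler priority
 * (when _PTHREAD_PRIORITY_SCHED_PRI_FLAG is set) or a QoS class that is
 * mapped through thread_workq_pri_for_qos().  The helper name is
 * hypothetical; the flags and calls are the ones used in the hunk above.
 */
static uint32_t
example_event_manager_sched_pri(struct workqueue *wq)
{
	uint32_t mgr_pri = wq->wq_event_manager_priority;

	if (mgr_pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) {
		return mgr_pri & _PTHREAD_PRIORITY_SCHED_PRI_MASK;
	}
	return thread_workq_pri_for_qos(_pthread_priority_thread_qos(mgr_pri));
}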
/*
static workq_threadreq_t
workq_threadreq_select(struct workqueue *wq, struct uthread *uth)
{
- workq_threadreq_t req_qos, req_pri, req_tmp;
+ workq_threadreq_t req_qos, req_pri, req_tmp, req_mgr;
uintptr_t proprietor;
thread_qos_t qos = THREAD_QOS_UNSPECIFIED;
uint8_t pri = 0;
uth = NULL;
}
- req_tmp = wq->wq_event_manager_threadreq;
- if (req_tmp && workq_may_start_event_mgr_thread(wq, uth)) {
- return req_tmp;
- }
-
/*
* Compute the best priority request (special or turnstile)
*/
- pri = turnstile_workq_proprietor_of_max_turnstile(wq->wq_turnstile,
+ pri = (uint8_t)turnstile_workq_proprietor_of_max_turnstile(wq->wq_turnstile,
&proprietor);
if (pri) {
struct kqworkloop *kqwl = (struct kqworkloop *)proprietor;
req_tmp = priority_queue_max(&wq->wq_special_queue,
struct workq_threadreq_s, tr_entry);
- if (req_tmp && pri < priority_queue_entry_key(&wq->wq_special_queue,
+ if (req_tmp && pri < priority_queue_entry_sched_pri(&wq->wq_special_queue,
&req_tmp->tr_entry)) {
req_pri = req_tmp;
- pri = priority_queue_entry_key(&wq->wq_special_queue, &req_tmp->tr_entry);
+ pri = (uint8_t)priority_queue_entry_sched_pri(&wq->wq_special_queue,
+ &req_tmp->tr_entry);
+ }
+
+ /*
+ * Handle the manager thread request. The special queue might yield
+ * a higher priority, but the manager always beats the QoS world.
+ */
+
+ req_mgr = wq->wq_event_manager_threadreq;
+ if (req_mgr && workq_may_start_event_mgr_thread(wq, uth)) {
+ uint32_t mgr_pri = wq->wq_event_manager_priority;
+
+ if (mgr_pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) {
+ mgr_pri &= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
+ } else {
+ mgr_pri = thread_workq_pri_for_qos(
+ _pthread_priority_thread_qos(mgr_pri));
+ }
+
+ return mgr_pri >= pri ? req_mgr : req_pri;
}
/*
void
workq_init(void)
{
- workq_lck_grp_attr = lck_grp_attr_alloc_init();
- workq_lck_attr = lck_attr_alloc_init();
- workq_lck_grp = lck_grp_alloc_init("workq", workq_lck_grp_attr);
-
- workq_zone_workqueue = zinit(sizeof(struct workqueue),
- 1024 * sizeof(struct workqueue), 8192, "workq.wq");
- workq_zone_threadreq = zinit(sizeof(struct workq_threadreq_s),
- 1024 * sizeof(struct workq_threadreq_s), 8192, "workq.threadreq");
-
clock_interval_to_absolutetime_interval(wq_stalled_window.usecs,
NSEC_PER_USEC, &wq_stalled_window.abstime);
clock_interval_to_absolutetime_interval(wq_reduce_pool_window.usecs,