+/**
+ * Same as workq_unpark_select_threadreq_or_park_and_unlock,
+ * but do not allow early binds.
+ *
+ * Called with the base pri frozen; will unfreeze it.
+ */
+__attribute__((noreturn, noinline))
+static void
+workq_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
+ struct uthread *uth, uint32_t setup_flags)
+{
+ workq_threadreq_t req = NULL;
+ bool is_creator = (wq->wq_creator == uth);
+ bool schedule_creator = false;
+
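+	/*
+	 * Park early if the workqueue is exiting, has no pending requests, or
+	 * has no request this thread can service.
+	 */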
+ if (__improbable(_wq_exiting(wq))) {
+ WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 0, 0, 0, 0);
+ goto park;
+ }
+
+ if (wq->wq_reqcount == 0) {
+ WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 1, 0, 0, 0);
+ goto park;
+ }
+
+ req = workq_threadreq_select(wq, uth);
+ if (__improbable(req == NULL)) {
+ WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 2, 0, 0, 0);
+ goto park;
+ }
+
+ uint8_t tr_flags = req->tr_flags;
+ struct turnstile *req_ts = kqueue_threadreq_get_turnstile(req);
+
+ /*
+	 * Attempt to set ourselves up as the new thing to run, moving all
+	 * priority pushes to ourselves.
+ *
+ * If the current thread is the creator, then the fact that we are presently
+ * running is proof that we'll do something useful, so keep going.
+ *
+	 * For other cases, peek at the AST to know whether the scheduler wants
+	 * to preempt us; if it does, park instead and move the thread request
+	 * turnstile back to the workqueue.
+ */
+ if (req_ts) {
+ workq_perform_turnstile_operation_locked(wq, ^{
+ turnstile_update_inheritor(req_ts, uth->uu_thread,
+ TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD);
+ turnstile_update_inheritor_complete(req_ts,
+ TURNSTILE_INTERLOCK_HELD);
+ });
+ }
+
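+	/*
+	 * If we were the creator, relinquish that role and start counting
+	 * ourselves as a scheduled thread in the request's QoS bucket.
+	 * Otherwise, if the request's QoS differs from our current bucket,
+	 * move our active-thread accounting over to the new bucket.
+	 */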
+ if (is_creator) {
+ WQ_TRACE_WQ(TRACE_wq_creator_select, wq, 4, 0,
+ uth->uu_save.uus_workq_park_data.yields, 0);
+ wq->wq_creator = NULL;
+ _wq_thactive_inc(wq, req->tr_qos);
+ wq->wq_thscheduled_count[_wq_bucket(req->tr_qos)]++;
+ } else if (uth->uu_workq_pri.qos_bucket != req->tr_qos) {
+ _wq_thactive_move(wq, uth->uu_workq_pri.qos_bucket, req->tr_qos);
+ }
+
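+	/*
+	 * Adopt the request's scheduling properties, then thaw the base pri
+	 * that our caller froze.
+	 */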
+ workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
+
+ thread_unfreeze_base_pri(uth->uu_thread);
+#if 0 // <rdar://problem/55259863> to turn this back on
+ if (__improbable(thread_unfreeze_base_pri(uth->uu_thread) && !is_creator)) {
+ if (req_ts) {
+ workq_perform_turnstile_operation_locked(wq, ^{
+ turnstile_update_inheritor(req_ts, wq->wq_turnstile,
+ TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_TURNSTILE);
+ turnstile_update_inheritor_complete(req_ts,
+ TURNSTILE_INTERLOCK_HELD);
+ });
+ }
+ WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 3, 0, 0, 0);
+ goto park_thawed;
+ }
+#endif
+
+ /*
+ * We passed all checks, dequeue the request, bind to it, and set it up
+ * to return to user.
+ */
+ WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_START, wq,
+ workq_trace_req_id(req), 0, 0, 0);
+ wq->wq_fulfilled++;
+ schedule_creator = workq_threadreq_dequeue(wq, req);
+
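+	/*
+	 * If the request is bound to a kqueue, or still has outstanding uses
+	 * (tr_count > 0), it must not be freed below, so drop our pointer to it.
+	 */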
+ if (tr_flags & (WORKQ_TR_FLAG_KEVENT | WORKQ_TR_FLAG_WORKLOOP)) {
+ kqueue_threadreq_bind_prepost(p, req, uth);
+ req = NULL;
+ } else if (req->tr_count > 0) {
+ req = NULL;
+ }
+
+ workq_thread_reset_cpupercent(req, uth);
+ if (uth->uu_workq_flags & UT_WORKQ_NEW) {
+ uth->uu_workq_flags ^= UT_WORKQ_NEW;
+ setup_flags |= WQ_SETUP_FIRST_USE;
+ }
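+	/*
+	 * Keep the constrained-thread accounting in sync when this thread moves
+	 * between the overcommit and constrained pools.
+	 */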
+ if (tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) {
+ if ((uth->uu_workq_flags & UT_WORKQ_OVERCOMMIT) == 0) {
+ uth->uu_workq_flags |= UT_WORKQ_OVERCOMMIT;
+ wq->wq_constrained_threads_scheduled--;
+ }
+ } else {
+ if ((uth->uu_workq_flags & UT_WORKQ_OVERCOMMIT) != 0) {
+ uth->uu_workq_flags &= ~UT_WORKQ_OVERCOMMIT;
+ wq->wq_constrained_threads_scheduled++;
+ }
+ }
+
+ if (is_creator || schedule_creator) {
+ /* This can drop the workqueue lock, and take it again */
+ workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
+ }
+
+ workq_unlock(wq);
+
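+	/*
+	 * A request that was neither handed off to the kqueue nor kept alive by
+	 * an outstanding tr_count was fully consumed above: free it.
+	 */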
+ if (req) {
+ zfree(workq_zone_threadreq, req);
+ }
+
+ /*
+ * Run Thread, Run!
+ */
+ uint32_t upcall_flags = WQ_FLAG_THREAD_NEWSPI;
+ if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER) {
+ upcall_flags |= WQ_FLAG_THREAD_EVENT_MANAGER;
+ } else if (tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) {
+ upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
+ }
+ if (tr_flags & WORKQ_TR_FLAG_KEVENT) {
+ upcall_flags |= WQ_FLAG_THREAD_KEVENT;
+ }
+ if (tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
+ upcall_flags |= WQ_FLAG_THREAD_WORKLOOP | WQ_FLAG_THREAD_KEVENT;
+ }
+ uth->uu_save.uus_workq_park_data.upcall_flags = upcall_flags;
+
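+	/*
+	 * Commit the kqueue binding that was preposted above, now that the
+	 * workqueue lock is dropped.
+	 */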
+ if (tr_flags & (WORKQ_TR_FLAG_KEVENT | WORKQ_TR_FLAG_WORKLOOP)) {
+ kqueue_threadreq_bind_commit(p, uth->uu_thread);
+ }
+ workq_setup_and_run(p, uth, setup_flags);
+ __builtin_unreachable();
+
+park:
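+	/* Our caller froze the base pri; thaw it before parking. */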
+ thread_unfreeze_base_pri(uth->uu_thread);
+#if 0 // <rdar://problem/55259863>
+park_thawed:
+#endif
+ workq_park_and_unlock(p, wq, uth, setup_flags);
+}
+