+ if (wait_option) {
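+ /*
+ * The routine's own address serves as a dummy wait event that is never
+ * signaled, so only the timeout (or an abort) can end this wait.
+ */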
+ assert_wait_timeout((event_t)assert_wait_timeout, interruptible, option_time, scale_factor);
+ } else {
+ disable_preemption();
+ bool should_yield = SCHED(thread_should_yield)(current_processor(), current_thread());
+ enable_preemption();
+
+ if (should_yield == false) {
+ /* Early-return if yielding to the scheduler will not be beneficial */
+ return KERN_SUCCESS;
+ }
+
+ if (depress_option) {
+ thread_depress_ms(option_time);
+ }
+ }
+
+ thread_yield_with_continuation(thread_switch_continue, (void *)(intptr_t)option);
+ __builtin_unreachable();
+}
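+
+/*
+ * For context, a sketch of the userspace call that reaches this tail of
+ * thread_switch() (illustrative only; the arguments shown are assumptions,
+ * not part of this change):
+ *
+ *   // Yield and run at depressed priority for up to 10ms:
+ *   thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DEPRESS, 10);
+ */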
+
+void
+thread_yield_with_continuation(
+ thread_continue_t continuation,
+ void *parameter)
+{
+ assert(continuation);
+ thread_block_reason(continuation, parameter, AST_YIELD);
+ __builtin_unreachable();
+}
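+
+/*
+ * A minimal usage sketch (illustrative; `my_yield_continue` is a
+ * hypothetical continuation, not part of this change):
+ *
+ *   static void
+ *   my_yield_continue(void *param, wait_result_t wr)
+ *   {
+ *       // Resumes here on a fresh kernel stack after the yield.
+ *       thread_syscall_return(KERN_SUCCESS);
+ *   }
+ *
+ *   thread_yield_with_continuation(my_yield_continue, NULL);
+ *   // not reached
+ */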
+
+/* This function is called after an assert_wait(), therefore it must not
+ * cause another wait until after the thread_run() or thread_block().
+ *
+ * When called with a NULL continuation, the thread ref is consumed
+ * (the thread_handoff_deallocate calling convention); otherwise it is up to
+ * the continuation to do the cleanup (the thread_handoff_parameter calling
+ * convention), in which case this function does not return.
+ */
+static wait_result_t
+thread_handoff_internal(thread_t thread, thread_continue_t continuation,
+ void *parameter)
+{
+ thread_t deallocate_thread = THREAD_NULL;
+ thread_t self = current_thread();
+
+ /*
+ * Try to hand off to the thread, if one was supplied.
+ */
+ if (thread != THREAD_NULL) {
+ spl_t s = splsched();
+
+ thread_t pulled_thread = thread_run_queue_remove_for_handoff(thread);
+
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SWITCH) | DBG_FUNC_NONE,
+ thread_tid(thread), thread->state,
+ pulled_thread ? TRUE : FALSE, 0, 0);
+
+ if (pulled_thread != THREAD_NULL) {
+ if (continuation == NULL) {
+ /* We can't be dropping the last ref here */
+ thread_deallocate_safe(thread);
+ }
+
+ wait_result_t result = thread_run(self, continuation, parameter, pulled_thread);
+
+ splx(s);
+ return result;
+ }
+
+ splx(s);
+
+ deallocate_thread = thread;
+ thread = THREAD_NULL;
+ }
+
+ wait_result_t result = thread_block_parameter(continuation, parameter);
+ if (deallocate_thread != THREAD_NULL) {
+ thread_deallocate(deallocate_thread);
+ }
+
+ return result;
+}
+
+void
+thread_handoff_parameter(thread_t thread, thread_continue_t continuation,
+ void *parameter)
+{
+ thread_handoff_internal(thread, continuation, parameter);
+ panic("NULL continuation passed to %s", __func__);
+ __builtin_unreachable();
+}
+
+wait_result_t
+thread_handoff_deallocate(thread_t thread)
+{
+ return thread_handoff_internal(thread, NULL, NULL);
+}
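+
+/*
+ * The two calling conventions side by side (a sketch; `target`, `my_continue`
+ * and `my_state` are hypothetical):
+ *
+ *   // Deallocate convention: the reference on `target` is consumed and
+ *   // the call returns normally once this thread runs again.
+ *   wait_result_t wr = thread_handoff_deallocate(target);
+ *
+ *   // Parameter convention: never returns; `my_continue` runs when this
+ *   // thread resumes and owns the cleanup of `target` and `my_state`.
+ *   thread_handoff_parameter(target, my_continue, my_state);
+ */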
+
+/*
+ * Thread depression
+ *
+ * This mechanism drops a thread to priority 0 in order for it to yield to
+ * all other runnable threads on the system. It can be canceled or timed out,
+ * whereupon the thread is restored to its previous priority.
+ *
+ * Note that TH_SFLAG_DEPRESS and TH_SFLAG_POLLDEPRESS are never set at the
+ * same time. DEPRESS always defers to POLLDEPRESS.
+ *
+ * DEPRESS only lasts across a single thread_block call, and never returns
+ * to userspace.
+ * POLLDEPRESS can be active anywhere up until thread termination.
+ */
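+
+/*
+ * Expressed as an invariant (a sketch; the flags are the sched_flags bits
+ * referenced above, and the assert placement is illustrative):
+ *
+ *   assert((thread->sched_flags & (TH_SFLAG_DEPRESS | TH_SFLAG_POLLDEPRESS))
+ *       != (TH_SFLAG_DEPRESS | TH_SFLAG_POLLDEPRESS));
+ */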
+
+/*
+ * Depress the current thread's priority to the lowest possible for the
+ * specified interval; an interval of zero means no timeout is scheduled.
+ *
+ * The caller must block with AST_YIELD afterwards for the depression to
+ * take effect.
+ */
+void
+thread_depress_abstime(uint64_t interval)
+{
+ thread_t self = current_thread();
+
+ spl_t s = splsched();