+
+/*
+ * Kernel-internal interface to yield for a specified period
+ *
+ * WARNING: Will still yield to priority 0 even if the thread is holding a contended lock!
+ */
+void
+thread_yield_internal(mach_msg_timeout_t ms)
+{
+    thread_t self = current_thread();
+
+    assert((self->sched_flags & TH_SFLAG_DEPRESSED_MASK) != TH_SFLAG_DEPRESSED_MASK);
+
+    processor_t myprocessor;
+
+    /*
+     * Preemption stays disabled across the check so the current
+     * processor cannot change out from under thread_should_yield().
+     */
+    disable_preemption();
+    myprocessor = current_processor();
+    if (!SCHED(thread_should_yield)(myprocessor, self)) {
+        enable_preemption();
+        return;
+    }
+    enable_preemption();
+
+    /* Depress priority for the requested interval and yield the CPU. */
+    thread_depress_ms(ms);
+
+    thread_block_reason(THREAD_CONTINUE_NULL, NULL, AST_YIELD);
+
+    /* Running again: cancel any remaining priority depression. */
+    thread_depress_abort(self);
+}
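+
+/*
+ * Illustrative sketch (not compiled): one way a caller might use
+ * thread_yield_internal() to back off while polling a shared flag.
+ * example_poll_ready() and the flag/iteration values are hypothetical,
+ * not part of this change.
+ */
+#if 0
+static kern_return_t
+example_poll_ready(volatile uint32_t *flag)
+{
+    /* Poll up to 100 times, depressing priority for 1ms between checks. */
+    for (int i = 0; i < 100; i++) {
+        if (*flag != 0) {
+            return KERN_SUCCESS;
+        }
+        thread_yield_internal(1); /* yield for ~1ms */
+    }
+    return KERN_OPERATION_TIMED_OUT;
+}
+#endif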
+
+/*
+ * Yield to a possible non-urgent preemption pending on the current processor.
+ *
+ * This is useful during long computations in the kernel that do not return
+ * to userspace, where pending ASTs would otherwise be handled.
+ *
+ * Unlike other yielding mechanisms, this does not drop the priority of the
+ * current thread.
+ */
+void
+thread_yield_to_preemption(void)
+{
+    /*
+     * ast_pending() should ideally be read with interrupts disabled, but
+     * this unlocked check is only advisory: csw_check() below runs at
+     * splsched() and makes the authoritative decision.
+     */
+    ast_t *pending_ast = ast_pending();
+    ast_t ast = AST_NONE;
+    processor_t p;
+
+    if (*pending_ast & AST_PREEMPT) {
+        thread_t self = current_thread();
+
+        spl_t s = splsched();
+
+        p = current_processor();
+        thread_lock(self);
+        ast = csw_check(self, p, AST_YIELD);
+        ast_on(ast);
+        thread_unlock(self);
+
+        if (ast != AST_NONE) {
+            (void)thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast);
+        }
+
+        splx(s);
+    }
+}
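+
+/*
+ * Illustrative sketch (not compiled): a long kernel-side loop that
+ * periodically offers the CPU to a pending preemption without dropping
+ * its own priority. example_process_entries() and its per-entry work
+ * are hypothetical, not part of this change.
+ */
+#if 0
+static void
+example_process_entries(uint64_t *entries, uint64_t count)
+{
+    for (uint64_t i = 0; i < count; i++) {
+        example_handle_entry(entries[i]); /* hypothetical per-entry work */
+
+        /* Every 1024 entries, yield only if a preemption is pending. */
+        if ((i & 0x3FF) == 0x3FF) {
+            thread_yield_to_preemption();
+        }
+    }
+}
+#endif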