+
+/* Returns a +1 thread reference, or THREAD_NULL on failure */
+thread_t
+port_name_to_thread_for_ulock(mach_port_name_t thread_name)
+{
+ thread_t thread = THREAD_NULL;
+ thread_t self = current_thread();
+
+ /*
+ * Translate the port name if supplied.
+ */
+ if (thread_name != MACH_PORT_NULL) {
+ ipc_port_t port;
+
+ if (ipc_port_translate_send(self->task->itk_space,
+ thread_name, &port) == KERN_SUCCESS) {
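+ /*
+ * The port comes back locked and without a new reference for
+ * us; take our own reference and unlock it before converting
+ * it to a thread.
+ */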
+ ip_reference(port);
+ ip_unlock(port);
+
+ thread = convert_port_to_thread(port);
+ ip_release(port);
+
+ if (thread == THREAD_NULL) {
+ return thread;
+ }
+
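+ /*
+ * Reject the calling thread itself and any thread that
+ * belongs to a different task.
+ */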
+ if ((thread == self) || (thread->task != self->task)) {
+ thread_deallocate(thread);
+ thread = THREAD_NULL;
+ }
+ }
+ }
+
+ return thread;
+}
+
+/*
+ * This function is called after an assert_wait(), therefore it must not
+ * cause another wait until after the thread_run() or thread_block().
+ *
+ * Consumes a ref on thread.
+ */
+wait_result_t
+thread_handoff(thread_t thread)
+{
+ thread_t deallocate_thread = THREAD_NULL;
+ thread_t self = current_thread();
+
+ /*
+ * Try to hand off to the thread, if one was supplied.
+ */
+ if (thread != THREAD_NULL) {
+ spl_t s = splsched();
+
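+ /*
+ * Try to pull the target thread off its run queue; THREAD_NULL
+ * means it isn't in a state where it can be handed off to.
+ */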
+ thread_t pulled_thread = thread_run_queue_remove_for_handoff(thread);
+
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SWITCH) | DBG_FUNC_NONE,
+ thread_tid(thread), thread->state,
+ pulled_thread ? TRUE : FALSE, 0, 0);
+
+ if (pulled_thread != THREAD_NULL) {
+ /* We can't be dropping the last ref here */
+ thread_deallocate_safe(thread);
+
+ wait_result_t result = thread_run(self, THREAD_CONTINUE_NULL, NULL, pulled_thread);
+
+ splx(s);
+ return result;
+ }
+
+ splx(s);
+
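+ /*
+ * The target couldn't be pulled; remember it and drop our
+ * reference only after the block below, since nothing that
+ * could wait is allowed between assert_wait() and thread_block().
+ */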
+ deallocate_thread = thread;
+ thread = THREAD_NULL;
+ }
+
+ wait_result_t result = thread_block(THREAD_CONTINUE_NULL);
+ if (deallocate_thread != THREAD_NULL) {
+ thread_deallocate(deallocate_thread);
+ }
+
+ return result;
+}
+
+/*
+ * Depress the current thread's priority to the lowest possible for the
+ * specified interval.  An interval of zero schedules no timeout, so the
+ * depression lasts until it is explicitly aborted.
+ */
+void
+thread_depress_abstime(
+ uint64_t interval)
+{
+ thread_t self = current_thread();
+ uint64_t deadline;
+ spl_t s;
+
+ s = splsched();
+ thread_lock(self);
+ if (!(self->sched_flags & TH_SFLAG_DEPRESSED_MASK)) {
+ processor_t myprocessor = self->last_processor;
+
+ self->sched_pri = DEPRESSPRI;
+
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
+ (uintptr_t)thread_tid(self),
+ self->base_pri,
+ self->sched_pri,
+ 0, /* eventually, 'reason' */
+ 0);
+
+ myprocessor->current_pri = self->sched_pri;
+ myprocessor->current_perfctl_class = thread_get_perfcontrol_class(self);
+ self->sched_flags |= TH_SFLAG_DEPRESS;
+
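+ /*
+ * A non-zero interval arms the depress timer; zero means the
+ * depression stays in effect until it is explicitly aborted.
+ */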
+ if (interval != 0) {
+ clock_absolutetime_interval_to_deadline(interval, &deadline);
+ if (!timer_call_enter(&self->depress_timer, deadline, TIMER_CALL_USER_CRITICAL))
+ self->depress_timer_active++;
+ }
+ }
+ thread_unlock(self);
+ splx(s);
+}
+
+void
+thread_depress_ms(
+ mach_msg_timeout_t interval)
+{
+ uint64_t abstime;
+
+ clock_interval_to_absolutetime_interval(
+ interval, NSEC_PER_MSEC, &abstime);
+ thread_depress_abstime(abstime);
+}
+
+/*
+ * Priority depression expiration: callout for the depress timer.
+ */
+void
+thread_depress_expire(
+ void *p0,
+ __unused void *p1)
+{
+ thread_t thread = p0;
+ spl_t s;
+
+ s = splsched();
+ thread_lock(thread);
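+ /*
+ * Only clear the depression if this was the last outstanding
+ * arming of the depress timer.
+ */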
+ if (--thread->depress_timer_active == 0) {
+ thread->sched_flags &= ~TH_SFLAG_DEPRESSED_MASK;
+ thread_recompute_sched_pri(thread, FALSE);
+ }
+ thread_unlock(thread);
+ splx(s);
+}
+
+/*
+ * Prematurely abort priority depression if there is one.
+ */
+kern_return_t
+thread_depress_abort_internal(
+ thread_t thread)
+{
+ kern_return_t result = KERN_NOT_DEPRESSED;
+ spl_t s;
+
+ s = splsched();
+ thread_lock(thread);
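+ /*
+ * Poll-yield depressions expire on their own; only abort an
+ * explicitly requested depression here.
+ */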
+ if (!(thread->sched_flags & TH_SFLAG_POLLDEPRESS)) {
+ if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
+ thread->sched_flags &= ~TH_SFLAG_DEPRESSED_MASK;
+ thread_recompute_sched_pri(thread, FALSE);
+ result = KERN_SUCCESS;
+ }
+
+ if (timer_call_cancel(&thread->depress_timer))
+ thread->depress_timer_active--;
+ }
+ thread_unlock(thread);
+ splx(s);
+
+ return (result);
+}
+
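+/*
+ * Poll-yield: if a fixed-priority thread has accumulated too much
+ * uninterrupted computation, depress its priority for a while so a
+ * tight polling loop cannot starve other threads.
+ */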
+void
+thread_poll_yield(
+ thread_t self)
+{
+ spl_t s;
+
+ assert(self == current_thread());
+
+ s = splsched();
+ if (self->sched_mode == TH_MODE_FIXED) {
+ uint64_t total_computation, abstime;
+
+ abstime = mach_absolute_time();
+ total_computation = abstime - self->computation_epoch;
+ total_computation += self->computation_metered;
+ if (total_computation >= max_poll_computation) {
+ processor_t myprocessor = current_processor();
+ ast_t preempt;
+
+ thread_lock(self);
+ if (!(self->sched_flags & TH_SFLAG_DEPRESSED_MASK)) {
+ self->sched_pri = DEPRESSPRI;
+
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
+ (uintptr_t)thread_tid(self),
+ self->base_pri,
+ self->sched_pri,
+ 0, /* eventually, 'reason' */
+ 0);
+
+ myprocessor->current_pri = self->sched_pri;
+ myprocessor->current_perfctl_class = thread_get_perfcontrol_class(self);
+ }
+ self->computation_epoch = abstime;
+ self->computation_metered = 0;
+ self->sched_flags |= TH_SFLAG_POLLDEPRESS;
+
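+ /* Depress for a period proportional to the computation consumed. */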
+ abstime += (total_computation >> sched_poll_yield_shift);
+ if (!timer_call_enter(&self->depress_timer, abstime, TIMER_CALL_USER_CRITICAL))
+ self->depress_timer_active++;
+
+ if ((preempt = csw_check(myprocessor, AST_NONE)) != AST_NONE)
+ ast_on(preempt);
+
+ thread_unlock(self);
+ }
+ }
+ splx(s);
+}
+
+
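+/*
+ * Yield for up to 'ms' milliseconds: depress priority, block, then
+ * abort the depression once the thread runs again.  Returns
+ * immediately if the scheduler reports there is nothing to yield to.
+ */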
+void
+thread_yield_internal(
+ mach_msg_timeout_t ms)
+{
+ processor_t myprocessor;
+
+ disable_preemption();
+ myprocessor = current_processor();
+ if (!SCHED(thread_should_yield)(myprocessor, current_thread())) {
+ enable_preemption();
+
+ return;
+ }
+ enable_preemption();
+
+ thread_depress_ms(ms);
+
+ thread_block_reason(THREAD_CONTINUE_NULL, NULL, AST_YIELD);
+
+ thread_depress_abort_internal(current_thread());
+}
+
+/*
+ * This yields to a possible non-urgent preemption pending on the current processor.
+ *
+ * This is useful when doing a long computation in the kernel without returning to userspace.
+ *
+ * Unlike the other yielding mechanisms, this does not drop the priority of the current thread.
+ */
+void
+thread_yield_to_preemption(void)
+{
+ /*
+ * ast_pending() should ideally be called with interrupts disabled, but
+ * the check here is fine because csw_check() will do the right thing.
+ */
+ ast_t *pending_ast = ast_pending();
+ ast_t ast = AST_NONE;
+ processor_t p;
+
+ if (*pending_ast & AST_PREEMPT) {
+ thread_t self = current_thread();
+
+ spl_t s = splsched();
+
+ p = current_processor();
+ thread_lock(self);
+ ast = csw_check(p, AST_YIELD);
+ ast_on(ast);
+ thread_unlock(self);
+
+ if (ast != AST_NONE) {
+ (void)thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast);
+ }
+
+ splx(s);
+ }
+}
+