+ thread = convert_port_to_thread(port);
+ ip_release(port);
+
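+	/*
+	 * Handing off to oneself is meaningless; drop the ref so we fall
+	 * through to the plain yield/depress path below.
+	 */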
+ if (thread == self) {
+ thread_deallocate(thread);
+ thread = THREAD_NULL;
+ }
+ }
+ }
+
+ if (option == SWITCH_OPTION_OSLOCK_DEPRESS || option == SWITCH_OPTION_OSLOCK_WAIT) {
+ if (thread != THREAD_NULL) {
+
+ if (thread->task != self->task) {
+ /*
+ * OSLock boosting only applies to other threads
+ * in your same task (even if you have a port for
+ * a thread in another task)
+ */
+
+ thread_deallocate(thread);
+ thread = THREAD_NULL;
+ } else {
+ /*
+ * Attempt to kick the lock owner up to our same IO throttling tier.
+ * If the thread is currently blocked in throttle_lowpri_io(),
+ * it will immediately break out.
+ *
+ * TODO: SFI break out?
+ */
+ int new_policy = proc_get_effective_thread_policy(self, TASK_POLICY_IO);
+
+ set_thread_iotier_override(thread, new_policy);
+ }
+ }
+ }
+
+	/*
+	 * Try to hand off to the supplied thread, if any.
+	 */
+ if (thread != THREAD_NULL) {
+ spl_t s = splsched();
+
+ /* This may return a different thread if the target is pushing on something */
+ thread_t pulled_thread = thread_run_queue_remove_for_handoff(thread);
+
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED_THREAD_SWITCH)|DBG_FUNC_NONE,
+ thread_tid(thread), thread->state,
+ pulled_thread ? TRUE : FALSE, 0, 0);
+
+ if (pulled_thread != THREAD_NULL) {
+ /* We can't be dropping the last ref here */
+ thread_deallocate_safe(thread);
+
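+			/*
+			 * The address of assert_wait_timeout() itself serves as the wait
+			 * event: an arbitrary unique token, so the wait normally ends only
+			 * via the timeout or an abort.
+			 */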
+ if (wait_option)
+ assert_wait_timeout((event_t)assert_wait_timeout, THREAD_ABORTSAFE,
+ option_time, scale_factor);
+ else if (depress_option)
+ thread_depress_ms(option_time);
+
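+			/*
+			 * Stash the switch parameters so that thread_switch_continue, the
+			 * continuation used below, can find them when this thread resumes.
+			 */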
+ self->saved.swtch.option = option;
+ self->saved.swtch.reenable_workq_callback = reenable_workq_callback;
+
+ thread_run(self, (thread_continue_t)thread_switch_continue, NULL, pulled_thread);
+ /* NOTREACHED */
+ panic("returned from thread_run!");
+ }
+
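+		/* The target could not be pulled for a direct handoff; fall through to the ordinary yield path below. */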
+ splx(s);
+
+ thread_deallocate(thread);
+ }
+
+ if (wait_option)
+ assert_wait_timeout((event_t)assert_wait_timeout, THREAD_ABORTSAFE, option_time, scale_factor);
+ else if (depress_option)
+ thread_depress_ms(option_time);
+
+ self->saved.swtch.option = option;
+ self->saved.swtch.reenable_workq_callback = reenable_workq_callback;
+
+ thread_block_reason((thread_continue_t)thread_switch_continue, NULL, AST_YIELD);
+
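+	/*
+	 * Undo any depression and re-enable the workq callback before returning;
+	 * thread_switch_continue (the continuation above) is expected to do the
+	 * equivalent cleanup when the switch resumes there instead.
+	 */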
+ if (depress_option)
+ thread_depress_abort_internal(self);
+
+ if (reenable_workq_callback)
+ thread_switch_enable_workqueue_sched_callback();
+
+ return (KERN_SUCCESS);
+}
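+
+/*
+ * User-space view (sketch; assumes the function above is the handler for the
+ * thread_switch() Mach trap):
+ *
+ *	// yield and depress the caller for ~10 ms
+ *	thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DEPRESS, 10);
+ *
+ * A thread port name may be supplied instead of MACH_PORT_NULL to request a
+ * direct handoff to that thread.
+ */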
+
+/* Returns a +1 thread reference */
+thread_t
+port_name_to_thread_for_ulock(mach_port_name_t thread_name)
+{
+ thread_t thread = THREAD_NULL;
+ thread_t self = current_thread();
+
+ /*
+ * Translate the port name if supplied.
+ */
+ if (thread_name != MACH_PORT_NULL) {
+ ipc_port_t port;
+
+ if (ipc_port_translate_send(self->task->itk_space,
+ thread_name, &port) == KERN_SUCCESS) {
+ ip_reference(port);
+ ip_unlock(port);
+
+ thread = convert_port_to_thread(port);
+ ip_release(port);
+
+ if (thread == THREAD_NULL) {
+ return thread;
+ }
+
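+			/* An eligible ulock owner is some other thread within the caller's own task. */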
+ if ((thread == self) || (thread->task != self->task)) {
+ thread_deallocate(thread);
+ thread = THREAD_NULL;
+ }
+ }
+ }
+
+ return thread;
+}
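+
+/*
+ * Caller-side sketch (hypothetical names; the real consumer is expected to be
+ * the ulock owner-lookup path, which is not part of this hunk):
+ *
+ *	thread_t owner = port_name_to_thread_for_ulock(owner_name);
+ *	if (owner != THREAD_NULL) {
+ *		// ... boost or hand off to the owner ...
+ *		thread_deallocate(owner);	// drop the +1 reference
+ *	}
+ */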
+
+/* This function is called after an assert_wait(), therefore it must not
+ * cause another wait until after the thread_run() or thread_block().
+ *
+ * Consumes a ref on thread.
+ */
+wait_result_t
+thread_handoff(thread_t thread)
+{
+ thread_t deallocate_thread = THREAD_NULL;
+ thread_t self = current_thread();
+
+	/*
+	 * Try to hand off to the supplied thread, if any.
+	 */
+ if (thread != THREAD_NULL) {
+ spl_t s = splsched();
+
+ thread_t pulled_thread = thread_run_queue_remove_for_handoff(thread);
+
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED_THREAD_SWITCH)|DBG_FUNC_NONE,
+ thread_tid(thread), thread->state,
+ pulled_thread ? TRUE : FALSE, 0, 0);
+
+ if (pulled_thread != THREAD_NULL) {
+ /* We can't be dropping the last ref here */
+ thread_deallocate_safe(thread);
+
+			wait_result_t result = thread_run(self, THREAD_CONTINUE_NULL, NULL, pulled_thread);
+
+ splx(s);
+ return result;
+ }
+
+ splx(s);
+
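+		/*
+		 * Per the header comment, we must not risk another wait before the
+		 * thread_block() below, so dropping this (possibly final) reference
+		 * is deferred until afterwards.
+		 */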
+ deallocate_thread = thread;
+ thread = THREAD_NULL;
+ }
+
+	wait_result_t result = thread_block(THREAD_CONTINUE_NULL);
+ if (deallocate_thread != THREAD_NULL) {
+ thread_deallocate(deallocate_thread);
+ }
+
+ return result;
+}
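+
+/*
+ * Usage sketch (assumed caller pattern, not taken from this change): a waiter
+ * that has already asserted a wait donates its remaining quantum to the
+ * resource owner:
+ *
+ *	assert_wait(event, THREAD_ABORTSAFE);
+ *	wait_result_t wr = thread_handoff(owner);	// consumes the owner ref
+ */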
+
+/*
+ * Depress the current thread's priority to the lowest possible for the
+ * specified interval; an interval of zero means no timeout is scheduled.
+ */
+void
+thread_depress_abstime(
+ uint64_t interval)
+{
+ thread_t self = current_thread();
+ uint64_t deadline;
+ spl_t s;
+
+ s = splsched();
+ thread_lock(self);
+ if (!(self->sched_flags & TH_SFLAG_DEPRESSED_MASK)) {
+ processor_t myprocessor = self->last_processor;
+
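+		/*
+		 * Only sched_pri is dropped to DEPRESSPRI (the lowest possible
+		 * priority); base_pri is left intact so the depression can later be
+		 * undone, e.g. by thread_depress_abort_internal() or the depress
+		 * timer below.
+		 */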
+ self->sched_pri = DEPRESSPRI;
+
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
+ (uintptr_t)thread_tid(self),
+ self->base_pri,
+ self->sched_pri,
+ 0, /* eventually, 'reason' */
+ 0);
+
+ myprocessor->current_pri = self->sched_pri;
+ myprocessor->current_perfctl_class = thread_get_perfcontrol_class(self);
+ self->sched_flags |= TH_SFLAG_DEPRESS;
+
+ if (interval != 0) {
+ clock_absolutetime_interval_to_deadline(interval, &deadline);
+ if (!timer_call_enter(&self->depress_timer, deadline, TIMER_CALL_USER_CRITICAL))
+ self->depress_timer_active++;