+
+/*
+ * thread_set_voucher_name - reset the voucher port name bound to this thread
+ *
+ * Conditions: nothing locked
+ *
+ * If we already converted the previous name to a cached voucher
+ * reference, then we discard that reference here. The next lookup
+ * will cache it again.
+ */
+
+kern_return_t
+thread_set_voucher_name(mach_port_name_t voucher_name)
+{
+	thread_t thread = current_thread();
+	ipc_voucher_t new_voucher = IPC_VOUCHER_NULL;
+	ipc_voucher_t voucher;
+	ledger_t bankledger = NULL;
+	thread_group_t banktg = NULL;
+
+	/* a dead name can never denote a live voucher port */
+	if (MACH_PORT_DEAD == voucher_name)
+		return KERN_INVALID_RIGHT;
+
+	/*
+	 * aggressively convert to voucher reference
+	 * (a non-valid name, e.g. MACH_PORT_NULL, leaves new_voucher as
+	 * IPC_VOUCHER_NULL and effectively clears the binding)
+	 */
+	if (MACH_PORT_VALID(voucher_name)) {
+		new_voucher = convert_port_name_to_voucher(voucher_name);
+		if (IPC_VOUCHER_NULL == new_voucher)
+			return KERN_INVALID_ARGUMENT;
+	}
+	/* resolve the bank ledger/thread group for the new voucher before
+	 * taking the thread mutex */
+	bank_get_bank_ledger_and_thread_group(new_voucher, &bankledger, &banktg);
+
+	/* publish the new name/voucher pair atomically w.r.t. the thread mutex */
+	thread_mtx_lock(thread);
+	voucher = thread->ith_voucher;
+	thread->ith_voucher_name = voucher_name;
+	thread->ith_voucher = new_voucher;
+	thread_mtx_unlock(thread);
+
+	bank_swap_thread_bank_ledger(thread, bankledger);
+
+	/* NOTE(review): banktg is fetched but not consumed in this view of
+	 * the file — presumably used under a thread-group config elsewhere;
+	 * confirm against the full source. */
+	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+				  MACHDBG_CODE(DBG_MACH_IPC,MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE,
+				  (uintptr_t)thread_tid(thread),
+				  (uintptr_t)voucher_name,
+				  VM_KERNEL_ADDRPERM((uintptr_t)new_voucher),
+				  1, 0);
+
+	/* drop the reference the thread previously held, if any */
+	if (IPC_VOUCHER_NULL != voucher)
+		ipc_voucher_release(voucher);
+
+	return KERN_SUCCESS;
+}
+
+/*
+ * thread_get_mach_voucher - return a voucher reference for the specified thread voucher
+ *
+ * Conditions: nothing locked
+ *
+ * A reference to the voucher may be lazily pending, if someone set the voucher name
+ * but nobody has done a lookup yet. In that case, we'll have to do the equivalent
+ * lookup here.
+ *
+ * NOTE: At the moment, there is no distinction between the current and effective
+ * vouchers because we only set them at the thread level currently.
+ */
+kern_return_t
+thread_get_mach_voucher(
+	thread_act_t		thread,
+	mach_voucher_selector_t __unused which,
+	ipc_voucher_t		*voucherp)
+{
+	ipc_voucher_t		voucher;
+	mach_port_name_t	voucher_name;
+
+	if (THREAD_NULL == thread)
+		return KERN_INVALID_ARGUMENT;
+
+	thread_mtx_lock(thread);
+	voucher = thread->ith_voucher;
+
+	/* if already cached, just return a ref */
+	if (IPC_VOUCHER_NULL != voucher) {
+		ipc_voucher_reference(voucher);
+		thread_mtx_unlock(thread);
+		*voucherp = voucher;
+		return KERN_SUCCESS;
+	}
+
+	voucher_name = thread->ith_voucher_name;
+
+	/* convert the name to a port, then voucher reference */
+	if (MACH_PORT_VALID(voucher_name)) {
+		ipc_port_t port;
+
+		/* If the name no longer copies in as a send right, clear the
+		 * stale binding and report "no voucher" rather than failing. */
+		if (KERN_SUCCESS !=
+		    ipc_object_copyin(thread->task->itk_space, voucher_name,
+				      MACH_MSG_TYPE_COPY_SEND, (ipc_object_t *)&port)) {
+			thread->ith_voucher_name = MACH_PORT_NULL;
+			thread_mtx_unlock(thread);
+			*voucherp = IPC_VOUCHER_NULL;
+			return KERN_SUCCESS;
+		}
+
+		/* convert to a voucher ref to return, and cache a ref on thread */
+		voucher = convert_port_to_voucher(port);
+		ipc_voucher_reference(voucher);
+		thread->ith_voucher = voucher;
+		thread_mtx_unlock(thread);
+
+		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+					  MACHDBG_CODE(DBG_MACH_IPC,MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE,
+					  (uintptr_t)thread_tid(thread),
+					  (uintptr_t)port,
+					  VM_KERNEL_ADDRPERM((uintptr_t)voucher),
+					  2, 0);
+
+		/* done with the send right we copied in above */
+		ipc_port_release_send(port);
+	} else
+		thread_mtx_unlock(thread);
+
+	/* voucher is IPC_VOUCHER_NULL on this path unless the lazy
+	 * conversion above produced one */
+	*voucherp = voucher;
+	return KERN_SUCCESS;
+}
+
+/*
+ * thread_set_mach_voucher - set a voucher reference for the specified thread voucher
+ *
+ * Conditions: callers holds a reference on the voucher.
+ * nothing locked.
+ *
+ * We grab another reference to the voucher and bind it to the thread. Any lazy
+ * binding is erased. The old voucher reference associated with the thread is
+ * discarded.
+ */
+kern_return_t
+thread_set_mach_voucher(
+	thread_t		thread,
+	ipc_voucher_t		voucher)
+{
+	ipc_voucher_t old_voucher;
+	ledger_t bankledger = NULL;
+	thread_group_t banktg = NULL;
+
+	if (THREAD_NULL == thread)
+		return KERN_INVALID_ARGUMENT;
+
+	/* only the calling thread itself, or a thread that has not yet
+	 * started, may have its voucher replaced */
+	if (thread != current_thread() && thread->started)
+		return KERN_INVALID_ARGUMENT;
+
+	/* take the reference that will be bound to the thread */
+	ipc_voucher_reference(voucher);
+	bank_get_bank_ledger_and_thread_group(voucher, &bankledger, &banktg);
+
+	/* swap under the thread mutex; any lazy name binding is erased */
+	thread_mtx_lock(thread);
+	old_voucher = thread->ith_voucher;
+	thread->ith_voucher = voucher;
+	thread->ith_voucher_name = MACH_PORT_NULL;
+	thread_mtx_unlock(thread);
+
+	bank_swap_thread_bank_ledger(thread, bankledger);
+
+	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+				  MACHDBG_CODE(DBG_MACH_IPC,MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE,
+				  (uintptr_t)thread_tid(thread),
+				  (uintptr_t)MACH_PORT_NULL,
+				  VM_KERNEL_ADDRPERM((uintptr_t)voucher),
+				  3, 0);
+
+	/* NOTE(review): released without a NULL guard, unlike
+	 * thread_set_voucher_name() — ipc_voucher_release() is presumably
+	 * tolerant of IPC_VOUCHER_NULL; confirm. */
+	ipc_voucher_release(old_voucher);
+
+	return KERN_SUCCESS;
+}
+
+/*
+ * thread_swap_mach_voucher - swap a voucher reference for the specified thread voucher
+ *
+ * Conditions: callers holds a reference on the new and presumed old voucher(s).
+ * nothing locked.
+ *
+ * If the old voucher is still the same as passed in, replace it with new voucher
+ * and discard the old (and the reference passed in). Otherwise, discard the new
+ * and return an updated old voucher.
+ */
+kern_return_t
+thread_swap_mach_voucher(
+	thread_t		thread,
+	ipc_voucher_t		new_voucher,
+	ipc_voucher_t		*in_out_old_voucher)
+{
+	mach_port_name_t old_voucher_name;
+	ipc_voucher_t old_voucher;
+	ledger_t bankledger = NULL;
+	thread_group_t banktg = NULL;
+
+	if (THREAD_NULL == thread)
+		return KERN_INVALID_TASK;
+
+	/* only the calling thread itself, or a not-yet-started thread */
+	if (thread != current_thread() && thread->started)
+		return KERN_INVALID_ARGUMENT;
+
+	bank_get_bank_ledger_and_thread_group(new_voucher, &bankledger, &banktg);
+
+	thread_mtx_lock(thread);
+
+	old_voucher = thread->ith_voucher;
+
+	if (IPC_VOUCHER_NULL == old_voucher) {
+		old_voucher_name = thread->ith_voucher_name;
+
+		/* perform lazy binding if needed */
+		if (MACH_PORT_VALID(old_voucher_name)) {
+			old_voucher = convert_port_name_to_voucher(old_voucher_name);
+			thread->ith_voucher_name = MACH_PORT_NULL;
+			thread->ith_voucher = old_voucher;
+
+			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+						  MACHDBG_CODE(DBG_MACH_IPC,MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE,
+						  (uintptr_t)thread_tid(thread),
+						  (uintptr_t)old_voucher_name,
+						  VM_KERNEL_ADDRPERM((uintptr_t)old_voucher),
+						  4, 0);
+
+		}
+	}
+
+	/* swap in new voucher, if old voucher matches the one supplied */
+	if (old_voucher == *in_out_old_voucher) {
+		ipc_voucher_reference(new_voucher);
+		thread->ith_voucher = new_voucher;
+		thread->ith_voucher_name = MACH_PORT_NULL;
+		thread_mtx_unlock(thread);
+		bank_swap_thread_bank_ledger(thread, bankledger);
+
+		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+					  MACHDBG_CODE(DBG_MACH_IPC,MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE,
+					  (uintptr_t)thread_tid(thread),
+					  (uintptr_t)MACH_PORT_NULL,
+					  VM_KERNEL_ADDRPERM((uintptr_t)new_voucher),
+					  5, 0);
+
+		/* drop the thread's reference on the displaced old voucher
+		 * (presumably NULL-tolerant — see NULL-guard in
+		 * thread_set_voucher_name(); confirm) */
+		ipc_voucher_release(old_voucher);
+
+		*in_out_old_voucher = IPC_VOUCHER_NULL;
+		return KERN_SUCCESS;
+	}
+
+	/* Otherwise, just return old voucher reference */
+	ipc_voucher_reference(old_voucher);
+	thread_mtx_unlock(thread);
+	*in_out_old_voucher = old_voucher;
+	return KERN_SUCCESS;
+}
+
+/*
+ * thread_get_current_voucher_origin_pid - get the pid of the originator of the current voucher.
+ */
+kern_return_t
+thread_get_current_voucher_origin_pid(
+	int32_t      *pid)
+{
+	thread_t cur_thread = current_thread();
+	uint32_t content_size = sizeof(*pid);
+
+	/*
+	 * Query the bank attribute manager of the voucher currently
+	 * adopted by this thread for the originator's pid, writing the
+	 * answer through *pid and propagating the command's result.
+	 */
+	return mach_voucher_attr_command(cur_thread->ith_voucher,
+	           MACH_VOUCHER_ATTR_KEY_BANK,
+	           BANK_ORIGINATOR_PID,
+	           NULL,
+	           0,
+	           (mach_voucher_attr_content_t)pid,
+	           &content_size);
+}
+
+
+boolean_t
+thread_has_thread_name(thread_t th)
+{
+	/*
+	 * Without a thread, or without its BSD uthread, there is nowhere a
+	 * name could have been stored — report FALSE. Clients may set a
+	 * name based on this answer, but with no uthread there is nothing
+	 * to attach it to anyway.
+	 */
+	if (th == THREAD_NULL || th->uthread == NULL) {
+		return FALSE;
+	}
+
+	return bsd_hasthreadname(th->uthread);
+}
+
+void
+thread_set_thread_name(thread_t th, const char* name)
+{
+	/* silently ignore calls lacking a thread, its uthread, or a name */
+	if (th == THREAD_NULL || th->uthread == NULL || name == NULL) {
+		return;
+	}
+
+	bsd_setthreadname(th->uthread, name);
+}
+
+/*
+ * thread_enable_send_importance - set/clear the SEND_IMPORTANCE thread option bit.
+ */
+void thread_enable_send_importance(thread_t thread, boolean_t enable)
+{
+	/*
+	 * Set or clear TH_OPT_SEND_IMPORTANCE in one assignment.
+	 * The explicit (enable == TRUE) comparison is preserved from the
+	 * original: any value other than TRUE clears the bit.
+	 */
+	thread->options = (enable == TRUE)
+	    ? (thread->options | TH_OPT_SEND_IMPORTANCE)
+	    : (thread->options & ~TH_OPT_SEND_IMPORTANCE);
+}
+
+/*
+ * thread_set_allocation_name - record a new allocation name on the current
+ * thread's kernel state (first setter wins), returning the previous name.
+ */
+
+kern_allocation_name_t thread_set_allocation_name(kern_allocation_name_t new_name)
+{
+	thread_kernel_state_t kstate = thread_get_kernel_state(current_thread());
+	kern_allocation_name_t prior = kstate->allocation_name;
+
+	/*
+	 * First setter wins (FIFO): install new_name only when no name is
+	 * currently recorded. Passing NULL always clears the slot.
+	 * The previously recorded name is handed back to the caller.
+	 */
+	if (new_name == NULL || prior == NULL) {
+		kstate->allocation_name = new_name;
+	}
+
+	return prior;
+}
+
+#if CONFIG_DTRACE
+uint32_t dtrace_get_thread_predcache(thread_t thread)
+{
+	/* predicate-cache value for the thread; 0 when no thread is given */
+	if (thread == THREAD_NULL) {
+		return 0;
+	}
+	return thread->t_dtrace_predcache;
+}
+
+int64_t dtrace_get_thread_vtime(thread_t thread)
+{
+	/* per-thread dtrace virtual time; 0 when no thread is given */
+	if (thread == THREAD_NULL) {
+		return 0;
+	}
+	return thread->t_dtrace_vtime;
+}
+
+int dtrace_get_thread_last_cpu_id(thread_t thread)
+{
+	/* -1 means "unknown": no thread, or it has no recorded processor */
+	if (thread == THREAD_NULL || thread->last_processor == PROCESSOR_NULL) {
+		return -1;
+	}
+	return thread->last_processor->cpu_id;
+}
+
+int64_t dtrace_get_thread_tracing(thread_t thread)
+{
+	/* accumulated dtrace tracing time; 0 when no thread is given */
+	if (thread == THREAD_NULL) {
+		return 0;
+	}
+	return thread->t_dtrace_tracing;
+}
+
+boolean_t dtrace_get_thread_reentering(thread_t thread)
+{
+	/*
+	 * Report whether the thread is marked as re-entering dtrace
+	 * (TH_OPT_DTRACE option bit). Returns FALSE for THREAD_NULL.
+	 *
+	 * Fix: the null-thread branch previously returned the bare literal
+	 * 0 from a boolean_t function; use FALSE for consistency with the
+	 * TRUE/FALSE returned on the main path (behavior unchanged).
+	 */
+	if (thread == THREAD_NULL)
+		return FALSE;
+
+	return (thread->options & TH_OPT_DTRACE) ? TRUE : FALSE;
+}
+
+vm_offset_t dtrace_get_kernel_stack(thread_t thread)
+{
+	/* base of the thread's kernel stack; 0 when no thread is given */
+	if (thread == THREAD_NULL) {
+		return 0;
+	}
+	return thread->kernel_stack;
+}
+
+#if KASAN
+/* Return a pointer to the KASan bookkeeping state embedded in the thread. */
+struct kasan_thread_data *
+kasan_get_thread_data(thread_t thread)
+{
+	return &thread->kasan_data;
+}
+#endif
+
+/*
+ * dtrace_calc_thread_recent_vtime - total recent cpu time for a thread:
+ * the banked system + user timer totals, plus the time elapsed in the
+ * current (not yet banked) timer interval on this processor.
+ */
+int64_t dtrace_calc_thread_recent_vtime(thread_t thread)
+{
+	if (thread != THREAD_NULL) {
+		processor_t processor = current_processor();
+		uint64_t abstime = mach_absolute_time();
+		timer_t timer;
+
+		/* the processor's currently-running thread timer */
+		timer = PROCESSOR_DATA(processor, thread_timer);
+
+		return timer_grab(&(thread->system_timer)) + timer_grab(&(thread->user_timer)) +
+				(abstime - timer->tstamp); /* XXX need interrupts off to prevent missed time? */
+	} else
+		return 0;
+}
+
+void dtrace_set_thread_predcache(thread_t thread, uint32_t predcache)
+{
+	/* store the predicate-cache value; ignore a THREAD_NULL target */
+	if (thread == THREAD_NULL) {
+		return;
+	}
+	thread->t_dtrace_predcache = predcache;
+}
+
+void dtrace_set_thread_vtime(thread_t thread, int64_t vtime)
+{
+	/* store the dtrace virtual time; ignore a THREAD_NULL target */
+	if (thread == THREAD_NULL) {
+		return;
+	}
+	thread->t_dtrace_vtime = vtime;
+}
+
+void dtrace_set_thread_tracing(thread_t thread, int64_t accum)
+{
+	/* store the accumulated tracing time; ignore a THREAD_NULL target */
+	if (thread == THREAD_NULL) {
+		return;
+	}
+	thread->t_dtrace_tracing = accum;
+}
+
+void dtrace_set_thread_reentering(thread_t thread, boolean_t vbool)
+{
+	/* set or clear the TH_OPT_DTRACE bit; ignore a THREAD_NULL target */
+	if (thread == THREAD_NULL) {
+		return;
+	}
+
+	if (vbool) {
+		thread->options |= TH_OPT_DTRACE;
+	} else {
+		thread->options &= ~TH_OPT_DTRACE;
+	}
+}
+
+vm_offset_t dtrace_set_thread_recover(thread_t thread, vm_offset_t recover)
+{
+	/*
+	 * Install a new recovery address on the thread, handing back the
+	 * previous one (0 when there is no thread to operate on).
+	 */
+	if (thread == THREAD_NULL) {
+		return 0;
+	}
+
+	vm_offset_t previous = thread->recover;
+	thread->recover = recover;
+	return previous;
+}
+
+/*
+ * dtrace_thread_bootstrap - fire the process/thread start probes for the
+ * current thread. A task with exactly one thread is treated as a fresh
+ * process; if dtrace_thread_didexec() flagged the thread, the exec-success
+ * probe (and a kdebug exec event) fires before proc start.
+ */
+void dtrace_thread_bootstrap(void)
+{
+	task_t task = current_task();
+
+	/* sole thread in the task => new process image */
+	if (task->thread_count == 1) {
+		thread_t thread = current_thread();
+		/* flag set by dtrace_thread_didexec(); consume it here */
+		if (thread->t_dtrace_flags & TH_DTRACE_EXECSUCCESS) {
+			thread->t_dtrace_flags &= ~TH_DTRACE_EXECSUCCESS;
+			DTRACE_PROC(exec__success);
+			KDBG(BSDDBG_CODE(DBG_BSD_PROC,BSD_PROC_EXEC),
+			     task_pid(task));
+		}
+		DTRACE_PROC(start);
+	}
+	/* every thread, including the first, announces lwp start */
+	DTRACE_PROC(lwp__start);
+
+}
+
+void
+dtrace_thread_didexec(thread_t thread)
+{
+	/* Mark the thread as having completed exec so the next
+	 * dtrace_thread_bootstrap() fires the proc exec-success probe. */
+	thread->t_dtrace_flags |= TH_DTRACE_EXECSUCCESS;
+}
+#endif /* CONFIG_DTRACE */