+ thread_t th)
+{
+ return thread_should_halt_fast(th);
+}
+
+/*
+ * thread_set_voucher_name - reset the voucher port name bound to this thread
+ *
+ * Conditions: nothing locked
+ */
+
+kern_return_t
+thread_set_voucher_name(mach_port_name_t voucher_name)
+{
+ thread_t thread = current_thread();
+ ipc_voucher_t new_voucher = IPC_VOUCHER_NULL;
+ ipc_voucher_t voucher;
+ ledger_t bankledger = NULL;
+ struct thread_group *banktg = NULL;
+ uint32_t persona_id = 0;
+
+ if (MACH_PORT_DEAD == voucher_name) {
+ return KERN_INVALID_RIGHT;
+ }
+
+ /*
+ * aggressively convert to a voucher reference; a non-valid name
+ * leaves new_voucher NULL, which clears the binding below
+ */
+ if (MACH_PORT_VALID(voucher_name)) {
+ new_voucher = convert_port_name_to_voucher(voucher_name);
+ if (IPC_VOUCHER_NULL == new_voucher) {
+ return KERN_INVALID_ARGUMENT;
+ }
+ }
+ bank_get_bank_ledger_thread_group_and_persona(new_voucher, &bankledger, &banktg, &persona_id);
+
+ thread_mtx_lock(thread);
+ voucher = thread->ith_voucher;
+ thread->ith_voucher_name = voucher_name;
+ thread->ith_voucher = new_voucher;
+ thread_mtx_unlock(thread);
+
+ bank_swap_thread_bank_ledger(thread, bankledger);
+#if CONFIG_THREAD_GROUPS
+ thread_group_set_bank(thread, banktg);
+#endif /* CONFIG_THREAD_GROUPS */
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_IPC, MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE,
+ (uintptr_t)thread_tid(thread),
+ (uintptr_t)voucher_name,
+ VM_KERNEL_ADDRPERM((uintptr_t)new_voucher),
+ persona_id, 0);
+
+ if (IPC_VOUCHER_NULL != voucher) {
+ ipc_voucher_release(voucher);
+ }
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * thread_get_mach_voucher - return a voucher reference for the specified thread voucher
+ *
+ * Conditions: nothing locked
+ *
+ * NOTE: There is currently no distinction between the current and effective
+ * vouchers because we only set them at the thread level.
+ */
+kern_return_t
+thread_get_mach_voucher(
+ thread_act_t thread,
+ mach_voucher_selector_t __unused which,
+ ipc_voucher_t *voucherp)
+{
+ ipc_voucher_t voucher;
+
+ if (THREAD_NULL == thread) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ thread_mtx_lock(thread);
+ voucher = thread->ith_voucher;
+
+ if (IPC_VOUCHER_NULL != voucher) {
+ ipc_voucher_reference(voucher);
+ thread_mtx_unlock(thread);
+ *voucherp = voucher;
+ return KERN_SUCCESS;
+ }
+
+ thread_mtx_unlock(thread);
+
+ *voucherp = IPC_VOUCHER_NULL;
+ return KERN_SUCCESS;
+}
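+
+/*
+ * Example (sketch): a caller must release the reference returned by
+ * thread_get_mach_voucher(). MACH_VOUCHER_SELECTOR_CURRENT is assumed
+ * here; the selector is currently ignored (see NOTE above).
+ *
+ *     ipc_voucher_t v;
+ *     if (thread_get_mach_voucher(thread, MACH_VOUCHER_SELECTOR_CURRENT,
+ *         &v) == KERN_SUCCESS && v != IPC_VOUCHER_NULL) {
+ *         (... use v ...)
+ *         ipc_voucher_release(v);
+ *     }
+ */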
+
+/*
+ * thread_set_mach_voucher - set a voucher reference for the specified thread voucher
+ *
+ * Conditions: caller holds a reference on the voucher.
+ * nothing locked.
+ *
+ * We grab another reference to the voucher and bind it to the thread.
+ * The old voucher reference associated with the thread is
+ * discarded.
+ */
+kern_return_t
+thread_set_mach_voucher(
+ thread_t thread,
+ ipc_voucher_t voucher)
+{
+ ipc_voucher_t old_voucher;
+ ledger_t bankledger = NULL;
+ struct thread_group *banktg = NULL;
+ uint32_t persona_id = 0;
+
+ if (THREAD_NULL == thread) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ bank_get_bank_ledger_thread_group_and_persona(voucher, &bankledger, &banktg, &persona_id);
+
+ thread_mtx_lock(thread);
+ /*
+ * Once the thread is started, we will look at `ith_voucher` without
+ * holding any lock.
+ *
+ * Setting the voucher hence can only be done by current_thread() or
+ * before it started. "started" flips under the thread mutex and must be
+ * tested under it too.
+ */
+ if (thread != current_thread() && thread->started) {
+ thread_mtx_unlock(thread);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ ipc_voucher_reference(voucher);
+ old_voucher = thread->ith_voucher;
+ thread->ith_voucher = voucher;
+ thread->ith_voucher_name = MACH_PORT_NULL;
+ thread_mtx_unlock(thread);
+
+ bank_swap_thread_bank_ledger(thread, bankledger);
+#if CONFIG_THREAD_GROUPS
+ thread_group_set_bank(thread, banktg);
+#endif /* CONFIG_THREAD_GROUPS */
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_IPC, MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE,
+ (uintptr_t)thread_tid(thread),
+ (uintptr_t)MACH_PORT_NULL,
+ VM_KERNEL_ADDRPERM((uintptr_t)voucher),
+ persona_id, 0);
+
+ ipc_voucher_release(old_voucher);
+
+ return KERN_SUCCESS;
+}
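+
+/*
+ * Example (sketch): binding a voucher to a thread that has not been
+ * started yet. Names are illustrative; the caller still owns (and must
+ * release) its own voucher reference after the call, since this routine
+ * takes an additional reference of its own.
+ *
+ *     kern_return_t kr = thread_set_mach_voucher(new_thread, voucher);
+ *     if (kr == KERN_SUCCESS) {
+ *         (void)thread_resume(new_thread);
+ *     }
+ *     ipc_voucher_release(voucher);
+ */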
+
+/*
+ * thread_swap_mach_voucher - swap a voucher reference for the specified thread voucher
+ *
+ * Conditions: caller holds a reference on the new and presumed old voucher(s).
+ * nothing locked.
+ *
+ * This function is no longer supported.
+ */
+kern_return_t
+thread_swap_mach_voucher(
+ __unused thread_t thread,
+ __unused ipc_voucher_t new_voucher,
+ ipc_voucher_t *in_out_old_voucher)
+{
+ /*
+ * Currently this function is only called from a MIG generated
+ * routine which doesn't release the reference on the voucher
+ * addressed by in_out_old_voucher. To avoid leaking this reference,
+ * a call to release it has been added here.
+ */
+ ipc_voucher_release(*in_out_old_voucher);
+ return KERN_NOT_SUPPORTED;
+}
+
+/*
+ * thread_get_current_voucher_origin_pid - get the pid of the originator of the current voucher.
+ */
+kern_return_t
+thread_get_current_voucher_origin_pid(
+ int32_t *pid)
+{
+ uint32_t buf_size;
+ kern_return_t kr;
+ thread_t thread = current_thread();
+
+ buf_size = sizeof(*pid);
+ kr = mach_voucher_attr_command(thread->ith_voucher,
+ MACH_VOUCHER_ATTR_KEY_BANK,
+ BANK_ORIGINATOR_PID,
+ NULL,
+ 0,
+ (mach_voucher_attr_content_t)pid,
+ &buf_size);
+
+ return kr;
+}
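+
+/*
+ * Example (sketch): logging the originator of the current voucher.
+ * Assumes the current thread carries a bank voucher; otherwise the
+ * underlying attr command fails and pid is left untouched.
+ *
+ *     int32_t pid;
+ *     if (thread_get_current_voucher_origin_pid(&pid) == KERN_SUCCESS) {
+ *         printf("voucher originator pid %d\n", pid);
+ *     }
+ */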
+
+#if CONFIG_THREAD_GROUPS
+/*
+ * Returns the current thread's voucher-carried thread group
+ *
+ * The reference is borrowed from the current voucher, so this does NOT
+ * return a new reference to the group.
+ */
+struct thread_group *
+thread_get_current_voucher_thread_group(thread_t thread)
+{
+ assert(thread == current_thread());
+
+ if (thread->ith_voucher == NULL) {
+ return NULL;
+ }
+
+ ledger_t bankledger = NULL;
+ struct thread_group *banktg = NULL;
+
+ bank_get_bank_ledger_thread_group_and_persona(thread->ith_voucher, &bankledger, &banktg, NULL);
+
+ return banktg;
+}
+
+#endif /* CONFIG_THREAD_GROUPS */
+
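+/*
+ * thread_has_thread_name - return whether a BSD thread name has been
+ * set on the thread's uthread.
+ */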
+boolean_t
+thread_has_thread_name(thread_t th)
+{
+ if ((th) && (th->uthread)) {
+ return bsd_hasthreadname(th->uthread);
+ }
+
+ /*
+ * This is an odd case; clients may set the thread name based on the lack of
+ * a name, but in this context there is no uthread to attach the name to.
+ */
+ return FALSE;
+}
+
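+/*
+ * thread_set_thread_name - set the BSD thread name on the thread's
+ * uthread, if there is one.
+ */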
+void
+thread_set_thread_name(thread_t th, const char* name)
+{
+ if ((th) && (th->uthread) && name) {
+ bsd_setthreadname(th->uthread, name);
+ }
+}
+
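+/*
+ * thread_get_thread_name - copy out the BSD thread name, or an empty
+ * string when there is no uthread to hold one.
+ */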
+void
+thread_get_thread_name(thread_t th, char* name)
+{
+ if (!name) {
+ return;
+ }
+ if ((th) && (th->uthread)) {
+ bsd_getthreadname(th->uthread, name);
+ } else {
+ name[0] = '\0';
+ }
+}
+
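+/*
+ * thread_set_honor_qlimit/thread_clear_honor_qlimit - set/clear the
+ * HONOR_QLIMIT thread option bit.
+ */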
+void
+thread_set_honor_qlimit(thread_t thread)
+{
+ thread->options |= TH_OPT_HONOR_QLIMIT;
+}
+
+void
+thread_clear_honor_qlimit(thread_t thread)
+{
+ thread->options &= (~TH_OPT_HONOR_QLIMIT);
+}
+
+/*
+ * thread_enable_send_importance - set/clear the SEND_IMPORTANCE thread option bit.
+ */
+void
+thread_enable_send_importance(thread_t thread, boolean_t enable)
+{
+ if (enable == TRUE) {
+ thread->options |= TH_OPT_SEND_IMPORTANCE;
+ } else {
+ thread->options &= ~TH_OPT_SEND_IMPORTANCE;
+ }
+}
+
+/*
+ * thread_set_allocation_name - install a kernel allocation name tag on the
+ * current thread, returning the previous tag.
+ */
+
+kern_allocation_name_t
+thread_set_allocation_name(kern_allocation_name_t new_name)
+{
+ kern_allocation_name_t ret;
+ thread_kernel_state_t kstate = thread_get_kernel_state(current_thread());
+ ret = kstate->allocation_name;
+ // only install when clearing, or when no name is currently set (first one wins)
+ if (!new_name || !kstate->allocation_name) {
+ kstate->allocation_name = new_name;
+ }
+ return ret;
+}
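+
+/*
+ * Example (sketch): the intended save/restore pattern, tagging
+ * allocations for the duration of a scope.
+ *
+ *     kern_allocation_name_t prior = thread_set_allocation_name(name);
+ *     (... allocate ...)
+ *     thread_set_allocation_name(prior);
+ */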
+
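+/*
+ * thread_iokit_tls_get/thread_iokit_tls_set - access the current
+ * thread's saved IOKit TLS slots.
+ */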
+void *
+thread_iokit_tls_get(uint32_t index)
+{
+ assert(index < THREAD_SAVE_IOKIT_TLS_COUNT);
+ return current_thread()->saved.iokit.tls[index];
+}
+
+void
+thread_iokit_tls_set(uint32_t index, void * data)
+{
+ assert(index < THREAD_SAVE_IOKIT_TLS_COUNT);
+ current_thread()->saved.iokit.tls[index] = data;
+}
+
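+/*
+ * thread_get_last_wait_duration - how long the thread spent waiting the
+ * last time it blocked (time made runnable minus time it last ran).
+ */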
+uint64_t
+thread_get_last_wait_duration(thread_t thread)
+{
+ return thread->last_made_runnable_time - thread->last_run_time;
+}
+
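+/*
+ * thread_kern_get_pri - return the base priority of a kernel thread.
+ */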
+integer_t
+thread_kern_get_pri(thread_t thr)
+{
+ return thr->base_pri;
+}
+
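+/*
+ * thread_kern_set_pri - set the scheduler priority of a kernel thread.
+ */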
+void
+thread_kern_set_pri(thread_t thr, integer_t pri)
+{
+ sched_set_kernel_thread_priority(thr, pri);
+}
+
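+/*
+ * thread_kern_get_kernel_maxpri - return the maximum kernel priority.
+ */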
+integer_t
+thread_kern_get_kernel_maxpri(void)
+{
+ return MAXPRI_KERNEL;
+}
+
+/*
+ * thread_port_with_flavor_notify
+ *
+ * Called whenever the Mach port system detects no-senders on
+ * the thread inspect or read port. These ports are allocated lazily and
+ * should be deallocated here when there are no senders remaining.
+ */
+void
+thread_port_with_flavor_notify(mach_msg_header_t *msg)
+{
+ mach_no_senders_notification_t *notification = (void *)msg;
+ ipc_port_t port = notification->not_header.msgh_remote_port;
+ thread_t thread;
+ mach_thread_flavor_t flavor;
+ ipc_kobject_type_t kotype;
+
+ ip_lock(port);
+ if (port->ip_srights > 0) {
+ ip_unlock(port);
+ return;
+ }
+ thread = (thread_t)ipc_kobject_get(port);
+ kotype = ip_kotype(port);
+ if (thread != THREAD_NULL) {
+ assert((IKOT_THREAD_READ == kotype) || (IKOT_THREAD_INSPECT == kotype));
+ thread_reference_internal(thread);
+ }
+ ip_unlock(port);
+
+ if (thread == THREAD_NULL) {
+ /* The thread is exiting or disabled; it will eventually deallocate the port */
+ return;
+ }
+
+ if (kotype == IKOT_THREAD_READ) {
+ flavor = THREAD_FLAVOR_READ;
+ } else {
+ flavor = THREAD_FLAVOR_INSPECT;
+ }
+
+ thread_mtx_lock(thread);
+ ip_lock(port);
+ /*
+ * If the port is no longer active, then ipc_thread_terminate() ran
+ * and destroyed the kobject already. Just deallocate the thread
+ * ref we took and go away.
+ *
+ * It is also possible that several nsrequests are in flight,
+ * only one shall NULL-out the port entry, and this is the one
+ * that gets to dealloc the port.
+ *
+ * Check for a stale no-senders notification. A call to any function
+ * that vends out send rights to this port could resurrect it between
+ * this notification being generated and actually being handled here.
+ */
+ if (!ip_active(port) ||
+ thread->ith_thread_ports[flavor] != port ||
+ port->ip_srights > 0) {
+ ip_unlock(port);
+ thread_mtx_unlock(thread);
+ thread_deallocate(thread);
+ return;
+ }
+
+ assert(thread->ith_thread_ports[flavor] == port);
+ thread->ith_thread_ports[flavor] = IP_NULL;
+ ipc_kobject_set_atomically(port, IKO_NULL, IKOT_NONE);
+ ip_unlock(port);
+ thread_mtx_unlock(thread);
+ thread_deallocate(thread);
+
+ ipc_port_dealloc_kernel(port);
+}
+
+/*
+ * The 'thread_region_page_shift' is used by footprint
+ * to specify the page size that it will use to
+ * accomplish its accounting work on the task being
+ * inspected. Since footprint uses a thread for each
+ * task that it works on, we need to keep the page_shift
+ * on a per-thread basis.
+ */
+
+int
+thread_self_region_page_shift(void)