+ rr_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
+ if (rr_info->depressed) {
+ rr_info->base_priority = DEPRESSPRI;
+ rr_info->depress_priority = thread->base_pri;
+ }
+ else {
+ rr_info->base_priority = thread->base_pri;
+ rr_info->depress_priority = -1;
+ }
+
+ quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
+ absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
+
+ rr_info->max_priority = thread->max_priority;
+ rr_info->quantum = (uint32_t)(quantum_ns / 1000 / 1000); /* report the quantum in milliseconds */
+
+ thread_unlock(thread);
+ splx(s);
+
+ *thread_info_count = POLICY_RR_INFO_COUNT;
+
+ return (KERN_SUCCESS);
+ }
+ else if (flavor == THREAD_EXTENDED_INFO) {
+ thread_basic_info_data_t basic_info;
+ thread_extended_info_t extended_info = (thread_extended_info_t) thread_info_out;
+
+ if (*thread_info_count < THREAD_EXTENDED_INFO_COUNT) {
+ return (KERN_INVALID_ARGUMENT);
+ }
+
+ s = splsched();
+ thread_lock(thread);
+
+ /* NOTE: This mimics fill_taskthreadinfo(), which is the function used by proc_pidinfo() for
+ * the PROC_PIDTHREADINFO flavor (which can't be used on corpses)
+ */
+ retrieve_thread_basic_info(thread, &basic_info);
+ extended_info->pth_user_time = ((basic_info.user_time.seconds * (integer_t)NSEC_PER_SEC) + (basic_info.user_time.microseconds * (integer_t)NSEC_PER_USEC));
+ extended_info->pth_system_time = ((basic_info.system_time.seconds * (integer_t)NSEC_PER_SEC) + (basic_info.system_time.microseconds * (integer_t)NSEC_PER_USEC));
+
+ extended_info->pth_cpu_usage = basic_info.cpu_usage;
+ extended_info->pth_policy = basic_info.policy;
+ extended_info->pth_run_state = basic_info.run_state;
+ extended_info->pth_flags = basic_info.flags;
+ extended_info->pth_sleep_time = basic_info.sleep_time;
+ extended_info->pth_curpri = thread->sched_pri;
+ extended_info->pth_priority = thread->base_pri;
+ extended_info->pth_maxpriority = thread->max_priority;
+
+ bsd_getthreadname(thread->uthread, extended_info->pth_name);
+
+ thread_unlock(thread);
+ splx(s);
+
+ *thread_info_count = THREAD_EXTENDED_INFO_COUNT;
+
+ return (KERN_SUCCESS);
+ }
+ else if (flavor == THREAD_DEBUG_INFO_INTERNAL) {
+#if DEVELOPMENT || DEBUG
+ thread_debug_info_internal_t dbg_info;
+ if (*thread_info_count < THREAD_DEBUG_INFO_INTERNAL_COUNT)
+ return (KERN_NOT_SUPPORTED);
+
+ if (thread_info_out == NULL)
+ return (KERN_INVALID_ARGUMENT);
+
+ dbg_info = (thread_debug_info_internal_t) thread_info_out;
+ dbg_info->page_creation_count = thread->t_page_creation_count;
+
+ *thread_info_count = THREAD_DEBUG_INFO_INTERNAL_COUNT;
+ return (KERN_SUCCESS);
+#endif /* DEVELOPMENT || DEBUG */
+ return (KERN_NOT_SUPPORTED);
+ }
+
+ return (KERN_INVALID_ARGUMENT);
+}
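+
+/*
+ * Illustrative userspace sketch (not part of this file): the
+ * THREAD_EXTENDED_INFO flavor handled above is normally reached through
+ * the thread_info() MIG call, roughly as follows:
+ *
+ *     thread_extended_info_data_t info;
+ *     mach_msg_type_number_t count = THREAD_EXTENDED_INFO_COUNT;
+ *
+ *     kern_return_t kr = thread_info(mach_thread_self(),
+ *         THREAD_EXTENDED_INFO, (thread_info_t)&info, &count);
+ *     if (kr == KERN_SUCCESS)
+ *         printf("%s: %llu ns user\n", info.pth_name, info.pth_user_time);
+ */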
+
+void
+thread_read_times(
+ thread_t thread,
+ time_value_t *user_time,
+ time_value_t *system_time)
+{
+ clock_sec_t secs;
+ clock_usec_t usecs;
+ uint64_t tval_user, tval_system;
+
+ tval_user = timer_grab(&thread->user_timer);
+ tval_system = timer_grab(&thread->system_timer);
+
+ if (thread->precise_user_kernel_time) {
+ absolutetime_to_microtime(tval_user, &secs, &usecs);
+ user_time->seconds = (typeof(user_time->seconds))secs;
+ user_time->microseconds = usecs;
+
+ absolutetime_to_microtime(tval_system, &secs, &usecs);
+ system_time->seconds = (typeof(system_time->seconds))secs;
+ system_time->microseconds = usecs;
+ } else {
+ /*
+ * When precise user/kernel accounting is disabled, system_timer
+ * may hold either user or system time, so report the combined
+ * total as user time.
+ */
+ tval_user += tval_system;
+ absolutetime_to_microtime(tval_user, &secs, &usecs);
+ user_time->seconds = (typeof(user_time->seconds))secs;
+ user_time->microseconds = usecs;
+
+ system_time->seconds = 0;
+ system_time->microseconds = 0;
+ }
+}
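+
+/*
+ * Usage sketch (hedged): a caller that wants a single nanosecond total
+ * from thread_read_times() can combine the two time_value_t results:
+ *
+ *     time_value_t user, sys;
+ *     thread_read_times(thread, &user, &sys);
+ *     uint64_t total_ns =
+ *         ((uint64_t)user.seconds + sys.seconds) * NSEC_PER_SEC +
+ *         ((uint64_t)user.microseconds + sys.microseconds) * NSEC_PER_USEC;
+ */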
+
+uint64_t
+thread_get_runtime_self(void)
+{
+ boolean_t interrupt_state;
+ uint64_t runtime;
+ thread_t thread = NULL;
+ processor_t processor = NULL;
+
+ thread = current_thread();
+
+ /*
+ * Interrupts must be disabled here; otherwise the scheduler could
+ * update the timer values underneath us.
+ */
+ interrupt_state = ml_set_interrupts_enabled(FALSE);
+ processor = current_processor();
+ timer_switch(PROCESSOR_DATA(processor, thread_timer), mach_absolute_time(), PROCESSOR_DATA(processor, thread_timer));
+ runtime = (timer_grab(&thread->user_timer) + timer_grab(&thread->system_timer));
+ ml_set_interrupts_enabled(interrupt_state);
+
+ return runtime;
+}
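+
+/*
+ * Usage sketch (hedged; do_work() is a hypothetical placeholder): the
+ * return value is in mach_absolute_time units, so measuring the CPU cost
+ * of a region looks roughly like:
+ *
+ *     uint64_t start = thread_get_runtime_self(), spent_ns;
+ *     do_work();
+ *     absolutetime_to_nanoseconds(thread_get_runtime_self() - start,
+ *         &spent_ns);
+ */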
+
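+/*
+ * thread_assign:
+ *
+ * Thread-to-pset assignment is not supported: there is only the single
+ * default processor set (pset0), so the request always fails.
+ */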
+kern_return_t
+thread_assign(
+ __unused thread_t thread,
+ __unused processor_set_t new_pset)
+{
+ return (KERN_FAILURE);
+}
+
+/*
+ * thread_assign_default:
+ *
+ * Special version of thread_assign for assigning threads to default
+ * processor set.
+ */
+kern_return_t
+thread_assign_default(
+ thread_t thread)
+{
+ return (thread_assign(thread, &pset0));
+}
+
+/*
+ * thread_get_assignment
+ *
+ * Return current assignment for this thread.
+ */
+kern_return_t
+thread_get_assignment(
+ thread_t thread,
+ processor_set_t *pset)
+{
+ if (thread == NULL)
+ return (KERN_INVALID_ARGUMENT);
+
+ *pset = &pset0;
+
+ return (KERN_SUCCESS);
+}
+
+/*
+ * thread_wire_internal:
+ *
+ * Specify that the target thread must always be able
+ * to run and to allocate memory.
+ */
+kern_return_t
+thread_wire_internal(
+ host_priv_t host_priv,
+ thread_t thread,
+ boolean_t wired,
+ boolean_t *prev_state)
+{
+ if (host_priv == NULL || thread != current_thread())
+ return (KERN_INVALID_ARGUMENT);
+
+ assert(host_priv == &realhost);
+
+ if (prev_state)
+ *prev_state = (thread->options & TH_OPT_VMPRIV) != 0;
+
+ if (wired) {
+ if (!(thread->options & TH_OPT_VMPRIV))
+ vm_page_free_reserve(1); /* XXX */
+ thread->options |= TH_OPT_VMPRIV;
+ }
+ else {
+ if (thread->options & TH_OPT_VMPRIV)
+ vm_page_free_reserve(-1); /* XXX */
+ thread->options &= ~TH_OPT_VMPRIV;
+ }
+
+ return (KERN_SUCCESS);
+}
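+
+/*
+ * Caller sketch (hedged): a kernel path that must be able to allocate
+ * while memory is scarce would save and restore the wired state around
+ * the critical region; host_priv_self() is assumed here as the way to
+ * obtain the host_priv port:
+ *
+ *     boolean_t was_wired;
+ *     thread_wire_internal(host_priv_self(), current_thread(),
+ *         TRUE, &was_wired);
+ *     // ... allocations that must not block on the pageout daemon ...
+ *     thread_wire_internal(host_priv_self(), current_thread(),
+ *         was_wired, NULL);
+ */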
+
+
+/*
+ * thread_wire:
+ *
+ * User-api wrapper for thread_wire_internal()
+ */
+kern_return_t
+thread_wire(
+ host_priv_t host_priv,
+ thread_t thread,
+ boolean_t wired)
+{
+ return (thread_wire_internal(host_priv, thread, wired, NULL));
+}
+
+
+boolean_t
+set_vm_privilege(boolean_t privileged)
+{
+ thread_t thread = current_thread();
+ boolean_t was_vmpriv = (thread->options & TH_OPT_VMPRIV) != 0;
+
+ if (privileged != FALSE)
+ thread->options |= TH_OPT_VMPRIV;
+ else
+ thread->options &= ~TH_OPT_VMPRIV;
+
+ return (was_vmpriv);
+}
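+
+/*
+ * Usage sketch (hedged): callers typically treat set_vm_privilege() as a
+ * save/restore pair around work that must not block behind the pager:
+ *
+ *     boolean_t was_vmpriv = set_vm_privilege(TRUE);
+ *     // ... allocate with the VM-privileged reserve available ...
+ *     set_vm_privilege(was_vmpriv);
+ */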
+
+
+/*
+ * XXX assuming current thread only, for now...
+ */
+void
+thread_guard_violation(thread_t thread, unsigned type)
+{
+ assert(thread == current_thread());
+
+ spl_t s = splsched();
+ /*
+ * Use the saved state area of the thread structure to store all
+ * the information required to handle the AST when returning to
+ * userspace.
+ */
+ thread->guard_exc_info.type = type;
+ thread_ast_set(thread, AST_GUARD);
+ ast_propagate(thread->ast);
+
+ splx(s);
+}
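+
+/*
+ * Caller sketch (hedged): a subsystem that detects, for example, a guarded
+ * mach port violation on the current thread records whatever exception
+ * codes it needs and then arms the AST:
+ *
+ *     thread_guard_violation(current_thread(), GUARD_TYPE_MACH_PORT);
+ *
+ * The AST is serviced by guard_ast() below on the way back to userspace.
+ */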
+
+/*
+ * guard_ast:
+ *
+ * Handle AST_GUARD for a thread. This routine looks at the
+ * state saved in the thread structure to determine the cause
+ * of this exception. Based on this value, it invokes the
+ * appropriate routine which determines other exception related
+ * info and raises the exception.
+ */
+void
+guard_ast(thread_t thread)
+{
+ if (thread->guard_exc_info.type == GUARD_TYPE_MACH_PORT)
+ mach_port_guard_ast(thread);
+ else
+ fd_guard_ast(thread);
+}
+
+static void
+thread_cputime_callback(int warning, __unused const void *arg0, __unused const void *arg1)
+{
+ if (warning == LEDGER_WARNING_ROSE_ABOVE) {
+#if CONFIG_TELEMETRY
+ /*
+ * This thread is in danger of violating the CPU usage monitor. Enable telemetry
+ * on the entire task so there are micro-stackshots available if and when
+ * EXC_RESOURCE is triggered. We could have chosen to enable micro-stackshots
+ * for this thread only, but now that this task is suspect, knowing what all of
+ * its threads are up to will be useful.
+ */
+ telemetry_task_ctl(current_task(), TF_CPUMON_WARNING, 1);
+#endif
+ return;
+ }
+
+#if CONFIG_TELEMETRY
+ /*
+ * If the balance has dipped below the warning level (LEDGER_WARNING_DIPPED_BELOW) or
+ * exceeded the limit, turn telemetry off for the task.
+ */
+ telemetry_task_ctl(current_task(), TF_CPUMON_WARNING, 0);
+#endif
+
+ /* A warning level of zero means the ledger limit itself was hit. */
+ if (warning == 0) {
+ THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU__SENDING_EXC_RESOURCE();
+ }
+}
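+
+/*
+ * Note (assumption): this callback is registered against the per-thread
+ * cpu_time ledger entry when the thread ledger template is set up,
+ * along the lines of:
+ *
+ *     ledger_set_callback(template, thread_ledgers.cpu_time,
+ *         thread_cputime_callback, NULL, NULL);
+ *
+ * This is a sketch only; the actual registration site is elsewhere.
+ */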
+
+void __attribute__((noinline))
+THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU__SENDING_EXC_RESOURCE(void)
+{
+ int pid = 0;
+ task_t task = current_task();
+ thread_t thread = current_thread();
+ uint64_t tid = thread->thread_id;
+ const char *procname = "unknown";
+ time_value_t thread_total_time = {0, 0};
+ time_value_t thread_system_time;
+ time_value_t thread_user_time;
+ int action;
+ uint8_t percentage;
+ uint32_t limit_percent;
+ uint32_t usage_percent;
+ uint32_t interval_sec;
+ uint64_t interval_ns;
+ uint64_t balance_ns;
+ boolean_t fatal = FALSE;
+
+ mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
+ struct ledger_entry_info lei;
+
+ assert(thread->t_threadledger != LEDGER_NULL);
+
+ /*
+ * Now that a thread has tripped the monitor, disable it for the entire task.
+ */
+ task_lock(task);
+ if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) == 0) {
+ /*
+ * The CPU usage monitor has been disabled on our task, so some other
+ * thread must have gotten here first. We only send one exception per
+ * task lifetime, so there's nothing left for us to do here.
+ */
+ task_unlock(task);
+ return;
+ }
+ if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_CPUMON) {
+ fatal = TRUE;
+ }
+ task_disable_cpumon(task);
+ task_unlock(task);
+
+#ifdef MACH_BSD
+ pid = proc_selfpid();
+ if (task->bsd_info != NULL)
+ procname = proc_name_address(task->bsd_info);
+#endif
+
+ thread_get_cpulimit(&action, &percentage, &interval_ns);
+
+ interval_sec = (uint32_t)(interval_ns / NSEC_PER_SEC);
+
+ thread_read_times(thread, &thread_user_time, &thread_system_time);
+ time_value_add(&thread_total_time, &thread_user_time);
+ time_value_add(&thread_total_time, &thread_system_time);
+
+ ledger_get_entry_info(thread->t_threadledger, thread_ledgers.cpu_time, &lei);
+
+ absolutetime_to_nanoseconds(lei.lei_balance, &balance_ns);
+ usage_percent = (uint32_t) ((balance_ns * 100ULL) / lei.lei_last_refill);
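+ /*
+ * Example: with a refill period of 180s (180e9 ns), a balance of
+ * 99e9 ns of CPU time yields usage_percent = 55.
+ */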