+ kperf_sample(pet_sample, &ctx, pet_action_id, SAMPLE_FLAG_TASK_ONLY);
+ }
+
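+ /* nothing more to do if the action takes no thread samples */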
+ if (!kperf_action_has_thread(pet_action_id)) {
+ BUF_VERB(PERF_PET_SAMPLE_TASK | DBG_FUNC_END);
+ return;
+ }
+
+ kern_return_t kr = KERN_SUCCESS;
+
+ /*
+ * Suspend the task to see an atomic snapshot of all its threads. This
+ * is expensive and disruptive.
+ */
+ bool needs_suspend = task != kernel_task;
+ if (needs_suspend) {
+ kr = task_suspend_internal(task);
+ if (kr != KERN_SUCCESS) {
+ BUF_VERB(PERF_PET_SAMPLE_TASK | DBG_FUNC_END, 1);
+ return;
+ }
+ }
+
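+ /*
+ * Snapshot the task's threads into pet_threads, taking a reference on
+ * each; the references are dropped after sampling, below.
+ */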
+ kr = pet_threads_prepare(task);
+ if (kr != KERN_SUCCESS) {
+ BUF_INFO(PERF_PET_ERROR, ERR_THREAD, kr);
+ goto out;
+ }
+
+ for (unsigned int i = 0; i < pet_threads_count; i++) {
+ thread_t thread = pet_threads[i];
+ assert(thread != THREAD_NULL);
+
+ /*
+ * Do not sample the thread if it was on a CPU when the timer fired.
+ */
+ int cpu = 0;
+ for (cpu = 0; cpu < machine_info.logical_cpu_max; cpu++) {
+ if (kperf_tid_on_cpus[cpu] == thread_tid(thread)) {
+ break;
+ }
+ }
+
+ /* the thread was not on a CPU */
+ if (cpu == machine_info.logical_cpu_max) {
+ pet_sample_thread(pid, task, thread, idle_rate);
+ }
+
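+ /* drop the reference taken when the threads were prepared */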
+ thread_deallocate(pet_threads[i]);
+ }
+
+out:
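+ /* only resume the task if it was suspended above */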
+ if (needs_suspend) {
+ task_resume_internal(task);
+ }
+
+ BUF_VERB(PERF_PET_SAMPLE_TASK | DBG_FUNC_END, pet_threads_count);
+}
+
+static kern_return_t
+pet_tasks_prepare_internal(void)
+{
+ lck_mtx_assert(pet_lock, LCK_MTX_ASSERT_OWNED);
+
+ vm_size_t tasks_size_needed = 0;
+
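+ /*
+ * tasks_count can change whenever tasks_threads_lock is dropped, so
+ * loop until the buffer is large enough while holding the lock.
+ */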
+ for (;;) {
+ lck_mtx_lock(&tasks_threads_lock);
+
+ /* do we have the memory we need? */
+ tasks_size_needed = tasks_count * sizeof(task_t);
+ if (tasks_size_needed <= pet_tasks_size) {
+ break;
+ }
+
+ /* unlock and allocate more memory */
+ lck_mtx_unlock(&tasks_threads_lock);
+
+ /* grow task array */
+ if (tasks_size_needed > pet_tasks_size) {
+ if (pet_tasks_size != 0) {
+ kfree(pet_tasks, pet_tasks_size);
+ }
+
+ assert(tasks_size_needed > 0);
+ pet_tasks_size = tasks_size_needed;