#include <kperf/thread_samplers.h>
#include <kperf/ast.h>
+#if MONOTONIC
+#include <kern/monotonic.h>
+#include <machine/monotonic.h>
+#endif /* MONOTONIC */
+
extern boolean_t stackshot_thread_is_idle_worker_unsafe(thread_t thread);
/*
kperf_state |= KPERF_TI_IDLE;
}
+#if !CONFIG_EMBEDDED
/* on desktop, if state is blank, leave the not-idle bit set */
if (kperf_state == 0) {
return (TH_IDLE << 16);
}
+#endif /* !CONFIG_EMBEDDED */
/* high two bytes are inverted mask, low two bytes are normal */
return (((~kperf_state & 0xffff) << 16) | (kperf_state & 0xffff));
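/*
 * A hedged consumer-side sketch for the word built above (the helper
 * names are illustrative, not from this file): a bit set in the low
 * half means the state flag was observed set, and a bit set in the
 * high half means it was observed clear, so a flag such as
 * KPERF_TI_IDLE can be tested against either half.
 */
static inline bool
kperf_state_known_set(uint32_t state_word, uint16_t flag)
{
    return (state_word & flag) != 0;
}

static inline bool
kperf_state_known_clear(uint32_t state_word, uint16_t flag)
{
    return ((state_word >> 16) & flag) != 0;
}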
BUF_INFO(PERF_TI_SCHEDSAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread));
- thsc->kpthsc_user_time = timer_grab(&(thread->user_timer));
- uint64_t system_time = timer_grab(&(thread->system_timer));
+ thsc->kpthsc_user_time = timer_grab(&thread->user_timer);
+ uint64_t system_time = timer_grab(&thread->system_timer);
if (thread->precise_user_kernel_time) {
thsc->kpthsc_system_time = system_time;
} else {
/* without precise timing, attribute all time to user time */
thsc->kpthsc_user_time += system_time;
thsc->kpthsc_system_time = 0;
}
+ thsc->kpthsc_runnable_time = timer_grab(&thread->runnable_timer);
thsc->kpthsc_state = thread->state;
thsc->kpthsc_base_priority = thread->base_pri;
thsc->kpthsc_sched_priority = thread->sched_pri;
thsc->kpthsc_effective_qos = thread->effective_policy.thep_qos;
thsc->kpthsc_requested_qos = thread->requested_policy.thrp_qos;
- thsc->kpthsc_requested_qos_override = thread->requested_policy.thrp_qos_override;
+ thsc->kpthsc_requested_qos_override = MAX(thread->requested_policy.thrp_qos_override,
+ thread->requested_policy.thrp_qos_workq_override);
+ thsc->kpthsc_requested_qos_promote = thread->requested_policy.thrp_qos_promote;
+ thsc->kpthsc_requested_qos_ipc_override = thread->requested_policy.thrp_qos_ipc_override;
+ thsc->kpthsc_requested_qos_sync_ipc_override = thread->requested_policy.thrp_qos_sync_ipc_override;
thsc->kpthsc_effective_latency_qos = thread->effective_policy.thep_latency_qos;
BUF_INFO(PERF_TI_SCHEDSAMPLE | DBG_FUNC_END);
void
kperf_thread_scheduling_log(struct kperf_thread_scheduling *thsc)
{
assert(thsc != NULL);
#if defined(__LP64__)
- BUF_DATA(PERF_TI_SCHEDDATA, thsc->kpthsc_user_time,
- thsc->kpthsc_system_time,
- (((uint64_t)thsc->kpthsc_base_priority) << 48)
- | ((uint64_t)thsc->kpthsc_sched_priority << 32)
- | ((uint64_t)(thsc->kpthsc_state & 0xff) << 24)
- | (thsc->kpthsc_effective_qos << 6)
- | (thsc->kpthsc_requested_qos << 3)
- | thsc->kpthsc_requested_qos_override,
- ((uint64_t)thsc->kpthsc_effective_latency_qos << 61));
+ BUF_DATA(PERF_TI_SCHEDDATA_2, thsc->kpthsc_user_time,
+ thsc->kpthsc_system_time,
+ (((uint64_t)thsc->kpthsc_base_priority) << 48)
+ | ((uint64_t)thsc->kpthsc_sched_priority << 32)
+ | ((uint64_t)(thsc->kpthsc_state & 0xff) << 24)
+ | (thsc->kpthsc_effective_qos << 6)
+ | (thsc->kpthsc_requested_qos << 3)
+ | thsc->kpthsc_requested_qos_override,
+ ((uint64_t)thsc->kpthsc_effective_latency_qos << 61)
+ | ((uint64_t)thsc->kpthsc_requested_qos_promote << 58)
+ | ((uint64_t)thsc->kpthsc_requested_qos_ipc_override << 55)
+ | ((uint64_t)thsc->kpthsc_requested_qos_sync_ipc_override << 52)
+ );
+ BUF_DATA(PERF_TI_SCHEDDATA_3, thsc->kpthsc_runnable_time);
#else
BUF_DATA(PERF_TI_SCHEDDATA1_32, UPPER_32(thsc->kpthsc_user_time),
- LOWER_32(thsc->kpthsc_user_time),
- UPPER_32(thsc->kpthsc_system_time),
- LOWER_32(thsc->kpthsc_system_time));
- BUF_DATA(PERF_TI_SCHEDDATA2_32, (((uint32_t)thsc->kpthsc_base_priority) << 16)
- | thsc->kpthsc_sched_priority,
- ((thsc->kpthsc_state & 0xff) << 24)
- | (thsc->kpthsc_effective_qos << 6)
- | (thsc->kpthsc_requested_qos << 3)
- | thsc->kpthsc_requested_qos_override,
- (uint32_t)thsc->kpthsc_effective_latency_qos << 29);
+ LOWER_32(thsc->kpthsc_user_time),
+ UPPER_32(thsc->kpthsc_system_time),
+ LOWER_32(thsc->kpthsc_system_time)
+ );
+ BUF_DATA(PERF_TI_SCHEDDATA2_32_2, (((uint32_t)thsc->kpthsc_base_priority) << 16)
+ | thsc->kpthsc_sched_priority,
+ ((thsc->kpthsc_state & 0xff) << 24)
+ | (thsc->kpthsc_effective_qos << 6)
+ | (thsc->kpthsc_requested_qos << 3)
+ | thsc->kpthsc_requested_qos_override,
+ ((uint32_t)thsc->kpthsc_effective_latency_qos << 29)
+ | ((uint32_t)thsc->kpthsc_requested_qos_promote << 26)
+ | ((uint32_t)thsc->kpthsc_requested_qos_ipc_override << 23)
+ | ((uint32_t)thsc->kpthsc_requested_qos_sync_ipc_override << 20)
+ );
+ BUF_DATA(PERF_TI_SCHEDDATA3_32, UPPER_32(thsc->kpthsc_runnable_time),
+ LOWER_32(thsc->kpthsc_runnable_time));
#endif /* defined(__LP64__) */
}
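/*
 * A minimal consumer-side sketch of unpacking the two packed arguments
 * emitted by PERF_TI_SCHEDDATA_2 in the LP64 path above; the struct and
 * function names are assumptions for illustration, and the shifts mirror
 * the packing exactly.
 */
struct kperf_sched_decoded {
    uint16_t base_priority;                  /* arg3 bits 63:48 */
    uint16_t sched_priority;                 /* arg3 bits 47:32 */
    uint8_t  state;                          /* arg3 bits 31:24 */
    uint8_t  effective_qos;                  /* arg3 bits 8:6 */
    uint8_t  requested_qos;                  /* arg3 bits 5:3 */
    uint8_t  requested_qos_override;         /* arg3 bits 2:0 */
    uint8_t  effective_latency_qos;          /* arg4 bits 63:61 */
    uint8_t  requested_qos_promote;          /* arg4 bits 60:58 */
    uint8_t  requested_qos_ipc_override;     /* arg4 bits 57:55 */
    uint8_t  requested_qos_sync_ipc_override; /* arg4 bits 54:52 */
};

static void
kperf_sched_decode(uint64_t arg3, uint64_t arg4, struct kperf_sched_decoded *out)
{
    out->base_priority = (uint16_t)(arg3 >> 48);
    out->sched_priority = (uint16_t)(arg3 >> 32);
    out->state = (uint8_t)(arg3 >> 24);
    out->effective_qos = (arg3 >> 6) & 0x7;
    out->requested_qos = (arg3 >> 3) & 0x7;
    out->requested_qos_override = arg3 & 0x7;
    out->effective_latency_qos = (arg4 >> 61) & 0x7;
    out->requested_qos_promote = (arg4 >> 58) & 0x7;
    out->requested_qos_ipc_override = (arg4 >> 55) & 0x7;
    out->requested_qos_sync_ipc_override = (arg4 >> 52) & 0x7;
}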
BUF_INFO(PERF_TI_DISPSAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread));
task_t task = thread->task;
- boolean_t task_64 = task_has_64BitAddr(task);
+ boolean_t task_64 = task_has_64Bit_addr(task);
size_t user_addr_size = task_64 ? 8 : 4;
assert(thread->task != kernel_task);
LOWER_32(thdi->kpthdi_dq_serialno));
#endif /* defined(__LP64__) */
}
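/*
 * On 32-bit builds, 64-bit values such as kpthdi_dq_serialno are split
 * across two tracepoint arguments with the UPPER_32/LOWER_32 macros; a
 * hedged sketch of the consumer-side inverse (the function name is an
 * assumption for illustration):
 */
static uint64_t
kperf_rejoin_32(uint32_t upper, uint32_t lower)
{
    return ((uint64_t)upper << 32) | lower;
}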
+
+/*
+ * A bit different from other samplers -- since logging disables interrupts,
+ * it's a fine place to sample the thread counters.
+ */
+void
+kperf_thread_inscyc_log(struct kperf_context *context)
+{
+#if MONOTONIC
+ thread_t cur_thread = current_thread();
+
+ if (context->cur_thread != cur_thread) {
+ /* can't safely access another thread's counters */
+ return;
+ }
+
+ uint64_t counts[MT_CORE_NFIXED];
+
+ int ret = mt_fixed_thread_counts(cur_thread, counts);
+ if (ret) {
+ return;
+ }
+
+#if defined(__LP64__)
+ BUF_DATA(PERF_TI_INSCYCDATA, counts[MT_CORE_INSTRS], counts[MT_CORE_CYCLES]);
+#else /* defined(__LP64__) */
+ /* 32-bit platforms don't count instructions */
+ BUF_DATA(PERF_TI_INSCYCDATA_32, 0, 0, UPPER_32(counts[MT_CORE_CYCLES]),
+ LOWER_32(counts[MT_CORE_CYCLES]));
+#endif /* defined(__LP64__) */
+
+#else
+#pragma unused(context)
+#endif /* MONOTONIC */
+}
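/*
 * A hypothetical caller sketch (not part of this change): because the
 * sampler bails out unless it runs on the thread named in the context,
 * counts are only emitted for self-samples.
 */
static void
kperf_sample_own_inscyc(void)
{
    struct kperf_context ctx = {
        .cur_thread = current_thread(), /* must match for counts to log */
    };
    kperf_thread_inscyc_log(&ctx);
}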