kperf_state |= KPERF_TI_IDLE;
}
-#if !CONFIG_EMBEDDED
+#if defined(XNU_TARGET_OS_OSX)
/* on macOS, if the state is blank, report "not idle" via the inverted mask */
if (kperf_state == 0) {
- return (TH_IDLE << 16);
+ return TH_IDLE << 16;
}
-#endif /* !CONFIG_EMBEDDED */
+#endif /* defined(XNU_TARGET_OS_OSX) */
/* high two bytes are inverted mask, low two bytes are normal */
- return (((~kperf_state & 0xffff) << 16) | (kperf_state & 0xffff));
+ return ((~kperf_state & 0xffff) << 16) | (kperf_state & 0xffff);
}
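/*
 * Illustrative consumer-side decoder (not part of this change) for the
 * packed run mode returned above.  A bit set in the low half means the
 * flag was observed set; the same bit set in the high half means it was
 * observed clear; neither means "unknown".  Helper names are invented.
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool
kperf_runmode_flag_set(uint32_t runmode, uint16_t flag)
{
	/* low two bytes carry the normal (observed-set) mask */
	return (runmode & flag) != 0;
}

static inline bool
kperf_runmode_flag_clear(uint32_t runmode, uint16_t flag)
{
	/* high two bytes carry the inverted (observed-clear) mask */
	return (runmode & ((uint32_t)flag << 16)) != 0;
}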
void
kperf_thread_info_log(struct kperf_thread_info *ti)
{
BUF_DATA(PERF_TI_DATA, ti->kpthi_pid, ti->kpthi_tid /* K64-only */,
- ti->kpthi_dq_addr, ti->kpthi_runmode);
+ ti->kpthi_dq_addr, ti->kpthi_runmode);
}
void
kperf_thread_scheduling_sample(struct kperf_thread_scheduling *thsc,
- struct kperf_context *context)
+ struct kperf_context *context)
{
assert(thsc != NULL);
assert(context != NULL);
thsc->kpthsc_effective_qos = thread->effective_policy.thep_qos;
thsc->kpthsc_requested_qos = thread->requested_policy.thrp_qos;
thsc->kpthsc_requested_qos_override = MAX(thread->requested_policy.thrp_qos_override,
- thread->requested_policy.thrp_qos_workq_override);
+ thread->requested_policy.thrp_qos_workq_override);
thsc->kpthsc_requested_qos_promote = thread->requested_policy.thrp_qos_promote;
- thsc->kpthsc_requested_qos_ipc_override = thread->requested_policy.thrp_qos_ipc_override;
- thsc->kpthsc_requested_qos_sync_ipc_override = thread->requested_policy.thrp_qos_sync_ipc_override;
+ thsc->kpthsc_requested_qos_kevent_override = MAX(
+ thread->requested_policy.thrp_qos_kevent_override,
+ thread->requested_policy.thrp_qos_wlsvc_override);
+ thsc->kpthsc_requested_qos_sync_ipc_override = THREAD_QOS_UNSPECIFIED;
thsc->kpthsc_effective_latency_qos = thread->effective_policy.thep_latency_qos;
BUF_INFO(PERF_TI_SCHEDSAMPLE | DBG_FUNC_END);
assert(thsc != NULL);
#if defined(__LP64__)
BUF_DATA(PERF_TI_SCHEDDATA_2, thsc->kpthsc_user_time,
- thsc->kpthsc_system_time,
- (((uint64_t)thsc->kpthsc_base_priority) << 48)
- | ((uint64_t)thsc->kpthsc_sched_priority << 32)
- | ((uint64_t)(thsc->kpthsc_state & 0xff) << 24)
- | (thsc->kpthsc_effective_qos << 6)
- | (thsc->kpthsc_requested_qos << 3)
- | thsc->kpthsc_requested_qos_override,
- ((uint64_t)thsc->kpthsc_effective_latency_qos << 61)
- | ((uint64_t)thsc->kpthsc_requested_qos_promote << 58)
- | ((uint64_t)thsc->kpthsc_requested_qos_ipc_override << 55)
- | ((uint64_t)thsc->kpthsc_requested_qos_sync_ipc_override << 52)
- );
+ thsc->kpthsc_system_time,
+ (((uint64_t)thsc->kpthsc_base_priority) << 48)
+ | ((uint64_t)thsc->kpthsc_sched_priority << 32)
+ | ((uint64_t)(thsc->kpthsc_state & 0xff) << 24)
+ | (thsc->kpthsc_effective_qos << 6)
+ | (thsc->kpthsc_requested_qos << 3)
+ | thsc->kpthsc_requested_qos_override,
+ ((uint64_t)thsc->kpthsc_effective_latency_qos << 61)
+ | ((uint64_t)thsc->kpthsc_requested_qos_promote << 58)
+ | ((uint64_t)thsc->kpthsc_requested_qos_kevent_override << 55)
+ );
BUF_DATA(PERF_TI_SCHEDDATA_3, thsc->kpthsc_runnable_time);
#else
BUF_DATA(PERF_TI_SCHEDDATA1_32, UPPER_32(thsc->kpthsc_user_time),
- LOWER_32(thsc->kpthsc_user_time),
- UPPER_32(thsc->kpthsc_system_time),
- LOWER_32(thsc->kpthsc_system_time)
- );
+ LOWER_32(thsc->kpthsc_user_time),
+ UPPER_32(thsc->kpthsc_system_time),
+ LOWER_32(thsc->kpthsc_system_time)
+ );
BUF_DATA(PERF_TI_SCHEDDATA2_32_2, (((uint32_t)thsc->kpthsc_base_priority) << 16)
- | thsc->kpthsc_sched_priority,
- ((thsc->kpthsc_state & 0xff) << 24)
- | (thsc->kpthsc_effective_qos << 6)
- | (thsc->kpthsc_requested_qos << 3)
- | thsc->kpthsc_requested_qos_override,
- ((uint32_t)thsc->kpthsc_effective_latency_qos << 29)
- | ((uint32_t)thsc->kpthsc_requested_qos_promote << 26)
- | ((uint32_t)thsc->kpthsc_requested_qos_ipc_override << 23)
- | ((uint32_t)thsc->kpthsc_requested_qos_sync_ipc_override << 20)
- );
+ | thsc->kpthsc_sched_priority,
+ ((thsc->kpthsc_state & 0xff) << 24)
+ | (thsc->kpthsc_effective_qos << 6)
+ | (thsc->kpthsc_requested_qos << 3)
+ | thsc->kpthsc_requested_qos_override,
+ ((uint32_t)thsc->kpthsc_effective_latency_qos << 29)
+ | ((uint32_t)thsc->kpthsc_requested_qos_promote << 26)
+ | ((uint32_t)thsc->kpthsc_requested_qos_kevent_override << 23)
+ );
BUF_DATA(PERF_TI_SCHEDDATA3_32, UPPER_32(thsc->kpthsc_runnable_time),
- LOWER_32(thsc->kpthsc_runnable_time));
+ LOWER_32(thsc->kpthsc_runnable_time));
#endif /* defined(__LP64__) */
}
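/*
 * Illustrative decoder (not part of this change) for the packed third
 * argument of the PERF_TI_SCHEDDATA_2 event above; field positions
 * follow the shifts in the 64-bit path, but the struct and function
 * names here are invented for illustration.
 */
struct sched_data2 {
	uint16_t base_priority;  /* bits 48..63 */
	uint16_t sched_priority; /* bits 32..47 */
	uint8_t  state;          /* bits 24..31 */
	uint8_t  effective_qos;  /* bits 6..8 */
	uint8_t  requested_qos;  /* bits 3..5 */
	uint8_t  qos_override;   /* bits 0..2 */
};

static struct sched_data2
sched_data2_decode(uint64_t packed)
{
	struct sched_data2 d = {
		.base_priority  = (uint16_t)(packed >> 48),
		.sched_priority = (uint16_t)(packed >> 32),
		.state          = (uint8_t)(packed >> 24),
		.effective_qos  = (uint8_t)((packed >> 6) & 0x7),
		.requested_qos  = (uint8_t)((packed >> 3) & 0x7),
		.qos_override   = (uint8_t)(packed & 0x7),
	};
	return d;
}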
void
kperf_thread_snapshot_sample(struct kperf_thread_snapshot *thsn,
- struct kperf_context *context)
+ struct kperf_context *context)
{
assert(thsn != NULL);
assert(context != NULL);
}
thsn->kpthsn_suspend_count = thread->suspend_count;
- thsn->kpthsn_io_tier = proc_get_effective_thread_policy(thread, TASK_POLICY_IO);
+ /*
+ * Only have room for 8-bits in the trace event, so truncate here.
+ */
+ thsn->kpthsn_io_tier = (uint8_t)proc_get_effective_thread_policy(thread, TASK_POLICY_IO);
BUF_VERB(PERF_TI_SNAPSAMPLE | DBG_FUNC_END);
}
assert(thsn != NULL);
#if defined(__LP64__)
BUF_DATA(PERF_TI_SNAPDATA, thsn->kpthsn_flags | ((uint32_t)(thsn->kpthsn_suspend_count) << 8)
- | (thsn->kpthsn_io_tier << 24),
- thsn->kpthsn_last_made_runnable_time);
+ | (thsn->kpthsn_io_tier << 24),
+ thsn->kpthsn_last_made_runnable_time);
#else
BUF_DATA(PERF_TI_SNAPDATA_32, thsn->kpthsn_flags | ((uint32_t)(thsn->kpthsn_suspend_count) << 8)
- | (thsn->kpthsn_io_tier << 24),
- UPPER_32(thsn->kpthsn_last_made_runnable_time),
- LOWER_32(thsn->kpthsn_last_made_runnable_time));
+ | (thsn->kpthsn_io_tier << 24),
+ UPPER_32(thsn->kpthsn_last_made_runnable_time),
+ LOWER_32(thsn->kpthsn_last_made_runnable_time));
#endif /* defined(__LP64__) */
}
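/*
 * Illustrative decoder (not part of this change) for the first argument
 * of the PERF_TI_SNAPDATA event above, mirroring the OR/shift packing;
 * the function and parameter names are invented for illustration.
 */
static void
snapdata_decode(uint32_t packed, uint8_t *flags, uint16_t *suspend_count,
    uint8_t *io_tier)
{
	*flags = (uint8_t)packed;                 /* bits 0..7 */
	*suspend_count = (uint16_t)(packed >> 8); /* bits 8..23 */
	*io_tier = (uint8_t)(packed >> 24);       /* bits 24..31 */
}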
void
kperf_thread_dispatch_sample(struct kperf_thread_dispatch *thdi,
- struct kperf_context *context)
+ struct kperf_context *context)
{
assert(thdi != NULL);
assert(context != NULL);
uint64_t user_dq_addr;
if ((copyin((user_addr_t)user_dq_key_addr,
- (char *)&user_dq_addr,
- user_addr_size) != 0) ||
- (user_dq_addr == 0))
- {
+ (char *)&user_dq_addr,
+ user_addr_size) != 0) ||
+ (user_dq_addr == 0)) {
goto error;
}
uint64_t user_dq_serialno_addr =
- user_dq_addr + get_task_dispatchqueue_serialno_offset(task);
+ user_dq_addr + get_task_dispatchqueue_serialno_offset(task);
if (copyin((user_addr_t)user_dq_serialno_addr,
- (char *)&(thdi->kpthdi_dq_serialno),
- user_addr_size) == 0)
- {
+ (char *)&(thdi->kpthdi_dq_serialno),
+ user_addr_size) == 0) {
goto out;
}
}
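/*
 * Userspace analogue (illustration only) of the two copyin() steps
 * above: the thread's TSD slot holds a pointer to its dispatch queue,
 * and the queue's serial number sits at a per-task offset from that
 * pointer.  The struct layout and names here are invented.
 */
#include <stddef.h>
#include <string.h>

struct fake_queue {
	char     opaque[16]; /* stand-in for fields before the serialno */
	uint64_t serialno;
};

static uint64_t
walk_queue_serialno(const uint64_t *tsd_slot, size_t serialno_offset)
{
	/* step 1: the TSD slot holds the queue's base address */
	const char *dq = (const char *)(uintptr_t)*tsd_slot;
	/* step 2: the serial number lives at the task-provided offset */
	uint64_t serialno;
	memcpy(&serialno, dq + serialno_offset, sizeof(serialno));
	return serialno;
}

/*
 * e.g.: struct fake_queue q = { .serialno = 42 };
 *       uint64_t slot = (uint64_t)(uintptr_t)&q;
 *       walk_queue_serialno(&slot, offsetof(struct fake_queue, serialno));
 */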
int
-kperf_thread_dispatch_pend(struct kperf_context *context)
+kperf_thread_dispatch_pend(struct kperf_context *context,
+ unsigned int actionid)
{
- return kperf_ast_pend(context->cur_thread, T_KPERF_AST_DISPATCH);
+ return kperf_ast_pend(context->cur_thread, T_KPERF_AST_DISPATCH,
+ actionid);
}
void
BUF_DATA(PERF_TI_DISPDATA, thdi->kpthdi_dq_serialno);
#else
BUF_DATA(PERF_TI_DISPDATA_32, UPPER_32(thdi->kpthdi_dq_serialno),
- LOWER_32(thdi->kpthdi_dq_serialno));
+ LOWER_32(thdi->kpthdi_dq_serialno));
#endif /* defined(__LP64__) */
}
return;
}
- uint64_t counts[MT_CORE_NFIXED];
-
- int ret = mt_fixed_thread_counts(cur_thread, counts);
- if (ret) {
- return;
- }
+ uint64_t counts[MT_CORE_NFIXED] = { 0 };
+ mt_cur_thread_fixed_counts(counts);
#if defined(__LP64__)
BUF_DATA(PERF_TI_INSCYCDATA, counts[MT_CORE_INSTRS], counts[MT_CORE_CYCLES]);
#else /* defined(__LP64__) */
/* 32-bit platforms don't count instructions */
BUF_DATA(PERF_TI_INSCYCDATA_32, 0, 0, UPPER_32(counts[MT_CORE_CYCLES]),
- LOWER_32(counts[MT_CORE_CYCLES]));
+ LOWER_32(counts[MT_CORE_CYCLES]));
#endif /* !defined(__LP64__) */
#else
#pragma unused(context)
#endif /* MONOTONIC */
-
}
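/*
 * Illustrative use of the fixed-counter snapshot emitted above (assumes
 * the MT_CORE_* indices shown): derive instructions-per-cycle, scaled
 * by 100 to stay in integer arithmetic.  The helper name is invented.
 */
static uint64_t
counts_ipc_x100(const uint64_t counts[MT_CORE_NFIXED])
{
	if (counts[MT_CORE_CYCLES] == 0) {
		return 0;
	}
	return (counts[MT_CORE_INSTRS] * 100) / counts[MT_CORE_CYCLES];
}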