*/
spin:
start_gen = atomic_load_explicit(&thread->t_monotonic.mth_gen,
- memory_order_acquire);
+ memory_order_acquire);
retry:
if (start_gen & 1) {
spins++;
* again.
*/
end_gen = atomic_load_explicit(&thread->t_monotonic.mth_gen,
- memory_order_acquire);
+ memory_order_acquire);
if (end_gen != start_gen) {
retries++;
if (retries > MAXRETRIES) {
* even.
*/
__assert_only uint64_t enter_gen = atomic_fetch_add_explicit(
- &thread->t_monotonic.mth_gen, 1, memory_order_release);
+ &thread->t_monotonic.mth_gen, 1, memory_order_release);
/*
* Should not have pre-empted a modification to the counts.
*/
* before and after reading don't match.
*/
__assert_only uint64_t exit_gen = atomic_fetch_add_explicit(
- &thread->t_monotonic.mth_gen, 1, memory_order_release);
+ &thread->t_monotonic.mth_gen, 1, memory_order_release);
/*
* Make sure no other writers came through behind us.
*/
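The gen-count handshake above is a seqlock-style protocol: the writer makes mth_gen odd before touching the per-thread counts and even again afterwards, while readers spin as long as the value is odd and redrive the read if the value changed underneath them. Below is a minimal user-space sketch of the same idea using C11 <stdatomic.h>; the names (struct snap, snap_read, snap_write, NCOUNTS) are illustrative, not XNU interfaces, and the memory orderings mirror the acquire loads and release increments visible in the diff.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NCOUNTS 2

struct snap {
	_Atomic uint64_t gen;       /* odd while an update is in progress */
	uint64_t counts[NCOUNTS];
};

/* Writer: make gen odd, update the counts, make gen even again. */
static void
snap_write(struct snap *s, const uint64_t *new_counts)
{
	uint64_t enter_gen = atomic_fetch_add_explicit(&s->gen, 1,
	    memory_order_release);
	(void)enter_gen;            /* the kernel asserts this was even */

	for (int i = 0; i < NCOUNTS; i++) {
		s->counts[i] = new_counts[i];
	}

	uint64_t exit_gen = atomic_fetch_add_explicit(&s->gen, 1,
	    memory_order_release);
	(void)exit_gen;             /* the kernel checks no other writer intervened */
}

/* Reader: spin while gen is odd, retry if gen changed across the copy. */
static void
snap_read(struct snap *s, uint64_t *out)
{
	uint64_t start_gen, end_gen;

	do {
		do {
			start_gen = atomic_load_explicit(&s->gen,
			    memory_order_acquire);
		} while (start_gen & 1);

		for (int i = 0; i < NCOUNTS; i++) {
			out[i] = s->counts[i];
		}

		end_gen = atomic_load_explicit(&s->gen,
		    memory_order_acquire);
	} while (end_gen != start_gen);
}

int
main(void)
{
	struct snap s = { .gen = 0, .counts = { 0, 0 } };
	uint64_t in[NCOUNTS] = { 100, 200 };
	uint64_t out[NCOUNTS];

	snap_write(&s, in);
	snap_read(&s, out);
	printf("%llu %llu\n", (unsigned long long)out[0],
	    (unsigned long long)out[1]);
	return 0;
}

The kernel version additionally bounds the spin and retry counts (the spins++/retries++ and MAXRETRIES checks in the hunk) rather than looping forever, which this sketch omits.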
KDBG_RELEASE(MT_KDBG_IC_CPU_CSWITCH,
#ifdef MT_CORE_INSTRS
- mtc->mtc_counts[MT_CORE_INSTRS],
+ mtc->mtc_counts[MT_CORE_INSTRS],
#else /* defined(MT_CORE_INSTRS) */
- 0,
+ 0,
#endif /* !defined(MT_CORE_INSTRS) */
- mtc->mtc_counts[MT_CORE_CYCLES]);
+ mtc->mtc_counts[MT_CORE_CYCLES]);
}
}
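This hunk appears to change only the indentation of the KDBG_RELEASE call, but the pattern it re-indents is worth noting: when MT_CORE_INSTRS is not compiled in, a literal 0 is passed in the instructions slot so the tracepoint's argument layout stays the same either way. A small stand-alone sketch of that preprocessor-fallback pattern, with a hypothetical emit_event() standing in for KDBG_RELEASE and HAVE_INSTRS_COUNTER standing in for MT_CORE_INSTRS:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for KDBG_RELEASE: prints instead of emitting a kdebug event. */
static void
emit_event(uint64_t instrs, uint64_t cycles)
{
	printf("cswitch: instrs=%llu cycles=%llu\n",
	    (unsigned long long)instrs, (unsigned long long)cycles);
}

int
main(void)
{
	uint64_t counts[2] = { 12345, 67890 };

	emit_event(
#ifdef HAVE_INSTRS_COUNTER
	    counts[0],
#else  /* defined(HAVE_INSTRS_COUNTER) */
	    0,                      /* keep the argument slot so the layout is fixed */
#endif /* !defined(HAVE_INSTRS_COUNTER) */
	    counts[1]);
	return 0;
}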
assert(task != TASK_NULL);
assert(counts_out != NULL);
- uint64_t counts[MT_CORE_NFIXED];
if (!mt_core_supported) {
- for (int i = 0; i < MT_CORE_NFIXED; i++) {
- counts[i] = 0;
- }
- return 0;
+ memset(counts_out, 0, sizeof(*counts_out) * MT_CORE_NFIXED);
+ return 1;
}
task_lock(task);
+ uint64_t counts[MT_CORE_NFIXED] = { 0 };
for (int i = 0; i < MT_CORE_NFIXED; i++) {
counts[i] = task->task_monotonic.mtk_counts[i];
}
- uint64_t thread_counts[MT_CORE_NFIXED] = {};
+ uint64_t thread_counts[MT_CORE_NFIXED] = { 0 };
thread_t thread = THREAD_NULL;
thread_t curthread = current_thread();
bool needs_current = false;
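In mt_fixed_task_counts, the unsupported path previously zeroed a local counts[] array and returned without ever writing to counts_out; the change zero-fills the caller's buffer with memset, returns 1 for the unsupported case, and defers declaring the local array until after the early return. A user-space sketch of the corrected shape follows; NFIXED, counters_supported, and get_counts() are hypothetical stand-ins for MT_CORE_NFIXED, mt_core_supported, and the kernel routine.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NFIXED 2

static bool counters_supported = false;
static uint64_t source_counts[NFIXED] = { 111, 222 };

/* Zero-fills the caller's buffer and returns 1 when counters are unsupported. */
static int
get_counts(uint64_t *counts_out)
{
	if (!counters_supported) {
		/* Zero the caller's buffer, not a local copy of it. */
		memset(counts_out, 0, sizeof(*counts_out) * NFIXED);
		return 1;
	}

	/* Only declare the scratch array once it can actually be used. */
	uint64_t counts[NFIXED] = { 0 };
	for (int i = 0; i < NFIXED; i++) {
		counts[i] = source_counts[i];
	}
	memcpy(counts_out, counts, sizeof(counts));
	return 0;
}

int
main(void)
{
	uint64_t out[NFIXED];
	int unsupported = get_counts(out);

	printf("unsupported=%d out[0]=%llu out[1]=%llu\n", unsupported,
	    (unsigned long long)out[0], (unsigned long long)out[1]);
	return 0;
}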
if (snap < mtc->mtc_snaps[ctr]) {
if (mt_debug) {
kprintf("monotonic: cpu %d: thread %#llx: "
- "retrograde counter %u value: %llu, last read = %llu\n",
- cpu_number(), thread_tid(current_thread()), ctr, snap,
- mtc->mtc_snaps[ctr]);
+ "retrograde counter %u value: %llu, last read = %llu\n",
+ cpu_number(), thread_tid(current_thread()), ctr, snap,
+ mtc->mtc_snaps[ctr]);
}
(void)atomic_fetch_add_explicit(&mt_retrograde, 1,
- memory_order_relaxed);
+ memory_order_relaxed);
mtc->mtc_snaps[ctr] = snap;
return 0;
}
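The retrograde check above guards against a counter snapshot that reads lower than the previous one: it logs when mt_debug is set, bumps the mt_retrograde statistic with a relaxed atomic add, re-bases the stored snapshot on the new value, and returns zero rather than an underflowed delta. A stand-alone sketch of that guard, with update_count() and retrograde_events as illustrative names:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t retrograde_events;

/*
 * Return the delta since the last snapshot.  A backwards-moving counter is
 * counted as a retrograde event, re-bases the stored snapshot, and yields a
 * delta of zero instead of underflowing.
 */
static uint64_t
update_count(uint64_t *last_snap, uint64_t snap)
{
	if (snap < *last_snap) {
		atomic_fetch_add_explicit(&retrograde_events, 1,
		    memory_order_relaxed);
		*last_snap = snap;
		return 0;
	}

	uint64_t delta = snap - *last_snap;
	*last_snap = snap;
	return delta;
}

int
main(void)
{
	uint64_t last = 1000;

	printf("delta=%llu\n", (unsigned long long)update_count(&last, 1500));
	printf("delta=%llu (retrograde)\n",
	    (unsigned long long)update_count(&last, 900));
	printf("retrograde events=%llu\n",
	    (unsigned long long)atomic_load_explicit(&retrograde_events,
	    memory_order_relaxed));
	return 0;
}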
void
mt_mtc_update_fixed_counts(struct mt_cpu *mtc, uint64_t *counts,
- uint64_t *counts_since)
+ uint64_t *counts_since)
{
if (!mt_core_supported) {
return;
mt_cur_thread_fixed_counts(uint64_t *counts)
{
if (!mt_core_supported) {
- for (int i = 0; i < MT_CORE_NFIXED; i++) {
- counts[i] = 0;
- }
+ memset(counts, 0, sizeof(*counts) * MT_CORE_NFIXED);
return;
}
int
mt_microstackshot_start(unsigned int ctr, uint64_t period, mt_pmi_fn handler,
- void *ctx)
+ void *ctx)
{
assert(ctr < MT_CORE_NFIXED);
int error = mt_microstackshot_start_arch(period);
if (error) {
+ mt_microstackshot_ctr = 0;
+ mt_microstackshot_pmi_handler = NULL;
+ mt_microstackshot_ctx = NULL;
return error;
}
return 0;
}
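The last hunk makes mt_microstackshot_start roll back its global state when the arch-specific start fails: the counter index, PMI handler, and context are cleared before the error is returned, so a failed start leaves no stale configuration behind (the assignments that set those globals sit earlier in the function, outside this hunk). A user-space sketch of the set-then-roll-back-on-error pattern, with all names (microstackshot_start, start_arch, ms_*) hypothetical:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef void (*pmi_fn)(void *ctx);

static unsigned int ms_ctr;
static pmi_fn ms_handler;
static void *ms_ctx;

/* Stand-in for the arch-specific start routine; fails when period is 0. */
static int
start_arch(uint64_t period)
{
	return period == 0 ? -1 : 0;
}

static int
microstackshot_start(unsigned int ctr, uint64_t period, pmi_fn handler,
    void *ctx)
{
	/* Set the configuration up front; the rollback in the diff implies the kernel does the same. */
	ms_ctr = ctr;
	ms_handler = handler;
	ms_ctx = ctx;

	int error = start_arch(period);
	if (error) {
		/* Roll back so a failed start leaves no stale configuration. */
		ms_ctr = 0;
		ms_handler = NULL;
		ms_ctx = NULL;
		return error;
	}
	return 0;
}

static void
on_pmi(void *ctx)
{
	(void)ctx;
}

int
main(void)
{
	printf("start ok: error=%d\n",
	    microstackshot_start(1, 1000000, on_pmi, NULL));
	printf("start fail: error=%d, handler cleared=%d\n",
	    microstackshot_start(1, 0, on_pmi, NULL), ms_handler == NULL);
	return 0;
}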