diff --git a/osfmk/kern/kern_monotonic.c b/osfmk/kern/kern_monotonic.c
index 0c9d825e93be769e61f4fa5ab60d35e45877c503..9837913e1175ad45288b17dcfa781dba4e6a82c2 100644
--- a/osfmk/kern/kern_monotonic.c
+++ b/osfmk/kern/kern_monotonic.c
@@ -83,7 +83,7 @@ mt_fixed_thread_counts(thread_t thread, uint64_t *counts_out)
         */
 spin:
        start_gen = atomic_load_explicit(&thread->t_monotonic.mth_gen,
-                       memory_order_acquire);
+           memory_order_acquire);
 retry:
        if (start_gen & 1) {
                spins++;
@@ -107,7 +107,7 @@ retry:
         * again.
         */
        end_gen = atomic_load_explicit(&thread->t_monotonic.mth_gen,
-                       memory_order_acquire);
+           memory_order_acquire);
        if (end_gen != start_gen) {
                retries++;
                if (retries > MAXRETRIES) {
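The two hunks above only re-indent continuation lines, but the code they touch is the reader half of a seqlock-style protocol: mth_gen is a generation counter that a writer holds odd while it updates the per-thread counts, so a reader's snapshot is valid only if the generation was even before the copy and unchanged after it. A minimal sketch of that reader pattern, with a hypothetical struct seq_counts standing in for thread->t_monotonic (the kernel tracks spins and retries under separate MAXSPINS/MAXRETRIES budgets; this sketch folds them into one):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct seq_counts {
    _Atomic uint64_t gen;   /* odd while a writer is mid-update */
    uint64_t counts[2];     /* the data being published */
};

static bool
seq_counts_read(struct seq_counts *sc, uint64_t *out, int maxretries)
{
    for (int retries = 0; retries <= maxretries; retries++) {
        uint64_t start_gen = atomic_load_explicit(&sc->gen,
            memory_order_acquire);
        if (start_gen & 1) {
            continue;       /* writer active: spin and reload */
        }
        out[0] = sc->counts[0];
        out[1] = sc->counts[1];
        uint64_t end_gen = atomic_load_explicit(&sc->gen,
            memory_order_acquire);
        if (end_gen == start_gen) {
            return true;    /* generation unchanged: snapshot is consistent */
        }
    }
    return false;           /* mirrors the MAXRETRIES bailout above */
}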
@@ -147,7 +147,7 @@ mt_update_thread(thread_t thread)
         * even.
         */
        __assert_only uint64_t enter_gen = atomic_fetch_add_explicit(
-                       &thread->t_monotonic.mth_gen, 1, memory_order_release);
+               &thread->t_monotonic.mth_gen, 1, memory_order_release);
        /*
         * Should not have pre-empted a modification to the counts.
         */
@@ -163,7 +163,7 @@ mt_update_thread(thread_t thread)
         * before and after reading don't match.
         */
        __assert_only uint64_t exit_gen = atomic_fetch_add_explicit(
-                       &thread->t_monotonic.mth_gen, 1, memory_order_release);
+               &thread->t_monotonic.mth_gen, 1, memory_order_release);
        /*
         * Make sure no other writers came through behind us.
         */
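These two hunks re-indent the matching writer half: each update is bracketed by two release-ordered increments of mth_gen, and the asserts check that the generation was even on entry (this writer did not preempt another) and exactly one higher on exit (no other writer ran behind it). Continuing the sketch above, the writer side might look like:

#include <assert.h>

static void
seq_counts_write(struct seq_counts *sc, const uint64_t *new_counts)
{
    /* Make the generation odd before touching the data ... */
    uint64_t enter_gen = atomic_fetch_add_explicit(&sc->gen, 1,
        memory_order_release);
    assert((enter_gen & 1) == 0);   /* must not preempt another writer */

    sc->counts[0] = new_counts[0];
    sc->counts[1] = new_counts[1];

    /* ... and even again afterward, releasing the new values. */
    uint64_t exit_gen = atomic_fetch_add_explicit(&sc->gen, 1,
        memory_order_release);
    assert(exit_gen == enter_gen + 1);  /* no other writer came through behind us */
}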
@@ -185,11 +185,11 @@ mt_sched_update(thread_t thread)
 
                KDBG_RELEASE(MT_KDBG_IC_CPU_CSWITCH,
 #ifdef MT_CORE_INSTRS
-                               mtc->mtc_counts[MT_CORE_INSTRS],
+                   mtc->mtc_counts[MT_CORE_INSTRS],
 #else /* defined(MT_CORE_INSTRS) */
-                               0,
+                   0,
 #endif /* !defined(MT_CORE_INSTRS) */
-                               mtc->mtc_counts[MT_CORE_CYCLES]);
+                   mtc->mtc_counts[MT_CORE_CYCLES]);
        }
 }
 
@@ -199,21 +199,19 @@ mt_fixed_task_counts(task_t task, uint64_t *counts_out)
        assert(task != TASK_NULL);
        assert(counts_out != NULL);
 
-       uint64_t counts[MT_CORE_NFIXED];
        if (!mt_core_supported) {
-               for (int i = 0; i < MT_CORE_NFIXED; i++) {
-                       counts[i] = 0;
-               }
-               return 0;
+               memset(counts_out, 0, sizeof(*counts_out) * MT_CORE_NFIXED);
+               return 1;
        }
 
        task_lock(task);
 
+       uint64_t counts[MT_CORE_NFIXED] = { 0 };
        for (int i = 0; i < MT_CORE_NFIXED; i++) {
                counts[i] = task->task_monotonic.mtk_counts[i];
        }
 
-       uint64_t thread_counts[MT_CORE_NFIXED] = {};
+       uint64_t thread_counts[MT_CORE_NFIXED] = { 0 };
        thread_t thread = THREAD_NULL;
        thread_t curthread = current_thread();
        bool needs_current = false;
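Unlike the surrounding re-indentation, this hunk changes behavior. The old code zeroed a local counts[] array that was never copied into counts_out and then returned 0, so on hardware without fixed counters the caller got a success return and an untouched output buffer; the new code zeroes the caller's buffer directly and returns a non-zero value. The memset sizes the buffer per element of the output array; a minimal sketch of the idiom, with NFIXED as a hypothetical stand-in for MT_CORE_NFIXED:

#include <stdint.h>
#include <string.h>

#define NFIXED 4    /* hypothetical stand-in for MT_CORE_NFIXED */

/* Zero the caller's output buffer in one call. sizeof(*out) measures
 * one element, so the byte count stays correct even if the element
 * type of the counts array changes later. */
static void
zero_counts(uint64_t *out)
{
    memset(out, 0, sizeof(*out) * NFIXED);
}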
@@ -264,12 +262,12 @@ mt_mtc_update_count(struct mt_cpu *mtc, unsigned int ctr)
        if (snap < mtc->mtc_snaps[ctr]) {
                if (mt_debug) {
                        kprintf("monotonic: cpu %d: thread %#llx: "
-                                       "retrograde counter %u value: %llu, last read = %llu\n",
-                                       cpu_number(), thread_tid(current_thread()), ctr, snap,
-                                       mtc->mtc_snaps[ctr]);
+                           "retrograde counter %u value: %llu, last read = %llu\n",
+                           cpu_number(), thread_tid(current_thread()), ctr, snap,
+                           mtc->mtc_snaps[ctr]);
                }
                (void)atomic_fetch_add_explicit(&mt_retrograde, 1,
-                               memory_order_relaxed);
+                   memory_order_relaxed);
                mtc->mtc_snaps[ctr] = snap;
                return 0;
        }
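The re-indented block above is the retrograde-counter guard: hardware counters are expected to be monotonic, so a snapshot smaller than the previous one means the counter was reset or reprogrammed. The code logs the anomaly, bumps the mt_retrograde statistic with a relaxed atomic (it is a pure event count, so no ordering is required), resynchronizes the saved snapshot, and reports a zero delta instead of a huge unsigned wraparound. A sketch of that bookkeeping under hypothetical names:

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t retrograde_events;  /* stand-in for mt_retrograde */

/* Return how far a counter advanced since the last read, in the style
 * of mt_mtc_update_count. */
static uint64_t
counter_delta(uint64_t snap, uint64_t *last_snap)
{
    if (snap < *last_snap) {
        /* Counter went backwards: record the anomaly, resync, and
         * report no progress rather than letting (snap - *last_snap)
         * wrap around to an enormous unsigned value. */
        atomic_fetch_add_explicit(&retrograde_events, 1,
            memory_order_relaxed);
        *last_snap = snap;
        return 0;
    }
    uint64_t delta = snap - *last_snap;
    *last_snap = snap;
    return delta;
}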
@@ -299,7 +297,7 @@ mt_fixed_counts_internal(uint64_t *counts, uint64_t *counts_since)
 
 void
 mt_mtc_update_fixed_counts(struct mt_cpu *mtc, uint64_t *counts,
-               uint64_t *counts_since)
+    uint64_t *counts_since)
 {
        if (!mt_core_supported) {
                return;
@@ -357,9 +355,7 @@ void
 mt_cur_thread_fixed_counts(uint64_t *counts)
 {
        if (!mt_core_supported) {
-               for (int i = 0; i < MT_CORE_NFIXED; i++) {
-                       counts[i] = 0;
-               }
+               memset(counts, 0, sizeof(*counts) * MT_CORE_NFIXED);
                return;
        }
 
@@ -544,7 +540,7 @@ uint64_t mt_core_reset_values[MT_CORE_NFIXED] = { 0 };
 
 int
 mt_microstackshot_start(unsigned int ctr, uint64_t period, mt_pmi_fn handler,
-               void *ctx)
+    void *ctx)
 {
        assert(ctr < MT_CORE_NFIXED);
 
@@ -561,6 +557,9 @@ mt_microstackshot_start(unsigned int ctr, uint64_t period, mt_pmi_fn handler,
 
        int error = mt_microstackshot_start_arch(period);
        if (error) {
+               mt_microstackshot_ctr = 0;
+               mt_microstackshot_pmi_handler = NULL;
+               mt_microstackshot_ctx = NULL;
                return error;
        }
 
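This hunk is the diff's other behavioral fix. The counter number, PMI handler, and context are stored into the mt_microstackshot_* globals earlier in the function (outside this hunk), before the arch layer is asked to arm the PMI; previously, if mt_microstackshot_start_arch() failed, those globals kept pointing at a handler whose start never took effect. The added lines roll that state back on the error path. A minimal sketch of the publish-then-arm-with-rollback shape, with stand-in names throughout:

#include <stddef.h>

typedef void (*pmi_fn)(void *ctx);

static unsigned int ms_ctr;     /* stand-ins for the mt_microstackshot_* globals */
static pmi_fn ms_handler;
static void *ms_ctx;

/* Stub for the arch-specific arming step; returns non-zero on failure. */
static int
start_arch(unsigned long period)
{
    (void)period;
    return 0;
}

int
microstackshot_start(unsigned int ctr, unsigned long period,
    pmi_fn handler, void *ctx)
{
    /* Publish the handler state first: the PMI can fire as soon as the
     * hardware is armed, and the interrupt path reads these globals. */
    ms_ctr = ctr;
    ms_handler = handler;
    ms_ctx = ctx;

    int error = start_arch(period);
    if (error) {
        /* Arming failed: undo the publish so no stale handler remains. */
        ms_ctr = 0;
        ms_handler = NULL;
        ms_ctx = NULL;
        return error;
    }
    return 0;
}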
@@ -577,4 +576,3 @@ mt_microstackshot_stop(void)
 
        return 0;
 }
-