/* osfmk/kern/kern_monotonic.c (apple/xnu) */

/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/assert.h>
#include <kern/monotonic.h>
#include <kern/thread.h>
#include <machine/atomic.h>
#include <machine/monotonic.h>
#include <mach/mach_traps.h>
#include <stdatomic.h>
#include <sys/errno.h>

bool mt_debug = false;
_Atomic uint64_t mt_pmis = 0;
_Atomic uint64_t mt_retrograde = 0;

#define MT_KDBG_INSTRS_CYCLES(CODE) \
	KDBG_EVENTID(DBG_MONOTONIC, DBG_MT_INSTRS_CYCLES, CODE)

#define MT_KDBG_IC_CPU_CSWITCH MT_KDBG_INSTRS_CYCLES(1)

/*
 * Updating the thread counters takes place in the context switch path, so it
 * cannot introduce too much overhead.  Thus, updating takes no locks; instead,
 * the updater bumps a generation count to an odd value to indicate that it's
 * in the critical section, and readers wait until the generation count
 * returns to an even value.
 *
 * Reading the counters must also avoid seeing any "torn" states, where a few
 * of the counters are from a previous state and the rest are from the current
 * state.  For this reason, the reader redrives the entire read operation if it
 * sees mismatching generation counts at the beginning and end of the read.
 */
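
/*
 * Illustrative aside (not part of xnu): a minimal C11 sketch of the
 * generation-count scheme described above, using hypothetical names
 * (ex_counters, ex_write, ex_read) and a made-up number of counters.  As in
 * the context-switch path here, the writer is assumed to run with preemption
 * excluded, so updates never nest; readers redrive until they observe the
 * same even generation before and after copying the counters.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define EX_NCOUNTERS 2

struct ex_counters {
	_Atomic uint64_t gen;          /* even: quiescent, odd: update in progress */
	uint64_t counts[EX_NCOUNTERS];
};

static void
ex_write(struct ex_counters *c, const uint64_t *deltas)
{
	/* Make the generation odd to mark the critical section. */
	(void)atomic_fetch_add_explicit(&c->gen, 1, memory_order_release);
	for (int i = 0; i < EX_NCOUNTERS; i++) {
		c->counts[i] += deltas[i];
	}
	/* Make the generation even again to publish the update. */
	(void)atomic_fetch_add_explicit(&c->gen, 1, memory_order_release);
}

static bool
ex_read(struct ex_counters *c, uint64_t *out)
{
	for (int attempts = 0; attempts < 100; attempts++) {
		uint64_t start = atomic_load_explicit(&c->gen, memory_order_acquire);
		if (start & 1) {
			continue;      /* writer in progress; spin */
		}
		for (int i = 0; i < EX_NCOUNTERS; i++) {
			out[i] = c->counts[i];
		}
		uint64_t end = atomic_load_explicit(&c->gen, memory_order_acquire);
		if (end == start) {
			return true;   /* consistent snapshot */
		}
		/* The generation changed underneath us: redrive the read. */
	}
	return false;                  /* persistent contention; caller decides */
}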

int
mt_fixed_thread_counts(thread_t thread, uint64_t *counts_out)
{
	uint64_t start_gen, end_gen;
	uint64_t spins = 0, retries = 0;
	uint64_t counts[MT_CORE_NFIXED];

	/*
	 * Try to read a thread's counter values by ensuring its gen count is
	 * even.  If it's odd, it means that a thread is trying to update its
	 * counters.
	 *
	 * Spin until the gen count is even.
	 */
spin:
	start_gen = atomic_load_explicit(&thread->t_monotonic.mth_gen,
	    memory_order_acquire);
retry:
	if (start_gen & 1) {
		spins++;
		if (spins > MAXSPINS) {
			return EBUSY;
		}
		goto spin;
	}

	for (int i = 0; i < MT_CORE_NFIXED; i++) {
		counts[i] = thread->t_monotonic.mth_counts[i];
	}

	/*
	 * After reading the counters, check the gen count again.  If it is
	 * different from the value that we started with, the thread raced
	 * writing its counters with us reading them.  We need to redrive the
	 * entire operation.
	 *
	 * Go back to check if the value we just read was even and try to read
	 * again.
	 */
	end_gen = atomic_load_explicit(&thread->t_monotonic.mth_gen,
	    memory_order_acquire);
	if (end_gen != start_gen) {
		retries++;
		if (retries > MAXRETRIES) {
			return EAGAIN;
		}
		start_gen = end_gen;
		goto retry;
	}

	/*
	 * Only after getting a consistent snapshot of the counters should we
	 * write them into the provided buffer.
	 */
	for (int i = 0; i < MT_CORE_NFIXED; i++) {
		counts_out[i] = counts[i];
	}

	return 0;
}
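
/*
 * Illustrative aside (not part of xnu): a hypothetical caller of
 * mt_fixed_thread_counts().  The helper name and the retry policy are
 * assumptions for this example; only the function's contract is taken from
 * the code above -- a buffer of MT_CORE_NFIXED entries and a nonzero return
 * when a consistent snapshot could not be taken.
 */
static int
example_read_thread_counters(thread_t thread, uint64_t *out)
{
	/* Give the reader a few chances if the target thread keeps updating. */
	for (int attempt = 0; attempt < 3; attempt++) {
		int error = mt_fixed_thread_counts(thread, out);
		if (error == 0) {
			return 0;      /* out[0..MT_CORE_NFIXED - 1] holds a consistent snapshot */
		}
	}
	return EBUSY;                  /* persistent contention; caller decides how to proceed */
}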

static void mt_fixed_counts_internal(uint64_t *counts, uint64_t *counts_since);

bool
mt_update_thread(thread_t thread)
{
	if (!mt_core_supported) {
		return false;
	}

	assert(ml_get_interrupts_enabled() == FALSE);

	uint64_t counts[MT_CORE_NFIXED], counts_since[MT_CORE_NFIXED];
	mt_fixed_counts_internal(counts, counts_since);

	/*
	 * Enter the update cycle by incrementing the gen count to be odd --
	 * this tells any readers to spin on the gen count, waiting for it to go
	 * even.
	 */
	__assert_only uint64_t enter_gen = atomic_fetch_add_explicit(
	    &thread->t_monotonic.mth_gen, 1, memory_order_release);
	/*
	 * Should not have pre-empted a modification to the counts.
	 */
	assert((enter_gen & 1) == 0);

	for (int i = 0; i < MT_CORE_NFIXED; i++) {
		thread->t_monotonic.mth_counts[i] += counts_since[i];
	}

	/*
	 * Exit the update by making the gen count even again.  Readers check
	 * the gen count for equality, and will redrive the reads if the values
	 * before and after reading don't match.
	 */
	__assert_only uint64_t exit_gen = atomic_fetch_add_explicit(
	    &thread->t_monotonic.mth_gen, 1, memory_order_release);
	/*
	 * Make sure no other writers came through behind us.
	 */
	assert(exit_gen == (enter_gen + 1));

	return true;
}

void
mt_sched_update(thread_t thread)
{
	bool updated = mt_update_thread(thread);
	if (!updated) {
		return;
	}

	if (kdebug_debugid_explicitly_enabled(MT_KDBG_IC_CPU_CSWITCH)) {
		struct mt_cpu *mtc = mt_cur_cpu();

		KDBG_RELEASE(MT_KDBG_IC_CPU_CSWITCH,
#ifdef MT_CORE_INSTRS
		    mtc->mtc_counts[MT_CORE_INSTRS],
#else /* defined(MT_CORE_INSTRS) */
		    0,
#endif /* !defined(MT_CORE_INSTRS) */
		    mtc->mtc_counts[MT_CORE_CYCLES]);
	}
}

int
mt_fixed_task_counts(task_t task, uint64_t *counts_out)
{
	assert(task != TASK_NULL);
	assert(counts_out != NULL);

	uint64_t counts[MT_CORE_NFIXED];
	if (!mt_core_supported) {
		for (int i = 0; i < MT_CORE_NFIXED; i++) {
			counts_out[i] = 0;
		}
		return 0;
	}

	task_lock(task);

	for (int i = 0; i < MT_CORE_NFIXED; i++) {
		counts[i] = task->task_monotonic.mtk_counts[i];
	}

	uint64_t thread_counts[MT_CORE_NFIXED] = {};
	thread_t thread = THREAD_NULL;
	thread_t curthread = current_thread();
	bool needs_current = false;
	int r = 0;
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		/*
		 * Get the current thread's counters after doing this
		 * processing, without holding the task lock.
		 */
		if (thread == curthread) {
			needs_current = true;
			continue;
		} else {
			r = mt_fixed_thread_counts(thread, thread_counts);
			if (r) {
				goto error;
			}
		}

		for (int i = 0; i < MT_CORE_NFIXED; i++) {
			counts[i] += thread_counts[i];
		}
	}

	task_unlock(task);

	if (needs_current) {
		mt_cur_thread_fixed_counts(thread_counts);
	}

	for (int i = 0; i < MT_CORE_NFIXED; i++) {
		if (needs_current) {
			counts[i] += thread_counts[i];
		}
		counts_out[i] = counts[i];
	}

	return 0;

error:
	task_unlock(task);
	return r;
}

uint64_t
mt_mtc_update_count(struct mt_cpu *mtc, unsigned int ctr)
{
	uint64_t snap = mt_core_snap(ctr);
	if (snap < mtc->mtc_snaps[ctr]) {
		if (mt_debug) {
			kprintf("monotonic: cpu %d: thread %#llx: "
			    "retrograde counter %u value: %llu, last read = %llu\n",
			    cpu_number(), thread_tid(current_thread()), ctr, snap,
			    mtc->mtc_snaps[ctr]);
		}
		(void)atomic_fetch_add_explicit(&mt_retrograde, 1,
		    memory_order_relaxed);
		mtc->mtc_snaps[ctr] = snap;
		return 0;
	}

	uint64_t count = snap - mtc->mtc_snaps[ctr];
	mtc->mtc_snaps[ctr] = snap;

	return count;
}
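
/*
 * Illustrative aside (not part of xnu): why the retrograde check above
 * matters.  The function name and values are made up; the point is only the
 * unsigned arithmetic.  If a snapshot reads lower than the previous one, the
 * naive delta "snap - last" wraps around to an enormous value, so the code
 * above instead drops the delta, bumps mt_retrograde, and resyncs the saved
 * snapshot.
 */
static uint64_t
example_retrograde_delta(uint64_t last, uint64_t snap)
{
	/*
	 * With last = 1000 and snap = 900, the naive "snap - last" wraps to
	 * 18446744073709551516; the guarded form returns 0 instead, which is
	 * what mt_mtc_update_count() reports for a retrograde counter.
	 */
	return (snap < last) ? 0 : (snap - last);
}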

uint64_t
mt_cpu_update_count(cpu_data_t *cpu, unsigned int ctr)
{
	return mt_mtc_update_count(&cpu->cpu_monotonic, ctr);
}

static void
mt_fixed_counts_internal(uint64_t *counts, uint64_t *counts_since)
{
	assert(ml_get_interrupts_enabled() == FALSE);

	struct mt_cpu *mtc = mt_cur_cpu();

	mt_mtc_update_fixed_counts(mtc, counts, counts_since);
}

void
mt_mtc_update_fixed_counts(struct mt_cpu *mtc, uint64_t *counts,
    uint64_t *counts_since)
{
	if (!mt_core_supported) {
		return;
	}

	for (int i = 0; i < MT_CORE_NFIXED; i++) {
		uint64_t last_delta;
		uint64_t count;

		last_delta = mt_mtc_update_count(mtc, i);
		count = mtc->mtc_counts[i] + last_delta;

		if (counts) {
			counts[i] = count;
		}
		if (counts_since) {
			assert(counts != NULL);
			counts_since[i] = count - mtc->mtc_counts_last[i];
			mtc->mtc_counts_last[i] = count;
		}

		mtc->mtc_counts[i] = count;
	}
}

void
mt_update_fixed_counts(void)
{
	assert(ml_get_interrupts_enabled() == FALSE);

#if defined(__x86_64__)
	__builtin_ia32_lfence();
#elif defined(__arm__) || defined(__arm64__)
	__builtin_arm_isb(ISB_SY);
#endif /* !defined(__x86_64__) && (defined(__arm__) || defined(__arm64__)) */

	mt_fixed_counts_internal(NULL, NULL);
}

void
mt_fixed_counts(uint64_t *counts)
{
#if defined(__x86_64__)
	__builtin_ia32_lfence();
#elif defined(__arm__) || defined(__arm64__)
	__builtin_arm_isb(ISB_SY);
#endif /* !defined(__x86_64__) && (defined(__arm__) || defined(__arm64__)) */

	int intrs_en = ml_set_interrupts_enabled(FALSE);
	mt_fixed_counts_internal(counts, NULL);
	ml_set_interrupts_enabled(intrs_en);
}

void
mt_cur_thread_fixed_counts(uint64_t *counts)
{
	if (!mt_core_supported) {
		for (int i = 0; i < MT_CORE_NFIXED; i++) {
			counts[i] = 0;
		}
		return;
	}

	thread_t curthread = current_thread();
	int intrs_en = ml_set_interrupts_enabled(FALSE);
	(void)mt_update_thread(curthread);
	for (int i = 0; i < MT_CORE_NFIXED; i++) {
		counts[i] = curthread->t_monotonic.mth_counts[i];
	}
	ml_set_interrupts_enabled(intrs_en);
}

void
mt_cur_task_fixed_counts(uint64_t *counts)
{
	task_t curtask = current_task();

	mt_fixed_task_counts(curtask, counts);
}
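
/*
 * Illustrative aside (not part of xnu): a hypothetical in-kernel caller that
 * snapshots the current task's fixed counters.  Only MT_CORE_NFIXED,
 * MT_CORE_CYCLES, and (when present) MT_CORE_INSTRS come from the headers
 * this file already includes; the function name is made up for the example.
 */
static void
example_log_cur_task_counters(void)
{
	uint64_t counts[MT_CORE_NFIXED] = {};

	mt_cur_task_fixed_counts(counts);

	kprintf("monotonic example: task cycles = %llu\n",
	    counts[MT_CORE_CYCLES]);
#ifdef MT_CORE_INSTRS
	kprintf("monotonic example: task instructions = %llu\n",
	    counts[MT_CORE_INSTRS]);
#endif /* defined(MT_CORE_INSTRS) */
}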

/* FIXME these should only update the counter that is being accessed */

uint64_t
mt_cur_thread_instrs(void)
{
#ifdef MT_CORE_INSTRS
	thread_t curthread = current_thread();
	int intrs_en;
	uint64_t count;

	if (!mt_core_supported) {
		return 0;
	}

	intrs_en = ml_set_interrupts_enabled(FALSE);
	(void)mt_update_thread(curthread);
	count = curthread->t_monotonic.mth_counts[MT_CORE_INSTRS];
	ml_set_interrupts_enabled(intrs_en);

	return count;
#else /* defined(MT_CORE_INSTRS) */
	return 0;
#endif /* !defined(MT_CORE_INSTRS) */
}

uint64_t
mt_cur_thread_cycles(void)
{
	thread_t curthread = current_thread();
	int intrs_en;
	uint64_t count;

	if (!mt_core_supported) {
		return 0;
	}

	intrs_en = ml_set_interrupts_enabled(FALSE);
	(void)mt_update_thread(curthread);
	count = curthread->t_monotonic.mth_counts[MT_CORE_CYCLES];
	ml_set_interrupts_enabled(intrs_en);

	return count;
}
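
/*
 * Illustrative aside (not part of xnu): measuring a code region with the
 * thread-scoped counter above.  The function and the measured work are
 * hypothetical; the only API assumed is mt_cur_thread_cycles() as defined in
 * this file.
 */
static uint64_t
example_measure_region_cycles(void (*work)(void *), void *arg)
{
	uint64_t start = mt_cur_thread_cycles();
	work(arg);
	uint64_t end = mt_cur_thread_cycles();

	/*
	 * The counters are per-thread and folded in at context switch, so the
	 * delta only covers cycles attributed to this thread.
	 */
	return end - start;
}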

uint64_t
mt_cur_cpu_instrs(void)
{
#ifdef MT_CORE_INSTRS
	uint64_t counts[MT_CORE_NFIXED];

	if (!mt_core_supported) {
		return 0;
	}

	mt_fixed_counts(counts);
	return counts[MT_CORE_INSTRS];
#else /* defined(MT_CORE_INSTRS) */
	return 0;
#endif /* !defined(MT_CORE_INSTRS) */
}

uint64_t
mt_cur_cpu_cycles(void)
{
	uint64_t counts[MT_CORE_NFIXED];

	if (!mt_core_supported) {
		return 0;
	}

	mt_fixed_counts(counts);
	return counts[MT_CORE_CYCLES];
}

static void
mt_update_task(task_t task, thread_t thread)
{
	task_lock_assert_owned(task);

	if (!mt_core_supported) {
		return;
	}

	for (int i = 0; i < MT_CORE_NFIXED; i++) {
		task->task_monotonic.mtk_counts[i] += thread->t_monotonic.mth_counts[i];
	}
}

void
mt_terminate_update(task_t task, thread_t thread)
{
	mt_update_task(task, thread);
}

void
mt_perfcontrol(uint64_t *instrs, uint64_t *cycles)
{
	if (!mt_core_supported) {
		*instrs = 0;
		*cycles = 0;
		return;
	}

	struct mt_cpu *mtc = mt_cur_cpu();

	/*
	 * The performance controller queries the hardware directly, so provide the
	 * last snapshot we took for the core.  This is the value from when we
	 * updated the thread counts.
	 */

#ifdef MT_CORE_INSTRS
	*instrs = mtc->mtc_snaps[MT_CORE_INSTRS];
#else /* defined(MT_CORE_INSTRS) */
	*instrs = 0;
#endif /* !defined(MT_CORE_INSTRS) */

	*cycles = mtc->mtc_snaps[MT_CORE_CYCLES];
}

void
mt_stackshot_thread(thread_t thread, uint64_t *instrs, uint64_t *cycles)
{
	assert(mt_core_supported);

#ifdef MT_CORE_INSTRS
	*instrs = thread->t_monotonic.mth_counts[MT_CORE_INSTRS];
#else /* defined(MT_CORE_INSTRS) */
	*instrs = 0;
#endif /* !defined(MT_CORE_INSTRS) */

	*cycles = thread->t_monotonic.mth_counts[MT_CORE_CYCLES];
}

void
mt_stackshot_task(task_t task, uint64_t *instrs, uint64_t *cycles)
{
	assert(mt_core_supported);

#ifdef MT_CORE_INSTRS
	*instrs = task->task_monotonic.mtk_counts[MT_CORE_INSTRS];
#else /* defined(MT_CORE_INSTRS) */
	*instrs = 0;
#endif /* !defined(MT_CORE_INSTRS) */

	*cycles = task->task_monotonic.mtk_counts[MT_CORE_CYCLES];
}