/*
 * Source: osfmk/kern/monotonic.h from Apple XNU (release xnu-6153.61.1),
 * retrieved via the git.saurik.com mirror of apple/xnu.git.
 */
1 /*
2 * Copyright (c) 2017-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
#ifndef KERN_MONOTONIC_H
#define KERN_MONOTONIC_H

#if MONOTONIC

#include <stdbool.h>
#include <stdint.h>
#include <sys/cdefs.h>

__BEGIN_DECLS

/* When true, enables extra debugging behavior in monotonic. */
extern bool mt_debug;
/*
 * Global event counters, updated atomically from multiple CPUs.
 * NOTE(review): names suggest mt_pmis counts performance monitor interrupts
 * (PMIs) taken and mt_retrograde counts observations of a counter moving
 * backwards -- confirm against the monotonic implementation.
 */
extern _Atomic uint64_t mt_pmis;
extern _Atomic uint64_t mt_retrograde;

/*
 * Read the fixed hardware counters system-wide, for the current thread, or
 * for the current task, storing the values into `counts`.
 *
 * NOTE(review): the required length of the `counts` buffer is defined by the
 * monotonic implementation and is not visible in this header -- callers must
 * size it per the implementation's fixed-counter count.
 */
void mt_fixed_counts(uint64_t *counts);
void mt_cur_thread_fixed_counts(uint64_t *counts);
void mt_cur_task_fixed_counts(uint64_t *counts);
/* Instruction and cycle counts for the current CPU and current thread. */
uint64_t mt_cur_cpu_instrs(void);
uint64_t mt_cur_cpu_cycles(void);
uint64_t mt_cur_thread_instrs(void);
uint64_t mt_cur_thread_cycles(void);

__END_DECLS
53 #if MACH_KERNEL_PRIVATE
54
55 #include <kern/thread.h>
56 #include <kern/task.h>
57 #include <stdbool.h>
58
59 __BEGIN_DECLS
60
61 #if defined(__arm__) || defined(__arm64__)
62 #include <arm/cpu_data_internal.h>
63 #elif defined(__x86_64__)
64 #include <i386/cpu_data.h>
65 #else /* !defined(__arm__) && !defined(__arm64__) && !defined(__x86_64__) */
66 #error unsupported architecture
67 #endif /* !defined(__arm__) && !defined(__arm64__) && !defined(__x86_64__) */
68
69 void mt_update_fixed_counts(void);
70 void mt_update_task(task_t task, thread_t thread);
71 bool mt_update_thread(thread_t thread);
72 int mt_fixed_thread_counts(thread_t thread, uint64_t *counts_out);
73 int mt_fixed_task_counts(task_t task, uint64_t *counts_out);
74
75 /*
76 * Private API for the platform layers.
77 */
78
79 /*
80 * Called once early in boot, before CPU initialization occurs (where
81 * `mt_cpu_up` is called).
82 *
83 * This allows monotonic to detect if the hardware supports performance counters
84 * and install the global PMI handler.
85 */
86 void mt_early_init(void);
87
88 /*
89 * Called when a core is idling and exiting from idle.
90 */
91 void mt_cpu_idle(cpu_data_t *cpu);
92 void mt_cpu_run(cpu_data_t *cpu);
93
94 /*
95 * Called when a core is shutting down or powering up.
96 */
97 void mt_cpu_down(cpu_data_t *cpu);
98 void mt_cpu_up(cpu_data_t *cpu);
99
100 /*
101 * Called while single-threaded when the system is going to sleep.
102 */
103 void mt_sleep(void);
104
105 /*
106 * Called on each CPU as the system is waking from sleep.
107 */
108 void mt_wake_per_core(void);
109
110 #if __ARM_CLUSTER_COUNT__
111 /*
112 * Called when a cluster is initialized.
113 */
114 void mt_cluster_init(void);
115 #endif /* __ARM_CLUSTER_COUNT__ */
116
117 /*
118 * "Up-call" to the Mach layer to update counters from a PMI.
119 */
120 uint64_t mt_cpu_update_count(cpu_data_t *cpu, unsigned int ctr);
121
122 /*
123 * Private API for the scheduler.
124 */
125
126 /*
127 * Called when a thread is switching off-core or expires its quantum.
128 */
129 void mt_sched_update(thread_t thread);
130
131 /*
132 * Called when a thread is terminating to save its counters into the task. The
133 * task lock must be held and the thread should be removed from the task's
134 * thread list in that same critical section.
135 */
136 void mt_terminate_update(task_t task, thread_t thread);
137
138 /*
139 * Private API for the performance controller callout.
140 */
141 void mt_perfcontrol(uint64_t *instrs, uint64_t *cycles);
142
143 /*
144 * Private API for stackshot.
145 */
146 void mt_stackshot_thread(thread_t thread, uint64_t *instrs, uint64_t *cycles);
147 void mt_stackshot_task(task_t task, uint64_t *instrs, uint64_t *cycles);
148
149 /*
150 * Private API for microstackshot.
151 */
152 typedef void (*mt_pmi_fn)(bool user_mode, void *ctx);
153 int mt_microstackshot_start(unsigned int ctr, uint64_t period, mt_pmi_fn fn,
154 void *ctx);
155 int mt_microstackshot_stop(void);
156
157 __END_DECLS
158
159 #endif /* MACH_KERNEL_PRIVATE */
160
161 #endif /* MONOTONIC */
162
163 #endif /* !defined(KERN_MONOTONIC_H) */