/*
 * Must come before including darwintest.h
 */
#ifdef T_NAMESPACE
#undef T_NAMESPACE
#endif /* defined(T_NAMESPACE) */

#include <darwintest.h>
#include <fcntl.h>
#include <inttypes.h>
#ifndef PRIVATE
/*
 * Need new CPU families.
 */
#define PRIVATE
#include <mach/machine.h>
#undef PRIVATE
#else /* !defined(PRIVATE) */
#include <mach/machine.h>
#endif /* defined(PRIVATE) */
#include <ktrace.h>
#include <mach/mach.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <System/sys/guarded.h>
#include <System/sys/monotonic.h>
#include <sys/ioctl.h>
#include <sys/kdebug.h>
#include <sys/sysctl.h>
#include <unistd.h>

T_GLOBAL_META(
	T_META_NAMESPACE("xnu.monotonic"),
	T_META_CHECK_LEAKS(false)
);

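/*
 * Skip the test unless the kernel reports fixed-counter support through the
 * "kern.monotonic.supported" sysctl.
 */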
static void
skip_if_unsupported(void)
{
	int r;
	int supported = 0;
	size_t supported_size = sizeof(supported);

	r = sysctlbyname("kern.monotonic.supported", &supported, &supported_size,
	    NULL, 0);
	if (r < 0) {
		T_WITH_ERRNO;
		T_SKIP("could not find \"kern.monotonic.supported\" sysctl");
	}

	if (!supported) {
		T_SKIP("monotonic is not supported on this platform");
	}
}

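/*
 * Check two samples of the fixed counters: instructions and cycles should
 * both be non-zero and should strictly increase between the samples.
 */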
static void
check_fixed_counts(uint64_t counts[2][2])
{
	T_QUIET;
	T_EXPECT_GT(counts[0][0], UINT64_C(0), "instructions are larger than 0");
	T_QUIET;
	T_EXPECT_GT(counts[0][1], UINT64_C(0), "cycles are larger than 0");

	T_EXPECT_GT(counts[1][0], counts[0][0], "instructions increase monotonically");
	T_EXPECT_GT(counts[1][1], counts[0][1], "cycles increase monotonically");
}

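/*
 * thread_selfcounts() is not declared by any of the included headers, so the
 * tests that call it declare it locally.
 */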
T_DECL(core_fixed_thread_self, "check the current thread's fixed counters",
    T_META_ASROOT(true))
{
	int err;
	extern int thread_selfcounts(int type, void *buf, size_t nbytes);
	uint64_t counts[2][2];

	T_SETUPBEGIN;
	skip_if_unsupported();
	T_SETUPEND;

	err = thread_selfcounts(1, &counts[0], sizeof(counts[0]));
	T_ASSERT_POSIX_ZERO(err, "thread_selfcounts");
	err = thread_selfcounts(1, &counts[1], sizeof(counts[1]));
	T_ASSERT_POSIX_ZERO(err, "thread_selfcounts");

	check_fixed_counts(counts);
}

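/*
 * Read the task-wide fixed counters twice with task_inspect() and make sure
 * they behave like the per-thread counters.
 */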
T_DECL(core_fixed_task, "check that task counting is working",
    T_META_ASROOT(true))
{
	task_t task = mach_task_self();
	kern_return_t kr;
	mach_msg_type_number_t size = TASK_INSPECT_BASIC_COUNTS_COUNT;
	uint64_t counts[2][2];

	skip_if_unsupported();

	kr = task_inspect(task, TASK_INSPECT_BASIC_COUNTS,
	    (task_inspect_info_t)&counts[0], &size);
	T_ASSERT_MACH_SUCCESS(kr,
	    "task_inspect(... TASK_INSPECT_BASIC_COUNTS ...)");

	size = TASK_INSPECT_BASIC_COUNTS_COUNT;
	kr = task_inspect(task, TASK_INSPECT_BASIC_COUNTS,
	    (task_inspect_info_t)&counts[1], &size);
	T_ASSERT_MACH_SUCCESS(kr,
	    "task_inspect(... TASK_INSPECT_BASIC_COUNTS ...)");

	check_fixed_counts(counts);
}

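/*
 * Ask the kernel to emit its monotonic kdebug test events and verify that the
 * paired tracepoints carry sane counter values in their arguments.
 */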
T_DECL(core_fixed_kdebug, "check that the kdebug macros for monotonic work",
    T_META_ASROOT(true))
{
	__block bool saw_events = false;
	ktrace_session_t s;
	int r;
	int set = 1;

	T_SETUPBEGIN;
	skip_if_unsupported();

	s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");

	ktrace_events_single_paired(s,
	    KDBG_EVENTID(DBG_MONOTONIC, DBG_MT_TMPCPU, 0x3fff),
	    ^(struct trace_point *start, struct trace_point *end)
	{
		uint64_t counts[2][2];

		saw_events = true;

		counts[0][0] = start->arg1;
		counts[0][1] = start->arg2;
		counts[1][0] = end->arg1;
		counts[1][1] = end->arg2;

		check_fixed_counts(counts);
	});

	ktrace_set_completion_handler(s, ^{
		T_ASSERT_TRUE(saw_events, "should see monotonic kdebug events");
		T_END;
	});
	T_SETUPEND;

	T_ASSERT_POSIX_ZERO(ktrace_start(s,
	    dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0)), NULL);

	r = sysctlbyname("kern.monotonic.kdebug_test", NULL, NULL, &set,
	    sizeof(set));
	T_ASSERT_POSIX_SUCCESS(r,
	    "sysctlbyname(\"kern.monotonic.kdebug_test\", ...)");

	ktrace_end(s, 0);
	dispatch_main();
}

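/*
 * Worker that repeatedly samples its own thread's fixed counters.
 */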
static void *
spin_thread_self_counts(__unused void *arg)
{
	extern int thread_selfcounts(int, void *, size_t);
	uint64_t counts[2] = { 0 };
	while (true) {
		(void)thread_selfcounts(1, &counts, sizeof(counts));
	}
}

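/*
 * Worker that repeatedly reads the task counters and checks that the returned
 * values never look like kernel pointers, which would suggest uninitialized
 * kernel memory being copied out.
 */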
static void *
spin_task_inspect(__unused void *arg)
{
	task_t task = mach_task_self();
	uint64_t counts[2] = { 0 };
	unsigned int size = 0;
	while (true) {
		size = (unsigned int)sizeof(counts);
		(void)task_inspect(task, TASK_INSPECT_BASIC_COUNTS,
		    (task_inspect_info_t)&counts[0], &size);
		/*
		 * Not realistic for a process to see count values with the high bit
		 * set, but kernel pointers will be that high.
		 */
		T_QUIET; T_ASSERT_LT(counts[0], 1ULL << 63,
		    "check for valid count entry 1");
		T_QUIET; T_ASSERT_LT(counts[1], 1ULL << 63,
		    "check for valid count entry 2");
	}
}

T_DECL(core_fixed_stack_leak_race,
    "ensure no stack data is leaked by TASK_INSPECT_BASIC_COUNTS")
{
	T_SETUPBEGIN;

	int ncpus = 0;
	T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("hw.logicalcpu_max", &ncpus,
	    &(size_t){ sizeof(ncpus) }, NULL, 0), "get number of CPUs");
	T_QUIET; T_ASSERT_GT(ncpus, 0, "got non-zero number of CPUs");
	pthread_t *threads = calloc((unsigned long)ncpus, sizeof(*threads));

	T_QUIET; T_ASSERT_NOTNULL(threads, "allocated space for threads");

	T_LOG("creating %d threads to attempt to race around task counts", ncpus);
	/*
	 * Have half the threads hammering thread_self_counts and the other half
	 * trying to get an error to occur inside TASK_INSPECT_BASIC_COUNTS and see
	 * uninitialized kernel memory.
	 */
	for (int i = 0; i < ncpus; i++) {
		T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&threads[i], NULL,
		    i & 1 ? spin_task_inspect : spin_thread_self_counts, NULL),
		    NULL);
	}

	T_SETUPEND;

	sleep(10);
	T_PASS("ending test after 10 seconds");
}

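/*
 * Read instruction and cycle deltas from a kern.monotonic.*_perf sysctl until
 * the dt_stat measurements for both stabilize.
 */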
static void
perf_sysctl_deltas(const char *sysctl_name, const char *stat_name)
{
	uint64_t deltas[2];
	size_t deltas_size;
	int r;

	T_SETUPBEGIN;
	skip_if_unsupported();

	dt_stat_t instrs = dt_stat_create("instructions", "%s_instrs",
	    stat_name);
	dt_stat_t cycles = dt_stat_create("cycles", "%s_cycles", stat_name);
	T_SETUPEND;

	while (!dt_stat_stable(instrs) || !dt_stat_stable(cycles)) {
		deltas_size = sizeof(deltas);
		r = sysctlbyname(sysctl_name, deltas, &deltas_size, NULL, 0);
		T_QUIET;
		T_ASSERT_POSIX_SUCCESS(r, "sysctlbyname(\"%s\", ...)", sysctl_name);
		dt_stat_add(instrs, (double)deltas[0]);
		dt_stat_add(cycles, (double)deltas[1]);
	}

	dt_stat_finalize(instrs);
	dt_stat_finalize(cycles);
}

T_DECL(perf_core_fixed_cpu, "test the performance of fixed CPU counter access",
    T_META_ASROOT(true), T_META_TAG_PERF)
{
	perf_sysctl_deltas("kern.monotonic.fixed_cpu_perf", "fixed_cpu_counters");
}

T_DECL(perf_core_fixed_thread, "test the performance of fixed thread counter access",
    T_META_ASROOT(true), T_META_TAG_PERF)
{
	perf_sysctl_deltas("kern.monotonic.fixed_thread_perf",
	    "fixed_thread_counters");
}

T_DECL(perf_core_fixed_task, "test the performance of fixed task counter access",
    T_META_ASROOT(true), T_META_TAG_PERF)
{
	perf_sysctl_deltas("kern.monotonic.fixed_task_perf", "fixed_task_counters");
}

T_DECL(perf_core_fixed_thread_self, "test the performance of thread self counts",
    T_META_TAG_PERF)
{
	extern int thread_selfcounts(int type, void *buf, size_t nbytes);
	uint64_t counts[2][2];

	T_SETUPBEGIN;
	dt_stat_t instrs = dt_stat_create("fixed_thread_self_instrs", "instructions");
	dt_stat_t cycles = dt_stat_create("fixed_thread_self_cycles", "cycles");

	skip_if_unsupported();
	T_SETUPEND;

	while (!dt_stat_stable(instrs) || !dt_stat_stable(cycles)) {
		int r1, r2;

		r1 = thread_selfcounts(1, &counts[0], sizeof(counts[0]));
		r2 = thread_selfcounts(1, &counts[1], sizeof(counts[1]));
		T_QUIET; T_ASSERT_POSIX_ZERO(r1, "thread_selfcounts");
		T_QUIET; T_ASSERT_POSIX_ZERO(r2, "thread_selfcounts");

		T_QUIET; T_ASSERT_GT(counts[1][0], counts[0][0],
		    "instructions increase monotonically");
		dt_stat_add(instrs, counts[1][0] - counts[0][0]);

		T_QUIET; T_ASSERT_GT(counts[1][1], counts[0][1],
		    "cycles increase monotonically");
		dt_stat_add(cycles, counts[1][1] - counts[0][1]);
	}

	dt_stat_finalize(instrs);
	dt_stat_finalize(cycles);
}