/*
 * Must come before including darwintest.h
 */
#ifdef T_NAMESPACE
#undef T_NAMESPACE
#endif /* defined(T_NAMESPACE) */

#include <darwintest.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#ifndef PRIVATE
/*
 * Need new CPU families.
 */
#define PRIVATE
#include <mach/machine.h>
#undef PRIVATE
#else /* !defined(PRIVATE) */
#include <mach/machine.h>
#endif /* defined(PRIVATE) */
#include <mach/mach_time.h>
#include <stdint.h>
#include <string.h>
#include <System/sys/guarded.h>
#include <System/sys/monotonic.h>
#include <sys/ioctl.h>
#include <sys/sysctl.h>
#include <unistd.h>

T_GLOBAL_META(
	T_META_NAMESPACE("xnu.monotonic"),
	T_META_CHECK_LEAKS(false),
	T_META_ENABLED(false)
	);

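/*
 * Check hw.cputype, hw.cpusubtype, and hw.cpufamily to decide whether this
 * device has uncore counters to test.
 */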
static bool
device_supports_uncore(void)
{
	int r;
	int type, subtype;
	unsigned int family;
	size_t size = sizeof(type);

	/*
	 * Only arm64 Monsoon and Vortex devices support uncore counters.
	 */

	r = sysctlbyname("hw.cputype", &type, &size, NULL, 0);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(r, "sysctlbyname(\"hw.cputype\")");
	r = sysctlbyname("hw.cpusubtype", &subtype, &size, NULL, 0);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(r, "sysctlbyname(\"hw.cpusubtype\")");
	r = sysctlbyname("hw.cpufamily", &family, &size, NULL, 0);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(r, "sysctlbyname(\"hw.cpufamily\")");

	if (type == CPU_TYPE_ARM64 &&
	    subtype == CPU_SUBTYPE_ARM64_V8 &&
	    (family == CPUFAMILY_ARM_MONSOON_MISTRAL ||
	    family == CPUFAMILY_ARM_VORTEX_TEMPEST)) {
		return true;
	}

	return false;
}

#define UNCORE_DEV_PATH "/dev/monotonic/uncore"

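/*
 * Open the uncore dev node with a guarded fd.  If error is NULL, assert that
 * the open succeeds; otherwise, report errno through error instead of
 * asserting.  Skips the test when the device has no uncore support.
 */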
static int
open_uncore_error(int *error)
{
	guardid_t guard;
	int fd;

	guard = 0xa5adcafe;

	T_SETUPBEGIN;

	fd = guarded_open_np(UNCORE_DEV_PATH, &guard,
	    GUARD_CLOSE | GUARD_DUP | GUARD_WRITE, O_CLOEXEC | O_EXCL);
	if (fd < 0 && errno == ENOENT) {
		T_ASSERT_FALSE(device_supports_uncore(),
		    "lack of dev node implies no uncore support");
		T_SKIP("uncore counters are unsupported");
		__builtin_unreachable();
	}

	if (error == NULL) {
		T_ASSERT_POSIX_SUCCESS(fd, "open '%s'", UNCORE_DEV_PATH);
	} else {
		*error = errno;
	}

	T_SETUPEND;

	return fd;
}

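/*
 * Read the current values of the counters selected by ctr_mask into counts
 * with the MT_IOC_COUNTS ioctl.
 */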
static void
uncore_counts(int fd, uint64_t ctr_mask, uint64_t *counts)
{
	int r;
	union monotonic_ctl_counts *cts_ctl;

	cts_ctl = (union monotonic_ctl_counts *)counts;
	cts_ctl->in.ctr_mask = ctr_mask;

	r = ioctl(fd, MT_IOC_COUNTS, cts_ctl);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(r, "MT_IOC_COUNTS got counter values");
}

/*
 * Event that counts like mach_continuous_time on supported hardware (see
 * uncore_accuracy, below).
 */
#define REF_TIMEBASE_EVENT 0x3
#define CTRS_MAX 32

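/*
 * Keep adding the reference timebase event until the kernel refuses with
 * E2BIG, and make sure the limit is hit well before CTRS_MAX.
 */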
T_DECL(uncore_max_counters,
    "ensure that the maximum number of uncore counters is sane",
    T_META_ASROOT(true))
{
	int nctrs = 0;
	int fd;

	fd = open_uncore_error(NULL);

	do {
		union monotonic_ctl_add add_ctl;
		int r;

		add_ctl.in.config.event = REF_TIMEBASE_EVENT;
		add_ctl.in.config.allowed_ctr_mask = UINT64_MAX;

		r = ioctl(fd, MT_IOC_ADD, &add_ctl);
		if (r < 0 && errno == E2BIG) {
			break;
		}

		T_QUIET;
		T_ASSERT_POSIX_SUCCESS(r, "added reference timebase event to counters");
		nctrs++;
	} while (nctrs < CTRS_MAX);

	T_EXPECT_LT(nctrs, CTRS_MAX,
	    "only able to allocate a reasonable number of counters");
}

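/*
 * Add an event to the counters, restricted to allowed_ctrs.  If error is
 * non-zero, expect the MT_IOC_ADD ioctl to fail with that errno; otherwise,
 * return the counter the event was assigned to.
 */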
static uint32_t
uncore_add(int fd, uint64_t event, uint64_t allowed_ctrs, int error)
{
	int save_errno;
	int r;
	uint32_t ctr;
	union monotonic_ctl_add add_ctl;

	add_ctl.in.config.event = event;
	add_ctl.in.config.allowed_ctr_mask = allowed_ctrs;
	r = ioctl(fd, MT_IOC_ADD, &add_ctl);
	if (error) {
		save_errno = errno;
		T_EXPECT_LT(r, 0, "adding event to counter should fail");
		T_EXPECT_EQ(save_errno, error,
		    "adding event to counter should fail with %d: %s",
		    error, strerror(error));
		return UINT32_MAX;
	} else {
		T_QUIET;
		T_ASSERT_POSIX_SUCCESS(r,
		    "added event %#" PRIx64 " to counters", event);
	}

	ctr = add_ctl.out.ctr;
	T_QUIET; T_ASSERT_LT(ctr, (uint32_t)CTRS_MAX, "counter returned should be sane");
	return ctr;
}

T_DECL(uncore_collision,
    "ensure that trying to add an event on the same counter fails",
    T_META_ASROOT(true))
{
	int fd;
	uint32_t ctr;

	fd = open_uncore_error(NULL);

	ctr = uncore_add(fd, REF_TIMEBASE_EVENT, UINT64_MAX, 0);
	T_LOG("added event to uncore counter %u", ctr);

	(void)uncore_add(fd, REF_TIMEBASE_EVENT, UINT64_C(1) << ctr, ENOSPC);
}

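/*
 * Start the uncore counters counting with the MT_IOC_ENABLE ioctl.
 */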
static void
uncore_enable(int fd)
{
	union monotonic_ctl_enable en_ctl = {
		.in = { .enable = true }
	};

	T_ASSERT_POSIX_SUCCESS(ioctl(fd, MT_IOC_ENABLE, &en_ctl),
	    "enabling counters");
}

T_DECL(uncore_enabled_busy,
    "ensure that trying to add an event while enabled fails",
    T_META_ASROOT(true))
{
	int fd;

	fd = open_uncore_error(NULL);

	(void)uncore_add(fd, REF_TIMEBASE_EVENT, UINT64_MAX, 0);

	uncore_enable(fd);
	(void)uncore_add(fd, REF_TIMEBASE_EVENT, UINT64_MAX, EBUSY);
}

T_DECL(uncore_reset,
    "ensure that resetting the counters works")
{
	int fd;
	int r;

	fd = open_uncore_error(NULL);

	(void)uncore_add(fd, REF_TIMEBASE_EVENT, UINT64_C(1), 0);
	(void)uncore_add(fd, REF_TIMEBASE_EVENT, UINT64_C(1), ENOSPC);

	r = ioctl(fd, MT_IOC_RESET);
	T_ASSERT_POSIX_SUCCESS(r, "resetting succeeds");

	T_LOG("adding event to same counter after reset");
	(void)uncore_add(fd, REF_TIMEBASE_EVENT, UINT64_C(1), 0);
}

#define SLEEP_USECS (500 * 1000)

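/*
 * Fill every available counter with the given event, stopping when the
 * kernel returns E2BIG.  Optionally report the number of uncore monitors
 * through nmonitors.  Returns the number of counters that were added.
 */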
static int
uncore_add_all(int fd, uint64_t event, int *nmonitors)
{
	int nctrs = 0;
	int r;

	do {
		union monotonic_ctl_add add_ctl;

		add_ctl.in.config.event = event;
		add_ctl.in.config.allowed_ctr_mask = UINT64_MAX;

		r = ioctl(fd, MT_IOC_ADD, &add_ctl);
		if (r < 0 && errno == E2BIG) {
			break;
		}

		T_QUIET;
		T_ASSERT_POSIX_SUCCESS(r, "added event %#" PRIx64 " to counters",
		    event);
		nctrs++;
	} while (nctrs < CTRS_MAX);

	if (nmonitors) {
		union monotonic_ctl_info info_ctl;
		r = ioctl(fd, MT_IOC_GET_INFO, &info_ctl);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(r, "got info about uncore counters");

		*nmonitors = (int)info_ctl.out.nmonitors;
	}

	return nctrs;
}

T_DECL(uncore_accuracy,
    "ensure that the uncore counters count accurately",
    T_META_ASROOT(true))
{
	int fd;
	int nctrs = 0;
	int nmonitors = 0;
	uint64_t ctr_mask;
	uint64_t counts[2][CTRS_MAX];
	uint64_t times[2];

	fd = open_uncore_error(NULL);

	/*
	 * The reference timebase event counts the same as mach_continuous_time
	 * (on hardware supporting uncore counters). Make sure that the counter
	 * is close to the values returned from the trap.
	 *
	 * Fill all the counters with this event.
	 */
	nctrs = uncore_add_all(fd, REF_TIMEBASE_EVENT, &nmonitors);
	ctr_mask = (UINT64_C(1) << nctrs) - 1;

	T_LOG("added %d counters to check", nctrs);

	uncore_enable(fd);

	/*
	 * First, make sure there's an upper bound on the counter -- take the
	 * time around getting the counter values.
	 */

	times[0] = mach_absolute_time();
	uncore_counts(fd, ctr_mask, counts[0]);

	usleep(SLEEP_USECS);

	uncore_counts(fd, ctr_mask, counts[1]);
	times[1] = mach_absolute_time();

	T_QUIET; T_EXPECT_GT(times[1], times[0],
	    "mach_absolute_time is monotonically increasing");
	for (int i = 0; i < nctrs; i++) {
		T_EXPECT_GT(counts[1][i], counts[0][i],
		    "uncore counter %d value is monotonically increasing", i);
		T_EXPECT_LT(counts[1][i] - counts[0][i], times[1] - times[0],
		    "reference timebase on uncore counter %d satisfies upper bound "
		    "from mach_absolute_time", i);
	}

	/*
	 * Next, the lower bound -- put mach_absolute_time inside getting the
	 * counter values.
	 */

	uncore_counts(fd, ctr_mask, counts[0]);
	times[0] = mach_absolute_time();

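	/*
	 * Spin for a while so the timebase advances between the two readings;
	 * the volatile counter keeps the compiler from removing the loop.
	 */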
	volatile int iterations = 100000;
	while (iterations--) {
		;
	}

	times[1] = mach_absolute_time();
	uncore_counts(fd, ctr_mask, counts[1]);

	for (int mon = 0; mon < nmonitors; mon++) {
		for (int i = 0; i < nctrs; i++) {
			/*
			 * Counter values are assumed to be laid out
			 * monitor-major, matching the nctrs * nmonitors buffer
			 * used in perf_uncore.
			 */
			T_QUIET;
			T_EXPECT_GT(counts[1][mon * nctrs + i], counts[0][mon * nctrs + i],
			    "uncore %d counter %d value is monotonically increasing",
			    mon, i);
			T_EXPECT_GT(counts[1][mon * nctrs + i] - counts[0][mon * nctrs + i],
			    times[1] - times[0],
			    "reference timebase on uncore %d counter %d satisfies "
			    "lower bound from mach_absolute_time", mon, i);
		}
	}
}

T_DECL(uncore_ownership,
    "ensure the dev node cannot be opened in two places",
    T_META_ASROOT(true))
{
	int fd;
	int other_fd;
	int error;

	fd = open_uncore_error(NULL);

	other_fd = open_uncore_error(&error);
	T_ASSERT_LT(other_fd, 0, "opening a second uncore fd should fail");
	T_ASSERT_EQ(error, EBUSY, "failure should be EBUSY");
}

T_DECL(uncore_root_required,
    "ensure the dev node cannot be opened by non-root users",
    T_META_ASROOT(false))
{
	int fd;
	int error = 0;

	T_SKIP("libdarwintest doesn't drop privileges properly");

	fd = open_uncore_error(&error);
	T_ASSERT_LT(fd, 0, "opening dev node should not return an fd");
	T_ASSERT_EQ(error, EPERM,
	    "opening dev node as non-root user should fail with EPERM");
}

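/*
 * Fill the counters with the reference timebase event, then repeatedly read
 * them, measuring the instruction cost of the MT_IOC_COUNTS ioctl and the
 * timebase delta observed between adjacent counters.
 */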
T_DECL(perf_uncore,
    "measure the latency of accessing the counters",
    T_META_TAG_PERF)
{
	int fd;
	int nctrs;
	int nmonitors;
	int r;
	uint64_t ctr_mask;
	dt_stat_thread_instructions_t counts_instrs;
	dt_stat_t counter_deltas;

	counts_instrs = dt_stat_thread_instructions_create("ioctl_counts");
	counter_deltas = dt_stat_create("abs_time", "between_each_counter");

	fd = open_uncore_error(NULL);

	nctrs = uncore_add_all(fd, REF_TIMEBASE_EVENT, &nmonitors);
	ctr_mask = (UINT64_C(1) << nctrs) - 1;

	uncore_enable(fd);

	do {
		dt_stat_token token;
		uint64_t counts[nctrs * nmonitors];
		union monotonic_ctl_counts *cts_ctl;

		cts_ctl = (union monotonic_ctl_counts *)counts;
		cts_ctl->in.ctr_mask = ctr_mask;

		token = dt_stat_thread_instructions_begin(counts_instrs);
		r = ioctl(fd, MT_IOC_COUNTS, cts_ctl);
		dt_stat_thread_instructions_end(counts_instrs, token);
		T_QUIET;
		T_ASSERT_POSIX_SUCCESS(r,
		    "getting uncore counter values %#" PRIx64, ctr_mask);

		for (int i = 0; i < (nctrs - 1); i++) {
			dt_stat_add(counter_deltas, (double)(counts[i + 1] - counts[i]));
		}
	} while (!dt_stat_stable(counts_instrs) || !dt_stat_stable(counter_deltas));

	dt_stat_finalize(counts_instrs);
	dt_stat_finalize(counter_deltas);
}