]>
Commit | Line | Data |
---|---|---|
cb323159 A |
1 | /* Copyright (c) 2018 Apple Inc. All rights reserved. */ |
2 | ||
5ba3f43e A |
3 | #include <darwintest.h> |
4 | #include <inttypes.h> | |
5 | #include <stdint.h> | |
cb323159 | 6 | #include <sys/sysctl.h> |
5ba3f43e A |
7 | |
8 | #include <kperf/kpc.h> | |
9 | ||
cb323159 A |
10 | T_GLOBAL_META( |
11 | T_META_NAMESPACE("xnu.ktrace"), | |
12 | T_META_ASROOT(true), | |
13 | T_META_CHECK_LEAKS(false)); | |
5ba3f43e A |
14 | |
15 | T_DECL(fixed_thread_counters, | |
cb323159 | 16 | "test that fixed thread counters return monotonically increasing values") |
5ba3f43e | 17 | { |
cb323159 | 18 | |
5ba3f43e A |
19 | int err; |
20 | uint32_t ctrs_cnt; | |
21 | uint64_t *ctrs_a; | |
22 | uint64_t *ctrs_b; | |
23 | ||
24 | T_SETUPBEGIN; | |
25 | ||
26 | ctrs_cnt = kpc_get_counter_count(KPC_CLASS_FIXED_MASK); | |
27 | if (ctrs_cnt == 0) { | |
28 | T_SKIP("no fixed counters available"); | |
29 | } | |
30 | T_LOG("device has %" PRIu32 " fixed counters", ctrs_cnt); | |
31 | ||
32 | T_QUIET; T_ASSERT_POSIX_SUCCESS(kpc_force_all_ctrs_set(1), NULL); | |
33 | T_ASSERT_POSIX_SUCCESS(kpc_set_counting(KPC_CLASS_FIXED_MASK), | |
0a7de745 | 34 | "kpc_set_counting"); |
5ba3f43e | 35 | T_ASSERT_POSIX_SUCCESS(kpc_set_thread_counting(KPC_CLASS_FIXED_MASK), |
0a7de745 | 36 | "kpc_set_thread_counting"); |
5ba3f43e A |
37 | |
38 | T_SETUPEND; | |
39 | ||
40 | ctrs_a = malloc(ctrs_cnt * sizeof(uint64_t)); | |
41 | T_QUIET; T_ASSERT_NOTNULL(ctrs_a, NULL); | |
42 | ||
43 | err = kpc_get_thread_counters(0, ctrs_cnt, ctrs_a); | |
44 | T_ASSERT_POSIX_SUCCESS(err, "kpc_get_thread_counters"); | |
45 | ||
46 | for (uint32_t i = 0; i < ctrs_cnt; i++) { | |
47 | T_LOG("checking counter %d with value %" PRIu64 " > 0", i, ctrs_a[i]); | |
48 | T_QUIET; | |
49 | T_EXPECT_GT(ctrs_a[i], UINT64_C(0), "counter %d is non-zero", i); | |
50 | } | |
51 | ||
52 | ctrs_b = malloc(ctrs_cnt * sizeof(uint64_t)); | |
53 | T_QUIET; T_ASSERT_NOTNULL(ctrs_b, NULL); | |
54 | ||
55 | err = kpc_get_thread_counters(0, ctrs_cnt, ctrs_b); | |
56 | T_ASSERT_POSIX_SUCCESS(err, "kpc_get_thread_counters"); | |
57 | ||
58 | for (uint32_t i = 0; i < ctrs_cnt; i++) { | |
59 | T_LOG("checking counter %d with value %" PRIu64 | |
0a7de745 | 60 | " > previous value %" PRIu64, i, ctrs_b[i], ctrs_a[i]); |
5ba3f43e A |
61 | T_QUIET; |
62 | T_EXPECT_GT(ctrs_b[i], UINT64_C(0), "counter %d is non-zero", i); | |
63 | T_QUIET; T_EXPECT_LT(ctrs_a[i], ctrs_b[i], | |
0a7de745 | 64 | "counter %d is increasing", i); |
5ba3f43e A |
65 | } |
66 | ||
67 | free(ctrs_a); | |
68 | free(ctrs_b); | |
69 | } | |
cb323159 A |
70 | |
71 | #if defined(__arm64__) | |
72 | /* | |
73 | * This policy only applies to arm64 devices. | |
74 | */ | |
75 | ||
/* Saved value of the kpc.disable_whitelist sysctl, restored by whitelist_atend(). */
static int g_prev_disablewl = 0;
77 | ||
/*
 * End-of-test handler: write the saved kpc.disable_whitelist value back so
 * the system is left the way the test found it.  Failure is only logged --
 * this is best-effort cleanup, not an assertion.
 */
static void
whitelist_atend(void)
{
	size_t len = sizeof(g_prev_disablewl);
	int err;

	err = sysctlbyname("kpc.disable_whitelist", NULL, NULL,
	    &g_prev_disablewl, len);
	if (err < 0) {
		T_LOG("failed to reset whitelist: %d (%s)", errno, strerror(errno));
	}
}
87 | ||
/*
 * Enable whitelist enforcement via the kpc.disable_whitelist sysctl (saving
 * the previous value for restoration at test end), then verify that:
 *   - a whitelisted event (CORE_CYCLE, 0x2) can be configured,
 *   - non-event bits in the config word are ignored by the filter, and
 *   - a non-whitelisted event (0xfe) is rejected with EPERM.
 */
T_DECL(whitelist, "ensure kpc's whitelist is filled out")
{
	/* Start enforcing the whitelist. */
	int set = 0;
	size_t getsz = sizeof(g_prev_disablewl);
	int ret = sysctlbyname("kpc.disable_whitelist", &g_prev_disablewl, &getsz,
	    &set, sizeof(set));
	if (ret < 0 && errno == ENOENT) {
		T_SKIP("kpc not running with a whitelist, or RELEASE kernel");
	}

	T_ASSERT_POSIX_SUCCESS(ret, "started enforcing the event whitelist");
	T_ATEND(whitelist_atend);

	uint32_t nconfigs = kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
	/*
	 * Guard the config[0] writes below: with zero configurable counters,
	 * calloc(0, ...) may return NULL or a zero-size block, and indexing it
	 * would be undefined behavior.
	 */
	if (nconfigs == 0) {
		T_SKIP("no configurable counters available");
	}
	uint64_t *config = calloc(nconfigs, sizeof(*config));
	T_QUIET; T_ASSERT_NOTNULL(config, "allocated configuration words");

	/*
	 * Check that events in the whitelist are allowed.  CORE_CYCLE (0x2) is
	 * always present in the whitelist.
	 */
	config[0] = 0x02;
	ret = kpc_set_config(KPC_CLASS_CONFIGURABLE_MASK, config);
	T_ASSERT_POSIX_SUCCESS(ret, "configured kpc to count cycles");

	/* Check that non-event bits are ignored by the whitelist. */
	config[0] = 0x102;
	ret = kpc_set_config(KPC_CLASS_CONFIGURABLE_MASK, config);
	T_ASSERT_POSIX_SUCCESS(ret,
	    "configured kpc to count cycles with non-event bits set");

	/* Check that configurations of non-whitelisted events fail. */
	config[0] = 0xfe;
	ret = kpc_set_config(KPC_CLASS_CONFIGURABLE_MASK, config);
	T_ASSERT_POSIX_FAILURE(ret, EPERM,
	    "shouldn't allow arbitrary events with whitelist enabled");

	/* Clean up the configuration; result intentionally ignored. */
	config[0] = 0;
	(void)kpc_set_config(KPC_CLASS_CONFIGURABLE_MASK, config);

	free(config);
}
131 | ||
132 | #endif /* defined(__arm64__) */ |