/*
 * Copyright (c) 2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* sysctl interface for testing percpu counters in DEBUG or DEVELOPMENT kernel only. */
#if !(DEVELOPMENT || DEBUG)
#error "Counter testing is not enabled on RELEASE configurations"
#endif
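
/*
 * Rough usage sketch (illustrative, not part of this file's code): on a
 * DEVELOPMENT/DEBUG kernel the sysctls registered at the bottom of this file
 * can be driven from the shell with sysctl(8), e.g.:
 *
 *	sysctl -w kern.scalable_counter_test_start=1
 *	sysctl -w kern.scalable_counter_test_add=42
 *	sysctl kern.scalable_counter_test_load
 *	sysctl -w kern.scalable_counter_test_finish=1
 */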

#include <sys/sysctl.h>
#include <kern/counter.h>
#include <machine/atomic.h>
#include <libkern/libkern.h>
#include <machine/machine_routines.h>
#include <kern/cpu_data.h>

#include <os/log.h>

#ifdef CONFIG_XNUPOST
#include <tests/xnupost.h>
#endif /* CONFIG_XNUPOST */

static _Atomic boolean_t scalable_counter_test_running = FALSE;
scalable_counter_t test_scalable_counter;

SCALABLE_COUNTER_DEFINE(test_static_scalable_counter);
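
/*
 * Two flavors of per-cpu counter are exercised here: test_static_scalable_counter
 * is defined statically via SCALABLE_COUNTER_DEFINE() and is usable very early
 * in boot (before zalloc is initialized), while test_scalable_counter is
 * allocated dynamically with counter_alloc() in the start handler below.
 */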

#ifdef CONFIG_XNUPOST
kern_return_t counter_tests(void);
/*
 * Sanity test that a counter can be modified before zalloc is initialized.
 */
static void
bump_static_counter(void* arg)
{
	(void) arg;
	counter_inc(&test_static_scalable_counter);
}

STARTUP_ARG(PMAP_STEAL, STARTUP_RANK_MIDDLE, bump_static_counter, NULL);

kern_return_t
counter_tests()
{
	T_ASSERT_EQ_ULLONG(counter_load(&test_static_scalable_counter), 1, "Counter was incremented");
	return KERN_SUCCESS;
}
#endif /* CONFIG_XNUPOST */
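
/*
 * Note: STARTUP_ARG() arranges for bump_static_counter() to run during early
 * boot (PMAP_STEAL subsystem, STARTUP_RANK_MIDDLE), before zalloc is up;
 * counter_tests() then runs under XNUPOST and asserts that the single early
 * increment is visible through counter_load().
 */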

static int
sysctl_scalable_counter_test_start SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int ret_val = 1;
	int error = 0;
	boolean_t exclusive;
	error = sysctl_io_number(req, ret_val, sizeof(int), &ret_val, NULL);
	if (error || !req->newptr) {
		return error;
	}
	/* The test doesn't support being run multiple times in parallel. */
	exclusive = os_atomic_cmpxchg(&scalable_counter_test_running, FALSE, TRUE, seq_cst);
	if (!exclusive) {
		os_log(OS_LOG_DEFAULT, "scalable_counter_test: Caught attempt to run the test in parallel.");
		return EINVAL;
	}
	counter_alloc(&test_scalable_counter);
	return 0;
}

static int
sysctl_scalable_counter_test_finish SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	boolean_t exclusive;
	int ret_val = 0;
	int error = 0;
	error = sysctl_io_number(req, ret_val, sizeof(int), &ret_val, NULL);
	if (error || !req->newptr) {
		return error;
	}

	/* The test doesn't support being run multiple times in parallel. */
	exclusive = os_atomic_cmpxchg(&scalable_counter_test_running, TRUE, FALSE, seq_cst);
	if (!exclusive) {
		/* Finish called without start. */
		return EINVAL;
	}
	return 0;
}
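
/*
 * The start/finish pair brackets a test run: start atomically flips
 * scalable_counter_test_running from FALSE to TRUE and finish flips it back.
 * Using os_atomic_cmpxchg() for both transitions means a concurrent second
 * start, or a finish without a matching start, loses the race and is
 * rejected with EINVAL.
 */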

static int
sysctl_scalable_counter_add SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int64_t value = 0;
	int error = 0;
	if (!os_atomic_load(&scalable_counter_test_running, seq_cst)) {
		/* Must call start */
		return EINVAL;
	}
	error = sysctl_io_number(req, value, sizeof(int64_t), &value, NULL);
	if (error || !req->newptr) {
		return error;
	}
	counter_add(&test_scalable_counter, value);
	return 0;
}

static int
sysctl_static_scalable_counter_add SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int64_t value = 0;
	int error = 0;
	if (!os_atomic_load(&scalable_counter_test_running, seq_cst)) {
		/* Must call start */
		return EINVAL;
	}
	error = sysctl_io_number(req, value, sizeof(int64_t), &value, NULL);
	if (error || !req->newptr) {
		return error;
	}
	counter_add(&test_static_scalable_counter, value);
	return 0;
}

static int
sysctl_scalable_counter_load SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	uint64_t value;
	if (!os_atomic_load(&scalable_counter_test_running, seq_cst)) {
		/* Must call start */
		return EINVAL;
	}
	value = counter_load(&test_scalable_counter);
	return SYSCTL_OUT(req, &value, sizeof(value));
}

static int
sysctl_scalable_counter_write_benchmark SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error;
	/* Zero-initialize: sysctl_io_number() below only writes sizeof(int) bytes. */
	int64_t iterations = 0;
	int ret_val = 0;
	if (!os_atomic_load(&scalable_counter_test_running, seq_cst)) {
		/* Must call start */
		return EINVAL;
	}
	error = sysctl_io_number(req, ret_val, sizeof(int), &iterations, NULL);
	if (error || !req->newptr) {
		return error;
	}
	for (int64_t i = 0; i < iterations; i++) {
		counter_inc(&test_scalable_counter);
	}
	return 0;
}

static volatile uint64_t racy_counter;

static int
sysctl_racy_counter_write_benchmark SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error;
	/* Zero-initialize: sysctl_io_number() below only writes sizeof(int) bytes. */
	int64_t iterations = 0;
	int ret_val = 0;
	error = sysctl_io_number(req, ret_val, sizeof(int), &iterations, NULL);
	if (error || !req->newptr) {
		return error;
	}
	for (int64_t i = 0; i < iterations; i++) {
		racy_counter++;
	}
	return 0;
}

static int
sysctl_racy_counter_load SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	uint64_t value = racy_counter;
	return SYSCTL_OUT(req, &value, sizeof(value));
}

static _Atomic uint64_t atomic_counter;

static int
sysctl_atomic_counter_write_benchmark SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error;
	/* Zero-initialize: sysctl_io_number() below only writes sizeof(int) bytes. */
	int64_t iterations = 0;
	int ret_val = 0;
	error = sysctl_io_number(req, ret_val, sizeof(int), &iterations, NULL);
	if (error || !req->newptr) {
		return error;
	}
	for (int64_t i = 0; i < iterations; i++) {
		os_atomic_add(&atomic_counter, 1, relaxed);
	}
	return 0;
}
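
/*
 * The three write benchmarks above are intended to be compared against each
 * other: counter_inc() on the per-cpu scalable counter, a plain unsynchronized
 * read-modify-write on racy_counter (which can lose updates when CPUs race),
 * and a relaxed os_atomic_add() on atomic_counter (correct, but every CPU
 * contends on the same cache line).
 */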

static int
sysctl_atomic_counter_load SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	uint64_t value = os_atomic_load_wide(&atomic_counter, relaxed);
	return SYSCTL_OUT(req, &value, sizeof(value));
}

SYSCTL_PROC(_kern, OID_AUTO, scalable_counter_test_start,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, sysctl_scalable_counter_test_start, "I", "Setup per-cpu counter test");

SYSCTL_PROC(_kern, OID_AUTO, scalable_counter_test_finish,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, sysctl_scalable_counter_test_finish, "I", "Finish per-cpu counter test");

SYSCTL_PROC(_kern, OID_AUTO, scalable_counter_test_add,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, sysctl_scalable_counter_add, "Q", "Perform an add on the per-cpu counter");

SYSCTL_PROC(_kern, OID_AUTO, static_scalable_counter_test_add,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, sysctl_static_scalable_counter_add, "Q", "Perform an add on the static per-cpu counter");

SYSCTL_PROC(_kern, OID_AUTO, scalable_counter_test_load,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, sysctl_scalable_counter_load, "Q", "Load the current per-cpu counter value.");

SYSCTL_SCALABLE_COUNTER(_kern, static_scalable_counter_test_load,
    test_static_scalable_counter, "Load the current static per-cpu counter value.");

SYSCTL_PROC(_kern, OID_AUTO, scalable_counter_write_benchmark,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, sysctl_scalable_counter_write_benchmark, "I", "Per-cpu counter write benchmark");

SYSCTL_PROC(_kern, OID_AUTO, scalable_counter_racy_counter_benchmark,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, sysctl_racy_counter_write_benchmark, "I", "Global counter racy benchmark");

SYSCTL_PROC(_kern, OID_AUTO, scalable_counter_racy_counter_load,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, sysctl_racy_counter_load, "Q", "Global counter racy load");

SYSCTL_PROC(_kern, OID_AUTO, scalable_counter_atomic_counter_write_benchmark,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, sysctl_atomic_counter_write_benchmark, "I", "Atomic counter write benchmark");

SYSCTL_PROC(_kern, OID_AUTO, scalable_counter_atomic_counter_load,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, sysctl_atomic_counter_load, "Q", "Atomic counter load");
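
/*
 * End-to-end usage sketch (hypothetical userspace harness, shown here only as
 * a comment, never compiled into the kernel): drives a full
 * start/add/load/finish cycle through sysctlbyname(3). Error handling is
 * elided for brevity.
 *
 *	#include <sys/sysctl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int one = 1;
 *		int64_t delta = 42;
 *		uint64_t value;
 *		size_t len = sizeof(value);
 *
 *		sysctlbyname("kern.scalable_counter_test_start", NULL, NULL, &one, sizeof(one));
 *		sysctlbyname("kern.scalable_counter_test_add", NULL, NULL, &delta, sizeof(delta));
 *		sysctlbyname("kern.scalable_counter_test_load", &value, &len, NULL, 0);
 *		printf("counter = %llu\n", (unsigned long long)value);
 *		sysctlbyname("kern.scalable_counter_test_finish", NULL, NULL, &one, sizeof(one));
 *		return 0;
 *	}
 */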