/*
 * Copyright (c) 2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* sysctl interface for testing percpu counters in DEBUG or DEVELOPMENT kernel only. */
#if !(DEVELOPMENT || DEBUG)
#error "Counter testing is not enabled on RELEASE configurations"
#endif

#include <sys/sysctl.h>
#include <kern/counter.h>
#include <machine/atomic.h>
#include <libkern/libkern.h>
#include <machine/machine_routines.h>
#include <kern/cpu_data.h>
#include <os/log.h>

#if CONFIG_XNUPOST
#include <tests/xnupost.h>
#endif /* CONFIG_XNUPOST */

static _Atomic boolean_t scalable_counter_test_running = FALSE;
scalable_counter_t test_scalable_counter;

SCALABLE_COUNTER_DEFINE(test_static_scalable_counter);
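
/*
 * Note: test_scalable_counter is allocated dynamically by the start handler
 * below via counter_alloc(), while test_static_scalable_counter is defined at
 * compile time and is therefore usable before zalloc comes up (exercised by
 * the startup-time sanity test below).
 */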

#if CONFIG_XNUPOST
kern_return_t counter_tests(void);

/*
 * Sanity test that a counter can be modified before zalloc is initialized.
 */
static void
bump_static_counter(void* arg)
{
	(void) arg;
	counter_inc(&test_static_scalable_counter);
}

/* Registered to run at the PMAP_STEAL startup phase, before zalloc is initialized. */
STARTUP_ARG(PMAP_STEAL, STARTUP_RANK_MIDDLE, bump_static_counter, NULL);

kern_return_t
counter_tests(void)
{
	T_ASSERT_EQ_ULLONG(counter_load(&test_static_scalable_counter), 1, "Counter was incremented");
	return KERN_SUCCESS;
}
#endif /* CONFIG_XNUPOST */
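
/*
 * Test lifecycle: userspace writes kern.scalable_counter_test_start to
 * allocate the dynamic per-cpu counter and mark the test running, drives the
 * add/load/benchmark sysctls below, then writes
 * kern.scalable_counter_test_finish to tear the counter down. Handlers that
 * touch the dynamic counter reject requests unless the test is running.
 */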

static int
sysctl_scalable_counter_test_start SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int ret_val = 1;
	int error = 0;
	boolean_t exclusive;
	error = sysctl_io_number(req, ret_val, sizeof(int), &ret_val, NULL);
	if (error || !req->newptr) {
		return error;
	}

	/* The test doesn't support being run multiple times in parallel. */
	exclusive = os_atomic_cmpxchg(&scalable_counter_test_running, FALSE, TRUE, seq_cst);
	if (!exclusive) {
		os_log(OS_LOG_DEFAULT, "scalable_counter_test: Caught attempt to run the test in parallel.");
		return EINVAL;
	}
	counter_alloc(&test_scalable_counter);
	return 0;
}

static int
sysctl_scalable_counter_test_finish SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	boolean_t exclusive;
	int ret_val = 1;
	int error = 0;
	error = sysctl_io_number(req, ret_val, sizeof(int), &ret_val, NULL);
	if (error || !req->newptr) {
		return error;
	}

	/* The test doesn't support being run multiple times in parallel. */
	exclusive = os_atomic_cmpxchg(&scalable_counter_test_running, TRUE, FALSE, seq_cst);
	if (!exclusive) {
		/* Finish called without start. */
		return EINVAL;
	}
	counter_free(&test_scalable_counter);
	return 0;
}

static int
sysctl_scalable_counter_add SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int64_t value = 0;
	int error = 0;
	if (!os_atomic_load(&scalable_counter_test_running, seq_cst)) {
		/* Must call start */
		return EINVAL;
	}
	error = sysctl_io_number(req, value, sizeof(int64_t), &value, NULL);
	if (error || !req->newptr) {
		return error;
	}
	counter_add(&test_scalable_counter, value);
	return 0;
}

static int
sysctl_static_scalable_counter_add SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int64_t value = 0;
	int error = 0;
	if (!os_atomic_load(&scalable_counter_test_running, seq_cst)) {
		/* Must call start */
		return EINVAL;
	}
	error = sysctl_io_number(req, value, sizeof(int64_t), &value, NULL);
	if (error || !req->newptr) {
		return error;
	}
	counter_add(&test_static_scalable_counter, value);
	return 0;
}

static int
sysctl_scalable_counter_load SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	uint64_t value;
	if (!os_atomic_load(&scalable_counter_test_running, seq_cst)) {
		/* Must call start */
		return EINVAL;
	}
	value = counter_load(&test_scalable_counter);
	return SYSCTL_OUT(req, &value, sizeof(value));
}

static int
sysctl_scalable_counter_write_benchmark SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error;
	int64_t iterations;
	int ret_val = 0;
	if (!os_atomic_load(&scalable_counter_test_running, seq_cst)) {
		/* Must call start */
		return EINVAL;
	}
	error = sysctl_io_number(req, ret_val, sizeof(int), &iterations, NULL);
	if (error || !req->newptr) {
		return error;
	}
	for (int64_t i = 0; i < iterations; i++) {
		counter_inc(&test_scalable_counter);
	}
	return 0;
}
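
/*
 * The two benchmarks below provide baselines for the per-cpu counter write
 * benchmark above: an unsynchronized ("racy") global counter, which can lose
 * updates under concurrent writers, and a counter updated with relaxed
 * atomic adds.
 */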

static volatile uint64_t racy_counter;

static int
sysctl_racy_counter_write_benchmark SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error;
	int64_t iterations;
	int ret_val = 0;
	error = sysctl_io_number(req, ret_val, sizeof(int), &iterations, NULL);
	if (error || !req->newptr) {
		return error;
	}
	for (int64_t i = 0; i < iterations; i++) {
		racy_counter++;
	}
	return 0;
}

static int
sysctl_racy_counter_load SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	uint64_t value = racy_counter;
	return SYSCTL_OUT(req, &value, sizeof(value));
}

static _Atomic uint64_t atomic_counter;

static int
sysctl_atomic_counter_write_benchmark SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error;
	int64_t iterations;
	int ret_val = 0;
	error = sysctl_io_number(req, ret_val, sizeof(int), &iterations, NULL);
	if (error || !req->newptr) {
		return error;
	}
	for (int64_t i = 0; i < iterations; i++) {
		os_atomic_add(&atomic_counter, 1, relaxed);
	}
	return 0;
}

static int
sysctl_atomic_counter_load SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	uint64_t value = os_atomic_load_wide(&atomic_counter, relaxed);
	return SYSCTL_OUT(req, &value, sizeof(value));
}
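
/*
 * sysctl registrations. All nodes live under kern.* and carry
 * CTLFLAG_MASKED, so they are hidden from sysctl listings and must be
 * addressed by name.
 */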

SYSCTL_PROC(_kern, OID_AUTO, scalable_counter_test_start,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, sysctl_scalable_counter_test_start, "I", "Setup per-cpu counter test");

SYSCTL_PROC(_kern, OID_AUTO, scalable_counter_test_finish,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, sysctl_scalable_counter_test_finish, "I", "Finish per-cpu counter test");

SYSCTL_PROC(_kern, OID_AUTO, scalable_counter_test_add,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, sysctl_scalable_counter_add, "I", "Perform an add on the per-cpu counter");

SYSCTL_PROC(_kern, OID_AUTO, static_scalable_counter_test_add,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, sysctl_static_scalable_counter_add, "I", "Perform an add on the static per-cpu counter");

SYSCTL_PROC(_kern, OID_AUTO, scalable_counter_test_load,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, sysctl_scalable_counter_load, "I", "Load the current per-cpu counter value.");

SYSCTL_SCALABLE_COUNTER(_kern, static_scalable_counter_test_load,
    test_static_scalable_counter, "Load the current static per-cpu counter value.");

SYSCTL_PROC(_kern, OID_AUTO, scalable_counter_write_benchmark,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, sysctl_scalable_counter_write_benchmark, "I", "Per-cpu counter write benchmark");

SYSCTL_PROC(_kern, OID_AUTO, scalable_counter_racy_counter_benchmark,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, sysctl_racy_counter_write_benchmark, "I", "Global counter racy benchmark");

SYSCTL_PROC(_kern, OID_AUTO, scalable_counter_racy_counter_load,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, sysctl_racy_counter_load, "I", "Global counter racy load");

SYSCTL_PROC(_kern, OID_AUTO, scalable_counter_atomic_counter_write_benchmark,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, sysctl_atomic_counter_write_benchmark, "I", "Atomic counter write benchmark");

SYSCTL_PROC(_kern, OID_AUTO, scalable_counter_atomic_counter_load,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, sysctl_atomic_counter_load, "I", "Atomic counter load");