/* osfmk/tests/vfp_state_test.c — apple/xnu (xnu-7195.101.1) */
/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28
29 #if !(DEVELOPMENT || DEBUG)
30 #error "Testing is not enabled on RELEASE configurations"
31 #endif
32
33 #include <tests/xnupost.h>
34 #include <kern/kalloc.h>
35 #include <kern/clock.h>
36 #include <kern/thread.h>
37 #include <sys/random.h>
38
39 #define VFP_STATE_TEST_N_THREADS 4
40 #define VFP_STATE_TEST_N_REGS 8
41 #define VFP_STATE_TEST_N_ITER 100
42 #define VFP_STATE_TEST_DELAY_USEC 10000
43 #if __arm__
44 #define VFP_STATE_TEST_NZCV_SHIFT 28
45 #define VFP_STATE_TEST_NZCV_MAX 16
46 #else
47 #define VFP_STATE_TEST_RMODE_STRIDE_SHIFT 20
48 #define VFP_STATE_TEST_RMODE_STRIDE_MAX 16
49 #endif
50
51 #if __ARM_VFP__
52 extern kern_return_t vfp_state_test(void);
53
/*
 * Base bit patterns loaded into the VFP registers under test; each thread
 * XORs these with its own random value (vfp_reg_rand) so every thread's
 * register contents are unique.
 */
const uint64_t vfp_state_test_regs[VFP_STATE_TEST_N_REGS] = {
	0x6a4cac4427ab5658, 0x51200e9ebbe0c9d1,
	0xa94d20c2bbe367bc, 0xfee45035460927db,
	0x64f3f1f7e93d019f, 0x02a625f02b890a40,
	0xf5e42399d8480de8, 0xc38cdde520908d6b,
};
60
/* Per-thread arguments for vfp_state_test_thread_routine(). */
struct vfp_state_test_args {
	uint64_t vfp_reg_rand;          /* random value XOR'd into each test register pattern */
#if __arm__
	uint32_t fp_control_mask;       /* bits OR'd into FPSCR (32-bit on arm) */
#else
	uint64_t fp_control_mask;       /* bits OR'd into FPCR */
#endif
	int result;                     /* set by the thread: 0 = pass, -1 = fail/alloc error */
	int *start_barrier;             /* counter all threads bump+wait on before testing */
	int *end_barrier;               /* counter each thread bumps when finished */
};
72
/*
 * Block the calling thread until *var reaches num.
 *
 * Standard assert_wait()/thread_block() pattern: after declaring the wait
 * we re-check the counter so a wakeup that raced in between is not lost;
 * if the condition became true we cancel the declared wait with
 * clear_wait() instead of blocking. NULL var is a no-op.
 */
static void
wait_threads(
	int* var,
	int num)
{
	if (var != NULL) {
		while (os_atomic_load(var, acquire) != num) {
			assert_wait((event_t) var, THREAD_UNINT);
			/* re-check after assert_wait to avoid a missed-wakeup race */
			if (os_atomic_load(var, acquire) != num) {
				(void) thread_block(THREAD_CONTINUE_NULL);
			} else {
				/* condition already met: undo the declared wait */
				clear_wait(current_thread(), THREAD_AWAKENED);
			}
		}
	}
}
89
90 static void
91 wake_threads(
92 int* var)
93 {
94 if (var) {
95 os_atomic_inc(var, relaxed);
96 thread_wakeup((event_t) var);
97 }
98 }
99
/*
 * Worker thread body: load unique patterns into VFP registers d8-d15 and
 * the FP control register, synchronize with the sibling threads, then
 * repeatedly sleep and verify that the kernel's VFP save/restore path
 * preserved every value. Reports pass/fail through args->result
 * (0 = pass, -1 = failure or allocation error) and terminates itself.
 */
static void
vfp_state_test_thread_routine(void *args, __unused wait_result_t wr)
{
	struct vfp_state_test_args *vfp_state_test_args = (struct vfp_state_test_args *)args;
	uint64_t *vfp_regs, *vfp_regs_expected;
	int retval;
#if __arm__
	uint32_t fp_control, fp_control_expected;
#else
	uint64_t fp_control, fp_control_expected;
#endif

	/* Assume failure until the full iteration loop completes */
	vfp_state_test_args->result = -1;

	/* Allocate memory to store expected and actual VFP register values */
	vfp_regs = kalloc(sizeof(vfp_state_test_regs));
	if (vfp_regs == NULL) {
		goto vfp_state_thread_kalloc1_failure;
	}

	vfp_regs_expected = kalloc(sizeof(vfp_state_test_regs));
	if (vfp_regs_expected == NULL) {
		goto vfp_state_thread_kalloc2_failure;
	}

	/* Preload VFP registers with unique, per-thread patterns */
	bcopy(vfp_state_test_regs, vfp_regs_expected, sizeof(vfp_state_test_regs));
	for (int i = 0; i < VFP_STATE_TEST_N_REGS; i++) {
		vfp_regs_expected[i] ^= vfp_state_test_args->vfp_reg_rand;
	}

#if __arm__
	/* Load the expected patterns into d8-d15 (the registers under test) */
	asm volatile ("vldr d8, [%0, #0] \t\n vldr d9, [%0, #8] \t\n\
		 vldr d10, [%0, #16] \t\n vldr d11, [%0, #24] \t\n\
		 vldr d12, [%0, #32] \t\n vldr d13, [%0, #40] \t\n\
		 vldr d14, [%0, #48] \t\n vldr d15, [%0, #56]" \
	    : : "r"(vfp_regs_expected) : \
	    "memory", "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15");

	/*
	 * Set FPSCR to a known value, so we can validate the save/restore path.
	 * Only touch NZCV flags, since 1) writing them does not have visible side-effects
	 * and 2) they're only set by the CPU as a result of executing an FP comparison,
	 * which do not exist in this function.
	 */
	asm volatile ("fmrx %0, fpscr" : "=r"(fp_control_expected));
	fp_control_expected |= vfp_state_test_args->fp_control_mask;
	asm volatile ("fmxr fpscr, %0" : : "r"(fp_control_expected));
#else
	/* arm64: load the expected patterns into d8-d15 */
	asm volatile ("ldr d8, [%0, #0] \t\n ldr d9, [%0, #8] \t\n\
		 ldr d10, [%0, #16] \t\n ldr d11, [%0, #24] \t\n\
		 ldr d12, [%0, #32] \t\n ldr d13, [%0, #40] \t\n\
		 ldr d14, [%0, #48] \t\n ldr d15, [%0, #56]" \
	    : : "r"(vfp_regs_expected) : \
	    "memory", "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15");

	/* Set a per-thread pattern in FPCR (RMODE/STRIDE bits, per the masks above) */
	asm volatile ("mrs %0, fpcr" : "=r"(fp_control_expected));
	fp_control_expected |= vfp_state_test_args->fp_control_mask;
	asm volatile ("msr fpcr, %0" : : "r"(fp_control_expected));
#endif

	/* Make sure all threads start at roughly the same time */
	wake_threads(vfp_state_test_args->start_barrier);
	wait_threads(vfp_state_test_args->start_barrier, VFP_STATE_TEST_N_THREADS);

	/* Check VFP registers against expected values, and go to sleep */
	for (int i = 0; i < VFP_STATE_TEST_N_ITER; i++) {
		/* Zero the scratch buffer first so a skipped store cannot pass the compare */
		bzero(vfp_regs, sizeof(vfp_state_test_regs));

#if __arm__
		/* Read back the current contents of d8-d15 and FPSCR */
		asm volatile ("vstr d8, [%0, #0] \t\n vstr d9, [%0, #8] \t\n\
			 vstr d10, [%0, #16] \t\n vstr d11, [%0, #24] \t\n\
			 vstr d12, [%0, #32] \t\n vstr d13, [%0, #40] \t\n\
			 vstr d14, [%0, #48] \t\n vstr d15, [%0, #56]" \
		    : : "r"(vfp_regs) : "memory");
		asm volatile ("fmrx %0, fpscr" : "=r"(fp_control));
#else
		/* Read back the current contents of d8-d15 and FPCR */
		asm volatile ("str d8, [%0, #0] \t\n str d9, [%0, #8] \t\n\
			 str d10, [%0, #16] \t\n str d11, [%0, #24] \t\n\
			 str d12, [%0, #32] \t\n str d13, [%0, #40] \t\n\
			 str d14, [%0, #48] \t\n str d15, [%0, #56]" \
		    : : "r"(vfp_regs) : "memory");
		asm volatile ("mrs %0, fpcr" : "=r"(fp_control));
#endif

		retval = bcmp(vfp_regs, vfp_regs_expected, sizeof(vfp_state_test_regs));
		if ((retval != 0) || (fp_control != fp_control_expected)) {
			goto vfp_state_thread_cmp_failure;
		}

		/* Sleep so the thread gets context-switched, exercising save/restore */
		delay(VFP_STATE_TEST_DELAY_USEC);
	}

	vfp_state_test_args->result = 0;

	/* goto-cleanup: each label frees exactly the allocations made before its jump */
vfp_state_thread_cmp_failure:
	kfree(vfp_regs_expected, sizeof(vfp_state_test_regs));
vfp_state_thread_kalloc2_failure:
	kfree(vfp_regs, sizeof(vfp_state_test_regs));
vfp_state_thread_kalloc1_failure:

	/* Signal that the thread has finished, and terminate */
	wake_threads(vfp_state_test_args->end_barrier);
	thread_terminate_self();
}
205
206 /*
207 * This test spawns N threads that preload unique values into
208 * callee-saved VFP registers and then repeatedly check them
209 * for correctness after waking up from delay()
210 */
211 kern_return_t
212 vfp_state_test(void)
213 {
214 thread_t vfp_state_thread[VFP_STATE_TEST_N_THREADS];
215 struct vfp_state_test_args vfp_state_test_args[VFP_STATE_TEST_N_THREADS];
216 kern_return_t retval;
217 int start_barrier = 0, end_barrier = 0;
218
219 /* Spawn threads */
220 for (int i = 0; i < VFP_STATE_TEST_N_THREADS; i++) {
221 vfp_state_test_args[i].start_barrier = &start_barrier;
222 vfp_state_test_args[i].end_barrier = &end_barrier;
223 #if __arm__
224 vfp_state_test_args[i].fp_control_mask = (i % VFP_STATE_TEST_NZCV_MAX) << VFP_STATE_TEST_NZCV_SHIFT;
225 #else
226 vfp_state_test_args[i].fp_control_mask = (i % VFP_STATE_TEST_RMODE_STRIDE_MAX) << VFP_STATE_TEST_RMODE_STRIDE_SHIFT;
227 #endif
228 read_random(&vfp_state_test_args[i].vfp_reg_rand, sizeof(uint64_t));
229
230 retval = kernel_thread_start((thread_continue_t)vfp_state_test_thread_routine,
231 (void *)&vfp_state_test_args[i],
232 &vfp_state_thread[i]);
233
234 T_EXPECT((retval == KERN_SUCCESS), "thread %d started", i);
235 }
236
237 /* Wait for all threads to finish */
238 wait_threads(&end_barrier, VFP_STATE_TEST_N_THREADS);
239
240 /* Check if all threads completed successfully */
241 for (int i = 0; i < VFP_STATE_TEST_N_THREADS; i++) {
242 T_EXPECT((vfp_state_test_args[i].result == 0), "thread %d finished", i);
243 }
244
245 return KERN_SUCCESS;
246 }
247 #endif /* __ARM_VFP__ */