/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
29 #if !(DEVELOPMENT || DEBUG)
30 #error "Testing is not enabled on RELEASE configurations"
33 #include <tests/xnupost.h>
34 #include <kern/kalloc.h>
35 #include <kern/clock.h>
36 #include <kern/thread.h>
37 #include <sys/random.h>
39 #define VFP_STATE_TEST_N_THREADS 4
40 #define VFP_STATE_TEST_N_REGS 8
41 #define VFP_STATE_TEST_N_ITER 100
42 #define VFP_STATE_TEST_DELAY_USEC 10000
44 #define VFP_STATE_TEST_NZCV_SHIFT 28
45 #define VFP_STATE_TEST_NZCV_MAX 16
47 #define VFP_STATE_TEST_RMODE_STRIDE_SHIFT 20
48 #define VFP_STATE_TEST_RMODE_STRIDE_MAX 16
52 extern kern_return_t
vfp_state_test(void);
54 const uint64_t vfp_state_test_regs
[VFP_STATE_TEST_N_REGS
] = {
55 0x6a4cac4427ab5658, 0x51200e9ebbe0c9d1,
56 0xa94d20c2bbe367bc, 0xfee45035460927db,
57 0x64f3f1f7e93d019f, 0x02a625f02b890a40,
58 0xf5e42399d8480de8, 0xc38cdde520908d6b,
/* Per-thread arguments/results for vfp_state_test_thread_routine(). */
struct vfp_state_test_args {
	uint64_t vfp_reg_rand;          /* random pattern XOR'd into the preloaded VFP regs */
#if __arm__
	uint32_t fp_control_mask;       /* bits OR'd into FPSCR */
#else
	uint64_t fp_control_mask;       /* bits OR'd into FPCR */
#endif
	int result;                     /* 0 on success, -1 on any failure */
	int *start_barrier;             /* counter: released once all threads are running */
	int *end_barrier;               /* counter: bumped when a thread finishes */
};
79 while (os_atomic_load(var
, acquire
) != num
) {
80 assert_wait((event_t
) var
, THREAD_UNINT
);
81 if (os_atomic_load(var
, acquire
) != num
) {
82 (void) thread_block(THREAD_CONTINUE_NULL
);
84 clear_wait(current_thread(), THREAD_AWAKENED
);
95 os_atomic_inc(var
, relaxed
);
96 thread_wakeup((event_t
) var
);
101 vfp_state_test_thread_routine(void *args
, __unused wait_result_t wr
)
103 struct vfp_state_test_args
*vfp_state_test_args
= (struct vfp_state_test_args
*)args
;
104 uint64_t *vfp_regs
, *vfp_regs_expected
;
107 uint32_t fp_control
, fp_control_expected
;
109 uint64_t fp_control
, fp_control_expected
;
112 vfp_state_test_args
->result
= -1;
114 /* Allocate memory to store expected and actual VFP register values */
115 vfp_regs
= kalloc(sizeof(vfp_state_test_regs
));
116 if (vfp_regs
== NULL
) {
117 goto vfp_state_thread_kalloc1_failure
;
120 vfp_regs_expected
= kalloc(sizeof(vfp_state_test_regs
));
121 if (vfp_regs_expected
== NULL
) {
122 goto vfp_state_thread_kalloc2_failure
;
125 /* Preload VFP registers with unique, per-thread patterns */
126 bcopy(vfp_state_test_regs
, vfp_regs_expected
, sizeof(vfp_state_test_regs
));
127 for (int i
= 0; i
< VFP_STATE_TEST_N_REGS
; i
++) {
128 vfp_regs_expected
[i
] ^= vfp_state_test_args
->vfp_reg_rand
;
132 asm volatile ("vldr d8, [%0, #0] \t\n vldr d9, [%0, #8] \t\n\
133 vldr d10, [%0, #16] \t\n vldr d11, [%0, #24] \t\n\
134 vldr d12, [%0, #32] \t\n vldr d13, [%0, #40] \t\n\
135 vldr d14, [%0, #48] \t\n vldr d15, [%0, #56]" \
136 : : "r"(vfp_regs_expected
) : \
137 "memory", "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15");
140 * Set FPSCR to a known value, so we can validate the save/restore path.
141 * Only touch NZCV flags, since 1) writing them does not have visible side-effects
142 * and 2) they're only set by the CPU as a result of executing an FP comparison,
143 * which do not exist in this function.
145 asm volatile ("fmrx %0, fpscr" : "=r"(fp_control_expected
));
146 fp_control_expected
|= vfp_state_test_args
->fp_control_mask
;
147 asm volatile ("fmxr fpscr, %0" : : "r"(fp_control_expected
));
149 asm volatile ("ldr d8, [%0, #0] \t\n ldr d9, [%0, #8] \t\n\
150 ldr d10, [%0, #16] \t\n ldr d11, [%0, #24] \t\n\
151 ldr d12, [%0, #32] \t\n ldr d13, [%0, #40] \t\n\
152 ldr d14, [%0, #48] \t\n ldr d15, [%0, #56]" \
153 : : "r"(vfp_regs_expected
) : \
154 "memory", "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15");
156 asm volatile ("mrs %0, fpcr" : "=r"(fp_control_expected
));
157 fp_control_expected
|= vfp_state_test_args
->fp_control_mask
;
158 asm volatile ("msr fpcr, %0" : : "r"(fp_control_expected
));
161 /* Make sure all threads start at roughly the same time */
162 wake_threads(vfp_state_test_args
->start_barrier
);
163 wait_threads(vfp_state_test_args
->start_barrier
, VFP_STATE_TEST_N_THREADS
);
165 /* Check VFP registers against expected values, and go to sleep */
166 for (int i
= 0; i
< VFP_STATE_TEST_N_ITER
; i
++) {
167 bzero(vfp_regs
, sizeof(vfp_state_test_regs
));
170 asm volatile ("vstr d8, [%0, #0] \t\n vstr d9, [%0, #8] \t\n\
171 vstr d10, [%0, #16] \t\n vstr d11, [%0, #24] \t\n\
172 vstr d12, [%0, #32] \t\n vstr d13, [%0, #40] \t\n\
173 vstr d14, [%0, #48] \t\n vstr d15, [%0, #56]" \
174 : : "r"(vfp_regs
) : "memory");
175 asm volatile ("fmrx %0, fpscr" : "=r"(fp_control
));
177 asm volatile ("str d8, [%0, #0] \t\n str d9, [%0, #8] \t\n\
178 str d10, [%0, #16] \t\n str d11, [%0, #24] \t\n\
179 str d12, [%0, #32] \t\n str d13, [%0, #40] \t\n\
180 str d14, [%0, #48] \t\n str d15, [%0, #56]" \
181 : : "r"(vfp_regs
) : "memory");
182 asm volatile ("mrs %0, fpcr" : "=r"(fp_control
));
185 retval
= bcmp(vfp_regs
, vfp_regs_expected
, sizeof(vfp_state_test_regs
));
186 if ((retval
!= 0) || (fp_control
!= fp_control_expected
)) {
187 goto vfp_state_thread_cmp_failure
;
190 delay(VFP_STATE_TEST_DELAY_USEC
);
193 vfp_state_test_args
->result
= 0;
195 vfp_state_thread_cmp_failure
:
196 kfree(vfp_regs_expected
, sizeof(vfp_state_test_regs
));
197 vfp_state_thread_kalloc2_failure
:
198 kfree(vfp_regs
, sizeof(vfp_state_test_regs
));
199 vfp_state_thread_kalloc1_failure
:
201 /* Signal that the thread has finished, and terminate */
202 wake_threads(vfp_state_test_args
->end_barrier
);
203 thread_terminate_self();
207 * This test spawns N threads that preload unique values into
208 * callee-saved VFP registers and then repeatedly check them
209 * for correctness after waking up from delay()
214 thread_t vfp_state_thread
[VFP_STATE_TEST_N_THREADS
];
215 struct vfp_state_test_args vfp_state_test_args
[VFP_STATE_TEST_N_THREADS
];
216 kern_return_t retval
;
217 int start_barrier
= 0, end_barrier
= 0;
220 for (int i
= 0; i
< VFP_STATE_TEST_N_THREADS
; i
++) {
221 vfp_state_test_args
[i
].start_barrier
= &start_barrier
;
222 vfp_state_test_args
[i
].end_barrier
= &end_barrier
;
224 vfp_state_test_args
[i
].fp_control_mask
= (i
% VFP_STATE_TEST_NZCV_MAX
) << VFP_STATE_TEST_NZCV_SHIFT
;
226 vfp_state_test_args
[i
].fp_control_mask
= (i
% VFP_STATE_TEST_RMODE_STRIDE_MAX
) << VFP_STATE_TEST_RMODE_STRIDE_SHIFT
;
228 read_random(&vfp_state_test_args
[i
].vfp_reg_rand
, sizeof(uint64_t));
230 retval
= kernel_thread_start((thread_continue_t
)vfp_state_test_thread_routine
,
231 (void *)&vfp_state_test_args
[i
],
232 &vfp_state_thread
[i
]);
234 T_EXPECT((retval
== KERN_SUCCESS
), "thread %d started", i
);
237 /* Wait for all threads to finish */
238 wait_threads(&end_barrier
, VFP_STATE_TEST_N_THREADS
);
240 /* Check if all threads completed successfully */
241 for (int i
= 0; i
< VFP_STATE_TEST_N_THREADS
; i
++) {
242 T_EXPECT((vfp_state_test_args
[i
].result
== 0), "thread %d finished", i
);
247 #endif /* __ARM_VFP__ */