/*
 * Copyright (c) 2011-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright notice
 * and this permission notice appear in all copies of the software,
 * derivative works or modified versions, and any portions thereof, and that
 * both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.
 * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
 * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_ldebug.h>

#define LOCK_PRIVATE 1

#include <vm/pmap.h>
#include <kern/kalloc.h>
#include <kern/cpu_number.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>
#include <string.h>
#include <tests/xnupost.h>

#if MACH_KDB
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_print.h>
#endif /* MACH_KDB */

#include <sys/kdebug.h>
#include <sys/munge.h>
#include <machine/cpu_capabilities.h>
#include <arm/cpu_data_internal.h>
#include <arm/pmap.h>

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
#include <arm64/amcc_rorgn.h>
#endif // defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)

kern_return_t arm64_lock_test(void);
kern_return_t arm64_munger_test(void);
kern_return_t ex_cb_test(void);
kern_return_t arm64_pan_test(void);
kern_return_t arm64_late_pan_test(void);
#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
kern_return_t arm64_ropjop_test(void);
#endif
#if defined(KERNEL_INTEGRITY_CTRR)
kern_return_t ctrr_test(void);
kern_return_t ctrr_test_cpu(void);
#endif
#if HAS_TWO_STAGE_SPR_LOCK
kern_return_t arm64_spr_lock_test(void);
extern void arm64_msr_lock_test(uint64_t);
#endif

// exception handler ignores this fault address during PAN test
#if __ARM_PAN_AVAILABLE__
const uint64_t pan_ro_value = 0xFEEDB0B0DEADBEEF;
vm_offset_t pan_test_addr = 0;
vm_offset_t pan_ro_addr = 0;
volatile int pan_exception_level = 0;
volatile char pan_fault_value = 0;
#endif
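
/*
 * Summary of how arm64_pan_test() uses these globals: pan_test_addr holds
 * the kernel alias of a user-accessible commpage address and pan_ro_addr
 * the address of the read-only value above; pan_exception_level counts the
 * expected nested faults (the test asserts it reaches 2 per probe), and
 * pan_fault_value captures the byte read from inside the fault handler.
 */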

#include <libkern/OSAtomic.h>
#define LOCK_TEST_ITERATIONS 50
static hw_lock_data_t lt_hw_lock;
static lck_spin_t lt_lck_spin_t;
static lck_mtx_t lt_mtx;
static lck_rw_t lt_rwlock;
static volatile uint32_t lt_counter = 0;
static volatile int lt_spinvolatile;
static volatile uint32_t lt_max_holders = 0;
static volatile uint32_t lt_upgrade_holders = 0;
static volatile uint32_t lt_max_upgrade_holders = 0;
static volatile uint32_t lt_num_holders = 0;
static volatile uint32_t lt_done_threads;
static volatile uint32_t lt_target_done_threads;
static volatile uint32_t lt_cpu_bind_id = 0;

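/*
 * Harness overview: each lt_grab_* function below takes one lock type,
 * bumps lt_counter, and releases.  lt_thread() runs its grab function
 * LOCK_TEST_ITERATIONS times and then increments lt_done_threads, so a test
 * that starts N threads expects lt_counter == LOCK_TEST_ITERATIONS * N once
 * lt_done_threads reaches lt_target_done_threads.  lt_max_holders records
 * the peak number of simultaneous holders and must stay at 1 for exclusive
 * locks.
 */
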
static void
lt_note_another_blocking_lock_holder(void)
{
    hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
    lt_num_holders++;
    lt_max_holders = (lt_max_holders < lt_num_holders) ? lt_num_holders : lt_max_holders;
    hw_lock_unlock(&lt_hw_lock);
}

static void
lt_note_blocking_lock_release(void)
{
    hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
    lt_num_holders--;
    hw_lock_unlock(&lt_hw_lock);
}

static void
lt_spin_a_little_bit(void)
{
    uint32_t i;

    for (i = 0; i < 10000; i++) {
        lt_spinvolatile++;
    }
}

static void
lt_sleep_a_little_bit(void)
{
    delay(100);
}

static void
lt_grab_mutex(void)
{
    lck_mtx_lock(&lt_mtx);
    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_counter++;
    lt_note_blocking_lock_release();
    lck_mtx_unlock(&lt_mtx);
}

static void
lt_grab_mutex_with_try(void)
{
    while (0 == lck_mtx_try_lock(&lt_mtx)) {
        ;
    }
    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_counter++;
    lt_note_blocking_lock_release();
    lck_mtx_unlock(&lt_mtx);
}

static void
lt_grab_rw_exclusive(void)
{
    lck_rw_lock_exclusive(&lt_rwlock);
    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_counter++;
    lt_note_blocking_lock_release();
    lck_rw_done(&lt_rwlock);
}

static void
lt_grab_rw_exclusive_with_try(void)
{
    while (0 == lck_rw_try_lock_exclusive(&lt_rwlock)) {
        lt_sleep_a_little_bit();
    }

    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_counter++;
    lt_note_blocking_lock_release();
    lck_rw_done(&lt_rwlock);
}

/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
 * static void
 * lt_grab_rw_shared()
 * {
 *      lck_rw_lock_shared(&lt_rwlock);
 *      lt_counter++;
 *
 *      lt_note_another_blocking_lock_holder();
 *      lt_sleep_a_little_bit();
 *      lt_note_blocking_lock_release();
 *
 *      lck_rw_done(&lt_rwlock);
 * }
 */

/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
 * static void
 * lt_grab_rw_shared_with_try()
 * {
 *      while(0 == lck_rw_try_lock_shared(&lt_rwlock));
 *      lt_counter++;
 *
 *      lt_note_another_blocking_lock_holder();
 *      lt_sleep_a_little_bit();
 *      lt_note_blocking_lock_release();
 *
 *      lck_rw_done(&lt_rwlock);
 * }
 */

static void
lt_upgrade_downgrade_rw(void)
{
    boolean_t upgraded, success;

    success = lck_rw_try_lock_shared(&lt_rwlock);
    if (!success) {
        lck_rw_lock_shared(&lt_rwlock);
    }

    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_note_blocking_lock_release();

    upgraded = lck_rw_lock_shared_to_exclusive(&lt_rwlock);
    if (!upgraded) {
        success = lck_rw_try_lock_exclusive(&lt_rwlock);

        if (!success) {
            lck_rw_lock_exclusive(&lt_rwlock);
        }
    }

    lt_upgrade_holders++;
    if (lt_upgrade_holders > lt_max_upgrade_holders) {
        lt_max_upgrade_holders = lt_upgrade_holders;
    }

    lt_counter++;
    lt_sleep_a_little_bit();

    lt_upgrade_holders--;

    lck_rw_lock_exclusive_to_shared(&lt_rwlock);

    lt_spin_a_little_bit();
    lck_rw_done(&lt_rwlock);
}
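
/*
 * Note on the upgrade path above: if lck_rw_lock_shared_to_exclusive() fails
 * (another thread won the upgrade race), it returns with the shared hold
 * already dropped, so the lock must be re-acquired in exclusive mode from
 * scratch.
 */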

#if __AMP__
const int limit = 1000000;
static int lt_stress_local_counters[MAX_CPUS];

lck_ticket_t lt_ticket_lock;
lck_grp_t lt_ticket_grp;

static void
lt_stress_ticket_lock(void)
{
    int local_counter = 0;

    uint cpuid = cpu_number();

    kprintf("%s>cpu %d starting\n", __FUNCTION__, cpuid);

    lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
    lt_counter++;
    local_counter++;
    lck_ticket_unlock(&lt_ticket_lock);

    while (lt_counter < lt_target_done_threads) {
        ;
    }

    kprintf("%s>cpu %d started\n", __FUNCTION__, cpuid);

    while (lt_counter < limit) {
        lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
        if (lt_counter < limit) {
            lt_counter++;
            local_counter++;
        }
        lck_ticket_unlock(&lt_ticket_lock);
    }

    lt_stress_local_counters[cpuid] = local_counter;

    kprintf("%s>final counter %d cpu %d incremented the counter %d times\n", __FUNCTION__, lt_counter, cpuid, local_counter);
}
#endif
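
/*
 * The stress function above is validated in lt_test_locks(): the per-CPU
 * local_counter values are summed and compared against the shared lt_counter
 * (a mismatch would mean mutual exclusion was violated), and any CPU that
 * performed fewer than 10 acquisitions is flagged as starved, which a fair
 * FIFO ticket lock should not allow.
 */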

static void
lt_grab_hw_lock(void)
{
    hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
    lt_counter++;
    lt_spin_a_little_bit();
    hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_hw_lock_with_try(void)
{
    while (0 == hw_lock_try(&lt_hw_lock, LCK_GRP_NULL)) {
        ;
    }
    lt_counter++;
    lt_spin_a_little_bit();
    hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_hw_lock_with_to(void)
{
    while (0 == hw_lock_to(&lt_hw_lock, LockTimeOut, LCK_GRP_NULL)) {
        mp_enable_preemption();
    }
    lt_counter++;
    lt_spin_a_little_bit();
    hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_spin_lock(void)
{
    lck_spin_lock(&lt_lck_spin_t);
    lt_counter++;
    lt_spin_a_little_bit();
    lck_spin_unlock(&lt_lck_spin_t);
}

static void
lt_grab_spin_lock_with_try(void)
{
    while (0 == lck_spin_try_lock(&lt_lck_spin_t)) {
        ;
    }
    lt_counter++;
    lt_spin_a_little_bit();
    lck_spin_unlock(&lt_lck_spin_t);
}

static volatile boolean_t lt_thread_lock_grabbed;
static volatile boolean_t lt_thread_lock_success;

static void
lt_reset(void)
{
    lt_counter = 0;
    lt_max_holders = 0;
    lt_num_holders = 0;
    lt_max_upgrade_holders = 0;
    lt_upgrade_holders = 0;
    lt_done_threads = 0;
    lt_target_done_threads = 0;
    lt_cpu_bind_id = 0;

    OSMemoryBarrier();
}

static void
lt_trylock_hw_lock_with_to(void)
{
    OSMemoryBarrier();
    while (!lt_thread_lock_grabbed) {
        lt_sleep_a_little_bit();
        OSMemoryBarrier();
    }
    lt_thread_lock_success = hw_lock_to(&lt_hw_lock, 100, LCK_GRP_NULL);
    OSMemoryBarrier();
    mp_enable_preemption();
}

static void
lt_trylock_spin_try_lock(void)
{
    OSMemoryBarrier();
    while (!lt_thread_lock_grabbed) {
        lt_sleep_a_little_bit();
        OSMemoryBarrier();
    }
    lt_thread_lock_success = lck_spin_try_lock(&lt_lck_spin_t);
    OSMemoryBarrier();
}

static void
lt_trylock_thread(void *arg, wait_result_t wres __unused)
{
    void (*func)(void) = (void (*)(void))arg;

    func();

    OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_start_trylock_thread(thread_continue_t func)
{
    thread_t thread;
    kern_return_t kr;

    kr = kernel_thread_start(lt_trylock_thread, func, &thread);
    assert(kr == KERN_SUCCESS);

    thread_deallocate(thread);
}

static void
lt_wait_for_lock_test_threads(void)
{
    OSMemoryBarrier();
    /* Spin to reduce dependencies */
    while (lt_done_threads < lt_target_done_threads) {
        lt_sleep_a_little_bit();
        OSMemoryBarrier();
    }
    OSMemoryBarrier();
}

static kern_return_t
lt_test_trylocks(void)
{
    boolean_t success;
    extern unsigned int real_ncpus;

    /*
     * First mtx try lock succeeds, second fails.
     */
    success = lck_mtx_try_lock(&lt_mtx);
    T_ASSERT_NOTNULL(success, "First mtx try lock");
    success = lck_mtx_try_lock(&lt_mtx);
    T_ASSERT_NULL(success, "Second mtx try lock for a locked mtx");
    lck_mtx_unlock(&lt_mtx);

    /*
     * After regular grab, can't try lock.
     */
    lck_mtx_lock(&lt_mtx);
    success = lck_mtx_try_lock(&lt_mtx);
    T_ASSERT_NULL(success, "try lock should fail after regular lck_mtx_lock");
    lck_mtx_unlock(&lt_mtx);

    /*
     * Two shared try locks on a previously unheld rwlock succeed, and a
     * subsequent exclusive attempt fails.
     */
    success = lck_rw_try_lock_shared(&lt_rwlock);
    T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
    success = lck_rw_try_lock_shared(&lt_rwlock);
    T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
    success = lck_rw_try_lock_exclusive(&lt_rwlock);
    T_ASSERT_NULL(success, "exclusive lock attempt on previously held lock should fail");
    lck_rw_done(&lt_rwlock);
    lck_rw_done(&lt_rwlock);

    /*
     * After regular shared grab, can trylock
     * for shared but not for exclusive.
     */
    lck_rw_lock_shared(&lt_rwlock);
    success = lck_rw_try_lock_shared(&lt_rwlock);
    T_ASSERT_NOTNULL(success, "After regular shared grab another shared try lock should succeed.");
    success = lck_rw_try_lock_exclusive(&lt_rwlock);
    T_ASSERT_NULL(success, "After regular shared grab an exclusive lock attempt should fail.");
    lck_rw_done(&lt_rwlock);
    lck_rw_done(&lt_rwlock);

    /*
     * An exclusive try lock succeeds, subsequent shared and exclusive
     * attempts fail.
     */
    success = lck_rw_try_lock_exclusive(&lt_rwlock);
    T_ASSERT_NOTNULL(success, "An exclusive try lock should succeed");
    success = lck_rw_try_lock_shared(&lt_rwlock);
    T_ASSERT_NULL(success, "try lock in shared mode attempt after an exclusive grab should fail");
    success = lck_rw_try_lock_exclusive(&lt_rwlock);
    T_ASSERT_NULL(success, "try lock in exclusive mode attempt after an exclusive grab should fail");
    lck_rw_done(&lt_rwlock);

    /*
     * After regular exclusive grab, neither kind of trylock succeeds.
     */
    lck_rw_lock_exclusive(&lt_rwlock);
    success = lck_rw_try_lock_shared(&lt_rwlock);
    T_ASSERT_NULL(success, "After regular exclusive grab, shared trylock should not succeed");
    success = lck_rw_try_lock_exclusive(&lt_rwlock);
    T_ASSERT_NULL(success, "After regular exclusive grab, exclusive trylock should not succeed");
    lck_rw_done(&lt_rwlock);

    /*
     * First spin lock attempts succeed, second attempts fail.
     */
    success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
    T_ASSERT_NOTNULL(success, "First spin lock attempts should succeed");
    success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
    T_ASSERT_NULL(success, "Second attempt to spin lock should fail");
    hw_lock_unlock(&lt_hw_lock);

    hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
    success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
    T_ASSERT_NULL(success, "After taking spin lock, trylock attempt should fail");
    hw_lock_unlock(&lt_hw_lock);

    lt_reset();
    lt_thread_lock_grabbed = false;
    lt_thread_lock_success = true;
    lt_target_done_threads = 1;
    OSMemoryBarrier();
    lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
    success = hw_lock_to(&lt_hw_lock, 100, LCK_GRP_NULL);
    T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed");
    if (real_ncpus == 1) {
        mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
    }
    OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
    lt_wait_for_lock_test_threads();
    T_ASSERT_NULL(lt_thread_lock_success, "Second spin lock with timeout should fail and timeout");
    if (real_ncpus == 1) {
        mp_disable_preemption(); /* don't double-enable when we unlock */
    }
    hw_lock_unlock(&lt_hw_lock);

    lt_reset();
    lt_thread_lock_grabbed = false;
    lt_thread_lock_success = true;
    lt_target_done_threads = 1;
    OSMemoryBarrier();
    lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
    hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
    if (real_ncpus == 1) {
        mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
    }
    OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
    lt_wait_for_lock_test_threads();
    T_ASSERT_NULL(lt_thread_lock_success, "after taking a spin lock, lock attempt with timeout should fail");
    if (real_ncpus == 1) {
        mp_disable_preemption(); /* don't double-enable when we unlock */
    }
    hw_lock_unlock(&lt_hw_lock);

    success = lck_spin_try_lock(&lt_lck_spin_t);
    T_ASSERT_NOTNULL(success, "spin trylock of previously unheld lock should succeed");
    success = lck_spin_try_lock(&lt_lck_spin_t);
    T_ASSERT_NULL(success, "spin trylock attempt of previously held lock (with trylock) should fail");
    lck_spin_unlock(&lt_lck_spin_t);

    lt_reset();
    lt_thread_lock_grabbed = false;
    lt_thread_lock_success = true;
    lt_target_done_threads = 1;
    lt_start_trylock_thread(lt_trylock_spin_try_lock);
    lck_spin_lock(&lt_lck_spin_t);
    if (real_ncpus == 1) {
        mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
    }
    OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
    lt_wait_for_lock_test_threads();
    T_ASSERT_NULL(lt_thread_lock_success, "spin trylock attempt of previously held lock should fail");
    if (real_ncpus == 1) {
        mp_disable_preemption(); /* don't double-enable when we unlock */
    }
    lck_spin_unlock(&lt_lck_spin_t);

    return KERN_SUCCESS;
}

static void
lt_thread(void *arg, wait_result_t wres __unused)
{
    void (*func)(void) = (void (*)(void))arg;
    uint32_t i;

    for (i = 0; i < LOCK_TEST_ITERATIONS; i++) {
        func();
    }

    OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_start_lock_thread(thread_continue_t func)
{
    thread_t thread;
    kern_return_t kr;

    kr = kernel_thread_start(lt_thread, func, &thread);
    assert(kr == KERN_SUCCESS);

    thread_deallocate(thread);
}

#if __AMP__
static void
lt_bound_thread(void *arg, wait_result_t wres __unused)
{
    void (*func)(void) = (void (*)(void))arg;

    int cpuid = OSIncrementAtomic((volatile SInt32 *)&lt_cpu_bind_id);

    processor_t processor = processor_list;
    while ((processor != NULL) && (processor->cpu_id != cpuid)) {
        processor = processor->processor_list;
    }

    if (processor != NULL) {
        thread_bind(processor);
    }

    thread_block(THREAD_CONTINUE_NULL);

    func();

    OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_e_thread(void *arg, wait_result_t wres __unused)
{
    void (*func)(void) = (void (*)(void))arg;

    thread_t thread = current_thread();

    spl_t s = splsched();
    thread_lock(thread);
    thread->sched_flags |= TH_SFLAG_ECORE_ONLY;
    thread_unlock(thread);
    splx(s);

    thread_block(THREAD_CONTINUE_NULL);

    func();

    OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_p_thread(void *arg, wait_result_t wres __unused)
{
    void (*func)(void) = (void (*)(void))arg;

    thread_t thread = current_thread();

    spl_t s = splsched();
    thread_lock(thread);
    thread->sched_flags |= TH_SFLAG_PCORE_ONLY;
    thread_unlock(thread);
    splx(s);

    thread_block(THREAD_CONTINUE_NULL);

    func();

    OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_start_lock_thread_e(thread_continue_t func)
{
    thread_t thread;
    kern_return_t kr;

    kr = kernel_thread_start(lt_e_thread, func, &thread);
    assert(kr == KERN_SUCCESS);

    thread_deallocate(thread);
}

static void
lt_start_lock_thread_p(thread_continue_t func)
{
    thread_t thread;
    kern_return_t kr;

    kr = kernel_thread_start(lt_p_thread, func, &thread);
    assert(kr == KERN_SUCCESS);

    thread_deallocate(thread);
}

static void
lt_start_lock_thread_bound(thread_continue_t func)
{
    thread_t thread;
    kern_return_t kr;

    kr = kernel_thread_start(lt_bound_thread, func, &thread);
    assert(kr == KERN_SUCCESS);

    thread_deallocate(thread);
}
#endif
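
/*
 * lt_e_thread()/lt_p_thread() above restrict themselves to an efficiency or
 * performance cluster by setting TH_SFLAG_ECORE_ONLY / TH_SFLAG_PCORE_ONLY
 * under the thread lock; the thread_block() that follows gives the scheduler
 * a chance to migrate the thread to a matching core before the test function
 * runs.  lt_bound_thread() instead pins each new thread to the next cpu_id
 * via thread_bind().
 */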

static kern_return_t
lt_test_locks(void)
{
    kern_return_t kr = KERN_SUCCESS;
    lck_grp_attr_t *lga = lck_grp_attr_alloc_init();
    lck_grp_t *lg = lck_grp_alloc_init("lock test", lga);

    lck_mtx_init(&lt_mtx, lg, LCK_ATTR_NULL);
    lck_rw_init(&lt_rwlock, lg, LCK_ATTR_NULL);
    lck_spin_init(&lt_lck_spin_t, lg, LCK_ATTR_NULL);
    hw_lock_init(&lt_hw_lock);

    T_LOG("Testing locks.");

    /* Try locks (custom) */
    lt_reset();

    T_LOG("Running try lock test.");
    kr = lt_test_trylocks();
    T_EXPECT_NULL(kr, "try lock test failed.");

    /* Uncontended mutex */
    T_LOG("Running uncontended mutex test.");
    lt_reset();
    lt_target_done_threads = 1;
    lt_start_lock_thread(lt_grab_mutex);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

    /* Contended mutex */
    T_LOG("Running contended mutex test.");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_mutex);
    lt_start_lock_thread(lt_grab_mutex);
    lt_start_lock_thread(lt_grab_mutex);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

    /* Contended mutex: try locks */
    T_LOG("Running contended mutex trylock test.");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_mutex_with_try);
    lt_start_lock_thread(lt_grab_mutex_with_try);
    lt_start_lock_thread(lt_grab_mutex_with_try);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

    /* Uncontended exclusive rwlock */
    T_LOG("Running uncontended exclusive rwlock test.");
    lt_reset();
    lt_target_done_threads = 1;
    lt_start_lock_thread(lt_grab_rw_exclusive);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

    /* Uncontended shared rwlock */

    /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
     * T_LOG("Running uncontended shared rwlock test.");
     * lt_reset();
     * lt_target_done_threads = 1;
     * lt_start_lock_thread(lt_grab_rw_shared);
     * lt_wait_for_lock_test_threads();
     * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
     * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
     */

    /* Contended exclusive rwlock */
    T_LOG("Running contended exclusive rwlock test.");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_rw_exclusive);
    lt_start_lock_thread(lt_grab_rw_exclusive);
    lt_start_lock_thread(lt_grab_rw_exclusive);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

    /* One shared, two exclusive */
    /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
     * T_LOG("Running test with one shared and two exclusive rw lock threads.");
     * lt_reset();
     * lt_target_done_threads = 3;
     * lt_start_lock_thread(lt_grab_rw_shared);
     * lt_start_lock_thread(lt_grab_rw_exclusive);
     * lt_start_lock_thread(lt_grab_rw_exclusive);
     * lt_wait_for_lock_test_threads();
     * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
     * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
     */

    /* Four shared */
    /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
     * T_LOG("Running test with four shared holders.");
     * lt_reset();
     * lt_target_done_threads = 4;
     * lt_start_lock_thread(lt_grab_rw_shared);
     * lt_start_lock_thread(lt_grab_rw_shared);
     * lt_start_lock_thread(lt_grab_rw_shared);
     * lt_start_lock_thread(lt_grab_rw_shared);
     * lt_wait_for_lock_test_threads();
     * T_EXPECT_LE_UINT(lt_max_holders, 4, NULL);
     */

    /* Three doing upgrades and downgrades */
    T_LOG("Running test with threads upgrading and downgrading.");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_upgrade_downgrade_rw);
    lt_start_lock_thread(lt_upgrade_downgrade_rw);
    lt_start_lock_thread(lt_upgrade_downgrade_rw);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
    T_EXPECT_EQ_UINT(lt_max_upgrade_holders, 1, NULL);

    /* Uncontended - exclusive trylocks */
    T_LOG("Running test with single thread doing exclusive rwlock trylocks.");
    lt_reset();
    lt_target_done_threads = 1;
    lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

    /* Uncontended - shared trylocks */
    /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
     * T_LOG("Running test with single thread doing shared rwlock trylocks.");
     * lt_reset();
     * lt_target_done_threads = 1;
     * lt_start_lock_thread(lt_grab_rw_shared_with_try);
     * lt_wait_for_lock_test_threads();
     * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
     * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
     */

    /* Three doing exclusive trylocks */
    T_LOG("Running test with threads doing exclusive rwlock trylocks.");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
    lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
    lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

    /* Three doing shared trylocks */
    /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
     * T_LOG("Running test with threads doing shared rwlock trylocks.");
     * lt_reset();
     * lt_target_done_threads = 3;
     * lt_start_lock_thread(lt_grab_rw_shared_with_try);
     * lt_start_lock_thread(lt_grab_rw_shared_with_try);
     * lt_start_lock_thread(lt_grab_rw_shared_with_try);
     * lt_wait_for_lock_test_threads();
     * T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
     * T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
     */

    /* Three doing various trylocks */
    /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
     * T_LOG("Running test with threads doing mixed rwlock trylocks.");
     * lt_reset();
     * lt_target_done_threads = 4;
     * lt_start_lock_thread(lt_grab_rw_shared_with_try);
     * lt_start_lock_thread(lt_grab_rw_shared_with_try);
     * lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
     * lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
     * lt_wait_for_lock_test_threads();
     * T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
     * T_EXPECT_LE_UINT(lt_max_holders, 2, NULL);
     */

    /* HW locks */
    T_LOG("Running test with hw_lock_lock()");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_hw_lock);
    lt_start_lock_thread(lt_grab_hw_lock);
    lt_start_lock_thread(lt_grab_hw_lock);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

#if __AMP__
    /* Ticket locks stress test */
    T_LOG("Running Ticket locks stress test with lck_ticket_lock()");
    extern unsigned int real_ncpus;
    lck_grp_init(&lt_ticket_grp, "ticket lock stress", LCK_GRP_ATTR_NULL);
    lck_ticket_init(&lt_ticket_lock, &lt_ticket_grp);
    lt_reset();
    lt_target_done_threads = real_ncpus;
    for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
        lt_start_lock_thread_bound(lt_stress_ticket_lock);
    }
    lt_wait_for_lock_test_threads();
    bool starvation = false;
    uint total_local_count = 0;
    for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
        starvation = starvation || (lt_stress_local_counters[processor->cpu_id] < 10);
        total_local_count += lt_stress_local_counters[processor->cpu_id];
    }
    if (total_local_count != lt_counter) {
        T_FAIL("Lock failure\n");
    } else if (starvation) {
        T_FAIL("Lock starvation found\n");
    } else {
        T_PASS("Ticket locks stress test with lck_ticket_lock()");
    }

    /* AMP ticket locks stress test */
    T_LOG("Running AMP Ticket locks stress test bound to clusters with lck_ticket_lock()");
    lt_reset();
    lt_target_done_threads = real_ncpus;
    for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
        processor_set_t pset = processor->processor_set;
        if (pset->pset_cluster_type == PSET_AMP_P) {
            lt_start_lock_thread_p(lt_stress_ticket_lock);
        } else if (pset->pset_cluster_type == PSET_AMP_E) {
            lt_start_lock_thread_e(lt_stress_ticket_lock);
        } else {
            lt_start_lock_thread(lt_stress_ticket_lock);
        }
    }
    lt_wait_for_lock_test_threads();
#endif

    /* HW locks: trylocks */
    T_LOG("Running test with hw_lock_try()");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_hw_lock_with_try);
    lt_start_lock_thread(lt_grab_hw_lock_with_try);
    lt_start_lock_thread(lt_grab_hw_lock_with_try);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

    /* HW locks: with timeout */
    T_LOG("Running test with hw_lock_to()");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_hw_lock_with_to);
    lt_start_lock_thread(lt_grab_hw_lock_with_to);
    lt_start_lock_thread(lt_grab_hw_lock_with_to);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

    /* Spin locks */
    T_LOG("Running test with lck_spin_lock()");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_spin_lock);
    lt_start_lock_thread(lt_grab_spin_lock);
    lt_start_lock_thread(lt_grab_spin_lock);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

    /* Spin locks: trylocks */
    T_LOG("Running test with lck_spin_try_lock()");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_spin_lock_with_try);
    lt_start_lock_thread(lt_grab_spin_lock_with_try);
    lt_start_lock_thread(lt_grab_spin_lock_with_try);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

    return KERN_SUCCESS;
}

#define MT_MAX_ARGS             8
#define MT_INITIAL_VALUE        0xfeedbeef
#define MT_W_VAL                (0x00000000feedbeefULL) /* Drop in zeros */
#define MT_S_VAL                (0xfffffffffeedbeefULL) /* High bit is 1, so sign-extends as negative */
#define MT_L_VAL                (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32)) /* Two back-to-back */
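
/*
 * Worked example (from the values above): munge_wl rewrites a buffer of
 * three 32-bit input words {0xfeedbeef, 0xfeedbeef, 0xfeedbeef} in place as
 * two 64-bit arguments: a 'w' word zero-extended to 0x00000000feedbeef
 * (MT_W_VAL) followed by an 'l' long assembled from two adjacent words,
 * 0xfeedbeeffeedbeef (MT_L_VAL).  An 's' output would sign-extend instead,
 * producing 0xfffffffffeedbeef (MT_S_VAL).
 */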

typedef void (*sy_munge_t)(void*);

#define MT_FUNC(x) #x, x
struct munger_test {
    const char      *mt_name;
    sy_munge_t      mt_func;
    uint32_t        mt_in_words;
    uint32_t        mt_nout;
    uint64_t        mt_expected[MT_MAX_ARGS];
} munger_tests[] = {
    {MT_FUNC(munge_w), 1, 1, {MT_W_VAL}},
    {MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwlwww), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwllww), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}},
    {MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}},
    {MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_llll), 8, 4, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_l), 2, 1, {MT_L_VAL}},
    {MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}
};

#define MT_TEST_COUNT (sizeof(munger_tests) / sizeof(struct munger_test))

static void
mt_reset(uint32_t in_words, size_t total_size, uint32_t *data)
{
    uint32_t i;

    for (i = 0; i < in_words; i++) {
        data[i] = MT_INITIAL_VALUE;
    }

    if (in_words * sizeof(uint32_t) < total_size) {
        bzero(&data[in_words], total_size - in_words * sizeof(uint32_t));
    }
}

static void
mt_test_mungers(void)
{
    uint64_t data[MT_MAX_ARGS];
    uint32_t i, j;

    for (i = 0; i < MT_TEST_COUNT; i++) {
        struct munger_test *test = &munger_tests[i];
        int pass = 1;

        T_LOG("Testing %s", test->mt_name);

        mt_reset(test->mt_in_words, sizeof(data), (uint32_t*)data);
        test->mt_func(data);

        for (j = 0; j < test->mt_nout; j++) {
            if (data[j] != test->mt_expected[j]) {
                T_FAIL("Index %d: expected %llx, got %llx.", j, test->mt_expected[j], data[j]);
                pass = 0;
            }
        }
        if (pass) {
            T_PASS(test->mt_name);
        }
    }
}

/* Exception Callback Test */
static ex_cb_action_t
excb_test_action(
    ex_cb_class_t           cb_class,
    void                    *refcon,
    const ex_cb_state_t     *state
    )
{
    ex_cb_state_t *context = (ex_cb_state_t *)refcon;

    if ((NULL == refcon) || (NULL == state)) {
        return EXCB_ACTION_TEST_FAIL;
    }

    context->far = state->far;

    switch (cb_class) {
    case EXCB_CLASS_TEST1:
        return EXCB_ACTION_RERUN;
    case EXCB_CLASS_TEST2:
        return EXCB_ACTION_NONE;
    default:
        return EXCB_ACTION_TEST_FAIL;
    }
}

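/*
 * ex_cb_test() exercises the registration and dispatch contract seen below:
 * the first ex_cb_register() for a class succeeds and repeat registrations
 * fail; ex_cb_invoke() hands the fault address to the registered callback
 * via the state argument and returns whatever action the callback chose;
 * and invoking a class with no registered callback yields EXCB_ACTION_NONE.
 */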
kern_return_t
ex_cb_test(void)
{
    const vm_offset_t far1 = 0xdead0001;
    const vm_offset_t far2 = 0xdead0002;
    kern_return_t kr;
    ex_cb_state_t test_context_1 = {0xdeadbeef};
    ex_cb_state_t test_context_2 = {0xdeadbeef};
    ex_cb_action_t action;

    T_LOG("Testing Exception Callback.");

    T_LOG("Running registration test.");

    kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
    T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST1 exception callback");
    kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
    T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST2 exception callback");

    kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
    T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST2 exception callback");
    kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
    T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST1 exception callback");

    T_LOG("Running invocation test.");

    action = ex_cb_invoke(EXCB_CLASS_TEST1, far1);
    T_ASSERT(EXCB_ACTION_RERUN == action, NULL);
    T_ASSERT(far1 == test_context_1.far, NULL);

    action = ex_cb_invoke(EXCB_CLASS_TEST2, far2);
    T_ASSERT(EXCB_ACTION_NONE == action, NULL);
    T_ASSERT(far2 == test_context_2.far, NULL);

    action = ex_cb_invoke(EXCB_CLASS_TEST3, 0);
    T_ASSERT(EXCB_ACTION_NONE == action, NULL);

    return KERN_SUCCESS;
}

#if defined(HAS_APPLE_PAC)

kern_return_t
arm64_ropjop_test(void)
{
    T_LOG("Testing ROP/JOP");

    /* how is ROP/JOP configured */
    boolean_t config_rop_enabled = TRUE;
    boolean_t config_jop_enabled = TRUE;

    if (config_jop_enabled) {
        /* jop key */
        uint64_t apiakey_hi = __builtin_arm_rsr64("APIAKEYHI_EL1");
        uint64_t apiakey_lo = __builtin_arm_rsr64("APIAKEYLO_EL1");

        T_EXPECT(apiakey_hi != 0 && apiakey_lo != 0, NULL);
    }

    if (config_rop_enabled) {
        /* rop key */
        uint64_t apibkey_hi = __builtin_arm_rsr64("APIBKEYHI_EL1");
        uint64_t apibkey_lo = __builtin_arm_rsr64("APIBKEYLO_EL1");

        T_EXPECT(apibkey_hi != 0 && apibkey_lo != 0, NULL);

        /* sign a KVA (the address of a local variable) with the B key */
        uint64_t kva_signed = (uint64_t) ptrauth_sign_unauthenticated((void *)&config_rop_enabled, ptrauth_key_asib, 0);

        /* assert it was signed (changed) */
        T_EXPECT(kva_signed != (uint64_t)&config_rop_enabled, NULL);

        /* authenticate the newly signed KVA */
        uint64_t kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_signed, ptrauth_key_asib, 0);

        /* assert the authed KVA is the original KVA */
        T_EXPECT(kva_authed == (uint64_t)&config_rop_enabled, NULL);

        /* corrupt a signed ptr, auth it, ensure auth failed */
        uint64_t kva_corrupted = kva_signed ^ 1;

        /* authenticate the corrupted pointer */
        kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_corrupted, ptrauth_key_asib, 0);

        /* when AuthIB fails, bits 62:61 (the bits covered by the masks below) are set to 2'b10 */
        uint64_t auth_fail_mask = 3ULL << 61;
        uint64_t authib_fail = 2ULL << 61;

        /* assert the failed authIB of corrupted pointer is tagged */
        T_EXPECT((kva_authed & auth_fail_mask) == authib_fail, NULL);
    }

    return KERN_SUCCESS;
}
#endif /* defined(HAS_APPLE_PAC) */

#if __ARM_PAN_AVAILABLE__

struct pan_test_thread_args {
    volatile bool join;
};

static void
arm64_pan_test_thread(void *arg, wait_result_t __unused wres)
{
    T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);

    struct pan_test_thread_args *args = arg;

    for (processor_t p = processor_list; p != NULL; p = p->processor_list) {
        thread_bind(p);
        thread_block(THREAD_CONTINUE_NULL);
        kprintf("Running PAN test on cpu %d\n", p->cpu_id);
        arm64_pan_test();
    }

    /* unbind thread from specific cpu */
    thread_bind(PROCESSOR_NULL);
    thread_block(THREAD_CONTINUE_NULL);

    while (!args->join) {
        ;
    }

    thread_wakeup(args);
}

kern_return_t
arm64_late_pan_test(void)
{
    thread_t thread;
    kern_return_t kr;

    struct pan_test_thread_args args;
    args.join = false;

    kr = kernel_thread_start(arm64_pan_test_thread, &args, &thread);
    assert(kr == KERN_SUCCESS);

    thread_deallocate(thread);

    assert_wait(&args, THREAD_UNINT);
    args.join = true;
    thread_block(THREAD_CONTINUE_NULL);
    return KERN_SUCCESS;
}

static bool
arm64_pan_test_pan_enabled_fault_handler(arm_saved_state_t * state)
{
    bool retval = false;
    uint32_t esr = get_saved_state_esr(state);
    esr_exception_class_t class = ESR_EC(esr);
    fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
    uint32_t cpsr = get_saved_state_cpsr(state);
    uint64_t far = get_saved_state_far(state);

    if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) &&
        (cpsr & PSR64_PAN) &&
        ((esr & ISS_DA_WNR) ? mmu_kvtop_wpreflight(far) : mmu_kvtop(far))) {
        ++pan_exception_level;
        // read the user-accessible value to make sure
        // pan is enabled and produces a 2nd fault from
        // the exception handler
        if (pan_exception_level == 1) {
            ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, far);
            pan_fault_value = *(volatile char *)far;
            ml_expect_fault_end();
            __builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
        }
        // this fault address is used for PAN test
        // disable PAN and rerun
        mask_saved_state_cpsr(state, 0, PSR64_PAN);

        retval = true;
    }

    return retval;
}

static bool
arm64_pan_test_pan_disabled_fault_handler(arm_saved_state_t * state)
{
    bool retval = false;
    uint32_t esr = get_saved_state_esr(state);
    esr_exception_class_t class = ESR_EC(esr);
    fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
    uint32_t cpsr = get_saved_state_cpsr(state);

    if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) &&
        !(cpsr & PSR64_PAN)) {
        ++pan_exception_level;
        // On an exception taken from a PAN-disabled context, verify
        // that PAN is re-enabled for the exception handler and that
        // accessing the test address produces a PAN fault.
        ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr);
        pan_fault_value = *(volatile char *)pan_test_addr;
        ml_expect_fault_end();
        __builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
        add_saved_state_pc(state, 4);

        retval = true;
    }

    return retval;
}
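
/*
 * Note on nesting: each probe in arm64_pan_test() is expected to fault twice.
 * The first (PAN-enabled) probe faults on the user-accessible read, and the
 * handler's own read of the fault address faults once more before it disables
 * PAN and reruns the instruction.  The second probe faults on a read-only
 * store taken with PAN cleared; that handler verifies PAN was re-enabled on
 * exception entry by provoking one more PAN fault on pan_test_addr.  Both
 * paths increment pan_exception_level twice, matching the
 * T_ASSERT(pan_exception_level == 2) checks below.
 */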
1340 | ||
5ba3f43e A |
1341 | kern_return_t |
1342 | arm64_pan_test() | |
1343 | { | |
f427ee49 | 1344 | bool values_match = false; |
5ba3f43e A |
1345 | vm_offset_t priv_addr = _COMM_PAGE_SIGNATURE; |
1346 | ||
1347 | T_LOG("Testing PAN."); | |
1348 | ||
cb323159 A |
1349 | |
1350 | T_ASSERT((__builtin_arm_rsr("SCTLR_EL1") & SCTLR_PAN_UNCHANGED) == 0, "SCTLR_EL1.SPAN must be cleared"); | |
1351 | ||
5ba3f43e A |
1352 | T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL); |
1353 | ||
cc8bc92a A |
1354 | pan_exception_level = 0; |
1355 | pan_fault_value = 0xDE; | |
5ba3f43e | 1356 | // convert priv_addr to one that is accessible from user mode |
5c9f4661 | 1357 | pan_test_addr = priv_addr + _COMM_HIGH_PAGE64_BASE_ADDRESS - |
0a7de745 | 1358 | _COMM_PAGE_START_ADDRESS; |
5ba3f43e | 1359 | |
f427ee49 A |
1360 | // Context-switch with PAN disabled is prohibited; prevent test logging from |
1361 | // triggering a voluntary context switch. | |
1362 | mp_disable_preemption(); | |
1363 | ||
5c9f4661 | 1364 | // Below should trigger a PAN exception as pan_test_addr is accessible |
5ba3f43e A |
1365 | // in user mode |
1366 | // The exception handler, upon recognizing the fault address is pan_test_addr, | |
1367 | // will disable PAN and rerun this instruction successfully | |
f427ee49 A |
1368 | ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr); |
1369 | values_match = (*(volatile char *)pan_test_addr == *(volatile char *)priv_addr); | |
1370 | ml_expect_fault_end(); | |
1371 | T_ASSERT(values_match, NULL); | |
cc8bc92a A |
1372 | |
1373 | T_ASSERT(pan_exception_level == 2, NULL); | |
5ba3f43e A |
1374 | |
1375 | T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL); | |
1376 | ||
cc8bc92a A |
1377 | T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL); |
1378 | ||
	pan_exception_level = 0;
	pan_fault_value = 0xAD;
	pan_ro_addr = (vm_offset_t) &pan_ro_value;

	// Force a permission fault while PAN is disabled to make sure PAN is
	// re-enabled during the exception handler.
	ml_expect_fault_begin(arm64_pan_test_pan_disabled_fault_handler, pan_ro_addr);
	*((volatile uint64_t*)pan_ro_addr) = 0xFEEDFACECAFECAFE;
	ml_expect_fault_end();

	T_ASSERT(pan_exception_level == 2, NULL);

	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

	pan_test_addr = 0;
	pan_ro_addr = 0;

	// restore PAN before re-enabling preemption
	__builtin_arm_wsr("pan", 1);

	mp_enable_preemption();

	return KERN_SUCCESS;
}
#endif /* __ARM_PAN_AVAILABLE__ */

kern_return_t
arm64_lock_test()
{
	return lt_test_locks();
}

kern_return_t
arm64_munger_test()
{
	mt_test_mungers();
	return KERN_SUCCESS; // kern_return_t return type; 0 is KERN_SUCCESS
}

#if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST)
SECURITY_READ_ONLY_LATE(uint64_t) ctrr_ro_test;
uint64_t ctrr_nx_test = 0xd65f03c0;     /* encoding of the AArch64 RET instruction */
volatile uint64_t ctrr_exception_esr;
vm_offset_t ctrr_test_va;
vm_offset_t ctrr_test_page;

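/*
 * CTRR (the hardware kernel-text protection region) appears to be enforced
 * by physical address range rather than by page-table permissions, which is
 * what the tests below rely on: an RW alias of a protected page should still
 * fault on write, and an RX alias of a data page outside the region should
 * still fault on instruction fetch.
 */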
kern_return_t
ctrr_test(void)
{
	processor_t p;
	boolean_t ctrr_disable = FALSE;

	PE_parse_boot_argn("-unsafe_kernel_text", &ctrr_disable, sizeof(ctrr_disable));

#if CONFIG_CSR_FROM_DT
	if (csr_unsafe_kernel_text) {
		ctrr_disable = TRUE;
	}
#endif /* CONFIG_CSR_FROM_DT */

	if (ctrr_disable) {
		T_LOG("Skipping CTRR test when -unsafe_kernel_text boot-arg present");
		return KERN_SUCCESS;
	}

	T_LOG("Running CTRR test.");

	// bind to each CPU in turn so every core's CTRR programming is exercised
	for (p = processor_list; p != NULL; p = p->processor_list) {
		thread_bind(p);
		thread_block(THREAD_CONTINUE_NULL);
		T_LOG("Running CTRR test on cpu %d\n", p->cpu_id);
		ctrr_test_cpu();
	}

	/* unbind thread from specific cpu */
	thread_bind(PROCESSOR_NULL);
	thread_block(THREAD_CONTINUE_NULL);

	return KERN_SUCCESS;
}
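
/*
 * Both fault handlers below are armed one fault at a time via
 * ml_expect_fault_begin(); each records the ESR for the assertions in
 * ctrr_test_cpu() and then advances or redirects the saved PC so the
 * faulting thread can make forward progress.
 */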
static bool
ctrr_test_ro_fault_handler(arm_saved_state_t * state)
{
	bool retval = false;
	uint32_t esr = get_saved_state_esr(state);
	esr_exception_class_t class = ESR_EC(esr);
	fault_status_t fsc = ISS_DA_FSC(ESR_ISS(esr));

	if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
		ctrr_exception_esr = esr;
		// resume at the next instruction; the denied store is skipped, not retried
		add_saved_state_pc(state, 4);
		retval = true;
	}

	return retval;
}

static bool
ctrr_test_nx_fault_handler(arm_saved_state_t * state)
{
	bool retval = false;
	uint32_t esr = get_saved_state_esr(state);
	esr_exception_class_t class = ESR_EC(esr);
	fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));

	if ((class == ESR_EC_IABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
		ctrr_exception_esr = esr;
		/* return to the instruction immediately after the call to NX page */
		set_saved_state_pc(state, get_saved_state_lr(state));
		retval = true;
	}

	return retval;
}

/* test CTRR on a cpu, caller to bind thread to desired cpu */
/* ctrr_test_page was reserved during bootstrap process */
kern_return_t
ctrr_test_cpu(void)
{
	ppnum_t ro_pn, nx_pn;
	uint64_t *ctrr_ro_test_ptr;
	void (*ctrr_nx_test_ptr)(void);
	kern_return_t kr;
	uint64_t prot = 0;
	extern vm_offset_t virtual_space_start;

	/* ctrr read-only region = [rorgn_begin_va, rorgn_end_va) */
	vm_offset_t rorgn_begin_va = phystokv(ctrr_begin);
	vm_offset_t rorgn_end_va = phystokv(ctrr_end) + 1;
	vm_offset_t ro_test_va = (vm_offset_t)&ctrr_ro_test;
	vm_offset_t nx_test_va = (vm_offset_t)&ctrr_nx_test;

	T_EXPECT(rorgn_begin_va <= ro_test_va && ro_test_va < rorgn_end_va, "Expect ro_test_va to be inside the CTRR region");
	// exactly one of "below the region" / "at or above its end" must hold
	T_EXPECT((nx_test_va < rorgn_begin_va) ^ (nx_test_va >= rorgn_end_va), "Expect nx_test_va to be outside the CTRR region");

	ro_pn = pmap_find_phys(kernel_pmap, ro_test_va);
	nx_pn = pmap_find_phys(kernel_pmap, nx_test_va);
	T_EXPECT(ro_pn && nx_pn, "Expect ro page number and nx page number to be non zero");

	T_LOG("test virtual page: %p, ctrr_ro_test: %p, ctrr_nx_test: %p, ro_pn: %x, nx_pn: %x ",
	    (void *)ctrr_test_page, &ctrr_ro_test, &ctrr_nx_test, ro_pn, nx_pn);

	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
	T_EXPECT(~prot & ARM_TTE_VALID, "Expect ctrr_test_page to be unmapped");

	T_LOG("Read only region test mapping virtual page %p to CTRR RO page number %d", ctrr_test_page, ro_pn);
	kr = pmap_enter(kernel_pmap, ctrr_test_page, ro_pn,
	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
	T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RW mapping to succeed");

	// assert entire mmu prot path (hierarchical protection model) is NOT RO
	// fetch effective block level protections from table/block entries
	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
	T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RWNA && (prot & ARM_PTE_PNX), "Mapping is EL1 RWNX");
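
	/*
	 * Even though this alias PTE grants EL1 write access, the store below
	 * should still be denied: CTRR enforcement is keyed to the physical
	 * page, not to the permissions of the mapping used to reach it.
	 */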
	ctrr_test_va = ctrr_test_page + (ro_test_va & PAGE_MASK);
	ctrr_ro_test_ptr = (void *)ctrr_test_va;

	T_LOG("Read only region test writing to %p to provoke data abort", ctrr_ro_test_ptr);

	// should cause data abort
	ml_expect_fault_begin(ctrr_test_ro_fault_handler, ctrr_test_va);
	*ctrr_ro_test_ptr = 1;
	ml_expect_fault_end();

	// ensure write permission fault at expected level
	// data abort handler will set ctrr_exception_esr when ctrr_test_va takes a permission fault
	T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_DABORT_EL1, "Data Abort from EL1 expected");
	T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");
	T_EXPECT(ESR_ISS(ctrr_exception_esr) & ISS_DA_WNR, "Write Fault Expected");

	ctrr_test_va = 0;
	ctrr_exception_esr = 0;
	pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);

	T_LOG("No execute test mapping virtual page %p to CTRR PXN page number %d", ctrr_test_page, nx_pn);

	kr = pmap_enter(kernel_pmap, ctrr_test_page, nx_pn,
	    VM_PROT_READ | VM_PROT_EXECUTE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
	T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RX mapping to succeed");

	// assert entire mmu prot path (hierarchical protection model) is NOT XN
	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
	T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RONA && (~prot & ARM_PTE_PNX), "Mapping is EL1 ROX");

	ctrr_test_va = ctrr_test_page + (nx_test_va & PAGE_MASK);
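
	/*
	 * The alias preserves the page offset: (nx_test_va & PAGE_MASK) selects
	 * the same byte within the page, so ctrr_test_va points at the RET
	 * opcode in ctrr_nx_test through the new RX mapping.
	 */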
#if __has_feature(ptrauth_calls)
	// On arm64e the call below authenticates its target, so the raw VA must
	// carry a valid function-pointer signature for the branch to be taken and
	// the instruction fetch (not pointer authentication) to generate the fault.
	ctrr_nx_test_ptr = ptrauth_sign_unauthenticated((void *)ctrr_test_va, ptrauth_key_function_pointer, 0);
#else
	ctrr_nx_test_ptr = (void *)ctrr_test_va;
#endif

	T_LOG("No execute test calling ctrr_nx_test_ptr(): %p to provoke instruction abort", ctrr_nx_test_ptr);

	// should cause prefetch abort
	ml_expect_fault_begin(ctrr_test_nx_fault_handler, ctrr_test_va);
	ctrr_nx_test_ptr();
	ml_expect_fault_end();

	// ensure the execute permission fault was taken at the expected level
	T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_IABORT_EL1, "Instruction abort from EL1 Expected");
	T_EXPECT(ISS_IA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected"); // instruction abort, so use the IA fault-status decode

	ctrr_test_va = 0;
	ctrr_exception_esr = 0;

c6bf4f31 | 1591 | pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE); |
f427ee49 A |
1592 | |
1593 | T_LOG("Expect no faults when reading CTRR region to verify correct programming of CTRR limits"); | |
1594 | for (vm_offset_t addr = rorgn_begin_va; addr < rorgn_end_va; addr += 8) { | |
1595 | volatile uint64_t x = *(uint64_t *)addr; | |
1596 | (void) x; /* read for side effect only */ | |
1597 | } | |
1598 | ||
c6bf4f31 A |
1599 | return KERN_SUCCESS; |
1600 | } | |
#endif /* defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST) */

#if HAS_TWO_STAGE_SPR_LOCK

// two-level stringification, so the register token is expanded before being
// turned into the string literal that __builtin_arm_rsr64() expects
#define STR1(x) #x
#define STR(x) STR1(x)

volatile vm_offset_t spr_lock_test_addr;
volatile uint32_t spr_lock_exception_esr;

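/*
 * arm64_msr_lock_test() is implemented elsewhere; a plausible sketch of the
 * faulting sequence (illustrative only -- the real routine may differ):
 *
 *     arm64_msr_lock_test:
 *         msr S3_0_C15_C8_0, x0   // aborts once the two-stage lock is set
 *         ret
 *
 * The test writes the complement of the current value, expects a synchronous
 * abort, then verifies the register was left unchanged.
 */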
kern_return_t
arm64_spr_lock_test()
{
	processor_t p;

	for (p = processor_list; p != NULL; p = p->processor_list) {
		thread_bind(p);
		thread_block(THREAD_CONTINUE_NULL);
		T_LOG("Running SPR lock test on cpu %d\n", p->cpu_id);

		uint64_t orig_value = __builtin_arm_rsr64(STR(S3_0_C15_C8_0));
		spr_lock_test_addr = (vm_offset_t)VM_KERNEL_STRIP_PTR(arm64_msr_lock_test);
		spr_lock_exception_esr = 0;
		arm64_msr_lock_test(~orig_value);
		T_EXPECT(spr_lock_exception_esr != 0, "MSR write generated synchronous abort");

		uint64_t new_value = __builtin_arm_rsr64(STR(S3_0_C15_C8_0));
		T_EXPECT(orig_value == new_value, "MSR write did not succeed");

		spr_lock_test_addr = 0;
	}

	/* unbind thread from specific cpu */
	thread_bind(PROCESSOR_NULL);
	thread_block(THREAD_CONTINUE_NULL);

	T_PASS("Done running SPR lock tests");

	return KERN_SUCCESS;
}

#endif /* HAS_TWO_STAGE_SPR_LOCK */