1/*
2 * Copyright (c) 2011-2018 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System Copyright (c) 1991,1990,1989,1988,1987 Carnegie
33 * Mellon University All Rights Reserved.
34 *
35 * Permission to use, copy, modify and distribute this software and its
36 * documentation is hereby granted, provided that both the copyright notice
37 * and this permission notice appear in all copies of the software,
38 * derivative works or modified versions, and any portions thereof, and that
39 * both notices appear in supporting documentation.
40 *
41 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.
42 * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
43 * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
44 *
45 * Carnegie Mellon requests users of this software to return to
46 *
47 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
48 * School of Computer Science Carnegie Mellon University Pittsburgh PA
49 * 15213-3890
50 *
51 * any improvements or extensions that they make and grant Carnegie Mellon the
52 * rights to redistribute these changes.
53 */
54
55#include <mach_ldebug.h>
56
57#define LOCK_PRIVATE 1
58
59#include <vm/pmap.h>
60#include <kern/kalloc.h>
61#include <kern/locks.h>
62#include <kern/misc_protos.h>
63#include <kern/thread.h>
64#include <kern/processor.h>
65#include <kern/sched_prim.h>
66#include <kern/debug.h>
67#include <string.h>
68#include <tests/xnupost.h>
69
70#if MACH_KDB
71#include <ddb/db_command.h>
72#include <ddb/db_output.h>
73#include <ddb/db_sym.h>
74#include <ddb/db_print.h>
75#endif /* MACH_KDB */
76
77#include <sys/kdebug.h>
78#include <sys/munge.h>
79#include <machine/cpu_capabilities.h>
80#include <arm/cpu_data_internal.h>
81#include <arm/pmap.h>
82
83kern_return_t arm64_lock_test(void);
84kern_return_t arm64_munger_test(void);
85kern_return_t ex_cb_test(void);
86kern_return_t arm64_pan_test(void);
87kern_return_t arm64_late_pan_test(void);
88#if defined(HAS_APPLE_PAC)
89#include <ptrauth.h>
90kern_return_t arm64_ropjop_test(void);
91#endif
92#if defined(KERNEL_INTEGRITY_CTRR)
93kern_return_t ctrr_test(void);
94kern_return_t ctrr_test_cpu(void);
95#endif
96#if HAS_TWO_STAGE_SPR_LOCK
97kern_return_t arm64_spr_lock_test(void);
98extern void arm64_msr_lock_test(uint64_t);
99#endif
100
101// exception handler ignores this fault address during PAN test
102#if __ARM_PAN_AVAILABLE__
103const uint64_t pan_ro_value = 0xFEEDB0B0DEADBEEF;
104vm_offset_t pan_test_addr = 0;
105vm_offset_t pan_ro_addr = 0;
106volatile int pan_exception_level = 0;
107volatile char pan_fault_value = 0;
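/*
 * Shared with the exception handler during arm64_pan_test(): the test resets
 * pan_exception_level and pan_fault_value before each deliberately faulting
 * access, then checks the values the handler recorded once the access has
 * been resolved.
 */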
108#endif
109
110#include <libkern/OSAtomic.h>
111#define LOCK_TEST_ITERATIONS 50
112static hw_lock_data_t lt_hw_lock;
113static lck_spin_t lt_lck_spin_t;
114static lck_mtx_t lt_mtx;
115static lck_rw_t lt_rwlock;
116static volatile uint32_t lt_counter = 0;
117static volatile int lt_spinvolatile;
118static volatile uint32_t lt_max_holders = 0;
119static volatile uint32_t lt_upgrade_holders = 0;
120static volatile uint32_t lt_max_upgrade_holders = 0;
121static volatile uint32_t lt_num_holders = 0;
122static volatile uint32_t lt_done_threads;
123static volatile uint32_t lt_target_done_threads;
124static volatile uint32_t lt_cpu_bind_id = 0;
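/*
 * Shared lock-test state: lt_counter counts total acquisitions across all
 * worker threads, while lt_num_holders/lt_max_holders track how many threads
 * hold a blocking lock simultaneously, so the exclusive-lock tests can assert
 * that the peak never exceeds one.
 */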
125
126static void
127lt_note_another_blocking_lock_holder()
128{
129 hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
130 lt_num_holders++;
131 lt_max_holders = (lt_max_holders < lt_num_holders) ? lt_num_holders : lt_max_holders;
132 hw_lock_unlock(&lt_hw_lock);
133}
134
135static void
136lt_note_blocking_lock_release()
137{
138 hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
139 lt_num_holders--;
140 hw_lock_unlock(&lt_hw_lock);
141}
142
143static void
144lt_spin_a_little_bit()
145{
146 uint32_t i;
147
148 for (i = 0; i < 10000; i++) {
149 lt_spinvolatile++;
150 }
151}
152
153static void
154lt_sleep_a_little_bit()
155{
156 delay(100);
157}
158
159static void
160lt_grab_mutex()
161{
162 lck_mtx_lock(&lt_mtx);
163 lt_note_another_blocking_lock_holder();
164 lt_sleep_a_little_bit();
165 lt_counter++;
166 lt_note_blocking_lock_release();
167 lck_mtx_unlock(&lt_mtx);
168}
169
170static void
171lt_grab_mutex_with_try()
172{
173 while (0 == lck_mtx_try_lock(&lt_mtx)) {
174 ;
175 }
176 lt_note_another_blocking_lock_holder();
177 lt_sleep_a_little_bit();
178 lt_counter++;
179 lt_note_blocking_lock_release();
180 lck_mtx_unlock(&lt_mtx);
181}
182
183static void
184lt_grab_rw_exclusive()
185{
186 lck_rw_lock_exclusive(&lt_rwlock);
187 lt_note_another_blocking_lock_holder();
188 lt_sleep_a_little_bit();
189 lt_counter++;
190 lt_note_blocking_lock_release();
191 lck_rw_done(&lt_rwlock);
192}
193
194static void
195lt_grab_rw_exclusive_with_try()
196{
197 while (0 == lck_rw_try_lock_exclusive(&lt_rwlock)) {
198 lt_sleep_a_little_bit();
199 }
200
201 lt_note_another_blocking_lock_holder();
202 lt_sleep_a_little_bit();
203 lt_counter++;
204 lt_note_blocking_lock_release();
205 lck_rw_done(&lt_rwlock);
206}
207
208/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
209 * static void
210 * lt_grab_rw_shared()
211 * {
212 * lck_rw_lock_shared(&lt_rwlock);
213 * lt_counter++;
214 *
215 * lt_note_another_blocking_lock_holder();
216 * lt_sleep_a_little_bit();
217 * lt_note_blocking_lock_release();
218 *
219 * lck_rw_done(&lt_rwlock);
220 * }
221 */
222
223/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
224 * static void
225 * lt_grab_rw_shared_with_try()
226 * {
227 * while(0 == lck_rw_try_lock_shared(&lt_rwlock));
228 * lt_counter++;
229 *
230 * lt_note_another_blocking_lock_holder();
231 * lt_sleep_a_little_bit();
232 * lt_note_blocking_lock_release();
233 *
234 * lck_rw_done(&lt_rwlock);
235 * }
236 */
237
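/*
 * Takes the rwlock shared, attempts a shared-to-exclusive upgrade (falling
 * back to a fresh exclusive acquisition if the upgrade loses the race),
 * tracks the peak number of threads in the upgraded section, then downgrades
 * back to shared before dropping the lock.
 */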
238static void
239lt_upgrade_downgrade_rw()
240{
241 boolean_t upgraded, success;
242
243 success = lck_rw_try_lock_shared(&lt_rwlock);
244 if (!success) {
245 lck_rw_lock_shared(&lt_rwlock);
246 }
247
248 lt_note_another_blocking_lock_holder();
249 lt_sleep_a_little_bit();
250 lt_note_blocking_lock_release();
251
252 upgraded = lck_rw_lock_shared_to_exclusive(&lt_rwlock);
253 if (!upgraded) {
254 success = lck_rw_try_lock_exclusive(&lt_rwlock);
255
256 if (!success) {
257 lck_rw_lock_exclusive(&lt_rwlock);
258 }
259 }
260
261 lt_upgrade_holders++;
262 if (lt_upgrade_holders > lt_max_upgrade_holders) {
263 lt_max_upgrade_holders = lt_upgrade_holders;
264 }
265
266 lt_counter++;
267 lt_sleep_a_little_bit();
268
269 lt_upgrade_holders--;
270
271 lck_rw_lock_exclusive_to_shared(&lt_rwlock);
272
273 lt_spin_a_little_bit();
274 lck_rw_done(&lt_rwlock);
275}
276
277#if __AMP__
278const int limit = 1000000;
279static int lt_stress_local_counters[MAX_CPUS];
280
281lck_ticket_t lt_ticket_lock;
282
283static void
284lt_stress_ticket_lock()
285{
286 int local_counter = 0;
287
288 uint cpuid = current_processor()->cpu_id;
289
290 kprintf("%s>cpu %d starting\n", __FUNCTION__, cpuid);
291
292 lck_ticket_lock(&lt_ticket_lock);
293 lt_counter++;
294 local_counter++;
295 lck_ticket_unlock(&lt_ticket_lock);
296
297 while (lt_counter < lt_target_done_threads) {
298 ;
299 }
300
301 kprintf("%s>cpu %d started\n", __FUNCTION__, cpuid);
302
303 while (lt_counter < limit) {
304 lck_ticket_lock(&lt_ticket_lock);
305 if (lt_counter < limit) {
306 lt_counter++;
307 local_counter++;
308 }
309 lck_ticket_unlock(&lt_ticket_lock);
310 }
311
312 lt_stress_local_counters[cpuid] = local_counter;
313
314 kprintf("%s>final counter %d cpu %d incremented the counter %d times\n", __FUNCTION__, lt_counter, cpuid, local_counter);
315}
316#endif
317
318static void
319lt_grab_hw_lock()
320{
321 hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
322 lt_counter++;
323 lt_spin_a_little_bit();
324 hw_lock_unlock(&lt_hw_lock);
325}
326
327static void
328lt_grab_hw_lock_with_try()
329{
330 while (0 == hw_lock_try(&lt_hw_lock, LCK_GRP_NULL)) {
331 ;
332 }
333 lt_counter++;
334 lt_spin_a_little_bit();
335 hw_lock_unlock(&lt_hw_lock);
336}
337
338static void
339lt_grab_hw_lock_with_to()
340{
341 while (0 == hw_lock_to(&lt_hw_lock, LockTimeOut, LCK_GRP_NULL)) {
342 mp_enable_preemption();
343 }
344 lt_counter++;
345 lt_spin_a_little_bit();
346 hw_lock_unlock(&lt_hw_lock);
347}
348
349static void
350lt_grab_spin_lock()
351{
352 lck_spin_lock(&lt_lck_spin_t);
353 lt_counter++;
354 lt_spin_a_little_bit();
355 lck_spin_unlock(&lt_lck_spin_t);
356}
357
358static void
359lt_grab_spin_lock_with_try()
360{
361 while (0 == lck_spin_try_lock(&lt_lck_spin_t)) {
362 ;
363 }
364 lt_counter++;
365 lt_spin_a_little_bit();
366 lck_spin_unlock(&lt_lck_spin_t);
367}
368
369static volatile boolean_t lt_thread_lock_grabbed;
370static volatile boolean_t lt_thread_lock_success;
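/*
 * Handshake used by lt_test_trylocks(): a helper thread spins until the main
 * thread has taken the lock under test and set lt_thread_lock_grabbed, then
 * records the result of its own try/timeout attempt in lt_thread_lock_success.
 */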
371
372static void
373lt_reset()
374{
375 lt_counter = 0;
376 lt_max_holders = 0;
377 lt_num_holders = 0;
378 lt_max_upgrade_holders = 0;
379 lt_upgrade_holders = 0;
380 lt_done_threads = 0;
381 lt_target_done_threads = 0;
382 lt_cpu_bind_id = 0;
383
384 OSMemoryBarrier();
385}
386
387static void
388lt_trylock_hw_lock_with_to()
389{
390 OSMemoryBarrier();
391 while (!lt_thread_lock_grabbed) {
392 lt_sleep_a_little_bit();
393 OSMemoryBarrier();
394 }
395 lt_thread_lock_success = hw_lock_to(&lt_hw_lock, 100, LCK_GRP_NULL);
396 OSMemoryBarrier();
397 mp_enable_preemption();
398}
399
400static void
401lt_trylock_spin_try_lock()
402{
403 OSMemoryBarrier();
404 while (!lt_thread_lock_grabbed) {
405 lt_sleep_a_little_bit();
406 OSMemoryBarrier();
407 }
408 lt_thread_lock_success = lck_spin_try_lock(&lt_lck_spin_t);
409 OSMemoryBarrier();
410}
411
412static void
413lt_trylock_thread(void *arg, wait_result_t wres __unused)
414{
415 void (*func)(void) = (void (*)(void))arg;
416
417 func();
418
419 OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
420}
421
422static void
423lt_start_trylock_thread(thread_continue_t func)
424{
425 thread_t thread;
426 kern_return_t kr;
427
428 kr = kernel_thread_start(lt_trylock_thread, func, &thread);
429 assert(kr == KERN_SUCCESS);
430
431 thread_deallocate(thread);
432}
433
434static void
435lt_wait_for_lock_test_threads()
436{
437 OSMemoryBarrier();
438 /* Spin to reduce dependencies */
439 while (lt_done_threads < lt_target_done_threads) {
440 lt_sleep_a_little_bit();
441 OSMemoryBarrier();
442 }
443 OSMemoryBarrier();
444}
445
446static kern_return_t
447lt_test_trylocks()
448{
449 boolean_t success;
450 extern unsigned int real_ncpus;
451
452 /*
453 * First mtx try lock succeeds, second fails.
454 */
455 success = lck_mtx_try_lock(&lt_mtx);
456 T_ASSERT_NOTNULL(success, "First mtx try lock");
457 success = lck_mtx_try_lock(&lt_mtx);
458 T_ASSERT_NULL(success, "Second mtx try lock for a locked mtx");
459 lck_mtx_unlock(&lt_mtx);
460
461 /*
462 * After regular grab, can't try lock.
463 */
464 lck_mtx_lock(&lt_mtx);
465 success = lck_mtx_try_lock(&lt_mtx);
466 T_ASSERT_NULL(success, "try lock should fail after regular lck_mtx_lock");
467 lck_mtx_unlock(&lt_mtx);
468
469 /*
470 * Two shared try locks on a previously unheld rwlock succeed, and a
471 * subsequent exclusive attempt fails.
472 */
473 success = lck_rw_try_lock_shared(&lt_rwlock);
474 T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
475 success = lck_rw_try_lock_shared(&lt_rwlock);
476 T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
477 success = lck_rw_try_lock_exclusive(&lt_rwlock);
478 T_ASSERT_NULL(success, "exclusive lock attempt on previously held lock should fail");
479 lck_rw_done(&lt_rwlock);
480 lck_rw_done(&lt_rwlock);
481
482 /*
483 * After regular shared grab, can trylock
484 * for shared but not for exclusive.
485 */
486 lck_rw_lock_shared(&lt_rwlock);
487 success = lck_rw_try_lock_shared(&lt_rwlock);
488 T_ASSERT_NOTNULL(success, "After regular shared grab another shared try lock should succeed.");
489 success = lck_rw_try_lock_exclusive(&lt_rwlock);
490 T_ASSERT_NULL(success, "After regular shared grab an exclusive lock attempt should fail.");
491 lck_rw_done(&lt_rwlock);
492 lck_rw_done(&lt_rwlock);
493
494 /*
495 * An exclusive try lock succeeds, subsequent shared and exclusive
496 * attempts fail.
497 */
498 success = lck_rw_try_lock_exclusive(&lt_rwlock);
499 T_ASSERT_NOTNULL(success, "An exclusive try lock should succeed");
500 success = lck_rw_try_lock_shared(&lt_rwlock);
501 T_ASSERT_NULL(success, "try lock in shared mode attempt after an exclusive grab should fail");
502 success = lck_rw_try_lock_exclusive(&lt_rwlock);
503 T_ASSERT_NULL(success, "try lock in exclusive mode attempt after an exclusive grab should fail");
504 lck_rw_done(&lt_rwlock);
505
506 /*
507 * After regular exclusive grab, neither kind of trylock succeeds.
508 */
509 lck_rw_lock_exclusive(&lt_rwlock);
510 success = lck_rw_try_lock_shared(&lt_rwlock);
511 T_ASSERT_NULL(success, "After regular exclusive grab, shared trylock should not succeed");
512 success = lck_rw_try_lock_exclusive(&lt_rwlock);
513 T_ASSERT_NULL(success, "After regular exclusive grab, exclusive trylock should not succeed");
514 lck_rw_done(&lt_rwlock);
515
516 /*
517 * First spin lock attempts succeed, second attempts fail.
518 */
519 success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
520 T_ASSERT_NOTNULL(success, "First spin lock attempts should succeed");
521 success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
522 T_ASSERT_NULL(success, "Second attempt to spin lock should fail");
523 hw_lock_unlock(&lt_hw_lock);
524
525 hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
526 success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
527 T_ASSERT_NULL(success, "After taking spin lock, trylock attempt should fail");
528 hw_lock_unlock(&lt_hw_lock);
529
530 lt_reset();
531 lt_thread_lock_grabbed = false;
532 lt_thread_lock_success = true;
533 lt_target_done_threads = 1;
534 OSMemoryBarrier();
535 lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
536 success = hw_lock_to(&lt_hw_lock, 100, LCK_GRP_NULL);
537 T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed");
538 if (real_ncpus == 1) {
539 mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
540 }
541 OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
542 lt_wait_for_lock_test_threads();
543 T_ASSERT_NULL(lt_thread_lock_success, "Second spin lock with timeout should fail and timeout");
544 if (real_ncpus == 1) {
545 mp_disable_preemption(); /* don't double-enable when we unlock */
546 }
547 hw_lock_unlock(&lt_hw_lock);
548
549 lt_reset();
550 lt_thread_lock_grabbed = false;
551 lt_thread_lock_success = true;
552 lt_target_done_threads = 1;
553 OSMemoryBarrier();
554 lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
555 hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
556 if (real_ncpus == 1) {
557 mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
558 }
559 OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
560 lt_wait_for_lock_test_threads();
561 T_ASSERT_NULL(lt_thread_lock_success, "after taking a spin lock, lock attempt with timeout should fail");
562 if (real_ncpus == 1) {
563 mp_disable_preemption(); /* don't double-enable when we unlock */
564 }
565 hw_lock_unlock(&lt_hw_lock);
566
567 success = lck_spin_try_lock(&lt_lck_spin_t);
568 T_ASSERT_NOTNULL(success, "spin trylock of previously unheld lock should succeed");
569 success = lck_spin_try_lock(&lt_lck_spin_t);
570 T_ASSERT_NULL(success, "spin trylock attempt of previously held lock (with trylock) should fail");
571 lck_spin_unlock(&lt_lck_spin_t);
572
573 lt_reset();
574 lt_thread_lock_grabbed = false;
575 lt_thread_lock_success = true;
576 lt_target_done_threads = 1;
577 lt_start_trylock_thread(lt_trylock_spin_try_lock);
578 lck_spin_lock(&lt_lck_spin_t);
579 if (real_ncpus == 1) {
580 mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
581 }
582 OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
583 lt_wait_for_lock_test_threads();
584 T_ASSERT_NULL(lt_thread_lock_success, "spin trylock attempt of previously held lock should fail");
585 if (real_ncpus == 1) {
586 mp_disable_preemption(); /* don't double-enable when we unlock */
587 }
588 lck_spin_unlock(&lt_lck_spin_t);
589
590 return KERN_SUCCESS;
591}
592
593static void
594lt_thread(void *arg, wait_result_t wres __unused)
595{
596 void (*func)(void) = (void (*)(void))arg;
597 uint32_t i;
598
599 for (i = 0; i < LOCK_TEST_ITERATIONS; i++) {
600 func();
601 }
602
603 OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
604}
605
606static void
607lt_start_lock_thread(thread_continue_t func)
608{
609 thread_t thread;
610 kern_return_t kr;
611
612 kr = kernel_thread_start(lt_thread, func, &thread);
613 assert(kr == KERN_SUCCESS);
614
615 thread_deallocate(thread);
616}
617
618#if __AMP__
619static void
620lt_bound_thread(void *arg, wait_result_t wres __unused)
621{
622 void (*func)(void) = (void (*)(void))arg;
623
624 int cpuid = OSIncrementAtomic((volatile SInt32 *)&lt_cpu_bind_id);
625
626 processor_t processor = processor_list;
627 while ((processor != NULL) && (processor->cpu_id != cpuid)) {
628 processor = processor->processor_list;
629 }
630
631 if (processor != NULL) {
632 thread_bind(processor);
633 }
634
635 thread_block(THREAD_CONTINUE_NULL);
636
637 func();
638
639 OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
640}
641
642static void
643lt_e_thread(void *arg, wait_result_t wres __unused)
644{
645 void (*func)(void) = (void (*)(void))arg;
646
647 thread_t thread = current_thread();
648
649 spl_t s = splsched();
650 thread_lock(thread);
651 thread->sched_flags |= TH_SFLAG_ECORE_ONLY;
652 thread_unlock(thread);
653 splx(s);
654
655 thread_block(THREAD_CONTINUE_NULL);
656
657 func();
658
659 OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
660}
661
662static void
663lt_p_thread(void *arg, wait_result_t wres __unused)
664{
665 void (*func)(void) = (void (*)(void))arg;
666
667 thread_t thread = current_thread();
668
669 spl_t s = splsched();
670 thread_lock(thread);
671 thread->sched_flags |= TH_SFLAG_PCORE_ONLY;
672 thread_unlock(thread);
673 splx(s);
674
675 thread_block(THREAD_CONTINUE_NULL);
676
677 func();
678
679 OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
680}
681
682static void
683lt_start_lock_thread_e(thread_continue_t func)
684{
685 thread_t thread;
686 kern_return_t kr;
687
688 kr = kernel_thread_start(lt_e_thread, func, &thread);
689 assert(kr == KERN_SUCCESS);
690
691 thread_deallocate(thread);
692}
693
694static void
695lt_start_lock_thread_p(thread_continue_t func)
696{
697 thread_t thread;
698 kern_return_t kr;
699
700 kr = kernel_thread_start(lt_p_thread, func, &thread);
701 assert(kr == KERN_SUCCESS);
702
703 thread_deallocate(thread);
704}
705
706static void
707lt_start_lock_thread_bound(thread_continue_t func)
708{
709 thread_t thread;
710 kern_return_t kr;
711
712 kr = kernel_thread_start(lt_bound_thread, func, &thread);
713 assert(kr == KERN_SUCCESS);
714
715 thread_deallocate(thread);
716}
717#endif
718
719static kern_return_t
720lt_test_locks()
721{
722 kern_return_t kr = KERN_SUCCESS;
723 lck_grp_attr_t *lga = lck_grp_attr_alloc_init();
724 lck_grp_t *lg = lck_grp_alloc_init("lock test", lga);
725
726 lck_mtx_init(&lt_mtx, lg, LCK_ATTR_NULL);
727 lck_rw_init(&lt_rwlock, lg, LCK_ATTR_NULL);
728 lck_spin_init(&lt_lck_spin_t, lg, LCK_ATTR_NULL);
729 hw_lock_init(&lt_hw_lock);
730
731 T_LOG("Testing locks.");
732
733 /* Try locks (custom) */
734 lt_reset();
735
736 T_LOG("Running try lock test.");
737 kr = lt_test_trylocks();
738 T_EXPECT_NULL(kr, "try lock test failed.");
739
740 /* Uncontended mutex */
741 T_LOG("Running uncontended mutex test.");
742 lt_reset();
743 lt_target_done_threads = 1;
744 lt_start_lock_thread(lt_grab_mutex);
745 lt_wait_for_lock_test_threads();
746 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
747 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
748
749 /* Contended mutex */
750 T_LOG("Running contended mutex test.");
751 lt_reset();
752 lt_target_done_threads = 3;
753 lt_start_lock_thread(lt_grab_mutex);
754 lt_start_lock_thread(lt_grab_mutex);
755 lt_start_lock_thread(lt_grab_mutex);
756 lt_wait_for_lock_test_threads();
757 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
758 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
759
760 /* Contended mutex: try locks*/
761 T_LOG("Running contended mutex trylock test.");
762 lt_reset();
763 lt_target_done_threads = 3;
764 lt_start_lock_thread(lt_grab_mutex_with_try);
765 lt_start_lock_thread(lt_grab_mutex_with_try);
766 lt_start_lock_thread(lt_grab_mutex_with_try);
767 lt_wait_for_lock_test_threads();
768 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
769 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
770
771 /* Uncontended exclusive rwlock */
772 T_LOG("Running uncontended exclusive rwlock test.");
773 lt_reset();
774 lt_target_done_threads = 1;
775 lt_start_lock_thread(lt_grab_rw_exclusive);
776 lt_wait_for_lock_test_threads();
777 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
778 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
779
780 /* Uncontended shared rwlock */
781
782 /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
783 * T_LOG("Running uncontended shared rwlock test.");
784 * lt_reset();
785 * lt_target_done_threads = 1;
786 * lt_start_lock_thread(lt_grab_rw_shared);
787 * lt_wait_for_lock_test_threads();
788 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
789 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
790 */
791
792 /* Contended exclusive rwlock */
793 T_LOG("Running contended exclusive rwlock test.");
794 lt_reset();
795 lt_target_done_threads = 3;
796 lt_start_lock_thread(lt_grab_rw_exclusive);
797 lt_start_lock_thread(lt_grab_rw_exclusive);
798 lt_start_lock_thread(lt_grab_rw_exclusive);
799 lt_wait_for_lock_test_threads();
800 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
801 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
802
803 /* One shared, two exclusive */
804 /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
805 * T_LOG("Running test with one shared and two exclusive rw lock threads.");
806 * lt_reset();
807 * lt_target_done_threads = 3;
808 * lt_start_lock_thread(lt_grab_rw_shared);
809 * lt_start_lock_thread(lt_grab_rw_exclusive);
810 * lt_start_lock_thread(lt_grab_rw_exclusive);
811 * lt_wait_for_lock_test_threads();
812 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
813 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
814 */
815
816 /* Four shared */
817 /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
818 * T_LOG("Running test with four shared holders.");
819 * lt_reset();
820 * lt_target_done_threads = 4;
821 * lt_start_lock_thread(lt_grab_rw_shared);
822 * lt_start_lock_thread(lt_grab_rw_shared);
823 * lt_start_lock_thread(lt_grab_rw_shared);
824 * lt_start_lock_thread(lt_grab_rw_shared);
825 * lt_wait_for_lock_test_threads();
826 * T_EXPECT_LE_UINT(lt_max_holders, 4, NULL);
827 */
828
829 /* Three doing upgrades and downgrades */
830 T_LOG("Running test with threads upgrading and downgrading.");
831 lt_reset();
832 lt_target_done_threads = 3;
833 lt_start_lock_thread(lt_upgrade_downgrade_rw);
834 lt_start_lock_thread(lt_upgrade_downgrade_rw);
835 lt_start_lock_thread(lt_upgrade_downgrade_rw);
836 lt_wait_for_lock_test_threads();
837 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
838 T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
839 T_EXPECT_EQ_UINT(lt_max_upgrade_holders, 1, NULL);
840
841 /* Uncontended - exclusive trylocks */
842 T_LOG("Running test with single thread doing exclusive rwlock trylocks.");
843 lt_reset();
844 lt_target_done_threads = 1;
845 lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
846 lt_wait_for_lock_test_threads();
847 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
848 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
849
850 /* Uncontended - shared trylocks */
851 /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
852 * T_LOG("Running test with single thread doing shared rwlock trylocks.");
853 * lt_reset();
854 * lt_target_done_threads = 1;
855 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
856 * lt_wait_for_lock_test_threads();
857 * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
858 * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
859 */
860
861 /* Three doing exclusive trylocks */
862 T_LOG("Running test with threads doing exclusive rwlock trylocks.");
863 lt_reset();
864 lt_target_done_threads = 3;
865 lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
866 lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
867 lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
868 lt_wait_for_lock_test_threads();
869 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
870 T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
871
872 /* Three doing shared trylocks */
873 /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
874 * T_LOG("Running test with threads doing shared rwlock trylocks.");
875 * lt_reset();
876 * lt_target_done_threads = 3;
877 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
878 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
879 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
880 * lt_wait_for_lock_test_threads();
881 * T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
882 * T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
883 */
884
885 /* Three doing various trylocks */
886 /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
887 * T_LOG("Running test with threads doing mixed rwlock trylocks.");
888 * lt_reset();
889 * lt_target_done_threads = 4;
890 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
891 * lt_start_lock_thread(lt_grab_rw_shared_with_try);
892 * lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
893 * lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
894 * lt_wait_for_lock_test_threads();
895 * T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
896 * T_EXPECT_LE_UINT(lt_max_holders, 2, NULL);
897 */
898
899 /* HW locks */
900 T_LOG("Running test with hw_lock_lock()");
901 lt_reset();
902 lt_target_done_threads = 3;
903 lt_start_lock_thread(lt_grab_hw_lock);
904 lt_start_lock_thread(lt_grab_hw_lock);
905 lt_start_lock_thread(lt_grab_hw_lock);
906 lt_wait_for_lock_test_threads();
907 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
908
909#if __AMP__
910 /* Ticket locks stress test */
911 T_LOG("Running Ticket locks stress test with lck_ticket_lock()");
912 extern unsigned int real_ncpus;
913 lck_ticket_init(&lt_ticket_lock);
914 lt_reset();
915 lt_target_done_threads = real_ncpus;
916 for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
917 lt_start_lock_thread_bound(lt_stress_ticket_lock);
918 }
919 lt_wait_for_lock_test_threads();
920 bool starvation = false;
921 uint total_local_count = 0;
922 for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
923 starvation = starvation || (lt_stress_local_counters[processor->cpu_id] < 10);
924 total_local_count += lt_stress_local_counters[processor->cpu_id];
925 }
926 if (total_local_count != lt_counter) {
927 T_FAIL("Lock failure\n");
928 } else if (starvation) {
929 T_FAIL("Lock starvation found\n");
930 } else {
931 T_PASS("Ticket locks stress test with lck_ticket_lock()");
932 }
933
934 /* AMP ticket locks stress test */
935 T_LOG("Running AMP Ticket locks stress test bound to clusters with lck_ticket_lock()");
936 lt_reset();
937 lt_target_done_threads = real_ncpus;
938 for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
939 processor_set_t pset = processor->processor_set;
940 if (pset->pset_cluster_type == PSET_AMP_P) {
941 lt_start_lock_thread_p(lt_stress_ticket_lock);
942 } else if (pset->pset_cluster_type == PSET_AMP_E) {
943 lt_start_lock_thread_e(lt_stress_ticket_lock);
944 } else {
945 lt_start_lock_thread(lt_stress_ticket_lock);
946 }
947 }
948 lt_wait_for_lock_test_threads();
949#endif
950
951 /* HW locks: trylocks */
952 T_LOG("Running test with hw_lock_try()");
953 lt_reset();
954 lt_target_done_threads = 3;
955 lt_start_lock_thread(lt_grab_hw_lock_with_try);
956 lt_start_lock_thread(lt_grab_hw_lock_with_try);
957 lt_start_lock_thread(lt_grab_hw_lock_with_try);
958 lt_wait_for_lock_test_threads();
959 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
960
961 /* HW locks: with timeout */
962 T_LOG("Running test with hw_lock_to()");
963 lt_reset();
964 lt_target_done_threads = 3;
965 lt_start_lock_thread(lt_grab_hw_lock_with_to);
966 lt_start_lock_thread(lt_grab_hw_lock_with_to);
967 lt_start_lock_thread(lt_grab_hw_lock_with_to);
968 lt_wait_for_lock_test_threads();
969 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
970
971 /* Spin locks */
972 T_LOG("Running test with lck_spin_lock()");
973 lt_reset();
974 lt_target_done_threads = 3;
975 lt_start_lock_thread(lt_grab_spin_lock);
976 lt_start_lock_thread(lt_grab_spin_lock);
977 lt_start_lock_thread(lt_grab_spin_lock);
978 lt_wait_for_lock_test_threads();
979 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
980
981 /* Spin locks: trylocks */
982 T_LOG("Running test with lck_spin_try_lock()");
983 lt_reset();
984 lt_target_done_threads = 3;
985 lt_start_lock_thread(lt_grab_spin_lock_with_try);
986 lt_start_lock_thread(lt_grab_spin_lock_with_try);
987 lt_start_lock_thread(lt_grab_spin_lock_with_try);
988 lt_wait_for_lock_test_threads();
989 T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
990
991 return KERN_SUCCESS;
992}
993
994#define MT_MAX_ARGS 8
995#define MT_INITIAL_VALUE 0xfeedbeef
996#define MT_W_VAL (0x00000000feedbeefULL) /* Drop in zeros */
997#define MT_S_VAL (0xfffffffffeedbeefULL) /* High bit is 1, so sign-extends as negative */
998#define MT_L_VAL (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32)) /* Two back-to-back */
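/*
 * Illustration of the expected transformation, using munge_wl as an example:
 * three 32-bit input words are expanded in place to two 64-bit arguments,
 *
 *	uint32_t in[3]  = { 0xfeedbeef, 0xfeedbeef, 0xfeedbeef };
 *	uint64_t out[2] = { 0x00000000feedbeefULL,    // 'w': zero-extended
 *	                    0xfeedbeeffeedbeefULL };  // 'l': two words back-to-back
 *
 * which is exactly what the munge_wl row in munger_tests[] below expects.
 */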
999
1000typedef void (*sy_munge_t)(void*);
1001
1002#define MT_FUNC(x) #x, x
1003struct munger_test {
1004 const char *mt_name;
1005 sy_munge_t mt_func;
1006 uint32_t mt_in_words;
1007 uint32_t mt_nout;
1008 uint64_t mt_expected[MT_MAX_ARGS];
1009} munger_tests[] = {
1010 {MT_FUNC(munge_w), 1, 1, {MT_W_VAL}},
1011 {MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}},
1012 {MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1013 {MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1014 {MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1015 {MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1016 {MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1017 {MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1018 {MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}},
1019 {MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1020 {MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1021 {MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1022 {MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1023 {MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
1024 {MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1025 {MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1026 {MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1027 {MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1028 {MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1029 {MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
1030 {MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1031 {MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1032 {MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1033 {MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1034 {MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
1035 {MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
1036 {MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1037 {MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1038 {MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
1039 {MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
1040 {MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}},
1041 {MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}},
1042 {MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}},
1043 {MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
1044 {MT_FUNC(munge_l), 2, 1, {MT_L_VAL}},
1045 {MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}},
1046 {MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1047 {MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
1048 {MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
1049 {MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}
1050};
1051
1052#define MT_TEST_COUNT (sizeof(munger_tests) / sizeof(struct munger_test))
1053
1054static void
1055mt_reset(uint32_t in_words, size_t total_size, uint32_t *data)
1056{
1057 uint32_t i;
1058
1059 for (i = 0; i < in_words; i++) {
1060 data[i] = MT_INITIAL_VALUE;
1061 }
1062
1063 if (in_words * sizeof(uint32_t) < total_size) {
1064 bzero(&data[in_words], total_size - in_words * sizeof(uint32_t));
1065 }
1066}
1067
1068static void
1069mt_test_mungers()
1070{
1071 uint64_t data[MT_MAX_ARGS];
1072 uint32_t i, j;
1073
1074 for (i = 0; i < MT_TEST_COUNT; i++) {
1075 struct munger_test *test = &munger_tests[i];
1076 int pass = 1;
1077
1078 T_LOG("Testing %s", test->mt_name);
1079
1080 mt_reset(test->mt_in_words, sizeof(data), (uint32_t*)data);
1081 test->mt_func(data);
1082
1083 for (j = 0; j < test->mt_nout; j++) {
1084 if (data[j] != test->mt_expected[j]) {
1085 T_FAIL("Index %d: expected %llx, got %llx.", j, test->mt_expected[j], data[j]);
1086 pass = 0;
1087 }
1088 }
1089 if (pass) {
1090 T_PASS(test->mt_name);
1091 }
1092 }
1093}
1094
1095/* Exception Callback Test */
1096static ex_cb_action_t
1097excb_test_action(
1098 ex_cb_class_t cb_class,
1099 void *refcon,
1100 const ex_cb_state_t *state
1101 )
1102{
1103 ex_cb_state_t *context = (ex_cb_state_t *)refcon;
1104
1105 if ((NULL == refcon) || (NULL == state)) {
1106 return EXCB_ACTION_TEST_FAIL;
1107 }
1108
1109 context->far = state->far;
1110
1111 switch (cb_class) {
1112 case EXCB_CLASS_TEST1:
1113 return EXCB_ACTION_RERUN;
1114 case EXCB_CLASS_TEST2:
1115 return EXCB_ACTION_NONE;
1116 default:
1117 return EXCB_ACTION_TEST_FAIL;
1118 }
1119}
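/*
 * excb_test_action() copies the faulting address supplied to ex_cb_invoke()
 * into the registered context and returns a class-specific action, so
 * ex_cb_test() below can verify both the dispatch and the payload for each
 * callback class.
 */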
1120
1121
1122kern_return_t
1123ex_cb_test()
1124{
1125 const vm_offset_t far1 = 0xdead0001;
1126 const vm_offset_t far2 = 0xdead0002;
1127 kern_return_t kr;
1128 ex_cb_state_t test_context_1 = {0xdeadbeef};
1129 ex_cb_state_t test_context_2 = {0xdeadbeef};
1130 ex_cb_action_t action;
1131
1132 T_LOG("Testing Exception Callback.");
1133
1134 T_LOG("Running registration test.");
1135
1136 kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
1137 T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST1 exception callback");
1138 kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
1139 T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST2 exception callback");
1140
1141 kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
1142 T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST2 exception callback");
1143 kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
1144 T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST1 exception callback");
1145
1146 T_LOG("Running invocation test.");
1147
1148 action = ex_cb_invoke(EXCB_CLASS_TEST1, far1);
1149 T_ASSERT(EXCB_ACTION_RERUN == action, NULL);
1150 T_ASSERT(far1 == test_context_1.far, NULL);
1151
1152 action = ex_cb_invoke(EXCB_CLASS_TEST2, far2);
1153 T_ASSERT(EXCB_ACTION_NONE == action, NULL);
1154 T_ASSERT(far2 == test_context_2.far, NULL);
1155
1156 action = ex_cb_invoke(EXCB_CLASS_TEST3, 0);
1157 T_ASSERT(EXCB_ACTION_NONE == action, NULL);
1158
1159 return KERN_SUCCESS;
1160}
1161
1162#if defined(HAS_APPLE_PAC)
1163
1164/*
1165 *
1166 * arm64_ropjop_test - basic xnu ROP/JOP test plan
1167 *
1168 * - assert ROP/JOP configured and running status match
1169 * - assert all AppleMode ROP/JOP features enabled
1170 * - ensure ROP/JOP keys are set and diversified
1171 * - sign a KVA (the address of this function),assert it was signed (changed)
1172 * - authenticate the newly signed KVA
1173 * - assert the authed KVA is the original KVA
1174 * - corrupt a signed ptr, auth it, ensure auth failed
1175 * - assert the failed authIB of corrupted pointer is tagged
1176 *
1177 */
1178
1179kern_return_t
1180arm64_ropjop_test()
1181{
1182 T_LOG("Testing ROP/JOP");
1183
1184 /* how is ROP/JOP configured */
1185 boolean_t config_rop_enabled = TRUE;
1186 boolean_t config_jop_enabled = !(BootArgs->bootFlags & kBootFlagsDisableJOP);
1187
1188
1189 /* assert all AppleMode ROP/JOP features enabled */
1190 uint64_t apctl = __builtin_arm_rsr64(ARM64_REG_APCTL_EL1);
1191#if __APSTS_SUPPORTED__
1192 uint64_t apsts = __builtin_arm_rsr64(ARM64_REG_APSTS_EL1);
1193 T_ASSERT(apsts & APSTS_EL1_MKEYVld, NULL);
1194#else
1195 T_ASSERT(apctl & APCTL_EL1_MKEYVld, NULL);
1196#endif /* __APSTS_SUPPORTED__ */
1197 T_ASSERT(apctl & APCTL_EL1_AppleMode, NULL);
1198 T_ASSERT(apctl & APCTL_EL1_KernKeyEn, NULL);
1199
1200 /* ROP/JOP keys enabled current status */
1201 bool status_jop_enabled, status_rop_enabled;
1202#if __APSTS_SUPPORTED__ /* H13+ */
1203 // TODO: update unit test to understand ROP/JOP enabled config for H13+
1204 status_jop_enabled = status_rop_enabled = apctl & APCTL_EL1_EnAPKey1;
1205#elif __APCFG_SUPPORTED__ /* H12 */
1206 uint64_t apcfg_el1 = __builtin_arm_rsr64(APCFG_EL1);
1207 status_jop_enabled = status_rop_enabled = apcfg_el1 & APCFG_EL1_ELXENKEY;
1208#else /* !__APCFG_SUPPORTED__ H11 */
1209 uint64_t sctlr_el1 = __builtin_arm_rsr64("SCTLR_EL1");
1210 status_jop_enabled = sctlr_el1 & SCTLR_PACIA_ENABLED;
1211 status_rop_enabled = sctlr_el1 & SCTLR_PACIB_ENABLED;
1212#endif /* __APSTS_SUPPORTED__ */
1213
1214 /* assert configured and running status match */
1215 T_ASSERT(config_rop_enabled == status_rop_enabled, NULL);
1216 T_ASSERT(config_jop_enabled == status_jop_enabled, NULL);
1217
1218
1219 if (config_jop_enabled) {
1220 /* jop key */
1221 uint64_t apiakey_hi = __builtin_arm_rsr64(ARM64_REG_APIAKEYHI_EL1);
1222 uint64_t apiakey_lo = __builtin_arm_rsr64(ARM64_REG_APIAKEYLO_EL1);
1223
1224 /* ensure JOP key is set and diversified */
1225 T_EXPECT(apiakey_hi != KERNEL_ROP_ID && apiakey_lo != KERNEL_ROP_ID, NULL);
1226 T_EXPECT(apiakey_hi != 0 && apiakey_lo != 0, NULL);
1227 }
1228
1229 if (config_rop_enabled) {
1230 /* rop key */
1231 uint64_t apibkey_hi = __builtin_arm_rsr64(ARM64_REG_APIBKEYHI_EL1);
1232 uint64_t apibkey_lo = __builtin_arm_rsr64(ARM64_REG_APIBKEYLO_EL1);
1233
1234 /* ensure ROP key is set and diversified */
1235 T_EXPECT(apibkey_hi != KERNEL_ROP_ID && apibkey_lo != KERNEL_ROP_ID, NULL);
1236 T_EXPECT(apibkey_hi != 0 && apibkey_lo != 0, NULL);
1237
1238 /* sign a KVA (the address of this function) */
1239 uint64_t kva_signed = (uint64_t) ptrauth_sign_unauthenticated((void *)&config_rop_enabled, ptrauth_key_asib, 0);
1240
1241 /* assert it was signed (changed) */
1242 T_EXPECT(kva_signed != (uint64_t)&config_rop_enabled, NULL);
1243
1244 /* authenticate the newly signed KVA */
1245 uint64_t kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_signed, ptrauth_key_asib, 0);
1246
1247 /* assert the authed KVA is the original KVA */
1248 T_EXPECT(kva_authed == (uint64_t)&config_rop_enabled, NULL);
1249
1250 /* corrupt a signed ptr, auth it, ensure auth failed */
1251 uint64_t kva_corrupted = kva_signed ^ 1;
1252
1253 /* authenticate the corrupted pointer */
1254 kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_corrupted, ptrauth_key_asib, 0);
1255
1256 /* when AuthIB fails, bits 63:62 will be set to 2'b10 */
1257 uint64_t auth_fail_mask = 3ULL << 61;
1258 uint64_t authib_fail = 2ULL << 61;
1259
1260 /* assert the failed authIB of corrupted pointer is tagged */
1261 T_EXPECT((kva_authed & auth_fail_mask) == authib_fail, NULL);
1262 }
1263
1264 return KERN_SUCCESS;
1265}
1266#endif /* defined(HAS_APPLE_PAC) */
1267
1268#if __ARM_PAN_AVAILABLE__
1269
1270struct pan_test_thread_args {
1271 volatile bool join;
1272};
1273
1274static void
1275arm64_pan_test_thread(void *arg, wait_result_t __unused wres)
1276{
1277 T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);
1278
1279 struct pan_test_thread_args *args = arg;
1280
1281 for (processor_t p = processor_list; p != NULL; p = p->processor_list) {
1282 thread_bind(p);
1283 thread_block(THREAD_CONTINUE_NULL);
1284 kprintf("Running PAN test on cpu %d\n", p->cpu_id);
1285 arm64_pan_test();
1286 }
1287
1288 /* unbind thread from specific cpu */
1289 thread_bind(PROCESSOR_NULL);
1290 thread_block(THREAD_CONTINUE_NULL);
1291
1292 while (!args->join) {
1293 ;
1294 }
1295
1296 thread_wakeup(args);
1297}
1298
1299kern_return_t
1300arm64_late_pan_test()
1301{
1302 thread_t thread;
1303 kern_return_t kr;
1304
1305 struct pan_test_thread_args args;
1306 args.join = false;
1307
1308 kr = kernel_thread_start(arm64_pan_test_thread, &args, &thread);
1309 assert(kr == KERN_SUCCESS);
1310
1311 thread_deallocate(thread);
1312
1313 assert_wait(&args, THREAD_UNINT);
1314 args.join = true;
1315 thread_block(THREAD_CONTINUE_NULL);
1316 return KERN_SUCCESS;
1317}
1318
1319kern_return_t
1320arm64_pan_test()
1321{
1322 vm_offset_t priv_addr = _COMM_PAGE_SIGNATURE;
1323
1324 T_LOG("Testing PAN.");
1325
1326
1327 T_ASSERT((__builtin_arm_rsr("SCTLR_EL1") & SCTLR_PAN_UNCHANGED) == 0, "SCTLR_EL1.SPAN must be cleared");
1328
1329 T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);
1330
1331 pan_exception_level = 0;
1332 pan_fault_value = 0xDE;
1333 // convert priv_addr to one that is accessible from user mode
1334 pan_test_addr = priv_addr + _COMM_HIGH_PAGE64_BASE_ADDRESS -
1335 _COMM_PAGE_START_ADDRESS;
1336
1337 // Below should trigger a PAN exception as pan_test_addr is accessible
1338 // in user mode
1339 // The exception handler, upon recognizing the fault address is pan_test_addr,
1340 // will disable PAN and rerun this instruction successfully
1341 T_ASSERT(*(char *)pan_test_addr == *(char *)priv_addr, NULL);
1342
1343 T_ASSERT(pan_exception_level == 2, NULL);
1344
1345 T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);
1346
1347 T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);
1348
1349 pan_exception_level = 0;
1350 pan_fault_value = 0xAD;
1351 pan_ro_addr = (vm_offset_t) &pan_ro_value;
1352
1353 // Force a permission fault while PAN is disabled to make sure PAN is
1354 // re-enabled during the exception handler.
1355 *((volatile uint64_t*)pan_ro_addr) = 0xFEEDFACECAFECAFE;
1356
1357 T_ASSERT(pan_exception_level == 2, NULL);
1358
1359 T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);
1360
1361 T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);
1362
1363 pan_test_addr = 0;
1364 pan_ro_addr = 0;
1365
1366 __builtin_arm_wsr("pan", 1);
1367
1368 return KERN_SUCCESS;
1369}
1370#endif /* __ARM_PAN_AVAILABLE__ */
1371
1372
1373kern_return_t
1374arm64_lock_test()
1375{
1376 return lt_test_locks();
1377}
1378
1379kern_return_t
1380arm64_munger_test()
1381{
1382 mt_test_mungers();
1383 return 0;
1384}
1385
1386#if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST)
1387SECURITY_READ_ONLY_LATE(uint64_t) ctrr_ro_test;
1388uint64_t ctrr_nx_test = 0xd65f03c0; /* RET */
1389volatile uint64_t ctrr_exception_esr;
1390vm_offset_t ctrr_test_va;
1391vm_offset_t ctrr_test_page;
1392
1393kern_return_t
1394ctrr_test(void)
1395{
1396 processor_t p;
1397 boolean_t ctrr_disable = FALSE;
1398
1399 PE_parse_boot_argn("-unsafe_kernel_text", &ctrr_disable, sizeof(ctrr_disable));
1400
1401 if (ctrr_disable) {
1402 T_LOG("Skipping CTRR test when -unsafe_kernel_text boot-arg present");
1403 return KERN_SUCCESS;
1404 }
1405
1406 T_LOG("Running CTRR test.");
1407
1408 for (p = processor_list; p != NULL; p = p->processor_list) {
1409 thread_bind(p);
1410 thread_block(THREAD_CONTINUE_NULL);
1411 T_LOG("Running CTRR test on cpu %d\n", p->cpu_id);
1412 ctrr_test_cpu();
1413 }
1414
1415 /* unbind thread from specific cpu */
1416 thread_bind(PROCESSOR_NULL);
1417 thread_block(THREAD_CONTINUE_NULL);
1418
1419 return KERN_SUCCESS;
1420}
1421
1422/* test CTRR on a cpu, caller to bind thread to desired cpu */
1423/* ctrr_test_page was reserved during bootstrap process */
1424kern_return_t
1425ctrr_test_cpu(void)
1426{
1427 ppnum_t ro_pn, nx_pn;
1428 uint64_t *ctrr_ro_test_ptr;
1429 void (*ctrr_nx_test_ptr)(void);
1430 kern_return_t kr;
1431 uint64_t prot = 0;
1432 extern uint64_t rorgn_begin, rorgn_end;
1433 extern vm_offset_t virtual_space_start;
1434
1435 /* rorgn = [rorgn_begin_va, rorgn_end_va) */
1436
1437 vm_offset_t rorgn_begin_va = phystokv(rorgn_begin);
1438 vm_offset_t rorgn_end_va = phystokv(rorgn_end) + PAGE_SIZE;
1439 vm_offset_t ro_test_va = (vm_offset_t)&ctrr_ro_test;
1440 vm_offset_t nx_test_va = (vm_offset_t)&ctrr_nx_test;
1441
1442 T_EXPECT(rorgn_begin_va <= ro_test_va && ro_test_va < rorgn_end_va, "Expect ro_test_va to be inside the CTRR region");
1443 T_EXPECT((nx_test_va < rorgn_begin_va) ^ (nx_test_va >= rorgn_end_va), "Expect nx_test_va to be outside the CTRR region");
1444
1445 ro_pn = pmap_find_phys(kernel_pmap, ro_test_va);
1446 nx_pn = pmap_find_phys(kernel_pmap, nx_test_va);
1447 T_EXPECT(ro_pn && nx_pn, "Expect ro page number and nx page number to be non zero");
1448
1449 T_LOG("test virtual page: %p, ctrr_ro_test: %p, ctrr_nx_test: %p, ro_pn: %x, nx_pn: %x ",
1450 (void *)ctrr_test_page, &ctrr_ro_test, &ctrr_nx_test, ro_pn, nx_pn);
1451
1452 prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
1453 T_EXPECT(~prot & ARM_TTE_VALID, "Expect ctrr_test_page to be unmapped");
1454
1455 T_LOG("Read only region test mapping virtual page %p to CTRR RO page number %d", ctrr_test_page, ro_pn);
1456 kr = pmap_enter(kernel_pmap, ctrr_test_page, ro_pn,
1457 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
1458 T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RW mapping to succeed");
1459
1460 // assert entire mmu prot path (Hierarchical protection model) is NOT RO
1461 // fetch effective block level protections from table/block entries
1462 prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
1463 T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RWNA && (prot & ARM_PTE_PNX), "Mapping is EL1 RWNX");
1464
1465 ctrr_test_va = ctrr_test_page + (ro_test_va & PAGE_MASK);
1466 ctrr_ro_test_ptr = (void *)ctrr_test_va;
1467
1468 T_LOG("Read only region test writing to %p to provoke data abort", ctrr_ro_test_ptr);
1469
1470 // should cause data abort
1471 *ctrr_ro_test_ptr = 1;
1472
1473 // ensure write permission fault at expected level
1474 // data abort handler will set ctrr_exception_esr when ctrr_test_va takes a permission fault
1475
1476 T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_DABORT_EL1, "Data Abort from EL1 expected");
1477 T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");
1478 T_EXPECT(ESR_ISS(ctrr_exception_esr) & ISS_DA_WNR, "Write Fault Expected");
1479
1480 ctrr_test_va = 0;
1481 ctrr_exception_esr = 0;
1482 pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);
1483
1484 T_LOG("No execute test mapping virtual page %p to CTRR PXN page number %d", ctrr_test_page, nx_pn);
1485
1486 kr = pmap_enter(kernel_pmap, ctrr_test_page, nx_pn,
1487 VM_PROT_READ | VM_PROT_EXECUTE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
1488 T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RX mapping to succeed");
1489
1490 // assert entire mmu prot path (Hierarchical protection model) is NOT XN
1491 prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
1492 T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RONA && (~prot & ARM_PTE_PNX), "Mapping is EL1 ROX");
1493
1494 ctrr_test_va = ctrr_test_page + (nx_test_va & PAGE_MASK);
1495 ctrr_nx_test_ptr = (void *)ctrr_test_va;
1496
1497 T_LOG("No execute test calling ctrr_nx_test_ptr(): %p to provoke instruction abort", ctrr_nx_test_ptr);
1498
1499#if __has_feature(ptrauth_calls)
1500 // must sign before calling if we're creating function pointers out of thin air
1501 ctrr_nx_test_ptr = ptrauth_sign_unauthenticated(ctrr_nx_test_ptr, ptrauth_key_function_pointer, 0);
1502#endif
1503 // should cause prefetch abort
1504 ctrr_nx_test_ptr();
1505
1506 // TODO: ensure execute permission fault at expected level
1507 T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_IABORT_EL1, "Instruction abort from EL1 Expected");
1508 T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");
1509
1510 ctrr_test_va = 0;
1511 ctrr_exception_esr = 0;
1512 pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);
1513 return KERN_SUCCESS;
1514}
1515#endif /* defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST) */
1516
1517#if HAS_TWO_STAGE_SPR_LOCK
1518
1519#define STR1(x) #x
1520#define STR(x) STR1(x)
1521
1522volatile vm_offset_t spr_lock_test_addr;
1523volatile uint32_t spr_lock_exception_esr;
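/*
 * arm64_msr_lock_test() (declared extern above) attempts an MSR write to
 * ARM64_REG_HID8 with the supplied value; spr_lock_test_addr records where
 * that write lives for the exception handler, and spr_lock_exception_esr
 * receives the ESR of the expected synchronous abort checked below.
 */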
1524
1525kern_return_t
1526arm64_spr_lock_test()
1527{
1528 processor_t p;
1529
1530 for (p = processor_list; p != NULL; p = p->processor_list) {
1531 thread_bind(p);
1532 thread_block(THREAD_CONTINUE_NULL);
1533 T_LOG("Running SPR lock test on cpu %d\n", p->cpu_id);
1534
1535 uint64_t orig_value = __builtin_arm_rsr64(STR(ARM64_REG_HID8));
1536 spr_lock_test_addr = (vm_offset_t)VM_KERNEL_STRIP_PTR(arm64_msr_lock_test);
1537 spr_lock_exception_esr = 0;
1538 arm64_msr_lock_test(~orig_value);
1539 T_EXPECT(spr_lock_exception_esr != 0, "MSR write generated synchronous abort");
1540
1541 uint64_t new_value = __builtin_arm_rsr64(STR(ARM64_REG_HID8));
1542 T_EXPECT(orig_value == new_value, "MSR write did not succeed");
1543
1544 spr_lock_test_addr = 0;
1545 }
1546
1547 /* unbind thread from specific cpu */
1548 thread_bind(PROCESSOR_NULL);
1549 thread_block(THREAD_CONTINUE_NULL);
1550
1551 T_PASS("Done running SPR lock tests");
1552
1553 return KERN_SUCCESS;
1554}
1555
1556#endif /* HAS_TWO_STAGE_SPR_LOCK */