/*
 * Copyright (c) 2011-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright notice
 * and this permission notice appear in all copies of the software,
 * derivative works or modified versions, and any portions thereof, and that
 * both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.
 * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
 * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA
 *
 * any improvements or extensions that they make and grant Carnegie Mellon the
 * rights to redistribute these changes.
 */
#include <mach_ldebug.h>

#define LOCK_PRIVATE 1

#include <kern/kalloc.h>
#include <kern/cpu_number.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>

#include <tests/xnupost.h>

#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_print.h>

#include <sys/kdebug.h>
#include <sys/munge.h>
#include <machine/cpu_capabilities.h>
#include <arm/cpu_data_internal.h>

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
#include <arm64/amcc_rorgn.h>
#endif // defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
kern_return_t arm64_lock_test(void);
kern_return_t arm64_munger_test(void);
kern_return_t ex_cb_test(void);
kern_return_t arm64_pan_test(void);
kern_return_t arm64_late_pan_test(void);

#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
kern_return_t arm64_ropjop_test(void);
#endif /* defined(HAS_APPLE_PAC) */

#if defined(KERNEL_INTEGRITY_CTRR)
kern_return_t ctrr_test(void);
kern_return_t ctrr_test_cpu(void);
#endif /* defined(KERNEL_INTEGRITY_CTRR) */

#if HAS_TWO_STAGE_SPR_LOCK
kern_return_t arm64_spr_lock_test(void);
extern void arm64_msr_lock_test(uint64_t);
#endif /* HAS_TWO_STAGE_SPR_LOCK */
// exception handler ignores this fault address during PAN test
#if __ARM_PAN_AVAILABLE__
const uint64_t pan_ro_value = 0xFEEDB0B0DEADBEEF;
vm_offset_t pan_test_addr = 0;
vm_offset_t pan_ro_addr = 0;
volatile int pan_exception_level = 0;
volatile char pan_fault_value = 0;
#endif /* __ARM_PAN_AVAILABLE__ */
#include <libkern/OSAtomic.h>
#define LOCK_TEST_ITERATIONS 50
static hw_lock_data_t lt_hw_lock;
static lck_spin_t lt_lck_spin_t;
static lck_mtx_t lt_mtx;
static lck_rw_t lt_rwlock;
static volatile uint32_t lt_counter = 0;
static volatile int lt_spinvolatile;
static volatile uint32_t lt_max_holders = 0;
static volatile uint32_t lt_upgrade_holders = 0;
static volatile uint32_t lt_max_upgrade_holders = 0;
static volatile uint32_t lt_num_holders = 0;
static volatile uint32_t lt_done_threads;
static volatile uint32_t lt_target_done_threads;
static volatile uint32_t lt_cpu_bind_id = 0;
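/*
 * Lock test harness: each lt_grab_* worker takes one of the locks above,
 * bumps lt_counter while holding it, and records how many holders were
 * observed at once (lt_max_holders / lt_max_upgrade_holders). The main test
 * thread sets lt_target_done_threads, waits for lt_done_threads to catch up,
 * and then checks the counters.
 */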
static void
lt_note_another_blocking_lock_holder()
{
	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	lt_num_holders++;
	lt_max_holders = (lt_max_holders < lt_num_holders) ? lt_num_holders : lt_max_holders;
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_note_blocking_lock_release()
{
	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	lt_num_holders--;
	hw_lock_unlock(&lt_hw_lock);
}
static void
lt_spin_a_little_bit()
{
	int i;

	for (i = 0; i < 10000; i++) {
		lt_spinvolatile++;
	}
}

static void
lt_sleep_a_little_bit()
{
	delay(100);
}

static void
lt_grab_mutex()
{
	lck_mtx_lock(&lt_mtx);
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_mtx_unlock(&lt_mtx);
}
static void
lt_grab_mutex_with_try()
{
	while (0 == lck_mtx_try_lock(&lt_mtx)) {
		;
	}
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_mtx_unlock(&lt_mtx);
}
static void
lt_grab_rw_exclusive()
{
	lck_rw_lock_exclusive(&lt_rwlock);
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_rw_done(&lt_rwlock);
}
static void
lt_grab_rw_exclusive_with_try()
{
	while (0 == lck_rw_try_lock_exclusive(&lt_rwlock)) {
		lt_sleep_a_little_bit();
	}
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_rw_done(&lt_rwlock);
}
/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
 *  static void
 *  lt_grab_rw_shared()
 *  {
 *       lck_rw_lock_shared(&lt_rwlock);
 *
 *       lt_note_another_blocking_lock_holder();
 *       lt_sleep_a_little_bit();
 *       lt_note_blocking_lock_release();
 *
 *       lck_rw_done(&lt_rwlock);
 *  }
 */

/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
 *  static void
 *  lt_grab_rw_shared_with_try()
 *  {
 *       while(0 == lck_rw_try_lock_shared(&lt_rwlock));
 *
 *       lt_note_another_blocking_lock_holder();
 *       lt_sleep_a_little_bit();
 *       lt_note_blocking_lock_release();
 *
 *       lck_rw_done(&lt_rwlock);
 *  }
 */
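/*
 * Note: lck_rw_lock_shared_to_exclusive() drops the shared hold when the
 * upgrade fails, so on an upgrade failure the lock below is re-acquired in
 * exclusive mode (first with a trylock, then unconditionally).
 */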
static void
lt_upgrade_downgrade_rw()
{
	boolean_t upgraded, success;

	success = lck_rw_try_lock_shared(&lt_rwlock);
	if (!success) {
		lck_rw_lock_shared(&lt_rwlock);
	}

	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_note_blocking_lock_release();

	upgraded = lck_rw_lock_shared_to_exclusive(&lt_rwlock);
	if (!upgraded) {
		success = lck_rw_try_lock_exclusive(&lt_rwlock);

		if (!success) {
			lck_rw_lock_exclusive(&lt_rwlock);
		}
	}

	lt_upgrade_holders++;
	if (lt_upgrade_holders > lt_max_upgrade_holders) {
		lt_max_upgrade_holders = lt_upgrade_holders;
	}

	lt_counter++;
	lt_sleep_a_little_bit();

	lt_upgrade_holders--;

	lck_rw_lock_exclusive_to_shared(&lt_rwlock);

	lt_spin_a_little_bit();
	lck_rw_done(&lt_rwlock);
}
const int limit = 1000000;
static int lt_stress_local_counters[MAX_CPUS];

lck_ticket_t lt_ticket_lock;
lck_grp_t lt_ticket_grp;
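/*
 * Ticket lock stress: every CPU runs lt_stress_ticket_lock() and counts its
 * own successful acquisitions in lt_stress_local_counters[cpuid]. At the end
 * the per-CPU counts must sum to the shared lt_counter, and a CPU with fewer
 * than 10 acquisitions is reported as starved.
 */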
static void
lt_stress_ticket_lock()
{
	int local_counter = 0;

	uint cpuid = cpu_number();

	kprintf("%s>cpu %d starting\n", __FUNCTION__, cpuid);

	lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
	lt_counter++;
	local_counter++;
	lck_ticket_unlock(&lt_ticket_lock);

	while (lt_counter < lt_target_done_threads) {
		;
	}

	kprintf("%s>cpu %d started\n", __FUNCTION__, cpuid);

	while (lt_counter < limit) {
		lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
		if (lt_counter < limit) {
			lt_counter++;
			local_counter++;
		}
		lck_ticket_unlock(&lt_ticket_lock);
	}

	lt_stress_local_counters[cpuid] = local_counter;

	kprintf("%s>final counter %d cpu %d incremented the counter %d times\n", __FUNCTION__, lt_counter, cpuid, local_counter);
}
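/*
 * The hw-lock and spin-lock helpers that follow each take their lock,
 * increment lt_counter once, spin briefly, and release, so the final count
 * can be checked against LOCK_TEST_ITERATIONS * lt_target_done_threads.
 */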
static void
lt_grab_hw_lock()
{
	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	lt_counter++;
	lt_spin_a_little_bit();
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_hw_lock_with_try()
{
	while (0 == hw_lock_try(&lt_hw_lock, LCK_GRP_NULL)) {
		;
	}
	lt_counter++;
	lt_spin_a_little_bit();
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_hw_lock_with_to()
{
	while (0 == hw_lock_to(&lt_hw_lock, LockTimeOut, LCK_GRP_NULL)) {
		mp_enable_preemption();
	}
	lt_counter++;
	lt_spin_a_little_bit();
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_spin_lock()
{
	lck_spin_lock(&lt_lck_spin_t);
	lt_counter++;
	lt_spin_a_little_bit();
	lck_spin_unlock(&lt_lck_spin_t);
}

static void
lt_grab_spin_lock_with_try()
{
	while (0 == lck_spin_try_lock(&lt_lck_spin_t)) {
		;
	}
	lt_counter++;
	lt_spin_a_little_bit();
	lck_spin_unlock(&lt_lck_spin_t);
}
static volatile boolean_t lt_thread_lock_grabbed;
static volatile boolean_t lt_thread_lock_success;

static void
lt_reset()
{
	lt_counter = 0;
	lt_max_holders = 0;
	lt_num_holders = 0;
	lt_max_upgrade_holders = 0;
	lt_upgrade_holders = 0;
	lt_done_threads = 0;
	lt_target_done_threads = 0;

	OSMemoryBarrier();
}
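/*
 * Trylock helper threads: the main test thread takes the lock first and then
 * flips lt_thread_lock_grabbed; the helper then attempts the lock
 * (hw_lock_to() with a timeout of 100, or a plain lck_spin_try_lock()) and
 * stores the result in lt_thread_lock_success for the main thread to check.
 */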
static void
lt_trylock_hw_lock_with_to()
{
	OSMemoryBarrier();
	while (!lt_thread_lock_grabbed) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	lt_thread_lock_success = hw_lock_to(&lt_hw_lock, 100, LCK_GRP_NULL);
	OSMemoryBarrier();
	mp_enable_preemption();
}

static void
lt_trylock_spin_try_lock()
{
	OSMemoryBarrier();
	while (!lt_thread_lock_grabbed) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	lt_thread_lock_success = lck_spin_try_lock(&lt_lck_spin_t);
	if (lt_thread_lock_success) {
		lck_spin_unlock(&lt_lck_spin_t);
	}
}
static void
lt_trylock_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void (*)(void))arg;

	func();

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_start_trylock_thread(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_trylock_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}
static void
lt_wait_for_lock_test_threads()
{
	OSMemoryBarrier();
	/* Spin to reduce dependencies */
	while (lt_done_threads < lt_target_done_threads) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	OSMemoryBarrier();
}

static kern_return_t
lt_test_trylocks()
{
	boolean_t success;
	extern unsigned int real_ncpus;
	/*
	 * First mtx try lock succeeds, second fails.
	 */
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NOTNULL(success, "First mtx try lock");
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NULL(success, "Second mtx try lock for a locked mtx");
	lck_mtx_unlock(&lt_mtx);

	/*
	 * After regular grab, can't try lock.
	 */
	lck_mtx_lock(&lt_mtx);
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NULL(success, "try lock should fail after regular lck_mtx_lock");
	lck_mtx_unlock(&lt_mtx);

	/*
	 * Two shared try locks on a previously unheld rwlock succeed, and a
	 * subsequent exclusive attempt fails.
	 */
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "exclusive lock attempt on previously held lock should fail");
	lck_rw_done(&lt_rwlock);
	lck_rw_done(&lt_rwlock);

	/*
	 * After regular shared grab, can trylock
	 * for shared but not for exclusive.
	 */
	lck_rw_lock_shared(&lt_rwlock);
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "After regular shared grab another shared try lock should succeed.");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular shared grab an exclusive lock attempt should fail.");
	lck_rw_done(&lt_rwlock);
	lck_rw_done(&lt_rwlock);
	/*
	 * An exclusive try lock succeeds, subsequent shared and exclusive
	 * attempts fail.
	 */
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "An exclusive try lock should succeed");
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NULL(success, "try lock in shared mode attempt after an exclusive grab should fail");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "try lock in exclusive mode attempt after an exclusive grab should fail");
	lck_rw_done(&lt_rwlock);

	/*
	 * After regular exclusive grab, neither kind of trylock succeeds.
	 */
	lck_rw_lock_exclusive(&lt_rwlock);
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular exclusive grab, shared trylock should not succeed");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular exclusive grab, exclusive trylock should not succeed");
	lck_rw_done(&lt_rwlock);

	/*
	 * First spin lock attempts succeed, second attempts fail.
	 */
	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
	T_ASSERT_NOTNULL(success, "First spin lock attempts should succeed");
	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
	T_ASSERT_NULL(success, "Second attempt to spin lock should fail");
	hw_lock_unlock(&lt_hw_lock);
	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
	T_ASSERT_NULL(success, "After taking spin lock, trylock attempt should fail");
	hw_lock_unlock(&lt_hw_lock);

	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	OSMemoryBarrier();
	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
	success = hw_lock_to(&lt_hw_lock, 100, LCK_GRP_NULL);
	T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed");
	if (real_ncpus == 1) {
		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
	}
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "Second spin lock with timeout should fail and timeout");
	if (real_ncpus == 1) {
		mp_disable_preemption(); /* don't double-enable when we unlock */
	}
	hw_lock_unlock(&lt_hw_lock);
	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	OSMemoryBarrier();
	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	if (real_ncpus == 1) {
		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
	}
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "after taking a spin lock, lock attempt with timeout should fail");
	if (real_ncpus == 1) {
		mp_disable_preemption(); /* don't double-enable when we unlock */
	}
	hw_lock_unlock(&lt_hw_lock);

	success = lck_spin_try_lock(&lt_lck_spin_t);
	T_ASSERT_NOTNULL(success, "spin trylock of previously unheld lock should succeed");
	success = lck_spin_try_lock(&lt_lck_spin_t);
	T_ASSERT_NULL(success, "spin trylock attempt of previously held lock (with trylock) should fail");
	lck_spin_unlock(&lt_lck_spin_t);
	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	lt_start_trylock_thread(lt_trylock_spin_try_lock);
	lck_spin_lock(&lt_lck_spin_t);
	if (real_ncpus == 1) {
		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
	}
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "spin trylock attempt of previously held lock should fail");
	if (real_ncpus == 1) {
		mp_disable_preemption(); /* don't double-enable when we unlock */
	}
	lck_spin_unlock(&lt_lck_spin_t);

	return KERN_SUCCESS;
}
static void
lt_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void (*)(void))arg;
	uint32_t i;

	for (i = 0; i < LOCK_TEST_ITERATIONS; i++) {
		func();
	}

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}
static void
lt_start_lock_thread(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}
static void
lt_bound_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void (*)(void))arg;

	int cpuid = OSIncrementAtomic((volatile SInt32 *)&lt_cpu_bind_id);

	processor_t processor = processor_list;
	while ((processor != NULL) && (processor->cpu_id != cpuid)) {
		processor = processor->processor_list;
	}

	if (processor != NULL) {
		thread_bind(processor);
	}

	thread_block(THREAD_CONTINUE_NULL);

	func();

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}
static void
lt_e_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void (*)(void))arg;

	thread_t thread = current_thread();

	spl_t s = splsched();
	thread_lock(thread);
	thread->sched_flags |= TH_SFLAG_ECORE_ONLY;
	thread_unlock(thread);
	splx(s);

	thread_block(THREAD_CONTINUE_NULL);

	func();

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_p_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void (*)(void))arg;

	thread_t thread = current_thread();

	spl_t s = splsched();
	thread_lock(thread);
	thread->sched_flags |= TH_SFLAG_PCORE_ONLY;
	thread_unlock(thread);
	splx(s);

	thread_block(THREAD_CONTINUE_NULL);

	func();

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}
static void
lt_start_lock_thread_e(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_e_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}

static void
lt_start_lock_thread_p(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_p_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}

static void
lt_start_lock_thread_bound(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_bound_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}
static kern_return_t
lt_test_locks()
{
	kern_return_t kr = KERN_SUCCESS;
	lck_grp_attr_t *lga = lck_grp_attr_alloc_init();
	lck_grp_t *lg = lck_grp_alloc_init("lock test", lga);

	lck_mtx_init(&lt_mtx, lg, LCK_ATTR_NULL);
	lck_rw_init(&lt_rwlock, lg, LCK_ATTR_NULL);
	lck_spin_init(&lt_lck_spin_t, lg, LCK_ATTR_NULL);
	hw_lock_init(&lt_hw_lock);

	T_LOG("Testing locks.");

	/* Try locks (custom) */
	lt_reset();

	T_LOG("Running try lock test.");
	kr = lt_test_trylocks();
	T_EXPECT_NULL(kr, "try lock test failed.");
	/* Uncontended mutex */
	T_LOG("Running uncontended mutex test.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_mutex);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Contended mutex */
	T_LOG("Running contended mutex test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_mutex);
	lt_start_lock_thread(lt_grab_mutex);
	lt_start_lock_thread(lt_grab_mutex);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	/* Contended mutex: try locks */
	T_LOG("Running contended mutex trylock test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended exclusive rwlock */
	T_LOG("Running uncontended exclusive rwlock test.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	/* Uncontended shared rwlock */
	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	 *  T_LOG("Running uncontended shared rwlock test.");
	 *  lt_target_done_threads = 1;
	 *  lt_start_lock_thread(lt_grab_rw_shared);
	 *  lt_wait_for_lock_test_threads();
	 *  T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 *  T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	 */
	/* Contended exclusive rwlock */
	T_LOG("Running contended exclusive rwlock test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	/* One shared, two exclusive */
	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	 *  T_LOG("Running test with one shared and two exclusive rw lock threads.");
	 *  lt_target_done_threads = 3;
	 *  lt_start_lock_thread(lt_grab_rw_shared);
	 *  lt_start_lock_thread(lt_grab_rw_exclusive);
	 *  lt_start_lock_thread(lt_grab_rw_exclusive);
	 *  lt_wait_for_lock_test_threads();
	 *  T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 *  T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	 */

	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	 *  T_LOG("Running test with four shared holders.");
	 *  lt_target_done_threads = 4;
	 *  lt_start_lock_thread(lt_grab_rw_shared);
	 *  lt_start_lock_thread(lt_grab_rw_shared);
	 *  lt_start_lock_thread(lt_grab_rw_shared);
	 *  lt_start_lock_thread(lt_grab_rw_shared);
	 *  lt_wait_for_lock_test_threads();
	 *  T_EXPECT_LE_UINT(lt_max_holders, 4, NULL);
	 */
	/* Three doing upgrades and downgrades */
	T_LOG("Running test with threads upgrading and downgrading.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
	T_EXPECT_EQ_UINT(lt_max_upgrade_holders, 1, NULL);

	/* Uncontended - exclusive trylocks */
	T_LOG("Running test with single thread doing exclusive rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	/* Uncontended - shared trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	 *  T_LOG("Running test with single thread doing shared rwlock trylocks.");
	 *  lt_target_done_threads = 1;
	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 *  lt_wait_for_lock_test_threads();
	 *  T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 *  T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	 */
	/* Three doing exclusive trylocks */
	T_LOG("Running test with threads doing exclusive rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	/* Three doing shared trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	 *  T_LOG("Running test with threads doing shared rwlock trylocks.");
	 *  lt_target_done_threads = 3;
	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 *  lt_wait_for_lock_test_threads();
	 *  T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 *  T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
	 */

	/* Three doing various trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	 *  T_LOG("Running test with threads doing mixed rwlock trylocks.");
	 *  lt_target_done_threads = 4;
	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 *  lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	 *  lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	 *  lt_wait_for_lock_test_threads();
	 *  T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 *  T_EXPECT_LE_UINT(lt_max_holders, 2, NULL);
	 */
	T_LOG("Running test with hw_lock_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	/* Ticket locks stress test */
	T_LOG("Running Ticket locks stress test with lck_ticket_lock()");
	extern unsigned int real_ncpus;
	lck_grp_init(&lt_ticket_grp, "ticket lock stress", LCK_GRP_ATTR_NULL);
	lck_ticket_init(&lt_ticket_lock, &lt_ticket_grp);
	lt_reset();
	lt_target_done_threads = real_ncpus;
	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
		lt_start_lock_thread_bound(lt_stress_ticket_lock);
	}
	lt_wait_for_lock_test_threads();
	bool starvation = false;
	uint total_local_count = 0;
	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
		starvation = starvation || (lt_stress_local_counters[processor->cpu_id] < 10);
		total_local_count += lt_stress_local_counters[processor->cpu_id];
	}
	if (total_local_count != lt_counter) {
		T_FAIL("Lock failure\n");
	} else if (starvation) {
		T_FAIL("Lock starvation found\n");
	} else {
		T_PASS("Ticket locks stress test with lck_ticket_lock()");
	}
	/* AMP ticket locks stress test */
	T_LOG("Running AMP Ticket locks stress test bound to clusters with lck_ticket_lock()");
	lt_reset();
	lt_target_done_threads = real_ncpus;
	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
		processor_set_t pset = processor->processor_set;
		if (pset->pset_cluster_type == PSET_AMP_P) {
			lt_start_lock_thread_p(lt_stress_ticket_lock);
		} else if (pset->pset_cluster_type == PSET_AMP_E) {
			lt_start_lock_thread_e(lt_stress_ticket_lock);
		} else {
			lt_start_lock_thread(lt_stress_ticket_lock);
		}
	}
	lt_wait_for_lock_test_threads();
	/* HW locks: trylocks */
	T_LOG("Running test with hw_lock_try()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	/* HW locks: with timeout */
	T_LOG("Running test with hw_lock_to()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_LOG("Running test with lck_spin_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	/* Spin locks: trylocks */
	T_LOG("Running test with lck_spin_try_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	return KERN_SUCCESS;
}
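/*
 * Syscall argument munger tests: each munge_* routine rewrites, in place, the
 * packed 32-bit argument words passed by a 32-bit user process into the
 * 64-bit argument slots the kernel expects. In the table below, 'w' is a
 * 32-bit word zero-extended to 64 bits, 's' is a signed 32-bit value
 * sign-extended to 64 bits, and 'l' is a 64-bit value passed as two 32-bit
 * words; mt_in_words counts the 32-bit input words and mt_expected holds the
 * 64-bit outputs.
 *
 * Illustrative walk-through (not an extra test case): for munge_wl the input
 * buffer holds three words |w|l_lo|l_hi|, and after munging the same buffer
 * holds two 64-bit values |(uint64_t)w|l|, which is why its table entry is
 * 3 in-words, 2 out-args, {MT_W_VAL, MT_L_VAL}.
 */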
#define MT_MAX_ARGS 8
#define MT_INITIAL_VALUE 0xfeedbeef
#define MT_W_VAL (0x00000000feedbeefULL) /* Drop in zeros */
#define MT_S_VAL (0xfffffffffeedbeefULL) /* High bit is 1, so sign-extends as negative */
#define MT_L_VAL (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32)) /* Two back-to-back */

typedef void (*sy_munge_t)(void*);

#define MT_FUNC(x) #x, x
struct munger_test {
	const char *mt_name;
	sy_munge_t mt_func;
	uint32_t mt_in_words;
	uint32_t mt_nout;
	uint64_t mt_expected[MT_MAX_ARGS];
} munger_tests[] = {
	{MT_FUNC(munge_w), 1, 1, {MT_W_VAL}},
	{MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwlwww), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwllww), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}},
	{MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}},
	{MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_llll), 8, 4, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_l), 2, 1, {MT_L_VAL}},
	{MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}
};

#define MT_TEST_COUNT (sizeof(munger_tests) / sizeof(struct munger_test))
static void
mt_reset(uint32_t in_words, size_t total_size, uint32_t *data)
{
	uint32_t i;

	for (i = 0; i < in_words; i++) {
		data[i] = MT_INITIAL_VALUE;
	}

	if (in_words * sizeof(uint32_t) < total_size) {
		bzero(&data[in_words], total_size - in_words * sizeof(uint32_t));
	}
}
static void
mt_test_mungers()
{
	uint64_t data[MT_MAX_ARGS];
	uint32_t i, j;

	for (i = 0; i < MT_TEST_COUNT; i++) {
		struct munger_test *test = &munger_tests[i];
		int pass = 1;

		T_LOG("Testing %s", test->mt_name);

		mt_reset(test->mt_in_words, sizeof(data), (uint32_t*)data);
		test->mt_func(data);

		for (j = 0; j < test->mt_nout; j++) {
			if (data[j] != test->mt_expected[j]) {
				T_FAIL("Index %d: expected %llx, got %llx.", j, test->mt_expected[j], data[j]);
				pass = 0;
			}
		}
		if (pass) {
			T_PASS(test->mt_name);
		}
	}
}
/* Exception Callback Test */
static ex_cb_action_t
excb_test_action(
	ex_cb_class_t           cb_class,
	void                    *refcon,
	const ex_cb_state_t     *state
	)
{
	ex_cb_state_t *context = (ex_cb_state_t *)refcon;

	if ((NULL == refcon) || (NULL == state)) {
		return EXCB_ACTION_TEST_FAIL;
	}

	context->far = state->far;

	switch (cb_class) {
	case EXCB_CLASS_TEST1:
		return EXCB_ACTION_RERUN;
	case EXCB_CLASS_TEST2:
		return EXCB_ACTION_NONE;
	default:
		return EXCB_ACTION_TEST_FAIL;
	}
}
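/*
 * The exception callback (ex_cb) interface allows one handler per callback
 * class. excb_test_action() above records the faulting address it is handed
 * (state->far) into the registered refcon and returns a class-specific
 * action, which ex_cb_test() checks after registering and invoking the
 * TEST1/TEST2 classes.
 */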
kern_return_t
ex_cb_test()
{
	kern_return_t kr = KERN_SUCCESS;
	const vm_offset_t far1 = 0xdead0001;
	const vm_offset_t far2 = 0xdead0002;
	ex_cb_state_t test_context_1 = {0xdeadbeef};
	ex_cb_state_t test_context_2 = {0xdeadbeef};
	ex_cb_action_t action;

	T_LOG("Testing Exception Callback.");

	T_LOG("Running registration test.");

	kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
	T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST1 exception callback");
	kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
	T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST2 exception callback");

	kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
	T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST2 exception callback");
	kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
	T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST1 exception callback");

	T_LOG("Running invocation test.");

	action = ex_cb_invoke(EXCB_CLASS_TEST1, far1);
	T_ASSERT(EXCB_ACTION_RERUN == action, NULL);
	T_ASSERT(far1 == test_context_1.far, NULL);

	action = ex_cb_invoke(EXCB_CLASS_TEST2, far2);
	T_ASSERT(EXCB_ACTION_NONE == action, NULL);
	T_ASSERT(far2 == test_context_2.far, NULL);

	action = ex_cb_invoke(EXCB_CLASS_TEST3, 0);
	T_ASSERT(EXCB_ACTION_NONE == action, NULL);

	return KERN_SUCCESS;
}
#if defined(HAS_APPLE_PAC)

kern_return_t
arm64_ropjop_test()
{
	T_LOG("Testing ROP/JOP");

	/* how is ROP/JOP configured */
	boolean_t config_rop_enabled = TRUE;
	boolean_t config_jop_enabled = TRUE;

	if (config_jop_enabled) {
		/* jop keys should be set and nonzero */
		uint64_t apiakey_hi = __builtin_arm_rsr64("APIAKEYHI_EL1");
		uint64_t apiakey_lo = __builtin_arm_rsr64("APIAKEYLO_EL1");

		T_EXPECT(apiakey_hi != 0 && apiakey_lo != 0, NULL);
	}

	if (config_rop_enabled) {
		/* rop keys should be set and nonzero */
		uint64_t apibkey_hi = __builtin_arm_rsr64("APIBKEYHI_EL1");
		uint64_t apibkey_lo = __builtin_arm_rsr64("APIBKEYLO_EL1");

		T_EXPECT(apibkey_hi != 0 && apibkey_lo != 0, NULL);

		/* sign a KVA (the address of this function) */
		uint64_t kva_signed = (uint64_t) ptrauth_sign_unauthenticated((void *)&config_rop_enabled, ptrauth_key_asib, 0);

		/* assert it was signed (changed) */
		T_EXPECT(kva_signed != (uint64_t)&config_rop_enabled, NULL);

		/* authenticate the newly signed KVA */
		uint64_t kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_signed, ptrauth_key_asib, 0);

		/* assert the authed KVA is the original KVA */
		T_EXPECT(kva_authed == (uint64_t)&config_rop_enabled, NULL);

		/* corrupt a signed ptr, auth it, ensure auth failed */
		uint64_t kva_corrupted = kva_signed ^ 1;

		/* authenticate the corrupted pointer */
		kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_corrupted, ptrauth_key_asib, 0);

		/* when AuthIB fails, bits 63:62 will be set to 2'b10 */
		uint64_t auth_fail_mask = 3ULL << 61;
		uint64_t authib_fail = 2ULL << 61;

		/* assert the failed authIB of corrupted pointer is tagged */
		T_EXPECT((kva_authed & auth_fail_mask) == authib_fail, NULL);
	}

	return KERN_SUCCESS;
}
#endif /* defined(HAS_APPLE_PAC) */
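/*
 * PAN (Privileged Access Never) tests: a kernel read of a user-accessible
 * commpage address must fault while PAN is enabled. The fault handlers below
 * count nested exception levels in pan_exception_level, re-read the address
 * from the handler to confirm PAN is re-enabled there, and then clear PAN in
 * the saved state so the faulting instruction can be rerun.
 */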
#if __ARM_PAN_AVAILABLE__

struct pan_test_thread_args {
	volatile bool join;
};

static void
arm64_pan_test_thread(void *arg, wait_result_t __unused wres)
{
	T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);

	struct pan_test_thread_args *args = arg;

	for (processor_t p = processor_list; p != NULL; p = p->processor_list) {
		thread_bind(p);
		thread_block(THREAD_CONTINUE_NULL);
		kprintf("Running PAN test on cpu %d\n", p->cpu_id);
		arm64_pan_test();
	}

	/* unbind thread from specific cpu */
	thread_bind(PROCESSOR_NULL);
	thread_block(THREAD_CONTINUE_NULL);

	while (!args->join) {
		;
	}

	thread_wakeup(args);
}

kern_return_t
arm64_late_pan_test()
{
	thread_t thread;
	kern_return_t kr;

	struct pan_test_thread_args args;
	args.join = false;

	kr = kernel_thread_start(arm64_pan_test_thread, &args, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);

	assert_wait(&args, THREAD_UNINT);
	args.join = true;
	thread_block(THREAD_CONTINUE_NULL);
	return KERN_SUCCESS;
}
static bool
arm64_pan_test_pan_enabled_fault_handler(arm_saved_state_t * state)
{
	bool retval = false;
	uint32_t esr = get_saved_state_esr(state);
	esr_exception_class_t class = ESR_EC(esr);
	fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
	uint32_t cpsr = get_saved_state_cpsr(state);
	uint64_t far = get_saved_state_far(state);

	if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) &&
	    (cpsr & PSR64_PAN) &&
	    ((esr & ISS_DA_WNR) ? mmu_kvtop_wpreflight(far) : mmu_kvtop(far))) {
		++pan_exception_level;
		// read the user-accessible value to make sure
		// pan is enabled and produces a 2nd fault from
		// the exception handler
		if (pan_exception_level == 1) {
			ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, far);
			pan_fault_value = *(volatile char *)far;
			ml_expect_fault_end();
			__builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
		}
		// this fault address is used for PAN test
		// disable PAN and rerun
		mask_saved_state_cpsr(state, 0, PSR64_PAN);

		retval = true;
	}

	return retval;
}
static bool
arm64_pan_test_pan_disabled_fault_handler(arm_saved_state_t * state)
{
	bool retval = false;
	uint32_t esr = get_saved_state_esr(state);
	esr_exception_class_t class = ESR_EC(esr);
	fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
	uint32_t cpsr = get_saved_state_cpsr(state);

	if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) &&
	    !(cpsr & PSR64_PAN)) {
		++pan_exception_level;
		// On an exception taken from a PAN-disabled context, verify
		// that PAN is re-enabled for the exception handler and that
		// accessing the test address produces a PAN fault.
		ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr);
		pan_fault_value = *(volatile char *)pan_test_addr;
		ml_expect_fault_end();
		__builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
		add_saved_state_pc(state, 4);

		retval = true;
	}

	return retval;
}
kern_return_t
arm64_pan_test()
{
	bool values_match = false;
	vm_offset_t priv_addr = _COMM_PAGE_SIGNATURE;

	T_LOG("Testing PAN.");

	T_ASSERT((__builtin_arm_rsr("SCTLR_EL1") & SCTLR_PAN_UNCHANGED) == 0, "SCTLR_EL1.SPAN must be cleared");

	T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);

	pan_exception_level = 0;
	pan_fault_value = 0xDE;
	// convert priv_addr to one that is accessible from user mode
	pan_test_addr = priv_addr + _COMM_HIGH_PAGE64_BASE_ADDRESS -
	    _COMM_PAGE_START_ADDRESS;

	// Context-switch with PAN disabled is prohibited; prevent test logging from
	// triggering a voluntary context switch.
	mp_disable_preemption();

	// Below should trigger a PAN exception as pan_test_addr is accessible
	// in user mode.
	// The exception handler, upon recognizing the fault address is pan_test_addr,
	// will disable PAN and rerun this instruction successfully
	ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr);
	values_match = (*(volatile char *)pan_test_addr == *(volatile char *)priv_addr);
	ml_expect_fault_end();
	T_ASSERT(values_match, NULL);

	T_ASSERT(pan_exception_level == 2, NULL);

	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

	pan_exception_level = 0;
	pan_fault_value = 0xAD;
	pan_ro_addr = (vm_offset_t) &pan_ro_value;

	// Force a permission fault while PAN is disabled to make sure PAN is
	// re-enabled during the exception handler.
	ml_expect_fault_begin(arm64_pan_test_pan_disabled_fault_handler, pan_ro_addr);
	*((volatile uint64_t*)pan_ro_addr) = 0xFEEDFACECAFECAFE;
	ml_expect_fault_end();

	T_ASSERT(pan_exception_level == 2, NULL);

	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

	pan_test_addr = 0;
	pan_ro_addr = 0;

	__builtin_arm_wsr("pan", 1);

	mp_enable_preemption();

	return KERN_SUCCESS;
}
#endif /* __ARM_PAN_AVAILABLE__ */
kern_return_t
arm64_lock_test()
{
	return lt_test_locks();
}

kern_return_t
arm64_munger_test()
{
	mt_test_mungers();
	return 0;
}
#if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST)
SECURITY_READ_ONLY_LATE(uint64_t) ctrr_ro_test;
uint64_t ctrr_nx_test = 0xd65f03c0; /* RET */
volatile uint64_t ctrr_exception_esr;
vm_offset_t ctrr_test_va;
vm_offset_t ctrr_test_page;
kern_return_t
ctrr_test(void)
{
	processor_t p;
	boolean_t ctrr_disable = FALSE;

	PE_parse_boot_argn("-unsafe_kernel_text", &ctrr_disable, sizeof(ctrr_disable));

#if CONFIG_CSR_FROM_DT
	if (csr_unsafe_kernel_text) {
		ctrr_disable = TRUE;
	}
#endif /* CONFIG_CSR_FROM_DT */

	if (ctrr_disable) {
		T_LOG("Skipping CTRR test when -unsafe_kernel_text boot-arg present");
		return KERN_SUCCESS;
	}

	T_LOG("Running CTRR test.");

	for (p = processor_list; p != NULL; p = p->processor_list) {
		thread_bind(p);
		thread_block(THREAD_CONTINUE_NULL);
		T_LOG("Running CTRR test on cpu %d\n", p->cpu_id);
		ctrr_test_cpu();
	}

	/* unbind thread from specific cpu */
	thread_bind(PROCESSOR_NULL);
	thread_block(THREAD_CONTINUE_NULL);

	return KERN_SUCCESS;
}
static bool
ctrr_test_ro_fault_handler(arm_saved_state_t * state)
{
	bool retval = false;
	uint32_t esr = get_saved_state_esr(state);
	esr_exception_class_t class = ESR_EC(esr);
	fault_status_t fsc = ISS_DA_FSC(ESR_ISS(esr));

	if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
		ctrr_exception_esr = esr;
		add_saved_state_pc(state, 4);
		retval = true;
	}

	return retval;
}

static bool
ctrr_test_nx_fault_handler(arm_saved_state_t * state)
{
	bool retval = false;
	uint32_t esr = get_saved_state_esr(state);
	esr_exception_class_t class = ESR_EC(esr);
	fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));

	if ((class == ESR_EC_IABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
		ctrr_exception_esr = esr;
		/* return to the instruction immediately after the call to NX page */
		set_saved_state_pc(state, get_saved_state_lr(state));
		retval = true;
	}

	return retval;
}
/* test CTRR on a cpu, caller to bind thread to desired cpu */
/* ctrr_test_page was reserved during bootstrap process */
kern_return_t
ctrr_test_cpu(void)
{
	ppnum_t ro_pn, nx_pn;
	uint64_t *ctrr_ro_test_ptr;
	void (*ctrr_nx_test_ptr)(void);
	kern_return_t kr;
	uint64_t prot = 0;
	extern vm_offset_t virtual_space_start;

	/* ctrr read only region = [rorgn_begin_va, rorgn_end_va) */

	vm_offset_t rorgn_begin_va = phystokv(ctrr_begin);
	vm_offset_t rorgn_end_va = phystokv(ctrr_end) + 1;
	vm_offset_t ro_test_va = (vm_offset_t)&ctrr_ro_test;
	vm_offset_t nx_test_va = (vm_offset_t)&ctrr_nx_test;

	T_EXPECT(rorgn_begin_va <= ro_test_va && ro_test_va < rorgn_end_va, "Expect ro_test_va to be inside the CTRR region");
	T_EXPECT((nx_test_va < rorgn_begin_va) ^ (nx_test_va >= rorgn_end_va), "Expect nx_test_va to be outside the CTRR region");

	ro_pn = pmap_find_phys(kernel_pmap, ro_test_va);
	nx_pn = pmap_find_phys(kernel_pmap, nx_test_va);
	T_EXPECT(ro_pn && nx_pn, "Expect ro page number and nx page number to be non zero");

	T_LOG("test virtual page: %p, ctrr_ro_test: %p, ctrr_nx_test: %p, ro_pn: %x, nx_pn: %x ",
	    (void *)ctrr_test_page, &ctrr_ro_test, &ctrr_nx_test, ro_pn, nx_pn);

	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
	T_EXPECT(~prot & ARM_TTE_VALID, "Expect ctrr_test_page to be unmapped");

	T_LOG("Read only region test mapping virtual page %p to CTRR RO page number %d", ctrr_test_page, ro_pn);
	kr = pmap_enter(kernel_pmap, ctrr_test_page, ro_pn,
	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
	T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RW mapping to succeed");

	// assert entire mmu prot path (Hierarchical protection model) is NOT RO
	// fetch effective block level protections from table/block entries
	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
	T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RWNA && (prot & ARM_PTE_PNX), "Mapping is EL1 RWNX");

	ctrr_test_va = ctrr_test_page + (ro_test_va & PAGE_MASK);
	ctrr_ro_test_ptr = (void *)ctrr_test_va;

	T_LOG("Read only region test writing to %p to provoke data abort", ctrr_ro_test_ptr);

	// should cause data abort
	ml_expect_fault_begin(ctrr_test_ro_fault_handler, ctrr_test_va);
	*ctrr_ro_test_ptr = 1;
	ml_expect_fault_end();

	// ensure write permission fault at expected level
	// data abort handler will set ctrr_exception_esr when ctrr_test_va takes a permission fault

	T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_DABORT_EL1, "Data Abort from EL1 expected");
	T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");
	T_EXPECT(ESR_ISS(ctrr_exception_esr) & ISS_DA_WNR, "Write Fault Expected");

	ctrr_exception_esr = 0;
	pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);

	T_LOG("No execute test mapping virtual page %p to CTRR PXN page number %d", ctrr_test_page, nx_pn);

	kr = pmap_enter(kernel_pmap, ctrr_test_page, nx_pn,
	    VM_PROT_READ | VM_PROT_EXECUTE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
	T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RX mapping to succeed");

	// assert entire mmu prot path (Hierarchical protection model) is NOT XN
	prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
	T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RONA && (~prot & ARM_PTE_PNX), "Mapping is EL1 ROX");

	ctrr_test_va = ctrr_test_page + (nx_test_va & PAGE_MASK);
#if __has_feature(ptrauth_calls)
	ctrr_nx_test_ptr = ptrauth_sign_unauthenticated((void *)ctrr_test_va, ptrauth_key_function_pointer, 0);
#else
	ctrr_nx_test_ptr = (void *)ctrr_test_va;
#endif

	T_LOG("No execute test calling ctrr_nx_test_ptr(): %p to provoke instruction abort", ctrr_nx_test_ptr);

	// should cause prefetch abort
	ml_expect_fault_begin(ctrr_test_nx_fault_handler, ctrr_test_va);
	ctrr_nx_test_ptr();
	ml_expect_fault_end();

	// TODO: ensure execute permission fault at expected level
	T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_IABORT_EL1, "Instruction abort from EL1 Expected");
	T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");

	ctrr_exception_esr = 0;
	pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);

	T_LOG("Expect no faults when reading CTRR region to verify correct programming of CTRR limits");
	for (vm_offset_t addr = rorgn_begin_va; addr < rorgn_end_va; addr += 8) {
		volatile uint64_t x = *(uint64_t *)addr;
		(void) x; /* read for side effect only */
	}

	return KERN_SUCCESS;
}
#endif /* defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST) */
#if HAS_TWO_STAGE_SPR_LOCK

#define STR1(x) #x
#define STR(x) STR1(x)

volatile vm_offset_t spr_lock_test_addr;
volatile uint32_t spr_lock_exception_esr;
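/*
 * Two-stage SPR lock test: arm64_msr_lock_test() (declared at the top of this
 * file) writes the implementation-defined register S3_0_C15_C8_0. With the
 * SPR lock engaged the write must trap; the fault handler elsewhere in the
 * kernel is expected to record the ESR in spr_lock_exception_esr, and the
 * register value must be unchanged afterwards.
 */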
kern_return_t
arm64_spr_lock_test()
{
	processor_t p;

	for (p = processor_list; p != NULL; p = p->processor_list) {
		thread_bind(p);
		thread_block(THREAD_CONTINUE_NULL);
		T_LOG("Running SPR lock test on cpu %d\n", p->cpu_id);

		uint64_t orig_value = __builtin_arm_rsr64(STR(S3_0_C15_C8_0));
		spr_lock_test_addr = (vm_offset_t)VM_KERNEL_STRIP_PTR(arm64_msr_lock_test);
		spr_lock_exception_esr = 0;
		arm64_msr_lock_test(~orig_value);
		T_EXPECT(spr_lock_exception_esr != 0, "MSR write generated synchronous abort");

		uint64_t new_value = __builtin_arm_rsr64(STR(S3_0_C15_C8_0));
		T_EXPECT(orig_value == new_value, "MSR write did not succeed");

		spr_lock_test_addr = 0;
	}

	/* unbind thread from specific cpu */
	thread_bind(PROCESSOR_NULL);
	thread_block(THREAD_CONTINUE_NULL);

	T_PASS("Done running SPR lock tests");

	return KERN_SUCCESS;
}

#endif /* HAS_TWO_STAGE_SPR_LOCK */