/*
 * Copyright (c) 2011-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright notice
 * and this permission notice appear in all copies of the software,
 * derivative works or modified versions, and any portions thereof, and that
 * both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.
 * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
 * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA
 *
 * any improvements or extensions that they make and grant Carnegie Mellon the
 * rights to redistribute these changes.
 */
#include <mach_ldebug.h>

#define LOCK_PRIVATE 1

#include <kern/kalloc.h>
#include <kern/cpu_number.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>

#include <tests/xnupost.h>

#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_print.h>

#include <sys/kdebug.h>
#include <sys/munge.h>
#include <machine/cpu_capabilities.h>
#include <arm/cpu_data_internal.h>

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
#include <arm64/amcc_rorgn.h>
#endif // defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
kern_return_t arm64_lock_test(void);
kern_return_t arm64_munger_test(void);
kern_return_t ex_cb_test(void);
kern_return_t arm64_pan_test(void);
kern_return_t arm64_late_pan_test(void);
#if defined(HAS_APPLE_PAC)
kern_return_t arm64_ropjop_test(void);
#endif
#if defined(KERNEL_INTEGRITY_CTRR)
kern_return_t ctrr_test(void);
kern_return_t ctrr_test_cpu(void);
#endif
#if HAS_TWO_STAGE_SPR_LOCK
kern_return_t arm64_spr_lock_test(void);
extern void arm64_msr_lock_test(uint64_t);
#endif
// exception handler ignores this fault address during PAN test
#if __ARM_PAN_AVAILABLE__
const uint64_t pan_ro_value = 0xFEEDB0B0DEADBEEF;
vm_offset_t pan_test_addr = 0;
vm_offset_t pan_ro_addr = 0;
volatile int pan_exception_level = 0;
volatile char pan_fault_value = 0;
#endif
#include <libkern/OSAtomic.h>

#define LOCK_TEST_ITERATIONS 50

static hw_lock_data_t lt_hw_lock;
static lck_spin_t lt_lck_spin_t;
static lck_mtx_t lt_mtx;
static lck_rw_t lt_rwlock;
static volatile uint32_t lt_counter = 0;
static volatile int lt_spinvolatile;
static volatile uint32_t lt_max_holders = 0;
static volatile uint32_t lt_upgrade_holders = 0;
static volatile uint32_t lt_max_upgrade_holders = 0;
static volatile uint32_t lt_num_holders = 0;
static volatile uint32_t lt_done_threads;
static volatile uint32_t lt_target_done_threads;
static volatile uint32_t lt_cpu_bind_id = 0;
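
/*
 * Shared conventions for the lt_* tests below: each worker grabs the lock
 * under test, bumps lt_counter and lt_num_holders while it holds it, and
 * drops lt_num_holders on release.  lt_max_holders (and, for the
 * upgrade/downgrade test, lt_max_upgrade_holders) records the largest number
 * of simultaneous holders observed, so the expectations in lt_test_locks()
 * can check both that no increments were lost and that mutual exclusion (or
 * bounded sharing) actually held.  Workers signal completion by atomically
 * bumping lt_done_threads, which lt_wait_for_lock_test_threads() polls
 * against lt_target_done_threads.
 */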
static void
lt_note_another_blocking_lock_holder()
{
    hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
    lt_num_holders++;
    lt_max_holders = (lt_max_holders < lt_num_holders) ? lt_num_holders : lt_max_holders;
    hw_lock_unlock(&lt_hw_lock);
}

static void
lt_note_blocking_lock_release()
{
    hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
    lt_num_holders--;
    hw_lock_unlock(&lt_hw_lock);
}

static void
lt_spin_a_little_bit()
{
    int i;

    for (i = 0; i < 10000; i++) {
        lt_spinvolatile++;
    }
}

static void
lt_sleep_a_little_bit()
{
    delay(100);
}

static void
lt_grab_mutex()
{
    lck_mtx_lock(&lt_mtx);
    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_counter++;
    lt_note_blocking_lock_release();
    lck_mtx_unlock(&lt_mtx);
}

static void
lt_grab_mutex_with_try()
{
    while (0 == lck_mtx_try_lock(&lt_mtx)) {
        ;
    }
    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_counter++;
    lt_note_blocking_lock_release();
    lck_mtx_unlock(&lt_mtx);
}

static void
lt_grab_rw_exclusive()
{
    lck_rw_lock_exclusive(&lt_rwlock);
    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_counter++;
    lt_note_blocking_lock_release();
    lck_rw_done(&lt_rwlock);
}

static void
lt_grab_rw_exclusive_with_try()
{
    while (0 == lck_rw_try_lock_exclusive(&lt_rwlock)) {
        lt_sleep_a_little_bit();
    }
    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_counter++;
    lt_note_blocking_lock_release();
    lck_rw_done(&lt_rwlock);
}
/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
 *  static void
 *  lt_grab_rw_shared()
 *  {
 *       lck_rw_lock_shared(&lt_rwlock);
 *       lt_counter++;
 *
 *       lt_note_another_blocking_lock_holder();
 *       lt_sleep_a_little_bit();
 *       lt_note_blocking_lock_release();
 *
 *       lck_rw_done(&lt_rwlock);
 *  }
 */

/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
 *  static void
 *  lt_grab_rw_shared_with_try()
 *  {
 *       while(0 == lck_rw_try_lock_shared(&lt_rwlock));
 *       lt_counter++;
 *
 *       lt_note_another_blocking_lock_holder();
 *       lt_sleep_a_little_bit();
 *       lt_note_blocking_lock_release();
 *
 *       lck_rw_done(&lt_rwlock);
 *  }
 */

static void
lt_upgrade_downgrade_rw()
{
    boolean_t upgraded, success;

    success = lck_rw_try_lock_shared(&lt_rwlock);
    if (!success) {
        lck_rw_lock_shared(&lt_rwlock);
    }

    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_note_blocking_lock_release();

    upgraded = lck_rw_lock_shared_to_exclusive(&lt_rwlock);
    if (!upgraded) {
        success = lck_rw_try_lock_exclusive(&lt_rwlock);
        if (!success) {
            lck_rw_lock_exclusive(&lt_rwlock);
        }
    }

    lt_upgrade_holders++;
    if (lt_upgrade_holders > lt_max_upgrade_holders) {
        lt_max_upgrade_holders = lt_upgrade_holders;
    }

    lt_counter++;
    lt_sleep_a_little_bit();

    lt_upgrade_holders--;

    lck_rw_lock_exclusive_to_shared(&lt_rwlock);

    lt_spin_a_little_bit();
    lck_rw_done(&lt_rwlock);
}
const int limit = 1000000;
static int lt_stress_local_counters[MAX_CPUS];

lck_ticket_t lt_ticket_lock;
lck_grp_t lt_ticket_grp;
static void
lt_stress_ticket_lock()
{
    int local_counter = 0;

    uint cpuid = cpu_number();

    kprintf("%s>cpu %d starting\n", __FUNCTION__, cpuid);

    lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
    lt_counter++;
    local_counter++;
    lck_ticket_unlock(&lt_ticket_lock);

    while (lt_counter < lt_target_done_threads) {
        ;
    }

    kprintf("%s>cpu %d started\n", __FUNCTION__, cpuid);

    while (lt_counter < limit) {
        lck_ticket_lock(&lt_ticket_lock, &lt_ticket_grp);
        if (lt_counter < limit) {
            lt_counter++;
            local_counter++;
        }
        lck_ticket_unlock(&lt_ticket_lock);
    }

    lt_stress_local_counters[cpuid] = local_counter;

    kprintf("%s>final counter %d cpu %d incremented the counter %d times\n", __FUNCTION__, lt_counter, cpuid, local_counter);
}

static void
lt_grab_hw_lock()
{
    hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
    lt_counter++;
    lt_spin_a_little_bit();
    hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_hw_lock_with_try()
{
    while (0 == hw_lock_try(&lt_hw_lock, LCK_GRP_NULL)) {
        ;
    }
    lt_counter++;
    lt_spin_a_little_bit();
    hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_hw_lock_with_to()
{
    while (0 == hw_lock_to(&lt_hw_lock, LockTimeOut, LCK_GRP_NULL)) {
        mp_enable_preemption();
    }
    lt_counter++;
    lt_spin_a_little_bit();
    hw_lock_unlock(&lt_hw_lock);
}

static void
lt_grab_spin_lock()
{
    lck_spin_lock(&lt_lck_spin_t);
    lt_counter++;
    lt_spin_a_little_bit();
    lck_spin_unlock(&lt_lck_spin_t);
}

static void
lt_grab_spin_lock_with_try()
{
    while (0 == lck_spin_try_lock(&lt_lck_spin_t)) {
        ;
    }
    lt_counter++;
    lt_spin_a_little_bit();
    lck_spin_unlock(&lt_lck_spin_t);
}
static volatile boolean_t lt_thread_lock_grabbed;
static volatile boolean_t lt_thread_lock_success;

static void
lt_reset()
{
    lt_counter = 0;
    lt_max_holders = 0;
    lt_num_holders = 0;
    lt_max_upgrade_holders = 0;
    lt_upgrade_holders = 0;
    lt_done_threads = 0;
    lt_target_done_threads = 0;
    lt_cpu_bind_id = 0;

    OSMemoryBarrier();
}
static void
lt_trylock_hw_lock_with_to()
{
    while (!lt_thread_lock_grabbed) {
        lt_sleep_a_little_bit();
    }
    lt_thread_lock_success = hw_lock_to(&lt_hw_lock, 100, LCK_GRP_NULL);
    mp_enable_preemption();
}

static void
lt_trylock_spin_try_lock()
{
    while (!lt_thread_lock_grabbed) {
        lt_sleep_a_little_bit();
    }
    lt_thread_lock_success = lck_spin_try_lock(&lt_lck_spin_t);
}
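
/*
 * Handshake used by the trylock tests below: the main thread takes the lock
 * under test, then sets lt_thread_lock_grabbed to release the helper thread,
 * which records whether its own timed or try acquisition attempt succeeded
 * in lt_thread_lock_success for the main thread to assert on.
 */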
static void
lt_trylock_thread(void *arg, wait_result_t wres __unused)
{
    void (*func)(void) = (void (*)(void))arg;

    func();

    OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_start_trylock_thread(thread_continue_t func)
{
    thread_t thread;
    kern_return_t kr;

    kr = kernel_thread_start(lt_trylock_thread, func, &thread);
    assert(kr == KERN_SUCCESS);

    thread_deallocate(thread);
}

static void
lt_wait_for_lock_test_threads()
{
    /* Spin to reduce dependencies */
    while (lt_done_threads < lt_target_done_threads) {
        lt_sleep_a_little_bit();
    }
}
static kern_return_t
lt_test_trylocks()
{
    boolean_t success;
    extern unsigned int real_ncpus;

    /*
     * First mtx try lock succeeds, second fails.
     */
    success = lck_mtx_try_lock(&lt_mtx);
    T_ASSERT_NOTNULL(success, "First mtx try lock");
    success = lck_mtx_try_lock(&lt_mtx);
    T_ASSERT_NULL(success, "Second mtx try lock for a locked mtx");
    lck_mtx_unlock(&lt_mtx);

    /*
     * After regular grab, can't try lock.
     */
    lck_mtx_lock(&lt_mtx);
    success = lck_mtx_try_lock(&lt_mtx);
    T_ASSERT_NULL(success, "try lock should fail after regular lck_mtx_lock");
    lck_mtx_unlock(&lt_mtx);

    /*
     * Two shared try locks on a previously unheld rwlock succeed, and a
     * subsequent exclusive attempt fails.
     */
    success = lck_rw_try_lock_shared(&lt_rwlock);
    T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
    success = lck_rw_try_lock_shared(&lt_rwlock);
    T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
    success = lck_rw_try_lock_exclusive(&lt_rwlock);
    T_ASSERT_NULL(success, "exclusive lock attempt on previously held lock should fail");
    lck_rw_done(&lt_rwlock);
    lck_rw_done(&lt_rwlock);

    /*
     * After regular shared grab, can trylock
     * for shared but not for exclusive.
     */
    lck_rw_lock_shared(&lt_rwlock);
    success = lck_rw_try_lock_shared(&lt_rwlock);
    T_ASSERT_NOTNULL(success, "After regular shared grab another shared try lock should succeed.");
    success = lck_rw_try_lock_exclusive(&lt_rwlock);
    T_ASSERT_NULL(success, "After regular shared grab an exclusive lock attempt should fail.");
    lck_rw_done(&lt_rwlock);
    lck_rw_done(&lt_rwlock);

    /*
     * An exclusive try lock succeeds, subsequent shared and exclusive
     * attempts fail.
     */
    success = lck_rw_try_lock_exclusive(&lt_rwlock);
    T_ASSERT_NOTNULL(success, "An exclusive try lock should succeed");
    success = lck_rw_try_lock_shared(&lt_rwlock);
    T_ASSERT_NULL(success, "try lock in shared mode attempt after an exclusive grab should fail");
    success = lck_rw_try_lock_exclusive(&lt_rwlock);
    T_ASSERT_NULL(success, "try lock in exclusive mode attempt after an exclusive grab should fail");
    lck_rw_done(&lt_rwlock);

    /*
     * After regular exclusive grab, neither kind of trylock succeeds.
     */
    lck_rw_lock_exclusive(&lt_rwlock);
    success = lck_rw_try_lock_shared(&lt_rwlock);
    T_ASSERT_NULL(success, "After regular exclusive grab, shared trylock should not succeed");
    success = lck_rw_try_lock_exclusive(&lt_rwlock);
    T_ASSERT_NULL(success, "After regular exclusive grab, exclusive trylock should not succeed");
    lck_rw_done(&lt_rwlock);
    /*
     * First spin lock attempts succeed, second attempts fail.
     */
    success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
    T_ASSERT_NOTNULL(success, "First spin lock attempts should succeed");
    success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
    T_ASSERT_NULL(success, "Second attempt to spin lock should fail");
    hw_lock_unlock(&lt_hw_lock);

    hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
    success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
    T_ASSERT_NULL(success, "After taking spin lock, trylock attempt should fail");
    hw_lock_unlock(&lt_hw_lock);
    lt_reset();
    lt_thread_lock_grabbed = false;
    lt_thread_lock_success = true;
    lt_target_done_threads = 1;
    lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
    success = hw_lock_to(&lt_hw_lock, 100, LCK_GRP_NULL);
    T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed");
    if (real_ncpus == 1) {
        mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
    }
    OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
    lt_wait_for_lock_test_threads();
    T_ASSERT_NULL(lt_thread_lock_success, "Second spin lock with timeout should fail and timeout");
    if (real_ncpus == 1) {
        mp_disable_preemption(); /* don't double-enable when we unlock */
    }
    hw_lock_unlock(&lt_hw_lock);
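
    /*
     * On a single-CPU configuration the helper thread can only make progress
     * if we re-enable preemption while holding the lock; otherwise it would
     * never get to run, time out, and report its result.  On SMP the helper
     * simply spins on another CPU, so the real_ncpus == 1 special casing is
     * a no-op there.
     */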
    lt_reset();
    lt_thread_lock_grabbed = false;
    lt_thread_lock_success = true;
    lt_target_done_threads = 1;
    lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
    hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
    if (real_ncpus == 1) {
        mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
    }
    OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
    lt_wait_for_lock_test_threads();
    T_ASSERT_NULL(lt_thread_lock_success, "after taking a spin lock, lock attempt with timeout should fail");
    if (real_ncpus == 1) {
        mp_disable_preemption(); /* don't double-enable when we unlock */
    }
    hw_lock_unlock(&lt_hw_lock);
    success = lck_spin_try_lock(&lt_lck_spin_t);
    T_ASSERT_NOTNULL(success, "spin trylock of previously unheld lock should succeed");
    success = lck_spin_try_lock(&lt_lck_spin_t);
    T_ASSERT_NULL(success, "spin trylock attempt of previously held lock (with trylock) should fail");
    lck_spin_unlock(&lt_lck_spin_t);
    lt_reset();
    lt_thread_lock_grabbed = false;
    lt_thread_lock_success = true;
    lt_target_done_threads = 1;
    lt_start_trylock_thread(lt_trylock_spin_try_lock);
    lck_spin_lock(&lt_lck_spin_t);
    if (real_ncpus == 1) {
        mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
    }
    OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
    lt_wait_for_lock_test_threads();
    T_ASSERT_NULL(lt_thread_lock_success, "spin trylock attempt of previously held lock should fail");
    if (real_ncpus == 1) {
        mp_disable_preemption(); /* don't double-enable when we unlock */
    }
    lck_spin_unlock(&lt_lck_spin_t);

    return KERN_SUCCESS;
}
static void
lt_thread(void *arg, wait_result_t wres __unused)
{
    void (*func)(void) = (void (*)(void))arg;
    uint32_t i;

    for (i = 0; i < LOCK_TEST_ITERATIONS; i++) {
        func();
    }

    OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_start_lock_thread(thread_continue_t func)
{
    thread_t thread;
    kern_return_t kr;

    kr = kernel_thread_start(lt_thread, func, &thread);
    assert(kr == KERN_SUCCESS);

    thread_deallocate(thread);
}

static void
lt_bound_thread(void *arg, wait_result_t wres __unused)
{
    void (*func)(void) = (void (*)(void))arg;

    int cpuid = OSIncrementAtomic((volatile SInt32 *)&lt_cpu_bind_id);

    processor_t processor = processor_list;
    while ((processor != NULL) && (processor->cpu_id != cpuid)) {
        processor = processor->processor_list;
    }

    if (processor != NULL) {
        thread_bind(processor);
    }

    thread_block(THREAD_CONTINUE_NULL);

    func();

    OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_e_thread(void *arg, wait_result_t wres __unused)
{
    void (*func)(void) = (void (*)(void))arg;

    thread_t thread = current_thread();

    spl_t s = splsched();
    thread_lock(thread);
    thread->sched_flags |= TH_SFLAG_ECORE_ONLY;
    thread_unlock(thread);
    splx(s);

    thread_block(THREAD_CONTINUE_NULL);

    func();

    OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_p_thread(void *arg, wait_result_t wres __unused)
{
    void (*func)(void) = (void (*)(void))arg;

    thread_t thread = current_thread();

    spl_t s = splsched();
    thread_lock(thread);
    thread->sched_flags |= TH_SFLAG_PCORE_ONLY;
    thread_unlock(thread);
    splx(s);

    thread_block(THREAD_CONTINUE_NULL);

    func();

    OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}

static void
lt_start_lock_thread_e(thread_continue_t func)
{
    thread_t thread;
    kern_return_t kr;

    kr = kernel_thread_start(lt_e_thread, func, &thread);
    assert(kr == KERN_SUCCESS);

    thread_deallocate(thread);
}

static void
lt_start_lock_thread_p(thread_continue_t func)
{
    thread_t thread;
    kern_return_t kr;

    kr = kernel_thread_start(lt_p_thread, func, &thread);
    assert(kr == KERN_SUCCESS);

    thread_deallocate(thread);
}

static void
lt_start_lock_thread_bound(thread_continue_t func)
{
    thread_t thread;
    kern_return_t kr;

    kr = kernel_thread_start(lt_bound_thread, func, &thread);
    assert(kr == KERN_SUCCESS);

    thread_deallocate(thread);
}
static kern_return_t
lt_test_locks()
{
    kern_return_t kr = KERN_SUCCESS;
    lck_grp_attr_t *lga = lck_grp_attr_alloc_init();
    lck_grp_t *lg = lck_grp_alloc_init("lock test", lga);

    lck_mtx_init(&lt_mtx, lg, LCK_ATTR_NULL);
    lck_rw_init(&lt_rwlock, lg, LCK_ATTR_NULL);
    lck_spin_init(&lt_lck_spin_t, lg, LCK_ATTR_NULL);
    hw_lock_init(&lt_hw_lock);

    T_LOG("Testing locks.");

    /* Try locks (custom) */
    lt_reset();

    T_LOG("Running try lock test.");
    kr = lt_test_trylocks();
    T_EXPECT_NULL(kr, "try lock test failed.");

    /* Uncontended mutex */
    T_LOG("Running uncontended mutex test.");
    lt_reset();
    lt_target_done_threads = 1;
    lt_start_lock_thread(lt_grab_mutex);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

    /* Contended mutex */
    T_LOG("Running contended mutex test.");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_mutex);
    lt_start_lock_thread(lt_grab_mutex);
    lt_start_lock_thread(lt_grab_mutex);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

    /* Contended mutex: try locks */
    T_LOG("Running contended mutex trylock test.");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_mutex_with_try);
    lt_start_lock_thread(lt_grab_mutex_with_try);
    lt_start_lock_thread(lt_grab_mutex_with_try);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
    /* Uncontended exclusive rwlock */
    T_LOG("Running uncontended exclusive rwlock test.");
    lt_reset();
    lt_target_done_threads = 1;
    lt_start_lock_thread(lt_grab_rw_exclusive);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

    /* Uncontended shared rwlock */
    /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
     *  T_LOG("Running uncontended shared rwlock test.");
     *  lt_reset();
     *  lt_target_done_threads = 1;
     *  lt_start_lock_thread(lt_grab_rw_shared);
     *  lt_wait_for_lock_test_threads();
     *  T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
     *  T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
     */

    /* Contended exclusive rwlock */
    T_LOG("Running contended exclusive rwlock test.");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_rw_exclusive);
    lt_start_lock_thread(lt_grab_rw_exclusive);
    lt_start_lock_thread(lt_grab_rw_exclusive);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

    /* One shared, two exclusive */
    /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
     *  T_LOG("Running test with one shared and two exclusive rw lock threads.");
     *  lt_reset();
     *  lt_target_done_threads = 3;
     *  lt_start_lock_thread(lt_grab_rw_shared);
     *  lt_start_lock_thread(lt_grab_rw_exclusive);
     *  lt_start_lock_thread(lt_grab_rw_exclusive);
     *  lt_wait_for_lock_test_threads();
     *  T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
     *  T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
     */

    /* Four shared */
    /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
     *  T_LOG("Running test with four shared holders.");
     *  lt_reset();
     *  lt_target_done_threads = 4;
     *  lt_start_lock_thread(lt_grab_rw_shared);
     *  lt_start_lock_thread(lt_grab_rw_shared);
     *  lt_start_lock_thread(lt_grab_rw_shared);
     *  lt_start_lock_thread(lt_grab_rw_shared);
     *  lt_wait_for_lock_test_threads();
     *  T_EXPECT_LE_UINT(lt_max_holders, 4, NULL);
     */

    /* Three doing upgrades and downgrades */
    T_LOG("Running test with threads upgrading and downgrading.");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_upgrade_downgrade_rw);
    lt_start_lock_thread(lt_upgrade_downgrade_rw);
    lt_start_lock_thread(lt_upgrade_downgrade_rw);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
    T_EXPECT_EQ_UINT(lt_max_upgrade_holders, 1, NULL);
    /* Uncontended - exclusive trylocks */
    T_LOG("Running test with single thread doing exclusive rwlock trylocks.");
    lt_reset();
    lt_target_done_threads = 1;
    lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

    /* Uncontended - shared trylocks */
    /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
     *  T_LOG("Running test with single thread doing shared rwlock trylocks.");
     *  lt_reset();
     *  lt_target_done_threads = 1;
     *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
     *  lt_wait_for_lock_test_threads();
     *  T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
     *  T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
     */

    /* Three doing exclusive trylocks */
    T_LOG("Running test with threads doing exclusive rwlock trylocks.");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
    lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
    lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

    /* Three doing shared trylocks */
    /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
     *  T_LOG("Running test with threads doing shared rwlock trylocks.");
     *  lt_reset();
     *  lt_target_done_threads = 3;
     *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
     *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
     *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
     *  lt_wait_for_lock_test_threads();
     *  T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
     *  T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
     */

    /* Three doing various trylocks */
    /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
     *  T_LOG("Running test with threads doing mixed rwlock trylocks.");
     *  lt_reset();
     *  lt_target_done_threads = 4;
     *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
     *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
     *  lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
     *  lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
     *  lt_wait_for_lock_test_threads();
     *  T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
     *  T_EXPECT_LE_UINT(lt_max_holders, 2, NULL);
     */

    /* HW locks */
    T_LOG("Running test with hw_lock_lock()");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_hw_lock);
    lt_start_lock_thread(lt_grab_hw_lock);
    lt_start_lock_thread(lt_grab_hw_lock);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    /* Ticket locks stress test */
    T_LOG("Running Ticket locks stress test with lck_ticket_lock()");
    extern unsigned int real_ncpus;
    lck_grp_init(&lt_ticket_grp, "ticket lock stress", LCK_GRP_ATTR_NULL);
    lck_ticket_init(&lt_ticket_lock, &lt_ticket_grp);
    lt_reset();
    lt_target_done_threads = real_ncpus;
    for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
        lt_start_lock_thread_bound(lt_stress_ticket_lock);
    }
    lt_wait_for_lock_test_threads();
    bool starvation = false;
    uint total_local_count = 0;
    for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
        starvation = starvation || (lt_stress_local_counters[processor->cpu_id] < 10);
        total_local_count += lt_stress_local_counters[processor->cpu_id];
    }
    if (total_local_count != lt_counter) {
        T_FAIL("Lock failure\n");
    } else if (starvation) {
        T_FAIL("Lock starvation found\n");
    } else {
        T_PASS("Ticket locks stress test with lck_ticket_lock()");
    }
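
    /*
     * Two properties are checked above: the per-CPU counters must sum to the
     * shared lt_counter (no increment was lost, i.e. the lock provided mutual
     * exclusion), and every CPU must have incremented at least 10 times (no
     * CPU was starved of the ticket lock).
     */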
    /* AMP ticket locks stress test */
    T_LOG("Running AMP Ticket locks stress test bound to clusters with lck_ticket_lock()");
    lt_reset();
    lt_target_done_threads = real_ncpus;
    for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
        processor_set_t pset = processor->processor_set;
        if (pset->pset_cluster_type == PSET_AMP_P) {
            lt_start_lock_thread_p(lt_stress_ticket_lock);
        } else if (pset->pset_cluster_type == PSET_AMP_E) {
            lt_start_lock_thread_e(lt_stress_ticket_lock);
        } else {
            lt_start_lock_thread(lt_stress_ticket_lock);
        }
    }
    lt_wait_for_lock_test_threads();
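
    /*
     * The AMP variant drives the same lt_stress_ticket_lock() worker, but
     * starts one thread per processor and pins it to a P-cluster or E-cluster
     * (via lt_start_lock_thread_p()/lt_start_lock_thread_e()) based on the
     * processor's pset_cluster_type, so the ticket lock is contended across
     * asymmetric cores as well.
     */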
    /* HW locks: trylocks */
    T_LOG("Running test with hw_lock_try()");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_hw_lock_with_try);
    lt_start_lock_thread(lt_grab_hw_lock_with_try);
    lt_start_lock_thread(lt_grab_hw_lock_with_try);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

    /* HW locks: with timeout */
    T_LOG("Running test with hw_lock_to()");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_hw_lock_with_to);
    lt_start_lock_thread(lt_grab_hw_lock_with_to);
    lt_start_lock_thread(lt_grab_hw_lock_with_to);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

    /* Spin locks */
    T_LOG("Running test with lck_spin_lock()");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_spin_lock);
    lt_start_lock_thread(lt_grab_spin_lock);
    lt_start_lock_thread(lt_grab_spin_lock);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

    /* Spin locks: trylocks */
    T_LOG("Running test with lck_spin_try_lock()");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_spin_lock_with_try);
    lt_start_lock_thread(lt_grab_spin_lock_with_try);
    lt_start_lock_thread(lt_grab_spin_lock_with_try);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

    return KERN_SUCCESS;
}
#define MT_MAX_ARGS 8
#define MT_INITIAL_VALUE 0xfeedbeef
#define MT_W_VAL (0x00000000feedbeefULL) /* Drop in zeros */
#define MT_S_VAL (0xfffffffffeedbeefULL) /* High bit is 1, so sign-extends as negative */
#define MT_L_VAL (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32)) /* Two back-to-back */

typedef void (*sy_munge_t)(void*);

#define MT_FUNC(x) #x, x
struct munger_test {
    const char *mt_name;
    sy_munge_t mt_func;
    uint32_t mt_in_words;
    uint32_t mt_nout;
    uint64_t mt_expected[MT_MAX_ARGS];
} munger_tests[] = {
    {MT_FUNC(munge_w), 1, 1, {MT_W_VAL}},
    {MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwlwww), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwllww), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}},
    {MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}},
    {MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_l), 2, 1, {MT_L_VAL}},
    {MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}
};
#define MT_TEST_COUNT (sizeof(munger_tests) / sizeof(struct munger_test))
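
/*
 * How to read the table above, using {MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}}
 * as an example: mt_reset() seeds the first mt_in_words (3) 32-bit slots of
 * the buffer with MT_INITIAL_VALUE, the munger is run in place, and the first
 * mt_nout (2) 64-bit slots are then compared against mt_expected -- a 'w' arg
 * becomes a zero-extended word (MT_W_VAL), an 's' arg a sign-extended word
 * (MT_S_VAL), and an 'l' arg a 64-bit value built from two input words
 * (MT_L_VAL).
 */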
static void
mt_reset(uint32_t in_words, size_t total_size, uint32_t *data)
{
    uint32_t i;

    for (i = 0; i < in_words; i++) {
        data[i] = MT_INITIAL_VALUE;
    }

    if (in_words * sizeof(uint32_t) < total_size) {
        bzero(&data[in_words], total_size - in_words * sizeof(uint32_t));
    }
}
static void
mt_test_mungers()
{
    uint64_t data[MT_MAX_ARGS];
    uint32_t i, j;

    for (i = 0; i < MT_TEST_COUNT; i++) {
        struct munger_test *test = &munger_tests[i];
        int pass = 1;

        T_LOG("Testing %s", test->mt_name);

        mt_reset(test->mt_in_words, sizeof(data), (uint32_t*)data);
        test->mt_func(data);

        for (j = 0; j < test->mt_nout; j++) {
            if (data[j] != test->mt_expected[j]) {
                T_FAIL("Index %d: expected %llx, got %llx.", j, test->mt_expected[j], data[j]);
                pass = 0;
            }
        }
        if (pass) {
            T_PASS(test->mt_name);
        }
    }
}
/* Exception Callback Test */
static ex_cb_action_t
excb_test_action(
    ex_cb_class_t           cb_class,
    void                    *refcon,
    const ex_cb_state_t     *state
    )
{
    ex_cb_state_t *context = (ex_cb_state_t *)refcon;

    if ((NULL == refcon) || (NULL == state)) {
        return EXCB_ACTION_TEST_FAIL;
    }

    context->far = state->far;

    switch (cb_class) {
    case EXCB_CLASS_TEST1:
        return EXCB_ACTION_RERUN;
    case EXCB_CLASS_TEST2:
        return EXCB_ACTION_NONE;
    default:
        return EXCB_ACTION_TEST_FAIL;
    }
}
kern_return_t
ex_cb_test()
{
    const vm_offset_t far1 = 0xdead0001;
    const vm_offset_t far2 = 0xdead0002;
    kern_return_t kr = KERN_SUCCESS;
    ex_cb_state_t test_context_1 = {0xdeadbeef};
    ex_cb_state_t test_context_2 = {0xdeadbeef};
    ex_cb_action_t action;

    T_LOG("Testing Exception Callback.");

    T_LOG("Running registration test.");

    kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
    T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST1 exception callback");
    kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
    T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST2 exception callback");

    kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
    T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST2 exception callback");
    kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
    T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST1 exception callback");

    T_LOG("Running invocation test.");

    action = ex_cb_invoke(EXCB_CLASS_TEST1, far1);
    T_ASSERT(EXCB_ACTION_RERUN == action, NULL);
    T_ASSERT(far1 == test_context_1.far, NULL);

    action = ex_cb_invoke(EXCB_CLASS_TEST2, far2);
    T_ASSERT(EXCB_ACTION_NONE == action, NULL);
    T_ASSERT(far2 == test_context_2.far, NULL);

    action = ex_cb_invoke(EXCB_CLASS_TEST3, 0);
    T_ASSERT(EXCB_ACTION_NONE == action, NULL);

    return KERN_SUCCESS;
}
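
/*
 * The invocation test above checks three things: a registered callback's
 * return value is propagated to the caller of ex_cb_invoke() (RERUN for
 * TEST1, NONE for TEST2), the callback observed the fault address it was
 * invoked with (via context->far), and invoking a class with no registered
 * callback (TEST3) falls back to EXCB_ACTION_NONE.
 */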
#if defined(HAS_APPLE_PAC)

/*
 *  arm64_ropjop_test - basic xnu ROP/JOP test plan
 *
 *  - assert ROP/JOP configured and running status match
 *  - assert all AppleMode ROP/JOP features enabled
 *  - ensure ROP/JOP keys are set and diversified
 *  - sign a KVA (the address of this function), assert it was signed (changed)
 *  - authenticate the newly signed KVA
 *  - assert the authed KVA is the original KVA
 *  - corrupt a signed ptr, auth it, ensure auth failed
 *  - assert the failed authIB of corrupted pointer is tagged
 */

kern_return_t
arm64_ropjop_test()
{
    T_LOG("Testing ROP/JOP");

    /* how is ROP/JOP configured */
    boolean_t config_rop_enabled = TRUE;
    boolean_t config_jop_enabled = TRUE;

    /* assert all AppleMode ROP/JOP features enabled */
    uint64_t apctl = __builtin_arm_rsr64(ARM64_REG_APCTL_EL1);
#if __APSTS_SUPPORTED__
    uint64_t apsts = __builtin_arm_rsr64(ARM64_REG_APSTS_EL1);
    T_EXPECT(apsts & APSTS_EL1_MKEYVld, NULL);
#else
    T_EXPECT(apctl & APCTL_EL1_MKEYVld, NULL);
#endif /* __APSTS_SUPPORTED__ */
    T_EXPECT(apctl & APCTL_EL1_AppleMode, NULL);

    bool kernkeyen = apctl & APCTL_EL1_KernKeyEn;
#if HAS_APCTL_EL1_USERKEYEN
    bool userkeyen = apctl & APCTL_EL1_UserKeyEn;
#else
    bool userkeyen = false;
#endif
    /* for KernKey to work as a diversifier, it must be enabled at exactly one of {EL0, EL1/2} */
    T_EXPECT(kernkeyen || userkeyen, "KernKey is enabled");
    T_EXPECT(!(kernkeyen && userkeyen), "KernKey is not simultaneously enabled at userspace and kernel space");
    /* ROP/JOP keys enabled current status */
    bool status_jop_enabled, status_rop_enabled;
#if __APSTS_SUPPORTED__ /* H13+ */
    status_jop_enabled = status_rop_enabled = apctl & APCTL_EL1_EnAPKey1;
#elif __APCFG_SUPPORTED__ /* H12 */
    uint64_t apcfg_el1 = __builtin_arm_rsr64(APCFG_EL1);
    status_jop_enabled = status_rop_enabled = apcfg_el1 & APCFG_EL1_ELXENKEY;
#else /* !__APCFG_SUPPORTED__ H11 */
    uint64_t sctlr_el1 = __builtin_arm_rsr64("SCTLR_EL1");
    status_jop_enabled = sctlr_el1 & SCTLR_PACIA_ENABLED;
    status_rop_enabled = sctlr_el1 & SCTLR_PACIB_ENABLED;
#endif /* __APSTS_SUPPORTED__ */

    /* assert configured and running status match */
    T_EXPECT(config_rop_enabled == status_rop_enabled, NULL);
    T_EXPECT(config_jop_enabled == status_jop_enabled, NULL);
    if (config_jop_enabled) {
        uint64_t apiakey_hi = __builtin_arm_rsr64(ARM64_REG_APIAKEYHI_EL1);
        uint64_t apiakey_lo = __builtin_arm_rsr64(ARM64_REG_APIAKEYLO_EL1);

        /* ensure JOP key is set and diversified */
        T_EXPECT(apiakey_hi != KERNEL_ROP_ID && apiakey_lo != KERNEL_ROP_ID, NULL);
        T_EXPECT(apiakey_hi != 0 && apiakey_lo != 0, NULL);
    }
    if (config_rop_enabled) {
        uint64_t apibkey_hi = __builtin_arm_rsr64(ARM64_REG_APIBKEYHI_EL1);
        uint64_t apibkey_lo = __builtin_arm_rsr64(ARM64_REG_APIBKEYLO_EL1);

        /* ensure ROP key is set and diversified */
        T_EXPECT(apibkey_hi != KERNEL_ROP_ID && apibkey_lo != KERNEL_ROP_ID, NULL);
        T_EXPECT(apibkey_hi != 0 && apibkey_lo != 0, NULL);

        /* sign a KVA (the address of this function) */
        uint64_t kva_signed = (uint64_t) ptrauth_sign_unauthenticated((void *)&config_rop_enabled, ptrauth_key_asib, 0);

        /* assert it was signed (changed) */
        T_EXPECT(kva_signed != (uint64_t)&config_rop_enabled, NULL);

        /* authenticate the newly signed KVA */
        uint64_t kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_signed, ptrauth_key_asib, 0);

        /* assert the authed KVA is the original KVA */
        T_EXPECT(kva_authed == (uint64_t)&config_rop_enabled, NULL);

        /* corrupt a signed ptr, auth it, ensure auth failed */
        uint64_t kva_corrupted = kva_signed ^ 1;

        /* authenticate the corrupted pointer */
        kva_authed = (uint64_t) ml_auth_ptr_unchecked((void *)kva_corrupted, ptrauth_key_asib, 0);

        /* when AuthIB fails, bits 63:62 will be set to 2'b10 */
        uint64_t auth_fail_mask = 3ULL << 61;
        uint64_t authib_fail = 2ULL << 61;
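
        /*
         * Note that ml_auth_ptr_unchecked() does not trap on a failed
         * authentication; it hands back the pointer with a failure pattern
         * folded into its top bits.  That is why the check below inspects
         * the bits selected by auth_fail_mask for the AuthIB failure code
         * instead of expecting an exception.
         */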
        /* assert the failed authIB of corrupted pointer is tagged */
        T_EXPECT((kva_authed & auth_fail_mask) == authib_fail, NULL);
    }

    return KERN_SUCCESS;
}

#endif /* defined(HAS_APPLE_PAC) */
#if __ARM_PAN_AVAILABLE__

struct pan_test_thread_args {
    volatile bool join;
};

static void
arm64_pan_test_thread(void *arg, wait_result_t __unused wres)
{
    T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);

    struct pan_test_thread_args *args = arg;

    for (processor_t p = processor_list; p != NULL; p = p->processor_list) {
        thread_bind(p);
        thread_block(THREAD_CONTINUE_NULL);
        kprintf("Running PAN test on cpu %d\n", p->cpu_id);
        arm64_pan_test();
    }

    /* unbind thread from specific cpu */
    thread_bind(PROCESSOR_NULL);
    thread_block(THREAD_CONTINUE_NULL);

    while (!args->join) {
        ;
    }

    thread_wakeup(args);
}

kern_return_t
arm64_late_pan_test()
{
    thread_t thread;
    kern_return_t kr;

    struct pan_test_thread_args args;
    args.join = false;

    kr = kernel_thread_start(arm64_pan_test_thread, &args, &thread);
    assert(kr == KERN_SUCCESS);

    thread_deallocate(thread);

    assert_wait(&args, THREAD_UNINT);
    args.join = true;
    thread_block(THREAD_CONTINUE_NULL);
    return KERN_SUCCESS;
}
static bool
arm64_pan_test_pan_enabled_fault_handler(arm_saved_state_t * state)
{
    bool retval = false;
    uint32_t esr = get_saved_state_esr(state);
    esr_exception_class_t class = ESR_EC(esr);
    fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
    uint32_t cpsr = get_saved_state_cpsr(state);
    uint64_t far = get_saved_state_far(state);

    if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) &&
        (cpsr & PSR64_PAN) &&
        ((esr & ISS_DA_WNR) ? mmu_kvtop_wpreflight(far) : mmu_kvtop(far))) {
        ++pan_exception_level;
        // read the user-accessible value to make sure
        // pan is enabled and produces a 2nd fault from
        // the exception handler
        if (pan_exception_level == 1) {
            ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, far);
            pan_fault_value = *(volatile char *)far;
            ml_expect_fault_end();
            __builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
        }
        // this fault address is used for PAN test
        // disable PAN and rerun
        mask_saved_state_cpsr(state, 0, PSR64_PAN);

        retval = true;
    }

    return retval;
}
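
/*
 * Taken together with arm64_pan_test(): the first access to pan_test_addr
 * faults with PAN set (level 1); the handler above then re-reads the same
 * address from the exception context to prove PAN is active there too, which
 * raises a second, nested fault (level 2), before clearing PAN in the saved
 * state and rerunning the original instruction.  That is why the test asserts
 * pan_exception_level == 2 and that pan_fault_value picked up the byte at the
 * faulting address.
 */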
static bool
arm64_pan_test_pan_disabled_fault_handler(arm_saved_state_t * state)
{
    bool retval = false;
    uint32_t esr = get_saved_state_esr(state);
    esr_exception_class_t class = ESR_EC(esr);
    fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
    uint32_t cpsr = get_saved_state_cpsr(state);

    if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) &&
        !(cpsr & PSR64_PAN)) {
        ++pan_exception_level;
        // On an exception taken from a PAN-disabled context, verify
        // that PAN is re-enabled for the exception handler and that
        // accessing the test address produces a PAN fault.
        ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr);
        pan_fault_value = *(volatile char *)pan_test_addr;
        ml_expect_fault_end();
        __builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context
        add_saved_state_pc(state, 4);

        retval = true;
    }

    return retval;
}
kern_return_t
arm64_pan_test()
{
    bool values_match = false;
    vm_offset_t priv_addr = _COMM_PAGE_SIGNATURE;

    T_LOG("Testing PAN.");

    T_ASSERT((__builtin_arm_rsr("SCTLR_EL1") & SCTLR_PAN_UNCHANGED) == 0, "SCTLR_EL1.SPAN must be cleared");

    T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);

    pan_exception_level = 0;
    pan_fault_value = 0xDE;
    // convert priv_addr to one that is accessible from user mode
    pan_test_addr = priv_addr + _COMM_HIGH_PAGE64_BASE_ADDRESS -
        _COMM_PAGE_START_ADDRESS;

    // Context-switch with PAN disabled is prohibited; prevent test logging from
    // triggering a voluntary context switch.
    mp_disable_preemption();

    // Below should trigger a PAN exception as pan_test_addr is accessible
    // from user mode.
    // The exception handler, upon recognizing the fault address is pan_test_addr,
    // will disable PAN and rerun this instruction successfully
    ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr);
    values_match = (*(volatile char *)pan_test_addr == *(volatile char *)priv_addr);
    ml_expect_fault_end();
    T_ASSERT(values_match, NULL);

    T_ASSERT(pan_exception_level == 2, NULL);

    T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

    T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

    pan_exception_level = 0;
    pan_fault_value = 0xAD;
    pan_ro_addr = (vm_offset_t) &pan_ro_value;

    // Force a permission fault while PAN is disabled to make sure PAN is
    // re-enabled during the exception handler.
    ml_expect_fault_begin(arm64_pan_test_pan_disabled_fault_handler, pan_ro_addr);
    *((volatile uint64_t*)pan_ro_addr) = 0xFEEDFACECAFECAFE;
    ml_expect_fault_end();

    T_ASSERT(pan_exception_level == 2, NULL);

    T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

    T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

    pan_test_addr = 0;
    pan_ro_addr = 0;

    __builtin_arm_wsr("pan", 1);

    mp_enable_preemption();

    return KERN_SUCCESS;
}
#endif /* __ARM_PAN_AVAILABLE__ */
kern_return_t
arm64_lock_test()
{
    return lt_test_locks();
}

kern_return_t
arm64_munger_test()
{
    mt_test_mungers();
    return KERN_SUCCESS;
}
#if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST)
SECURITY_READ_ONLY_LATE(uint64_t) ctrr_ro_test;
uint64_t ctrr_nx_test = 0xd65f03c0; /* RET */
volatile uint64_t ctrr_exception_esr;
vm_offset_t ctrr_test_va;
vm_offset_t ctrr_test_page;
kern_return_t
ctrr_test(void)
{
    processor_t p;
    boolean_t ctrr_disable = FALSE;

    PE_parse_boot_argn("-unsafe_kernel_text", &ctrr_disable, sizeof(ctrr_disable));

#if CONFIG_CSR_FROM_DT
    if (csr_unsafe_kernel_text) {
        ctrr_disable = TRUE;
    }
#endif /* CONFIG_CSR_FROM_DT */

    if (ctrr_disable) {
        T_LOG("Skipping CTRR test when -unsafe_kernel_text boot-arg present");
        return KERN_SUCCESS;
    }

    T_LOG("Running CTRR test.");

    for (p = processor_list; p != NULL; p = p->processor_list) {
        thread_bind(p);
        thread_block(THREAD_CONTINUE_NULL);
        T_LOG("Running CTRR test on cpu %d\n", p->cpu_id);
        ctrr_test_cpu();
    }

    /* unbind thread from specific cpu */
    thread_bind(PROCESSOR_NULL);
    thread_block(THREAD_CONTINUE_NULL);

    return KERN_SUCCESS;
}
static bool
ctrr_test_ro_fault_handler(arm_saved_state_t * state)
{
    bool retval = false;
    uint32_t esr = get_saved_state_esr(state);
    esr_exception_class_t class = ESR_EC(esr);
    fault_status_t fsc = ISS_DA_FSC(ESR_ISS(esr));

    if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
        ctrr_exception_esr = esr;
        add_saved_state_pc(state, 4);

        retval = true;
    }

    return retval;
}

static bool
ctrr_test_nx_fault_handler(arm_saved_state_t * state)
{
    bool retval = false;
    uint32_t esr = get_saved_state_esr(state);
    esr_exception_class_t class = ESR_EC(esr);
    fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));

    if ((class == ESR_EC_IABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) {
        ctrr_exception_esr = esr;
        /* return to the instruction immediately after the call to NX page */
        set_saved_state_pc(state, get_saved_state_lr(state));

        retval = true;
    }

    return retval;
}
/* test CTRR on a cpu, caller to bind thread to desired cpu */
/* ctrr_test_page was reserved during bootstrap process */
kern_return_t
ctrr_test_cpu(void)
{
    ppnum_t ro_pn, nx_pn;
    uint64_t *ctrr_ro_test_ptr;
    void (*ctrr_nx_test_ptr)(void);
    kern_return_t kr;
    uint64_t prot = 0;
    extern vm_offset_t virtual_space_start;

    /* ctrr read only region = [rorgn_begin_va, rorgn_end_va) */

    vm_offset_t rorgn_begin_va = phystokv(ctrr_begin);
    vm_offset_t rorgn_end_va = phystokv(ctrr_end) + 1;
    vm_offset_t ro_test_va = (vm_offset_t)&ctrr_ro_test;
    vm_offset_t nx_test_va = (vm_offset_t)&ctrr_nx_test;

    T_EXPECT(rorgn_begin_va <= ro_test_va && ro_test_va < rorgn_end_va, "Expect ro_test_va to be inside the CTRR region");
    T_EXPECT((nx_test_va < rorgn_begin_va) ^ (nx_test_va >= rorgn_end_va), "Expect nx_test_va to be outside the CTRR region");

    ro_pn = pmap_find_phys(kernel_pmap, ro_test_va);
    nx_pn = pmap_find_phys(kernel_pmap, nx_test_va);
    T_EXPECT(ro_pn && nx_pn, "Expect ro page number and nx page number to be non zero");

    T_LOG("test virtual page: %p, ctrr_ro_test: %p, ctrr_nx_test: %p, ro_pn: %x, nx_pn: %x ",
        (void *)ctrr_test_page, &ctrr_ro_test, &ctrr_nx_test, ro_pn, nx_pn);
    prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
    T_EXPECT(~prot & ARM_TTE_VALID, "Expect ctrr_test_page to be unmapped");

    T_LOG("Read only region test mapping virtual page %p to CTRR RO page number %d", ctrr_test_page, ro_pn);
    kr = pmap_enter(kernel_pmap, ctrr_test_page, ro_pn,
        VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
    T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RW mapping to succeed");

    // assert entire mmu prot path (Hierarchical protection model) is NOT RO
    // fetch effective block level protections from table/block entries
    prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
    T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RWNA && (prot & ARM_PTE_PNX), "Mapping is EL1 RWNX");

    ctrr_test_va = ctrr_test_page + (ro_test_va & PAGE_MASK);
    ctrr_ro_test_ptr = (void *)ctrr_test_va;

    T_LOG("Read only region test writing to %p to provoke data abort", ctrr_ro_test_ptr);

    // should cause data abort
    ml_expect_fault_begin(ctrr_test_ro_fault_handler, ctrr_test_va);
    *ctrr_ro_test_ptr = 1;
    ml_expect_fault_end();

    // ensure write permission fault at expected level
    // data abort handler will set ctrr_exception_esr when ctrr_test_va takes a permission fault

    T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_DABORT_EL1, "Data Abort from EL1 expected");
    T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");
    T_EXPECT(ESR_ISS(ctrr_exception_esr) & ISS_DA_WNR, "Write Fault Expected");

    ctrr_exception_esr = 0;
    pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);
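
    /*
     * Reset state between the two halves of the test: the temporary RW
     * mapping of the CTRR page is torn down and ctrr_exception_esr is
     * cleared, so the no-execute checks below can only be satisfied by a
     * fresh instruction abort, not by the data abort recorded above.
     */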
    T_LOG("No execute test mapping virtual page %p to CTRR PXN page number %d", ctrr_test_page, nx_pn);

    kr = pmap_enter(kernel_pmap, ctrr_test_page, nx_pn,
        VM_PROT_READ | VM_PROT_EXECUTE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
    T_EXPECT(kr == KERN_SUCCESS, "Expect pmap_enter of RX mapping to succeed");

    // assert entire mmu prot path (Hierarchical protection model) is NOT XN
    prot = pmap_get_arm64_prot(kernel_pmap, ctrr_test_page);
    T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RONA && (~prot & ARM_PTE_PNX), "Mapping is EL1 ROX");

    ctrr_test_va = ctrr_test_page + (nx_test_va & PAGE_MASK);
#if __has_feature(ptrauth_calls)
    ctrr_nx_test_ptr = ptrauth_sign_unauthenticated((void *)ctrr_test_va, ptrauth_key_function_pointer, 0);
#else
    ctrr_nx_test_ptr = (void *)ctrr_test_va;
#endif

    T_LOG("No execute test calling ctrr_nx_test_ptr(): %p to provoke instruction abort", ctrr_nx_test_ptr);

    // should cause prefetch abort
    ml_expect_fault_begin(ctrr_test_nx_fault_handler, ctrr_test_va);
    ctrr_nx_test_ptr();
    ml_expect_fault_end();

    // TODO: ensure execute permission fault at expected level
    T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_IABORT_EL1, "Instruction abort from EL1 Expected");
    T_EXPECT(ISS_DA_FSC(ESR_ISS(ctrr_exception_esr)) == FSC_PERMISSION_FAULT_L3, "Permission Fault Expected");

    ctrr_exception_esr = 0;

    pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE);

    T_LOG("Expect no faults when reading CTRR region to verify correct programming of CTRR limits");
    for (vm_offset_t addr = rorgn_begin_va; addr < rorgn_end_va; addr += 8) {
        volatile uint64_t x = *(uint64_t *)addr;
        (void) x; /* read for side effect only */
    }

    return KERN_SUCCESS;
}
#endif /* defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST) */
#if HAS_TWO_STAGE_SPR_LOCK

#define STR1(x) #x
#define STR(x) STR1(x)

volatile vm_offset_t spr_lock_test_addr;
volatile uint32_t spr_lock_exception_esr;

kern_return_t
arm64_spr_lock_test()
{
    processor_t p;

    for (p = processor_list; p != NULL; p = p->processor_list) {
        thread_bind(p);
        thread_block(THREAD_CONTINUE_NULL);
        T_LOG("Running SPR lock test on cpu %d\n", p->cpu_id);

        uint64_t orig_value = __builtin_arm_rsr64(STR(ARM64_REG_HID8));
        spr_lock_test_addr = (vm_offset_t)VM_KERNEL_STRIP_PTR(arm64_msr_lock_test);
        spr_lock_exception_esr = 0;
        arm64_msr_lock_test(~orig_value);
        T_EXPECT(spr_lock_exception_esr != 0, "MSR write generated synchronous abort");

        uint64_t new_value = __builtin_arm_rsr64(STR(ARM64_REG_HID8));
        T_EXPECT(orig_value == new_value, "MSR write did not succeed");

        spr_lock_test_addr = 0;
    }

    /* unbind thread from specific cpu */
    thread_bind(PROCESSOR_NULL);
    thread_block(THREAD_CONTINUE_NULL);

    T_PASS("Done running SPR lock tests");

    return KERN_SUCCESS;
}

#endif /* HAS_TWO_STAGE_SPR_LOCK */