/*
 * Copyright (c) 2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright notice
 * and this permission notice appear in all copies of the software,
 * derivative works or modified versions, and any portions thereof, and that
 * both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.
 * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
 * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_ldebug.h>

#define LOCK_PRIVATE 1

#include <kern/kalloc.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>
#include <tests/xnupost.h>
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_print.h>
#include <sys/kdebug.h>
#include <sys/munge.h>
#include <machine/cpu_capabilities.h>
#include <arm/cpu_data_internal.h>
kern_return_t arm64_lock_test(void);
kern_return_t arm64_munger_test(void);
kern_return_t ex_cb_test(void);
kern_return_t arm64_pan_test(void);
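
/*
 * Overview: the in-kernel POST cases in this file exercise the lock
 * primitives (hw_lock, lck_spin, lck_mtx, lck_rw), the munge_*
 * syscall-argument expanders, the exception-callback (ex_cb) registry,
 * and, where __ARM_PAN_AVAILABLE__, the PAN hardware feature.
 */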
// exception handler ignores this fault address during PAN test
#if __ARM_PAN_AVAILABLE__
const uint64_t pan_ro_value = 0xFEEDB0B0DEADBEEF;
vm_offset_t pan_test_addr = 0;
vm_offset_t pan_ro_addr = 0;
volatile int pan_exception_level = 0;
volatile char pan_fault_value = 0;
#endif
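
/*
 * Background, for readers unfamiliar with the feature: PAN ("Privileged
 * Access Never", ARMv8.1) causes privileged loads and stores to
 * user-accessible mappings to fault while PSTATE.PAN is set.
 * arm64_pan_test() below takes such faults on purpose; the fault handler
 * recognizes pan_test_addr/pan_ro_addr and reports back through
 * pan_exception_level and pan_fault_value.
 */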
#include <libkern/OSAtomic.h>
#define LOCK_TEST_ITERATIONS 50
static hw_lock_data_t    lt_hw_lock;
static lck_spin_t        lt_lck_spin_t;
static lck_mtx_t         lt_mtx;
static lck_rw_t          lt_rwlock;
static volatile uint32_t lt_counter = 0;
static volatile int      lt_spinvolatile;
static volatile uint32_t lt_max_holders = 0;
static volatile uint32_t lt_upgrade_holders = 0;
static volatile uint32_t lt_max_upgrade_holders = 0;
static volatile uint32_t lt_num_holders = 0;
static volatile uint32_t lt_done_threads;
static volatile uint32_t lt_target_done_threads;
static volatile uint32_t lt_cpu_bind_id = 0;
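
/*
 * Shared state for the lock tests: lt_counter counts completed critical
 * sections; lt_num_holders/lt_max_holders record how many threads were
 * observed inside a blocking-lock critical section at once (anything
 * above 1 for an exclusive lock indicates a mutual-exclusion failure);
 * lt_done_threads/lt_target_done_threads let the main thread wait for
 * the worker threads to finish.
 */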
static void
lt_note_another_blocking_lock_holder()
{
	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	lt_num_holders++;
	lt_max_holders = (lt_max_holders < lt_num_holders) ? lt_num_holders : lt_max_holders;
	hw_lock_unlock(&lt_hw_lock);
}
static void
lt_note_blocking_lock_release()
{
	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	lt_num_holders--;
	hw_lock_unlock(&lt_hw_lock);
}
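
/*
 * Each blocking-lock worker below brackets its critical section with the
 * two helpers above, so a mutual-exclusion failure shows up as
 * lt_max_holders rising above the expected holder count and is caught by
 * the T_EXPECT checks in lt_test_locks().
 */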
static void
lt_spin_a_little_bit()
{
	int i;

	for (i = 0; i < 10000; i++) {
		lt_spinvolatile++;
	}
}
static void
lt_sleep_a_little_bit()
{
	delay(100);
}

static void
lt_grab_mutex()
{
	lck_mtx_lock(&lt_mtx);
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_mtx_unlock(&lt_mtx);
}
static void
lt_grab_mutex_with_try()
{
	while (0 == lck_mtx_try_lock(&lt_mtx)) {
		;
	}
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_mtx_unlock(&lt_mtx);
}
static void
lt_grab_rw_exclusive()
{
	lck_rw_lock_exclusive(&lt_rwlock);
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_rw_done(&lt_rwlock);
}
static void
lt_grab_rw_exclusive_with_try()
{
	while (0 == lck_rw_try_lock_exclusive(&lt_rwlock)) {
		lt_sleep_a_little_bit();
	}
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_rw_done(&lt_rwlock);
}
/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
 *  static void
 *  lt_grab_rw_shared()
 *  {
 *       lck_rw_lock_shared(&lt_rwlock);
 *       lt_counter++;
 *
 *       lt_note_another_blocking_lock_holder();
 *       lt_sleep_a_little_bit();
 *       lt_note_blocking_lock_release();
 *
 *       lck_rw_done(&lt_rwlock);
 *  }
 */

/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
 *  static void
 *  lt_grab_rw_shared_with_try()
 *  {
 *       while(0 == lck_rw_try_lock_shared(&lt_rwlock));
 *       lt_counter++;
 *
 *       lt_note_another_blocking_lock_holder();
 *       lt_sleep_a_little_bit();
 *       lt_note_blocking_lock_release();
 *
 *       lck_rw_done(&lt_rwlock);
 *  }
 */
static void
lt_upgrade_downgrade_rw()
{
	boolean_t upgraded, success;

	success = lck_rw_try_lock_shared(&lt_rwlock);
	if (!success) {
		lck_rw_lock_shared(&lt_rwlock);
	}

	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_note_blocking_lock_release();

	upgraded = lck_rw_lock_shared_to_exclusive(&lt_rwlock);
	if (!upgraded) {
		success = lck_rw_try_lock_exclusive(&lt_rwlock);

		if (!success) {
			lck_rw_lock_exclusive(&lt_rwlock);
		}
	}

	lt_upgrade_holders++;
	if (lt_upgrade_holders > lt_max_upgrade_holders) {
		lt_max_upgrade_holders = lt_upgrade_holders;
	}

	lt_counter++;
	lt_sleep_a_little_bit();

	lt_upgrade_holders--;

	lck_rw_lock_exclusive_to_shared(&lt_rwlock);

	lt_spin_a_little_bit();
	lck_rw_done(&lt_rwlock);
}
const int limit = 1000000;
static int lt_stress_local_counters[MAX_CPUS];

static void
lt_stress_hw_lock()
{
	int local_counter = 0;

	uint cpuid = current_processor()->cpu_id;

	kprintf("%s>cpu %d starting\n", __FUNCTION__, cpuid);

	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	lt_counter++;
	local_counter++;
	hw_lock_unlock(&lt_hw_lock);

	while (lt_counter < lt_target_done_threads) {
		;
	}

	kprintf("%s>cpu %d started\n", __FUNCTION__, cpuid);

	while (lt_counter < limit) {
		hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
		if (lt_counter < limit) {
			lt_counter++;
			local_counter++;
		}
		hw_lock_unlock(&lt_hw_lock);
	}

	lt_stress_local_counters[cpuid] = local_counter;

	kprintf("%s>final counter %d cpu %d incremented the counter %d times\n", __FUNCTION__, lt_counter, cpuid, local_counter);
}
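
/*
 * The stress test's invariant, checked in lt_test_locks(): the per-CPU
 * local counters must sum exactly to the shared lt_counter (no lost
 * updates under contention), and every CPU must have incremented the
 * counter a minimum number of times (no starvation).
 */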
static void
lt_grab_hw_lock()
{
	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	lt_counter++;
	lt_spin_a_little_bit();
	hw_lock_unlock(&lt_hw_lock);
}
static void
lt_grab_hw_lock_with_try()
{
	while (0 == hw_lock_try(&lt_hw_lock, LCK_GRP_NULL)) {
		;
	}
	lt_counter++;
	lt_spin_a_little_bit();
	hw_lock_unlock(&lt_hw_lock);
}
static void
lt_grab_hw_lock_with_to()
{
	while (0 == hw_lock_to(&lt_hw_lock, LockTimeOut, LCK_GRP_NULL)) {
		mp_enable_preemption();
	}
	lt_counter++;
	lt_spin_a_little_bit();
	hw_lock_unlock(&lt_hw_lock);
}
static void
lt_grab_spin_lock()
{
	lck_spin_lock(&lt_lck_spin_t);
	lt_counter++;
	lt_spin_a_little_bit();
	lck_spin_unlock(&lt_lck_spin_t);
}
static void
lt_grab_spin_lock_with_try()
{
	while (0 == lck_spin_try_lock(&lt_lck_spin_t)) {
		;
	}
	lt_counter++;
	lt_spin_a_little_bit();
	lck_spin_unlock(&lt_lck_spin_t);
}
static volatile boolean_t lt_thread_lock_grabbed;
static volatile boolean_t lt_thread_lock_success;

static void
lt_reset()
{
	lt_counter = 0;
	lt_max_holders = 0;
	lt_num_holders = 0;
	lt_max_upgrade_holders = 0;
	lt_upgrade_holders = 0;
	lt_done_threads = 0;
	lt_target_done_threads = 0;
	lt_cpu_bind_id = 0;

	OSMemoryBarrier();
}
static void
lt_trylock_hw_lock_with_to()
{
	OSMemoryBarrier();
	while (!lt_thread_lock_grabbed) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	lt_thread_lock_success = hw_lock_to(&lt_hw_lock, 100, LCK_GRP_NULL);
	OSMemoryBarrier();
	mp_enable_preemption();
}
static void
lt_trylock_spin_try_lock()
{
	OSMemoryBarrier();
	while (!lt_thread_lock_grabbed) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	lt_thread_lock_success = lck_spin_try_lock(&lt_lck_spin_t);
	OSMemoryBarrier();
}
static void
lt_trylock_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void (*)(void))arg;

	func();

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}
static void
lt_start_trylock_thread(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_trylock_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}
static void
lt_wait_for_lock_test_threads()
{
	OSMemoryBarrier();
	/* Spin to reduce dependencies */
	while (lt_done_threads < lt_target_done_threads) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	OSMemoryBarrier();
}
static kern_return_t
lt_test_trylocks()
{
	boolean_t success = FALSE;
	extern unsigned int real_ncpus;

	/*
	 * First mtx try lock succeeds, second fails.
	 */
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NOTNULL(success, "First mtx try lock");
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NULL(success, "Second mtx try lock for a locked mtx");
	lck_mtx_unlock(&lt_mtx);

	/*
	 * After regular grab, can't try lock.
	 */
	lck_mtx_lock(&lt_mtx);
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NULL(success, "try lock should fail after regular lck_mtx_lock");
	lck_mtx_unlock(&lt_mtx);

	/*
	 * Two shared try locks on a previously unheld rwlock succeed, and a
	 * subsequent exclusive attempt fails.
	 */
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "exclusive lock attempt on previously held lock should fail");
	lck_rw_done(&lt_rwlock);
	lck_rw_done(&lt_rwlock);

	/*
	 * After regular shared grab, can trylock
	 * for shared but not for exclusive.
	 */
	lck_rw_lock_shared(&lt_rwlock);
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "After regular shared grab another shared try lock should succeed.");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular shared grab an exclusive lock attempt should fail.");
	lck_rw_done(&lt_rwlock);
	lck_rw_done(&lt_rwlock);

	/*
	 * An exclusive try lock succeeds, subsequent shared and exclusive
	 * attempts fail.
	 */
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "An exclusive try lock should succeed");
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NULL(success, "try lock in shared mode attempt after an exclusive grab should fail");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "try lock in exclusive mode attempt after an exclusive grab should fail");
	lck_rw_done(&lt_rwlock);

	/*
	 * After regular exclusive grab, neither kind of trylock succeeds.
	 */
	lck_rw_lock_exclusive(&lt_rwlock);
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular exclusive grab, shared trylock should not succeed");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular exclusive grab, exclusive trylock should not succeed");
	lck_rw_done(&lt_rwlock);

	/*
	 * First spin lock attempts succeed, second attempts fail.
	 */
	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
	T_ASSERT_NOTNULL(success, "First spin lock attempts should succeed");
	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
	T_ASSERT_NULL(success, "Second attempt to spin lock should fail");
	hw_lock_unlock(&lt_hw_lock);

	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	success = hw_lock_try(&lt_hw_lock, LCK_GRP_NULL);
	T_ASSERT_NULL(success, "After taking spin lock, trylock attempt should fail");
	hw_lock_unlock(&lt_hw_lock);

	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	OSMemoryBarrier();
	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
	success = hw_lock_to(&lt_hw_lock, 100, LCK_GRP_NULL);
	T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed");
	if (real_ncpus == 1) {
		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
	}
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "Second spin lock with timeout should fail and timeout");
	if (real_ncpus == 1) {
		mp_disable_preemption(); /* don't double-enable when we unlock */
	}
	hw_lock_unlock(&lt_hw_lock);

	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	OSMemoryBarrier();
	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
	hw_lock_lock(&lt_hw_lock, LCK_GRP_NULL);
	if (real_ncpus == 1) {
		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
	}
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "after taking a spin lock, lock attempt with timeout should fail");
	if (real_ncpus == 1) {
		mp_disable_preemption(); /* don't double-enable when we unlock */
	}
	hw_lock_unlock(&lt_hw_lock);

	success = lck_spin_try_lock(&lt_lck_spin_t);
	T_ASSERT_NOTNULL(success, "spin trylock of previously unheld lock should succeed");
	success = lck_spin_try_lock(&lt_lck_spin_t);
	T_ASSERT_NULL(success, "spin trylock attempt of previously held lock (with trylock) should fail");
	lck_spin_unlock(&lt_lck_spin_t);

	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	lt_start_trylock_thread(lt_trylock_spin_try_lock);
	lck_spin_lock(&lt_lck_spin_t);
	if (real_ncpus == 1) {
		mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
	}
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "spin trylock attempt of previously held lock should fail");
	if (real_ncpus == 1) {
		mp_disable_preemption(); /* don't double-enable when we unlock */
	}
	lck_spin_unlock(&lt_lck_spin_t);

	return KERN_SUCCESS;
}
static void
lt_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void (*)(void))arg;
	int i;

	for (i = 0; i < LOCK_TEST_ITERATIONS; i++) {
		func();
	}

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}
static void
lt_bound_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void (*)(void))arg;

	int cpuid = OSIncrementAtomic((volatile SInt32 *)&lt_cpu_bind_id);

	processor_t processor = processor_list;
	while ((processor != NULL) && (processor->cpu_id != cpuid)) {
		processor = processor->processor_list;
	}

	if (processor != NULL) {
		thread_bind(processor);
	}

	thread_block(THREAD_CONTINUE_NULL);

	func();

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}
static void
lt_start_lock_thread(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}
static void
lt_start_lock_thread_bound(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_bound_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}
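
/*
 * Note the continuation trick used by the helpers above: the test
 * function to run is smuggled to kernel_thread_start() as the thread
 * argument, and lt_thread()/lt_bound_thread()/lt_trylock_thread() cast
 * it back to a function pointer and call it.
 */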
static kern_return_t
lt_test_locks()
{
	kern_return_t kr = KERN_SUCCESS;
	lck_grp_attr_t *lga = lck_grp_attr_alloc_init();
	lck_grp_t *lg = lck_grp_alloc_init("lock test", lga);

	lck_mtx_init(&lt_mtx, lg, LCK_ATTR_NULL);
	lck_rw_init(&lt_rwlock, lg, LCK_ATTR_NULL);
	lck_spin_init(&lt_lck_spin_t, lg, LCK_ATTR_NULL);
	hw_lock_init(&lt_hw_lock);
	T_LOG("Testing locks.");

	/* Try locks (custom) */
	lt_reset();

	T_LOG("Running try lock test.");
	kr = lt_test_trylocks();
	T_EXPECT_NULL(kr, "try lock test failed.");

	/* Uncontended mutex */
	T_LOG("Running uncontended mutex test.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_mutex);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Contended mutex */
	T_LOG("Running contended mutex test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_mutex);
	lt_start_lock_thread(lt_grab_mutex);
	lt_start_lock_thread(lt_grab_mutex);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Contended mutex: try locks */
	T_LOG("Running contended mutex trylock test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	/* Uncontended exclusive rwlock */
	T_LOG("Running uncontended exclusive rwlock test.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended shared rwlock */

	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	 *  T_LOG("Running uncontended shared rwlock test.");
	 *  lt_reset();
	 *  lt_target_done_threads = 1;
	 *  lt_start_lock_thread(lt_grab_rw_shared);
	 *  lt_wait_for_lock_test_threads();
	 *  T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 *  T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	 */
	/* Contended exclusive rwlock */
	T_LOG("Running contended exclusive rwlock test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* One shared, two exclusive */
	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	 *  T_LOG("Running test with one shared and two exclusive rw lock threads.");
	 *  lt_reset();
	 *  lt_target_done_threads = 3;
	 *  lt_start_lock_thread(lt_grab_rw_shared);
	 *  lt_start_lock_thread(lt_grab_rw_exclusive);
	 *  lt_start_lock_thread(lt_grab_rw_exclusive);
	 *  lt_wait_for_lock_test_threads();
	 *  T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 *  T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	 */

	/* Four shared */
	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	 *  T_LOG("Running test with four shared holders.");
	 *  lt_reset();
	 *  lt_target_done_threads = 4;
	 *  lt_start_lock_thread(lt_grab_rw_shared);
	 *  lt_start_lock_thread(lt_grab_rw_shared);
	 *  lt_start_lock_thread(lt_grab_rw_shared);
	 *  lt_start_lock_thread(lt_grab_rw_shared);
	 *  lt_wait_for_lock_test_threads();
	 *  T_EXPECT_LE_UINT(lt_max_holders, 4, NULL);
	 */

	/* Three doing upgrades and downgrades */
	T_LOG("Running test with threads upgrading and downgrading.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
	T_EXPECT_EQ_UINT(lt_max_upgrade_holders, 1, NULL);
	/* Uncontended - exclusive trylocks */
	T_LOG("Running test with single thread doing exclusive rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended - shared trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	 *  T_LOG("Running test with single thread doing shared rwlock trylocks.");
	 *  lt_reset();
	 *  lt_target_done_threads = 1;
	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 *  lt_wait_for_lock_test_threads();
	 *  T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 *  T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	 */

	/* Three doing exclusive trylocks */
	T_LOG("Running test with threads doing exclusive rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Three doing shared trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	 *  T_LOG("Running test with threads doing shared rwlock trylocks.");
	 *  lt_reset();
	 *  lt_target_done_threads = 3;
	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 *  lt_wait_for_lock_test_threads();
	 *  T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 *  T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
	 */

	/* Three doing various trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	 *  T_LOG("Running test with threads doing mixed rwlock trylocks.");
	 *  lt_reset();
	 *  lt_target_done_threads = 4;
	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 *  lt_start_lock_thread(lt_grab_rw_shared_with_try);
	 *  lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	 *  lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	 *  lt_wait_for_lock_test_threads();
	 *  T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	 *  T_EXPECT_LE_UINT(lt_max_holders, 2, NULL);
	 */
	/* HW locks */
	T_LOG("Running test with hw_lock_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* HW locks stress test */
	T_LOG("Running HW locks stress test with hw_lock_lock()");
	extern unsigned int real_ncpus;
	lt_reset();
	lt_target_done_threads = real_ncpus;
	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
		lt_start_lock_thread_bound(lt_stress_hw_lock);
	}
	lt_wait_for_lock_test_threads();
	bool starvation = false;
	uint total_local_count = 0;
	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
		starvation = starvation || (lt_stress_local_counters[processor->cpu_id] < 10);
		total_local_count += lt_stress_local_counters[processor->cpu_id];
	}
	if (total_local_count != lt_counter) {
		T_FAIL("Lock failure\n");
	} else if (starvation) {
		T_FAIL("Lock starvation found\n");
	} else {
		T_PASS("HW locks stress test with hw_lock_lock()");
	}
	/* HW locks: trylocks */
	T_LOG("Running test with hw_lock_try()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* HW locks: with timeout */
	T_LOG("Running test with hw_lock_to()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* Spin locks */
	T_LOG("Running test with lck_spin_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* Spin locks: trylocks */
	T_LOG("Running test with lck_spin_try_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	return KERN_SUCCESS;
}
#define MT_MAX_ARGS 8
#define MT_INITIAL_VALUE 0xfeedbeef
#define MT_W_VAL (0x00000000feedbeefULL) /* Drop in zeros */
#define MT_S_VAL (0xfffffffffeedbeefULL) /* High bit is 1, so sign-extends as negative */
#define MT_L_VAL (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32)) /* Two back-to-back */

typedef void (*sy_munge_t)(void*);

#define MT_FUNC(x) #x, x
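
/*
 * Naming convention (see <sys/munge.h>): a munge_* routine expands a
 * packed array of 32-bit words from a user trap into 64-bit kernel
 * arguments, in place; the suffix spells the argument pattern, where
 * 'w' is a 32-bit word (zero-extended), 's' a signed 32-bit value
 * (sign-extended), and 'l' a 64-bit long (two words consumed as-is).
 * With every input word set to MT_INITIAL_VALUE (0xfeedbeef), munge_wl
 * therefore turns 3 input words into 2 outputs:
 *   out[0] = 0x00000000feedbeef  (MT_W_VAL)
 *   out[1] = 0xfeedbeeffeedbeef  (MT_L_VAL)
 * which is exactly what the table below encodes.
 */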
struct munger_test {
	const char *mt_name;
	sy_munge_t  mt_func;
	uint32_t    mt_in_words;
	uint32_t    mt_nout;
	uint64_t    mt_expected[MT_MAX_ARGS];
} munger_tests[] = {
	{MT_FUNC(munge_w), 1, 1, {MT_W_VAL}},
	{MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}},
	{MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}},
	{MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_l), 2, 1, {MT_L_VAL}},
	{MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}
};

#define MT_TEST_COUNT (sizeof(munger_tests) / sizeof(struct munger_test))
static void
mt_reset(uint32_t in_words, size_t total_size, uint32_t *data)
{
	uint32_t i;

	for (i = 0; i < in_words; i++) {
		data[i] = MT_INITIAL_VALUE;
	}

	if (in_words * sizeof(uint32_t) < total_size) {
		bzero(&data[in_words], total_size - in_words * sizeof(uint32_t));
	}
}
static void
mt_test_mungers()
{
	uint64_t data[MT_MAX_ARGS];
	uint32_t i, j;

	for (i = 0; i < MT_TEST_COUNT; i++) {
		struct munger_test *test = &munger_tests[i];
		int pass = 1;

		T_LOG("Testing %s", test->mt_name);

		mt_reset(test->mt_in_words, sizeof(data), (uint32_t*)data);
		test->mt_func(data);

		for (j = 0; j < test->mt_nout; j++) {
			if (data[j] != test->mt_expected[j]) {
				T_FAIL("Index %d: expected %llx, got %llx.", j, test->mt_expected[j], data[j]);
				pass = 0;
			}
		}
		if (pass) {
			T_PASS(test->mt_name);
		}
	}
}
/* Exception Callback Test */
static ex_cb_action_t
excb_test_action(
	ex_cb_class_t		cb_class,
	void			*refcon,
	const ex_cb_state_t	*state
	)
{
	ex_cb_state_t *context = (ex_cb_state_t *)refcon;

	if ((NULL == refcon) || (NULL == state)) {
		return EXCB_ACTION_TEST_FAIL;
	}

	context->far = state->far;

	switch (cb_class) {
	case EXCB_CLASS_TEST1:
		return EXCB_ACTION_RERUN;
	case EXCB_CLASS_TEST2:
		return EXCB_ACTION_NONE;
	default:
		return EXCB_ACTION_TEST_FAIL;
	}
}
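
/*
 * EXCB_ACTION_RERUN asks the caller to re-run the faulting operation
 * after the callback returns, while EXCB_ACTION_NONE leaves default
 * handling in place; the invocation test below expects exactly those
 * results for the two registered test classes.
 */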
kern_return_t
ex_cb_test()
{
	const vm_offset_t far1 = 0xdead0001;
	const vm_offset_t far2 = 0xdead0002;
	kern_return_t kr = KERN_SUCCESS;
	ex_cb_state_t test_context_1 = {0xdeadbeef};
	ex_cb_state_t test_context_2 = {0xdeadbeef};
	ex_cb_action_t action;

	T_LOG("Testing Exception Callback.");

	T_LOG("Running registration test.");

	kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
	T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST1 exception callback");
	kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
	T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST2 exception callback");

	kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
	T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST2 exception callback");
	kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
	T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST1 exception callback");

	T_LOG("Running invocation test.");

	action = ex_cb_invoke(EXCB_CLASS_TEST1, far1);
	T_ASSERT(EXCB_ACTION_RERUN == action, NULL);
	T_ASSERT(far1 == test_context_1.far, NULL);

	action = ex_cb_invoke(EXCB_CLASS_TEST2, far2);
	T_ASSERT(EXCB_ACTION_NONE == action, NULL);
	T_ASSERT(far2 == test_context_2.far, NULL);

	action = ex_cb_invoke(EXCB_CLASS_TEST3, 0);
	T_ASSERT(EXCB_ACTION_NONE == action, NULL);

	return KERN_SUCCESS;
}
#if __ARM_PAN_AVAILABLE__
kern_return_t
arm64_pan_test()
{
	vm_offset_t priv_addr = _COMM_PAGE_SIGNATURE;

	T_LOG("Testing PAN.");

	T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);

	pan_exception_level = 0;
	pan_fault_value = 0xDE;
	// convert priv_addr to one that is accessible from user mode
	pan_test_addr = priv_addr + _COMM_HIGH_PAGE64_BASE_ADDRESS -
	    _COMM_PAGE_START_ADDRESS;

	// Below should trigger a PAN exception as pan_test_addr is accessible
	// to user mode.
	// The exception handler, upon recognizing the fault address is pan_test_addr,
	// will disable PAN and rerun this instruction successfully
	T_ASSERT(*(char *)pan_test_addr == *(char *)priv_addr, NULL);

	T_ASSERT(pan_exception_level == 2, NULL);

	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

	pan_exception_level = 0;
	pan_fault_value = 0xAD;
	pan_ro_addr = (vm_offset_t) &pan_ro_value;

	// Force a permission fault while PAN is disabled to make sure PAN is
	// re-enabled during the exception handler.
	*((volatile uint64_t*)pan_ro_addr) = 0xFEEDFACECAFECAFE;

	T_ASSERT(pan_exception_level == 2, NULL);

	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

	pan_test_addr = 0;
	pan_ro_addr = 0;

	__builtin_arm_wsr("pan", 1);
	return KERN_SUCCESS;
}
#endif /* __ARM_PAN_AVAILABLE__ */

kern_return_t
arm64_lock_test()
{
	return lt_test_locks();
}