/*
 * Copyright (c) 2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright notice
 * and this permission notice appear in all copies of the software,
 * derivative works or modified versions, and any portions thereof, and that
 * both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.
 * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
 * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_ldebug.h>

#define LOCK_PRIVATE 1

#include <kern/kalloc.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>

#include <tests/xnupost.h>

#if MACH_KDB
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_print.h>
#endif /* MACH_KDB */

#include <sys/kdebug.h>
#include <sys/munge.h>
#include <machine/cpu_capabilities.h>
#include <arm/cpu_data_internal.h>
kern_return_t arm64_lock_test(void);
kern_return_t arm64_munger_test(void);
kern_return_t ex_cb_test(void);
kern_return_t arm64_pan_test(void);
// exception handler ignores this fault address during PAN test
#if __ARM_PAN_AVAILABLE__
const uint64_t pan_ro_value = 0xFEEDB0B0DEADBEEF;
vm_offset_t pan_test_addr = 0;
vm_offset_t pan_ro_addr = 0;
volatile int pan_exception_level = 0;
volatile char pan_fault_value = 0;
#endif
#include <libkern/OSAtomic.h>

#define LOCK_TEST_ITERATIONS 50

static hw_lock_data_t    lt_hw_lock;
static lck_spin_t        lt_lck_spin_t;
static lck_mtx_t         lt_mtx;
static lck_rw_t          lt_rwlock;
static volatile uint32_t lt_counter = 0;
static volatile int      lt_spinvolatile;
static volatile uint32_t lt_max_holders = 0;
static volatile uint32_t lt_upgrade_holders = 0;
static volatile uint32_t lt_max_upgrade_holders = 0;
static volatile uint32_t lt_num_holders = 0;
static volatile uint32_t lt_done_threads;
static volatile uint32_t lt_target_done_threads;
static volatile uint32_t lt_cpu_bind_id = 0;
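
/*
 * Harness overview: each lt_grab_* body increments lt_counter while
 * holding its lock, and lt_num_holders/lt_max_holders record how many
 * threads were inside a critical section at once. If mutual exclusion
 * holds, lt_counter ends at LOCK_TEST_ITERATIONS * nthreads and
 * lt_max_holders stays at 1 for exclusive locks.
 */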
static void
lt_note_another_blocking_lock_holder()
{
    hw_lock_lock(&lt_hw_lock);
    lt_num_holders++;
    lt_max_holders = (lt_max_holders < lt_num_holders) ? lt_num_holders : lt_max_holders;
    hw_lock_unlock(&lt_hw_lock);
}
static void
lt_note_blocking_lock_release()
{
    hw_lock_lock(&lt_hw_lock);
    lt_num_holders--;
    hw_lock_unlock(&lt_hw_lock);
}
static void
lt_spin_a_little_bit()
{
    uint32_t i;

    for (i = 0; i < 10000; i++) {
        lt_spinvolatile++;
    }
}
static void
lt_sleep_a_little_bit()
{
    delay(100);
}
static void
lt_grab_mutex()
{
    lck_mtx_lock(&lt_mtx);
    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_counter++;
    lt_note_blocking_lock_release();
    lck_mtx_unlock(&lt_mtx);
}
static void
lt_grab_mutex_with_try()
{
    while (0 == lck_mtx_try_lock(&lt_mtx));
    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_counter++;
    lt_note_blocking_lock_release();
    lck_mtx_unlock(&lt_mtx);
}
static void
lt_grab_rw_exclusive()
{
    lck_rw_lock_exclusive(&lt_rwlock);
    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_counter++;
    lt_note_blocking_lock_release();
    lck_rw_done(&lt_rwlock);
}
static void
lt_grab_rw_exclusive_with_try()
{
    while (0 == lck_rw_try_lock_exclusive(&lt_rwlock)) {
        lt_sleep_a_little_bit();
    }

    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_counter++;
    lt_note_blocking_lock_release();
    lck_rw_done(&lt_rwlock);
}
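
/*
 * The trylock variants deliberately busy-retry (with a short sleep in the
 * rwlock case) instead of blocking, so they exercise the try-acquire fast
 * path under the same contention as the blocking variants above.
 */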
/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
static void
lt_grab_rw_shared()
{
    lck_rw_lock_shared(&lt_rwlock);
    lt_counter++;

    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_note_blocking_lock_release();

    lck_rw_done(&lt_rwlock);
}
*/
/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
static void
lt_grab_rw_shared_with_try()
{
    while(0 == lck_rw_try_lock_shared(&lt_rwlock));
    lt_counter++;

    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_note_blocking_lock_release();

    lck_rw_done(&lt_rwlock);
}
*/
static void
lt_upgrade_downgrade_rw()
{
    boolean_t upgraded, success;

    success = lck_rw_try_lock_shared(&lt_rwlock);
    if (!success) {
        lck_rw_lock_shared(&lt_rwlock);
    }

    lt_note_another_blocking_lock_holder();
    lt_sleep_a_little_bit();
    lt_note_blocking_lock_release();

    upgraded = lck_rw_lock_shared_to_exclusive(&lt_rwlock);
    if (!upgraded) {
        success = lck_rw_try_lock_exclusive(&lt_rwlock);

        if (!success) {
            lck_rw_lock_exclusive(&lt_rwlock);
        }
    }

    lt_upgrade_holders++;
    if (lt_upgrade_holders > lt_max_upgrade_holders) {
        lt_max_upgrade_holders = lt_upgrade_holders;
    }

    lt_counter++;
    lt_sleep_a_little_bit();

    lt_upgrade_holders--;

    lck_rw_lock_exclusive_to_shared(&lt_rwlock);

    lt_spin_a_little_bit();
    lck_rw_done(&lt_rwlock);
}
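
/*
 * Note: lck_rw_lock_shared_to_exclusive() drops the shared hold entirely
 * when the upgrade fails, which is why the failure path above has to
 * re-acquire the lock in exclusive mode from scratch.
 */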
const int limit = 1000000;
static int lt_stress_local_counters[MAX_CPUS];
static void
lt_stress_hw_lock()
{
    int local_counter = 0;

    uint cpuid = current_processor()->cpu_id;

    kprintf("%s>cpu %d starting\n", __FUNCTION__, cpuid);

    hw_lock_lock(&lt_hw_lock);
    lt_counter++;
    local_counter++;
    hw_lock_unlock(&lt_hw_lock);

    while (lt_counter < lt_target_done_threads) {
        ;
    }

    kprintf("%s>cpu %d started\n", __FUNCTION__, cpuid);

    while (lt_counter < limit) {
        spl_t s = splsched();
        hw_lock_lock(&lt_hw_lock);
        if (lt_counter < limit) {
            lt_counter++;
            local_counter++;
        }
        hw_lock_unlock(&lt_hw_lock);
        splx(s);
    }

    lt_stress_local_counters[cpuid] = local_counter;

    kprintf("%s>final counter %d cpu %d incremented the counter %d times\n", __FUNCTION__, lt_counter, cpuid, local_counter);
}
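
/*
 * Each bound thread hammers lt_hw_lock until the shared counter reaches
 * `limit`, tallying its own increments in lt_stress_local_counters. The
 * checker in lt_test_locks() later verifies that the per-CPU tallies sum
 * to lt_counter (no lost updates) and that no CPU was starved of the lock.
 */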
static void
lt_grab_hw_lock()
{
    hw_lock_lock(&lt_hw_lock);
    lt_counter++;
    lt_spin_a_little_bit();
    hw_lock_unlock(&lt_hw_lock);
}
static void
lt_grab_hw_lock_with_try()
{
    while (0 == hw_lock_try(&lt_hw_lock));
    lt_counter++;
    lt_spin_a_little_bit();
    hw_lock_unlock(&lt_hw_lock);
}
static void
lt_grab_hw_lock_with_to()
{
    while (0 == hw_lock_to(&lt_hw_lock, LockTimeOut)) {
        mp_enable_preemption();
    }
    lt_counter++;
    lt_spin_a_little_bit();
    hw_lock_unlock(&lt_hw_lock);
}
static void
lt_grab_spin_lock()
{
    lck_spin_lock(&lt_lck_spin_t);
    lt_counter++;
    lt_spin_a_little_bit();
    lck_spin_unlock(&lt_lck_spin_t);
}
static void
lt_grab_spin_lock_with_try()
{
    while (0 == lck_spin_try_lock(&lt_lck_spin_t));
    lt_counter++;
    lt_spin_a_little_bit();
    lck_spin_unlock(&lt_lck_spin_t);
}
static volatile boolean_t lt_thread_lock_grabbed;
static volatile boolean_t lt_thread_lock_success;
static void
lt_reset()
{
    lt_counter = 0;
    lt_max_holders = 0;
    lt_num_holders = 0;
    lt_max_upgrade_holders = 0;
    lt_upgrade_holders = 0;
    lt_done_threads = 0;
    lt_target_done_threads = 0;
    lt_cpu_bind_id = 0;

    OSMemoryBarrier();
}
static void
lt_trylock_hw_lock_with_to()
{
    OSMemoryBarrier();
    while (!lt_thread_lock_grabbed) {
        lt_sleep_a_little_bit();
        OSMemoryBarrier();
    }
    lt_thread_lock_success = hw_lock_to(&lt_hw_lock, 100);
    OSMemoryBarrier();
    mp_enable_preemption();
}
static void
lt_trylock_spin_try_lock()
{
    OSMemoryBarrier();
    while (!lt_thread_lock_grabbed) {
        lt_sleep_a_little_bit();
        OSMemoryBarrier();
    }
    lt_thread_lock_success = lck_spin_try_lock(&lt_lck_spin_t);
    OSMemoryBarrier();
}
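
/*
 * Handshake with lt_test_trylocks(): the helper thread parks until the
 * main thread takes the lock and raises lt_thread_lock_grabbed, then
 * records whether its own try/timeout acquisition succeeded in
 * lt_thread_lock_success for the main thread to assert on.
 */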
static void
lt_trylock_thread(void *arg, wait_result_t wres __unused)
{
    void (*func)(void) = (void (*)(void))arg;

    func();

    OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}
static void
lt_start_trylock_thread(thread_continue_t func)
{
    thread_t thread;
    kern_return_t kr;

    kr = kernel_thread_start(lt_trylock_thread, func, &thread);
    assert(kr == KERN_SUCCESS);

    thread_deallocate(thread);
}
static void
lt_wait_for_lock_test_threads()
{
    OSMemoryBarrier();
    /* Spin to reduce dependencies */
    while (lt_done_threads < lt_target_done_threads) {
        lt_sleep_a_little_bit();
        OSMemoryBarrier();
    }
    OSMemoryBarrier();
}
static kern_return_t
lt_test_trylocks()
{
    boolean_t success;
    extern unsigned int real_ncpus;

    /*
     * First mtx try lock succeeds, second fails.
     */
    success = lck_mtx_try_lock(&lt_mtx);
    T_ASSERT_NOTNULL(success, "First mtx try lock");
    success = lck_mtx_try_lock(&lt_mtx);
    T_ASSERT_NULL(success, "Second mtx try lock for a locked mtx");
    lck_mtx_unlock(&lt_mtx);
    /*
     * After regular grab, can't try lock.
     */
    lck_mtx_lock(&lt_mtx);
    success = lck_mtx_try_lock(&lt_mtx);
    T_ASSERT_NULL(success, "try lock should fail after regular lck_mtx_lock");
    lck_mtx_unlock(&lt_mtx);

    /*
     * Two shared try locks on a previously unheld rwlock succeed, and a
     * subsequent exclusive attempt fails.
     */
    success = lck_rw_try_lock_shared(&lt_rwlock);
    T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
    success = lck_rw_try_lock_shared(&lt_rwlock);
    T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
    success = lck_rw_try_lock_exclusive(&lt_rwlock);
    T_ASSERT_NULL(success, "exclusive lock attempt on previously held lock should fail");
    lck_rw_done(&lt_rwlock);
    lck_rw_done(&lt_rwlock);

    /*
     * After regular shared grab, can trylock
     * for shared but not for exclusive.
     */
    lck_rw_lock_shared(&lt_rwlock);
    success = lck_rw_try_lock_shared(&lt_rwlock);
    T_ASSERT_NOTNULL(success, "After regular shared grab another shared try lock should succeed.");
    success = lck_rw_try_lock_exclusive(&lt_rwlock);
    T_ASSERT_NULL(success, "After regular shared grab an exclusive lock attempt should fail.");
    lck_rw_done(&lt_rwlock);
    lck_rw_done(&lt_rwlock);

    /*
     * An exclusive try lock succeeds, subsequent shared and exclusive
     * attempts fail.
     */
    success = lck_rw_try_lock_exclusive(&lt_rwlock);
    T_ASSERT_NOTNULL(success, "An exclusive try lock should succeed");
    success = lck_rw_try_lock_shared(&lt_rwlock);
    T_ASSERT_NULL(success, "try lock in shared mode attempt after an exclusive grab should fail");
    success = lck_rw_try_lock_exclusive(&lt_rwlock);
    T_ASSERT_NULL(success, "try lock in exclusive mode attempt after an exclusive grab should fail");
    lck_rw_done(&lt_rwlock);

    /*
     * After regular exclusive grab, neither kind of trylock succeeds.
     */
    lck_rw_lock_exclusive(&lt_rwlock);
    success = lck_rw_try_lock_shared(&lt_rwlock);
    T_ASSERT_NULL(success, "After regular exclusive grab, shared trylock should not succeed");
    success = lck_rw_try_lock_exclusive(&lt_rwlock);
    T_ASSERT_NULL(success, "After regular exclusive grab, exclusive trylock should not succeed");
    lck_rw_done(&lt_rwlock);

    /*
     * First spin lock attempts succeed, second attempts fail.
     */
    success = hw_lock_try(&lt_hw_lock);
    T_ASSERT_NOTNULL(success, "First spin lock attempts should succeed");
    success = hw_lock_try(&lt_hw_lock);
    T_ASSERT_NULL(success, "Second attempt to spin lock should fail");
    hw_lock_unlock(&lt_hw_lock);

    hw_lock_lock(&lt_hw_lock);
    success = hw_lock_try(&lt_hw_lock);
    T_ASSERT_NULL(success, "After taking spin lock, trylock attempt should fail");
    hw_lock_unlock(&lt_hw_lock);
    lt_reset();
    lt_thread_lock_grabbed = false;
    lt_thread_lock_success = true;
    lt_target_done_threads = 1;
    OSMemoryBarrier();
    lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
    success = hw_lock_to(&lt_hw_lock, 100);
    T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed");
    if (real_ncpus == 1) {
        mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
    }
    OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
    lt_wait_for_lock_test_threads();
    T_ASSERT_NULL(lt_thread_lock_success, "Second spin lock with timeout should fail and timeout");
    if (real_ncpus == 1) {
        mp_disable_preemption(); /* don't double-enable when we unlock */
    }
    hw_lock_unlock(&lt_hw_lock);
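
    /*
     * On a single-CPU configuration the helper thread can only run if the
     * main thread briefly re-enables preemption while it still holds the
     * lock; preemption is disabled again before unlocking so the unlock
     * path does not enable it twice.
     */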
    lt_reset();
    lt_thread_lock_grabbed = false;
    lt_thread_lock_success = true;
    lt_target_done_threads = 1;
    OSMemoryBarrier();
    lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
    hw_lock_lock(&lt_hw_lock);
    if (real_ncpus == 1) {
        mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
    }
    OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
    lt_wait_for_lock_test_threads();
    T_ASSERT_NULL(lt_thread_lock_success, "after taking a spin lock, lock attempt with timeout should fail");
    if (real_ncpus == 1) {
        mp_disable_preemption(); /* don't double-enable when we unlock */
    }
    hw_lock_unlock(&lt_hw_lock);
    success = lck_spin_try_lock(&lt_lck_spin_t);
    T_ASSERT_NOTNULL(success, "spin trylock of previously unheld lock should succeed");
    success = lck_spin_try_lock(&lt_lck_spin_t);
    T_ASSERT_NULL(success, "spin trylock attempt of previously held lock (with trylock) should fail");
    lck_spin_unlock(&lt_lck_spin_t);
    lt_reset();
    lt_thread_lock_grabbed = false;
    lt_thread_lock_success = true;
    lt_target_done_threads = 1;
    lt_start_trylock_thread(lt_trylock_spin_try_lock);
    lck_spin_lock(&lt_lck_spin_t);
    if (real_ncpus == 1) {
        mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
    }
    OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
    lt_wait_for_lock_test_threads();
    T_ASSERT_NULL(lt_thread_lock_success, "spin trylock attempt of previously held lock should fail");
    if (real_ncpus == 1) {
        mp_disable_preemption(); /* don't double-enable when we unlock */
    }
    lck_spin_unlock(&lt_lck_spin_t);

    return KERN_SUCCESS;
}
static void
lt_thread(void *arg, wait_result_t wres __unused)
{
    void (*func)(void) = (void (*)(void)) arg;
    uint32_t i;

    for (i = 0; i < LOCK_TEST_ITERATIONS; i++) {
        func();
    }

    OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}
static void
lt_bound_thread(void *arg, wait_result_t wres __unused)
{
    void (*func)(void) = (void (*)(void)) arg;

    int cpuid = OSIncrementAtomic((volatile SInt32 *)&lt_cpu_bind_id);

    processor_t processor = processor_list;
    while ((processor != NULL) && (processor->cpu_id != cpuid)) {
        processor = processor->processor_list;
    }

    if (processor != NULL) {
        thread_bind(processor);
    }

    thread_block(THREAD_CONTINUE_NULL);

    func();

    OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}
static void
lt_start_lock_thread(thread_continue_t func)
{
    thread_t thread;
    kern_return_t kr;

    kr = kernel_thread_start(lt_thread, func, &thread);
    assert(kr == KERN_SUCCESS);

    thread_deallocate(thread);
}
static void
lt_start_lock_thread_bound(thread_continue_t func)
{
    thread_t thread;
    kern_return_t kr;

    kr = kernel_thread_start(lt_bound_thread, func, &thread);
    assert(kr == KERN_SUCCESS);

    thread_deallocate(thread);
}
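
/*
 * kernel_thread_start() hands back a reference on the new thread; the
 * tests fire and forget, so that reference is dropped immediately with
 * thread_deallocate() and completion is tracked via lt_done_threads
 * instead of a join.
 */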
static kern_return_t
lt_test_locks()
{
    kern_return_t kr = KERN_SUCCESS;
    lck_grp_attr_t *lga = lck_grp_attr_alloc_init();
    lck_grp_t *lg = lck_grp_alloc_init("lock test", lga);

    lck_mtx_init(&lt_mtx, lg, LCK_ATTR_NULL);
    lck_rw_init(&lt_rwlock, lg, LCK_ATTR_NULL);
    lck_spin_init(&lt_lck_spin_t, lg, LCK_ATTR_NULL);
    hw_lock_init(&lt_hw_lock);

    T_LOG("Testing locks.");

    /* Try locks (custom) */
    lt_reset();

    T_LOG("Running try lock test.");
    kr = lt_test_trylocks();
    T_EXPECT_NULL(kr, "try lock test failed.");
    /* Uncontended mutex */
    T_LOG("Running uncontended mutex test.");
    lt_reset();
    lt_target_done_threads = 1;
    lt_start_lock_thread(lt_grab_mutex);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
    /* Contended mutex */
    T_LOG("Running contended mutex test.");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_mutex);
    lt_start_lock_thread(lt_grab_mutex);
    lt_start_lock_thread(lt_grab_mutex);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
    /* Contended mutex: try locks */
    T_LOG("Running contended mutex trylock test.");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_mutex_with_try);
    lt_start_lock_thread(lt_grab_mutex_with_try);
    lt_start_lock_thread(lt_grab_mutex_with_try);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
    /* Uncontended exclusive rwlock */
    T_LOG("Running uncontended exclusive rwlock test.");
    lt_reset();
    lt_target_done_threads = 1;
    lt_start_lock_thread(lt_grab_rw_exclusive);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
    /* Uncontended shared rwlock */

    /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
    T_LOG("Running uncontended shared rwlock test.");
    lt_reset();
    lt_target_done_threads = 1;
    lt_start_lock_thread(lt_grab_rw_shared);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
    */
    /* Contended exclusive rwlock */
    T_LOG("Running contended exclusive rwlock test.");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_rw_exclusive);
    lt_start_lock_thread(lt_grab_rw_exclusive);
    lt_start_lock_thread(lt_grab_rw_exclusive);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
    /* One shared, two exclusive */
    /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
    T_LOG("Running test with one shared and two exclusive rw lock threads.");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_rw_shared);
    lt_start_lock_thread(lt_grab_rw_exclusive);
    lt_start_lock_thread(lt_grab_rw_exclusive);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
    */
    /* Four shared */
    /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
    T_LOG("Running test with four shared holders.");
    lt_reset();
    lt_target_done_threads = 4;
    lt_start_lock_thread(lt_grab_rw_shared);
    lt_start_lock_thread(lt_grab_rw_shared);
    lt_start_lock_thread(lt_grab_rw_shared);
    lt_start_lock_thread(lt_grab_rw_shared);
    lt_wait_for_lock_test_threads();
    T_EXPECT_LE_UINT(lt_max_holders, 4, NULL);
    */
    /* Three doing upgrades and downgrades */
    T_LOG("Running test with threads upgrading and downgrading.");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_upgrade_downgrade_rw);
    lt_start_lock_thread(lt_upgrade_downgrade_rw);
    lt_start_lock_thread(lt_upgrade_downgrade_rw);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
    T_EXPECT_EQ_UINT(lt_max_upgrade_holders, 1, NULL);
    /* Uncontended - exclusive trylocks */
    T_LOG("Running test with single thread doing exclusive rwlock trylocks.");
    lt_reset();
    lt_target_done_threads = 1;
    lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
    /* Uncontended - shared trylocks */
    /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
    T_LOG("Running test with single thread doing shared rwlock trylocks.");
    lt_reset();
    lt_target_done_threads = 1;
    lt_start_lock_thread(lt_grab_rw_shared_with_try);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
    */
    /* Three doing exclusive trylocks */
    T_LOG("Running test with threads doing exclusive rwlock trylocks.");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
    lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
    lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
    /* Three doing shared trylocks */
    /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
    T_LOG("Running test with threads doing shared rwlock trylocks.");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_rw_shared_with_try);
    lt_start_lock_thread(lt_grab_rw_shared_with_try);
    lt_start_lock_thread(lt_grab_rw_shared_with_try);
    lt_wait_for_lock_test_threads();
    T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
    */
    /* Three doing various trylocks */
    /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
    T_LOG("Running test with threads doing mixed rwlock trylocks.");
    lt_reset();
    lt_target_done_threads = 4;
    lt_start_lock_thread(lt_grab_rw_shared_with_try);
    lt_start_lock_thread(lt_grab_rw_shared_with_try);
    lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
    lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
    lt_wait_for_lock_test_threads();
    T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    T_EXPECT_LE_UINT(lt_max_holders, 2, NULL);
    */
    /* HW locks */
    T_LOG("Running test with hw_lock_lock()");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_hw_lock);
    lt_start_lock_thread(lt_grab_hw_lock);
    lt_start_lock_thread(lt_grab_hw_lock);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    /* HW locks stress test */
    T_LOG("Running HW locks stress test with hw_lock_lock()");
    extern unsigned int real_ncpus;
    lt_reset();
    lt_target_done_threads = real_ncpus;
    for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
        lt_start_lock_thread_bound(lt_stress_hw_lock);
    }
    lt_wait_for_lock_test_threads();
    bool starvation = false;
    uint total_local_count = 0;
    for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
        starvation = starvation || (lt_stress_local_counters[processor->cpu_id] < 10);
        total_local_count += lt_stress_local_counters[processor->cpu_id];
    }
    if (total_local_count != lt_counter) {
        T_FAIL("Lock failure\n");
    } else if (starvation) {
        T_FAIL("Lock starvation found\n");
    } else {
        T_PASS("HW locks stress test with hw_lock_lock()");
    }
    /* HW locks: trylocks */
    T_LOG("Running test with hw_lock_try()");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_hw_lock_with_try);
    lt_start_lock_thread(lt_grab_hw_lock_with_try);
    lt_start_lock_thread(lt_grab_hw_lock_with_try);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    /* HW locks: with timeout */
    T_LOG("Running test with hw_lock_to()");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_hw_lock_with_to);
    lt_start_lock_thread(lt_grab_hw_lock_with_to);
    lt_start_lock_thread(lt_grab_hw_lock_with_to);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    /* Spin locks */
    T_LOG("Running test with lck_spin_lock()");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_spin_lock);
    lt_start_lock_thread(lt_grab_spin_lock);
    lt_start_lock_thread(lt_grab_spin_lock);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
    /* Spin locks: trylocks */
    T_LOG("Running test with lck_spin_try_lock()");
    lt_reset();
    lt_target_done_threads = 3;
    lt_start_lock_thread(lt_grab_spin_lock_with_try);
    lt_start_lock_thread(lt_grab_spin_lock_with_try);
    lt_start_lock_thread(lt_grab_spin_lock_with_try);
    lt_wait_for_lock_test_threads();
    T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

    return KERN_SUCCESS;
}
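
/*
 * Munger tests: on arm64, 32-bit user processes pass syscall arguments
 * as packed 32-bit words; the sys/munge.h munge_* routines expand them
 * in place into the 64-bit argument layout the kernel expects. Each
 * letter in a munger's name describes one argument: 'w' a 32-bit word
 * (zero-extended), 's' a signed 32-bit word (sign-extended), 'l' a
 * 64-bit value occupying two input words.
 */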
#define MT_MAX_ARGS 8
#define MT_INITIAL_VALUE 0xfeedbeef
#define MT_W_VAL (0x00000000feedbeefULL) /* Drop in zeros */
#define MT_S_VAL (0xfffffffffeedbeefULL) /* High bit is 1, so sign-extends as negative */
#define MT_L_VAL (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32)) /* Two back-to-back */

typedef void (*sy_munge_t)(void*);

#define MT_FUNC(x) #x, x
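
/*
 * Worked example: munge_wl consumes 3 input words (one 'w', two for the
 * 'l') and produces 2 expected 64-bit outputs: MT_W_VAL (the word with a
 * zeroed high half) followed by MT_L_VAL (the two words glued together).
 * That is exactly the {MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}}
 * row in the table below.
 */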
struct munger_test {
    const char *mt_name;
    sy_munge_t mt_func;
    uint32_t mt_in_words;
    uint32_t mt_nout;
    uint64_t mt_expected[MT_MAX_ARGS];
} munger_tests[] = {
    {MT_FUNC(munge_w), 1, 1, {MT_W_VAL}},
    {MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}},
    {MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}},
    {MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
    {MT_FUNC(munge_l), 2, 1, {MT_L_VAL}},
    {MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}},
    {MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
    {MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
    {MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}
};
#define MT_TEST_COUNT (sizeof(munger_tests) / sizeof(struct munger_test))
static void
mt_reset(uint32_t in_words, size_t total_size, uint32_t *data)
{
    uint32_t i;

    for (i = 0; i < in_words; i++) {
        data[i] = MT_INITIAL_VALUE;
    }

    if (in_words * sizeof(uint32_t) < total_size) {
        bzero(&data[in_words], total_size - in_words * sizeof(uint32_t));
    }
}
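
/*
 * mt_reset() seeds the first in_words 32-bit slots of the (64-bit) data
 * buffer with MT_INITIAL_VALUE and zeroes the tail, so the buffer looks
 * like a packed 32-bit argument block before the munger expands it in
 * place to 64-bit slots.
 */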
static void
mt_test_mungers()
{
    uint64_t data[MT_MAX_ARGS];
    uint32_t i, j;

    for (i = 0; i < MT_TEST_COUNT; i++) {
        struct munger_test *test = &munger_tests[i];
        boolean_t pass = TRUE;

        T_LOG("Testing %s", test->mt_name);

        mt_reset(test->mt_in_words, sizeof(data), (uint32_t*)data);
        test->mt_func(data);

        for (j = 0; j < test->mt_nout; j++) {
            if (data[j] != test->mt_expected[j]) {
                T_FAIL("Index %d: expected %llx, got %llx.", j, test->mt_expected[j], data[j]);
                pass = FALSE;
            }
        }
        if (pass) {
            T_PASS(test->mt_name);
        }
    }
}
/* Exception Callback Test */
static ex_cb_action_t
excb_test_action(
    ex_cb_class_t       cb_class,
    void                *refcon,
    const ex_cb_state_t *state)
{
    ex_cb_state_t *context = (ex_cb_state_t *)refcon;

    if ((NULL == refcon) || (NULL == state)) {
        return EXCB_ACTION_TEST_FAIL;
    }

    context->far = state->far;

    switch (cb_class) {
    case EXCB_CLASS_TEST1:
        return EXCB_ACTION_RERUN;
    case EXCB_CLASS_TEST2:
        return EXCB_ACTION_NONE;
    default:
        return EXCB_ACTION_TEST_FAIL;
    }
}
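
/*
 * The callback records the faulting address (state->far) into the
 * registration context and steers the handler: EXCB_ACTION_RERUN asks
 * for the faulting instruction to be retried, EXCB_ACTION_NONE leaves
 * handling to the default path. ex_cb_test() below asserts both.
 */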
kern_return_t
ex_cb_test()
{
    const vm_offset_t far1 = 0xdead0001;
    const vm_offset_t far2 = 0xdead0002;
    kern_return_t kr = KERN_SUCCESS;
    ex_cb_state_t test_context_1 = {0xdeadbeef};
    ex_cb_state_t test_context_2 = {0xdeadbeef};
    ex_cb_action_t action;

    T_LOG("Testing Exception Callback.");

    T_LOG("Running registration test.");

    kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
    T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST1 exception callback");
    kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
    T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST2 exception callback");

    kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
    T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST2 exception callback");
    kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
    T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST1 exception callback");

    T_LOG("Running invocation test.");

    action = ex_cb_invoke(EXCB_CLASS_TEST1, far1);
    T_ASSERT(EXCB_ACTION_RERUN == action, NULL);
    T_ASSERT(far1 == test_context_1.far, NULL);

    action = ex_cb_invoke(EXCB_CLASS_TEST2, far2);
    T_ASSERT(EXCB_ACTION_NONE == action, NULL);
    T_ASSERT(far2 == test_context_2.far, NULL);

    action = ex_cb_invoke(EXCB_CLASS_TEST3, 0);
    T_ASSERT(EXCB_ACTION_NONE == action, NULL);

    return KERN_SUCCESS;
}
#if __ARM_PAN_AVAILABLE__
kern_return_t
arm64_pan_test()
{
    vm_offset_t priv_addr = _COMM_PAGE_SIGNATURE;

    T_LOG("Testing PAN.");

    T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);

    pan_exception_level = 0;
    pan_fault_value = 0xDE;
    // convert priv_addr to one that is accessible from user mode
    pan_test_addr = priv_addr + _COMM_HIGH_PAGE64_BASE_ADDRESS -
        _COMM_PAGE_START_ADDRESS;

    // Below should trigger a PAN exception as pan_test_addr is accessible
    // in user mode.
    // The exception handler, upon recognizing the fault address is pan_test_addr,
    // will disable PAN and rerun this instruction successfully
    T_ASSERT(*(char *)pan_test_addr == *(char *)priv_addr, NULL);

    T_ASSERT(pan_exception_level == 2, NULL);

    T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

    T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

    pan_exception_level = 0;
    pan_fault_value = 0xAD;
    pan_ro_addr = (vm_offset_t) &pan_ro_value;

    // Force a permission fault while PAN is disabled to make sure PAN is
    // re-enabled during the exception handler.
    *((volatile uint64_t*)pan_ro_addr) = 0xFEEDFACECAFECAFE;

    T_ASSERT(pan_exception_level == 2, NULL);

    T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

    T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

    pan_test_addr = 0;
    pan_ro_addr = 0;
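
    // Note: the second fault above left PAN disabled (the asserts confirm
    // rsr("pan") == 0), so the test must explicitly re-enable it before
    // returning to normal kernel execution.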
    __builtin_arm_wsr("pan", 1);
    return KERN_SUCCESS;
}
#endif

kern_return_t
arm64_lock_test()
{
    return lt_test_locks();
}