/*
 * Copyright (c) 2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright notice
 * and this permission notice appear in all copies of the software,
 * derivative works or modified versions, and any portions thereof, and that
 * both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.
 * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
 * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_ldebug.h>

#define LOCK_PRIVATE 1

#include <kern/kalloc.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>

#include <tests/xnupost.h>

#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_print.h>

#include <sys/kdebug.h>
#include <sys/munge.h>
#include <machine/cpu_capabilities.h>
#include <arm/cpu_data_internal.h>
kern_return_t arm64_lock_test(void);
kern_return_t arm64_munger_test(void);
kern_return_t ex_cb_test(void);
kern_return_t arm64_pan_test(void);
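/*
 * These entry points are driven by the in-kernel xnupost framework
 * (<tests/xnupost.h>). As a rough sketch of the wiring -- the registration
 * table lives outside this file, and the exact entries here are assumed
 * rather than quoted from it -- a test is typically enrolled as:
 *
 *	XNUPOST_TEST_CONFIG_BASIC(arm64_lock_test),
 *	XNUPOST_TEST_CONFIG_BASIC(arm64_munger_test),
 */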
// exception handler ignores this fault address during PAN test
#if __ARM_PAN_AVAILABLE__
const uint64_t pan_ro_value = 0xFEEDB0B0DEADBEEF;
vm_offset_t pan_test_addr = 0;
vm_offset_t pan_ro_addr = 0;
volatile int pan_exception_level = 0;
volatile char pan_fault_value = 0;
#endif
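/*
 * The PAN test below cooperates with the exception handler through these
 * globals: the handler is expected to recognize faults on pan_test_addr /
 * pan_ro_addr, record the faulting byte in pan_fault_value, and bump
 * pan_exception_level each time it absorbs one of these faults (see the
 * assertions in arm64_pan_test()).
 */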
#include <libkern/OSAtomic.h>
#define LOCK_TEST_ITERATIONS 50
static hw_lock_data_t		lt_hw_lock;
static lck_spin_t		lt_lck_spin_t;
static lck_mtx_t		lt_mtx;
static lck_rw_t			lt_rwlock;
static volatile uint32_t	lt_counter = 0;
static volatile int		lt_spinvolatile;
static volatile uint32_t	lt_max_holders = 0;
static volatile uint32_t	lt_upgrade_holders = 0;
static volatile uint32_t	lt_max_upgrade_holders = 0;
static volatile uint32_t	lt_num_holders = 0;
static volatile uint32_t	lt_done_threads;
static volatile uint32_t	lt_target_done_threads;
static volatile uint32_t	lt_cpu_bind_id = 0;
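/*
 * Harness invariants: each worker thread takes the lock under test
 * LOCK_TEST_ITERATIONS times, incrementing lt_counter inside the critical
 * section, while lt_num_holders / lt_max_holders record how many threads
 * were ever inside simultaneously. The driver spins until lt_done_threads
 * reaches lt_target_done_threads, then checks that lt_counter equals
 * LOCK_TEST_ITERATIONS * lt_target_done_threads and, for exclusive locks,
 * that lt_max_holders never exceeded 1.
 */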
static void
lt_note_another_blocking_lock_holder()
{
	hw_lock_lock(&lt_hw_lock);
	lt_num_holders++;
	lt_max_holders = (lt_max_holders < lt_num_holders) ? lt_num_holders : lt_max_holders;
	hw_lock_unlock(&lt_hw_lock);
}

static void
lt_note_blocking_lock_release()
{
	hw_lock_lock(&lt_hw_lock);
	lt_num_holders--;
	hw_lock_unlock(&lt_hw_lock);
}
static void
lt_spin_a_little_bit()
{
	int i;

	for (i = 0; i < 10000; i++) {
		lt_spinvolatile++;
	}
}
static void
lt_sleep_a_little_bit()
{
	delay(100);
}
static void
lt_grab_mutex()
{
	lck_mtx_lock(&lt_mtx);
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_mtx_unlock(&lt_mtx);
}
static void
lt_grab_mutex_with_try()
{
	while(0 == lck_mtx_try_lock(&lt_mtx));
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_mtx_unlock(&lt_mtx);
}
static void
lt_grab_rw_exclusive()
{
	lck_rw_lock_exclusive(&lt_rwlock);
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_rw_done(&lt_rwlock);
}
static void
lt_grab_rw_exclusive_with_try()
{
	while(0 == lck_rw_try_lock_exclusive(&lt_rwlock)) {
		lt_sleep_a_little_bit();
	}
	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_counter++;
	lt_note_blocking_lock_release();
	lck_rw_done(&lt_rwlock);
}
/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
static void
lt_grab_rw_shared()
{
	lck_rw_lock_shared(&lt_rwlock);
	lt_counter++;

	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_note_blocking_lock_release();

	lck_rw_done(&lt_rwlock);
}
*/
/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
static void
lt_grab_rw_shared_with_try()
{
	while(0 == lck_rw_try_lock_shared(&lt_rwlock));
	lt_counter++;

	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_note_blocking_lock_release();

	lck_rw_done(&lt_rwlock);
}
*/
static void
lt_upgrade_downgrade_rw()
{
	boolean_t upgraded, success;

	success = lck_rw_try_lock_shared(&lt_rwlock);
	if (!success) {
		lck_rw_lock_shared(&lt_rwlock);
	}

	lt_note_another_blocking_lock_holder();
	lt_sleep_a_little_bit();
	lt_note_blocking_lock_release();

	upgraded = lck_rw_lock_shared_to_exclusive(&lt_rwlock);
	if (!upgraded) {
		success = lck_rw_try_lock_exclusive(&lt_rwlock);
		if (!success) {
			lck_rw_lock_exclusive(&lt_rwlock);
		}
	}

	lt_upgrade_holders++;
	if (lt_upgrade_holders > lt_max_upgrade_holders) {
		lt_max_upgrade_holders = lt_upgrade_holders;
	}

	lt_counter++;
	lt_sleep_a_little_bit();

	lt_upgrade_holders--;

	lck_rw_lock_exclusive_to_shared(&lt_rwlock);

	lt_spin_a_little_bit();
	lck_rw_done(&lt_rwlock);
}
const int limit = 1000000;
static int lt_stress_local_counters[MAX_CPUS];

static void
lt_stress_hw_lock()
{
	int local_counter = 0;

	uint cpuid = current_processor()->cpu_id;

	kprintf("%s>cpu %d starting\n", __FUNCTION__, cpuid);

	hw_lock_lock(&lt_hw_lock);
	lt_counter++;
	local_counter++;
	hw_lock_unlock(&lt_hw_lock);

	while (lt_counter < lt_target_done_threads) {
		;
	}

	kprintf("%s>cpu %d started\n", __FUNCTION__, cpuid);

	while (lt_counter < limit) {
		spl_t s = splsched();
		hw_lock_lock(&lt_hw_lock);
		if (lt_counter < limit) {
			lt_counter++;
			local_counter++;
		}
		hw_lock_unlock(&lt_hw_lock);
		splx(s);
	}

	lt_stress_local_counters[cpuid] = local_counter;

	kprintf("%s>final counter %d cpu %d incremented the counter %d times\n", __FUNCTION__, lt_counter, cpuid, local_counter);
}
static void
lt_grab_hw_lock()
{
	hw_lock_lock(&lt_hw_lock);
	lt_counter++;
	lt_spin_a_little_bit();
	hw_lock_unlock(&lt_hw_lock);
}
static void
lt_grab_hw_lock_with_try()
{
	while(0 == hw_lock_try(&lt_hw_lock));
	lt_counter++;
	lt_spin_a_little_bit();
	hw_lock_unlock(&lt_hw_lock);
}
static void
lt_grab_hw_lock_with_to()
{
	while(0 == hw_lock_to(&lt_hw_lock, LockTimeOut))
		mp_enable_preemption();
	lt_counter++;
	lt_spin_a_little_bit();
	hw_lock_unlock(&lt_hw_lock);
}
static void
lt_grab_spin_lock()
{
	lck_spin_lock(&lt_lck_spin_t);
	lt_counter++;
	lt_spin_a_little_bit();
	lck_spin_unlock(&lt_lck_spin_t);
}
static void
lt_grab_spin_lock_with_try()
{
	while(0 == lck_spin_try_lock(&lt_lck_spin_t));
	lt_counter++;
	lt_spin_a_little_bit();
	lck_spin_unlock(&lt_lck_spin_t);
}
static volatile boolean_t lt_thread_lock_grabbed;
static volatile boolean_t lt_thread_lock_success;

static void
lt_reset()
{
	lt_counter = 0;
	lt_max_holders = 0;
	lt_num_holders = 0;
	lt_max_upgrade_holders = 0;
	lt_upgrade_holders = 0;
	lt_done_threads = 0;
	lt_target_done_threads = 0;
	lt_cpu_bind_id = 0;

	OSMemoryBarrier();
}
static void
lt_trylock_hw_lock_with_to()
{
	OSMemoryBarrier();
	while (!lt_thread_lock_grabbed) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	lt_thread_lock_success = hw_lock_to(&lt_hw_lock, 100);
	OSMemoryBarrier();
	mp_enable_preemption();
}
static void
lt_trylock_spin_try_lock()
{
	OSMemoryBarrier();
	while (!lt_thread_lock_grabbed) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	lt_thread_lock_success = lck_spin_try_lock(&lt_lck_spin_t);
	if (lt_thread_lock_success) {
		lck_spin_unlock(&lt_lck_spin_t);
	}
	OSMemoryBarrier();
}
static void
lt_trylock_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void(*)(void))arg;

	func();

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}
static void
lt_start_trylock_thread(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_trylock_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}
static void
lt_wait_for_lock_test_threads()
{
	OSMemoryBarrier();
	/* Spin to reduce dependencies */
	while (lt_done_threads < lt_target_done_threads) {
		lt_sleep_a_little_bit();
		OSMemoryBarrier();
	}
	OSMemoryBarrier();
}
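/*
 * Note that waiting is done by polling with explicit memory barriers rather
 * than by blocking on a synchronization primitive: the primitives themselves
 * are what is under test here, so the harness avoids depending on them.
 */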
static kern_return_t
lt_test_trylocks()
{
	boolean_t success;

	/*
	 * First mtx try lock succeeds, second fails.
	 */
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NOTNULL(success, "First mtx try lock");
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NULL(success, "Second mtx try lock for a locked mtx");
	lck_mtx_unlock(&lt_mtx);

	/*
	 * After regular grab, can't try lock.
	 */
	lck_mtx_lock(&lt_mtx);
	success = lck_mtx_try_lock(&lt_mtx);
	T_ASSERT_NULL(success, "try lock should fail after regular lck_mtx_lock");
	lck_mtx_unlock(&lt_mtx);

	/*
	 * Two shared try locks on a previously unheld rwlock succeed, and a
	 * subsequent exclusive attempt fails.
	 */
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "Two shared try locks on a previously unheld rwlock should succeed");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "exclusive lock attempt on previously held lock should fail");
	lck_rw_done(&lt_rwlock);
	lck_rw_done(&lt_rwlock);

	/*
	 * After regular shared grab, can trylock
	 * for shared but not for exclusive.
	 */
	lck_rw_lock_shared(&lt_rwlock);
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "After regular shared grab another shared try lock should succeed.");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular shared grab an exclusive lock attempt should fail.");
	lck_rw_done(&lt_rwlock);
	lck_rw_done(&lt_rwlock);

	/*
	 * An exclusive try lock succeeds, subsequent shared and exclusive
	 * attempts fail.
	 */
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NOTNULL(success, "An exclusive try lock should succeed");
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NULL(success, "try lock in shared mode attempt after an exclusive grab should fail");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "try lock in exclusive mode attempt after an exclusive grab should fail");
	lck_rw_done(&lt_rwlock);

	/*
	 * After regular exclusive grab, neither kind of trylock succeeds.
	 */
	lck_rw_lock_exclusive(&lt_rwlock);
	success = lck_rw_try_lock_shared(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular exclusive grab, shared trylock should not succeed");
	success = lck_rw_try_lock_exclusive(&lt_rwlock);
	T_ASSERT_NULL(success, "After regular exclusive grab, exclusive trylock should not succeed");
	lck_rw_done(&lt_rwlock);

	/*
	 * First spin lock attempts succeed, second attempts fail.
	 */
	success = hw_lock_try(&lt_hw_lock);
	T_ASSERT_NOTNULL(success, "First spin lock attempts should succeed");
	success = hw_lock_try(&lt_hw_lock);
	T_ASSERT_NULL(success, "Second attempt to spin lock should fail");
	hw_lock_unlock(&lt_hw_lock);

	hw_lock_lock(&lt_hw_lock);
	success = hw_lock_try(&lt_hw_lock);
	T_ASSERT_NULL(success, "After taking spin lock, trylock attempt should fail");
	hw_lock_unlock(&lt_hw_lock);

	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	OSMemoryBarrier();
	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
	success = hw_lock_to(&lt_hw_lock, 100);
	T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed");
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "Second spin lock with timeout should fail and timeout");
	hw_lock_unlock(&lt_hw_lock);

	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	OSMemoryBarrier();
	lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
	hw_lock_lock(&lt_hw_lock);
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "after taking a spin lock, lock attempt with timeout should fail");
	hw_lock_unlock(&lt_hw_lock);

	success = lck_spin_try_lock(&lt_lck_spin_t);
	T_ASSERT_NOTNULL(success, "spin trylock of previously unheld lock should succeed");
	success = lck_spin_try_lock(&lt_lck_spin_t);
	T_ASSERT_NULL(success, "spin trylock attempt of previously held lock (with trylock) should fail");
	lck_spin_unlock(&lt_lck_spin_t);

	lt_reset();
	lt_thread_lock_grabbed = false;
	lt_thread_lock_success = true;
	lt_target_done_threads = 1;
	lt_start_trylock_thread(lt_trylock_spin_try_lock);
	lck_spin_lock(&lt_lck_spin_t);
	OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
	lt_wait_for_lock_test_threads();
	T_ASSERT_NULL(lt_thread_lock_success, "spin trylock attempt of previously held lock should fail");
	lck_spin_unlock(&lt_lck_spin_t);

	return KERN_SUCCESS;
}
static void
lt_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void(*)(void)) arg;
	uint32_t i;

	for (i = 0; i < LOCK_TEST_ITERATIONS; i++) {
		func();
	}

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}
static void
lt_bound_thread(void *arg, wait_result_t wres __unused)
{
	void (*func)(void) = (void(*)(void)) arg;

	int cpuid = OSIncrementAtomic((volatile SInt32 *)&lt_cpu_bind_id);

	processor_t processor = processor_list;
	while ((processor != NULL) && (processor->cpu_id != cpuid)) {
		processor = processor->processor_list;
	}

	if (processor != NULL) {
		thread_bind(processor);
	}

	thread_block(THREAD_CONTINUE_NULL);

	func();

	OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
}
static void
lt_start_lock_thread(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}
static void
lt_start_lock_thread_bound(thread_continue_t func)
{
	thread_t thread;
	kern_return_t kr;

	kr = kernel_thread_start(lt_bound_thread, func, &thread);
	assert(kr == KERN_SUCCESS);

	thread_deallocate(thread);
}
static kern_return_t
lt_test_locks()
{
	kern_return_t kr = KERN_SUCCESS;
	lck_grp_attr_t *lga = lck_grp_attr_alloc_init();
	lck_grp_t *lg = lck_grp_alloc_init("lock test", lga);

	lck_mtx_init(&lt_mtx, lg, LCK_ATTR_NULL);
	lck_rw_init(&lt_rwlock, lg, LCK_ATTR_NULL);
	lck_spin_init(&lt_lck_spin_t, lg, LCK_ATTR_NULL);
	hw_lock_init(&lt_hw_lock);

	T_LOG("Testing locks.");

	/* Try locks (custom) */
	lt_reset();

	T_LOG("Running try lock test.");
	kr = lt_test_trylocks();
	T_EXPECT_NULL(kr, "try lock test failed.");

	/* Uncontended mutex */
	T_LOG("Running uncontended mutex test.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_mutex);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Contended mutex */
	T_LOG("Running contended mutex test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_mutex);
	lt_start_lock_thread(lt_grab_mutex);
	lt_start_lock_thread(lt_grab_mutex);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Contended mutex: try locks */
	T_LOG("Running contended mutex trylock test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_start_lock_thread(lt_grab_mutex_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended exclusive rwlock */
	T_LOG("Running uncontended exclusive rwlock test.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended shared rwlock */
	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	T_LOG("Running uncontended shared rwlock test.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_shared);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	*/

	/* Contended exclusive rwlock */
	T_LOG("Running contended exclusive rwlock test.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* One shared, two exclusive */
	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	T_LOG("Running test with one shared and two exclusive rw lock threads.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_shared);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_start_lock_thread(lt_grab_rw_exclusive);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	*/

	/* Four shared */
	/* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840)
	T_LOG("Running test with four shared holders.");
	lt_reset();
	lt_target_done_threads = 4;
	lt_start_lock_thread(lt_grab_rw_shared);
	lt_start_lock_thread(lt_grab_rw_shared);
	lt_start_lock_thread(lt_grab_rw_shared);
	lt_start_lock_thread(lt_grab_rw_shared);
	lt_wait_for_lock_test_threads();
	T_EXPECT_LE_UINT(lt_max_holders, 4, NULL);
	*/

	/* Three doing upgrades and downgrades */
	T_LOG("Running test with threads upgrading and downgrading.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_start_lock_thread(lt_upgrade_downgrade_rw);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
	T_EXPECT_EQ_UINT(lt_max_upgrade_holders, 1, NULL);

	/* Uncontended - exclusive trylocks */
	T_LOG("Running test with single thread doing exclusive rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Uncontended - shared trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	T_LOG("Running test with single thread doing shared rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 1;
	lt_start_lock_thread(lt_grab_rw_shared_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);
	*/

	/* Three doing exclusive trylocks */
	T_LOG("Running test with threads doing exclusive rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL);

	/* Three doing shared trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	T_LOG("Running test with threads doing shared rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_rw_shared_with_try);
	lt_start_lock_thread(lt_grab_rw_shared_with_try);
	lt_start_lock_thread(lt_grab_rw_shared_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_LE_UINT(lt_max_holders, 3, NULL);
	*/

	/* Three doing various trylocks */
	/* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840)
	T_LOG("Running test with threads doing mixed rwlock trylocks.");
	lt_reset();
	lt_target_done_threads = 4;
	lt_start_lock_thread(lt_grab_rw_shared_with_try);
	lt_start_lock_thread(lt_grab_rw_shared_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_start_lock_thread(lt_grab_rw_exclusive_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);
	T_EXPECT_LE_UINT(lt_max_holders, 2, NULL);
	*/

	/* HW locks */
	T_LOG("Running test with hw_lock_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_start_lock_thread(lt_grab_hw_lock);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* HW locks stress test */
	T_LOG("Running HW locks stress test with hw_lock_lock()");
	extern unsigned int real_ncpus;
	lt_reset();
	lt_target_done_threads = real_ncpus;
	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
		lt_start_lock_thread_bound(lt_stress_hw_lock);
	}
	lt_wait_for_lock_test_threads();
	bool starvation = false;
	uint total_local_count = 0;
	for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) {
		starvation = starvation || (lt_stress_local_counters[processor->cpu_id] < 10);
		total_local_count += lt_stress_local_counters[processor->cpu_id];
	}
	if (total_local_count != lt_counter) {
		T_FAIL("Lock failure\n");
	} else if (starvation) {
		T_FAIL("Lock starvation found\n");
	} else {
		T_PASS("HW locks stress test with hw_lock_lock()");
	}

	/* HW locks: trylocks */
	T_LOG("Running test with hw_lock_try()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_start_lock_thread(lt_grab_hw_lock_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* HW locks: with timeout */
	T_LOG("Running test with hw_lock_to()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_start_lock_thread(lt_grab_hw_lock_with_to);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* Spin locks */
	T_LOG("Running test with lck_spin_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_start_lock_thread(lt_grab_spin_lock);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	/* Spin locks: trylocks */
	T_LOG("Running test with lck_spin_try_lock()");
	lt_reset();
	lt_target_done_threads = 3;
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_start_lock_thread(lt_grab_spin_lock_with_try);
	lt_wait_for_lock_test_threads();
	T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL);

	return KERN_SUCCESS;
}
#define MT_MAX_ARGS 8
#define MT_INITIAL_VALUE 0xfeedbeef
#define MT_W_VAL (0x00000000feedbeefULL) /* Drop in zeros */
#define MT_S_VAL (0xfffffffffeedbeefULL) /* High bit is 1, so sign-extends as negative */
#define MT_L_VAL (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32)) /* Two back-to-back */
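/*
 * Worked example of the expected values: with every input word set to
 * MT_INITIAL_VALUE (0xfeedbeef), a routine such as munge_wl consumes three
 * 32-bit words and must leave two 64-bit values behind. The 'w' zero-extends
 * to MT_W_VAL (0x00000000feedbeef); the 'l' fuses two adjacent words into
 * MT_L_VAL (0xfeedbeeffeedbeef); an 's' would sign-extend to MT_S_VAL
 * (0xfffffffffeedbeef), since the high bit of 0xfeedbeef is set.
 */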
typedef void (*sy_munge_t)(void*);

#define MT_FUNC(x) #x, x
struct munger_test {
	const char	*mt_name;
	sy_munge_t	mt_func;
	uint32_t	mt_in_words;
	uint32_t	mt_nout;
	uint64_t	mt_expected[MT_MAX_ARGS];
} munger_tests[] = {
	{MT_FUNC(munge_w), 1, 1, {MT_W_VAL}},
	{MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}},
	{MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}},
	{MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}},
	{MT_FUNC(munge_l), 2, 1, {MT_L_VAL}},
	{MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}},
	{MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}},
	{MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}},
	{MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}
};
#define MT_TEST_COUNT (sizeof(munger_tests) / sizeof(struct munger_test))
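/*
 * For orientation only: the shipping munge routines live elsewhere in the
 * tree and may be hand-optimized, but a plain-C routine of the shape being
 * verified here would expand the packed 32-bit words in place, working from
 * the last argument backwards so no input word is clobbered before it is
 * read. A hypothetical reference version of munge_wl (assumed little-endian):
 *
 *	void
 *	munge_wl(void *args)
 *	{
 *		uint32_t *in = (uint32_t *)args;
 *		uint64_t *out = (uint64_t *)args;
 *		uint64_t l;
 *
 *		memcpy(&l, &in[1], sizeof(l)); // 'l': two words read as one 64-bit value
 *		out[1] = l;                    // safe: only overwrites words 2-3
 *		out[0] = in[0];                // 'w': zero-extend the leading word
 *	}
 */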
static void
mt_reset(uint32_t in_words, size_t total_size, uint32_t *data)
{
	uint32_t i;

	for (i = 0; i < in_words; i++) {
		data[i] = MT_INITIAL_VALUE;
	}

	if (in_words * sizeof(uint32_t) < total_size) {
		bzero(&data[in_words], total_size - in_words * sizeof(uint32_t));
	}
}
static void
mt_test_mungers()
{
	uint64_t data[MT_MAX_ARGS];
	uint32_t i, j;

	for (i = 0; i < MT_TEST_COUNT; i++) {
		struct munger_test *test = &munger_tests[i];
		int pass = 1;

		T_LOG("Testing %s", test->mt_name);

		mt_reset(test->mt_in_words, sizeof(data), (uint32_t*)data);
		test->mt_func(data);

		for (j = 0; j < test->mt_nout; j++) {
			if (data[j] != test->mt_expected[j]) {
				T_FAIL("Index %d: expected %llx, got %llx.", j, test->mt_expected[j], data[j]);
				pass = 0;
			}
		}
		if (pass) {
			T_PASS(test->mt_name);
		}
	}
}
/* Exception Callback Test */
static ex_cb_action_t
excb_test_action(
	ex_cb_class_t		cb_class,
	void			*refcon,
	const ex_cb_state_t	*state)
{
	ex_cb_state_t *context = (ex_cb_state_t *)refcon;

	if ((NULL == refcon) || (NULL == state)) {
		return EXCB_ACTION_TEST_FAIL;
	}

	context->far = state->far;

	switch (cb_class) {
	case EXCB_CLASS_TEST1:
		return EXCB_ACTION_RERUN;
	case EXCB_CLASS_TEST2:
		return EXCB_ACTION_NONE;
	default:
		return EXCB_ACTION_TEST_FAIL;
	}
}
kern_return_t
ex_cb_test()
{
	const vm_offset_t far1 = 0xdead0001;
	const vm_offset_t far2 = 0xdead0002;
	kern_return_t kr = KERN_SUCCESS;
	ex_cb_state_t test_context_1 = {0xdeadbeef};
	ex_cb_state_t test_context_2 = {0xdeadbeef};
	ex_cb_action_t action;

	T_LOG("Testing Exception Callback.");

	T_LOG("Running registration test.");

	kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
	T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST1 exception callback");
	kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
	T_ASSERT(KERN_SUCCESS == kr, "First registration of TEST2 exception callback");

	kr = ex_cb_register(EXCB_CLASS_TEST2, &excb_test_action, &test_context_2);
	T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST2 exception callback");
	kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1);
	T_ASSERT(KERN_SUCCESS != kr, "Second registration of TEST1 exception callback");

	T_LOG("Running invocation test.");

	action = ex_cb_invoke(EXCB_CLASS_TEST1, far1);
	T_ASSERT(EXCB_ACTION_RERUN == action, NULL);
	T_ASSERT(far1 == test_context_1.far, NULL);

	action = ex_cb_invoke(EXCB_CLASS_TEST2, far2);
	T_ASSERT(EXCB_ACTION_NONE == action, NULL);
	T_ASSERT(far2 == test_context_2.far, NULL);

	action = ex_cb_invoke(EXCB_CLASS_TEST3, 0);
	T_ASSERT(EXCB_ACTION_NONE == action, NULL);

	return KERN_SUCCESS;
}
#if __ARM_PAN_AVAILABLE__
kern_return_t
arm64_pan_test()
{
	vm_offset_t priv_addr = _COMM_PAGE_SIGNATURE;

	T_LOG("Testing PAN.");

	T_ASSERT(__builtin_arm_rsr("pan") != 0, NULL);

	pan_exception_level = 0;
	pan_fault_value = 0xDE;
	// convert priv_addr to one that is accessible from user mode
	pan_test_addr = priv_addr + _COMM_HIGH_PAGE64_BASE_ADDRESS -
	    _COMM_PAGE_START_ADDRESS;

	// Below should trigger a PAN exception as pan_test_addr is accessible
	// in user mode.
	// The exception handler, upon recognizing the fault address is pan_test_addr,
	// will disable PAN and rerun this instruction successfully
	T_ASSERT(*(char *)pan_test_addr == *(char *)priv_addr, NULL);

	T_ASSERT(pan_exception_level == 2, NULL);

	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

	pan_exception_level = 0;
	pan_fault_value = 0xAD;
	pan_ro_addr = (vm_offset_t) &pan_ro_value;

	// Force a permission fault while PAN is disabled to make sure PAN is
	// re-enabled during the exception handler.
	*((volatile uint64_t*)pan_ro_addr) = 0xFEEDFACECAFECAFE;

	T_ASSERT(pan_exception_level == 2, NULL);

	T_ASSERT(__builtin_arm_rsr("pan") == 0, NULL);

	T_ASSERT(pan_fault_value == *(char *)priv_addr, NULL);

	// Stop the exception handler from special-casing these addresses,
	// then turn PAN back on before returning.
	pan_test_addr = 0;
	pan_ro_addr = 0;

	__builtin_arm_wsr("pan", 1);
	return KERN_SUCCESS;
}
#endif /* __ARM_PAN_AVAILABLE__ */
kern_return_t
arm64_lock_test()
{
	return lt_test_locks();
}