#include <mach_ldebug.h>

#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach_debug/lockgroup_info.h>

#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>
#include <libkern/section_keywords.h>
#include <machine/atomic.h>
#include <machine/machine_cpu.h>

#include <sys/kdebug.h>
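/*
 * Performance tests for the lck_mtx_* interfaces: each exported
 * lck_mtx_test_* entry point exercises one lock/unlock variant,
 * either uncontended in a tight loop or contended between two
 * kernel threads, and renders timing statistics into a caller
 * supplied buffer.
 */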
static lck_mtx_t	test_mtx;
static lck_grp_t	test_mtx_grp;
static lck_grp_attr_t	test_mtx_grp_attr;
static lck_attr_t	test_mtx_attr;

static lck_grp_t	test_mtx_stats_grp;
static lck_grp_attr_t	test_mtx_stats_grp_attr;
static lck_attr_t	test_mtx_stats_attr;
/* stats fields reconstructed from their uses below */
struct lck_mtx_test_stats_elem {
	lck_spin_t	lock;
	uint64_t	samples;
	uint64_t	avg;
	uint64_t	tot;
	uint64_t	max;
	uint64_t	min;
};
#define TEST_MTX_LOCK_STATS			0
#define TEST_MTX_TRY_LOCK_STATS			1
#define TEST_MTX_LOCK_SPIN_STATS		2
#define TEST_MTX_LOCK_SPIN_ALWAYS_STATS		3
#define TEST_MTX_TRY_LOCK_SPIN_STATS		4
#define TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS	5
#define TEST_MTX_UNLOCK_MTX_STATS		6
#define TEST_MTX_UNLOCK_SPIN_STATS		7
#define TEST_MTX_MAX_STATS			8
struct lck_mtx_test_stats_elem lck_mtx_test_stats[TEST_MTX_MAX_STATS];
atomic_bool enabled = TRUE;
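/*
 * One stats slot per operation type, each guarded by its own
 * spinlock; the global "enabled" flag lets warmup loops and the
 * pure loop-time tests run without paying for the bookkeeping.
 */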
static void
init_test_mtx_stats(void)
{
	int i;

	lck_grp_attr_setdefault(&test_mtx_stats_grp_attr);
	lck_grp_init(&test_mtx_stats_grp, "testlck_stats_mtx", &test_mtx_stats_grp_attr);
	lck_attr_setdefault(&test_mtx_stats_attr);

	atomic_store(&enabled, TRUE);
	for (i = 0; i < TEST_MTX_MAX_STATS; i++) {
		memset(&lck_mtx_test_stats[i], 0, sizeof(struct lck_mtx_test_stats_elem));
		lck_mtx_test_stats[i].min = ~0;
		lck_spin_init(&lck_mtx_test_stats[i].lock, &test_mtx_stats_grp, &test_mtx_stats_attr);
	}
}
static void
update_test_mtx_stats(
	uint64_t start,
	uint64_t end,
	uint type)
{
	if (atomic_load(&enabled) == TRUE) {
		assert(type < TEST_MTX_MAX_STATS);

		uint64_t elapsed = end - start;
		struct lck_mtx_test_stats_elem* stat = &lck_mtx_test_stats[type];

		lck_spin_lock(&stat->lock);

		stat->samples++;
		stat->tot += elapsed;
		stat->avg = stat->tot / stat->samples;
		if (stat->max < elapsed)
			stat->max = elapsed;
		if (stat->min > elapsed)
			stat->min = elapsed;

		lck_spin_unlock(&stat->lock);
	}
}
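/* Reset one stats slot under its lock; min restarts at ~0 so the first new sample always lowers it. */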
static void
erase_test_mtx_stats(
	uint type)
{
	assert(type < TEST_MTX_MAX_STATS);
	struct lck_mtx_test_stats_elem* stat = &lck_mtx_test_stats[type];

	lck_spin_lock(&stat->lock);

	stat->samples = 0;
	stat->avg = 0;
	stat->tot = 0;
	stat->max = 0;
	stat->min = ~0;

	lck_spin_unlock(&stat->lock);
}
void
erase_all_test_mtx_stats(void)
{
	int i;

	for (i = 0; i < TEST_MTX_MAX_STATS; i++) {
		erase_test_mtx_stats(i);
	}
}
void
disable_all_test_mtx_stats(void)
{
	atomic_store(&enabled, FALSE);
}
void
enable_all_test_mtx_stats(void)
{
	atomic_store(&enabled, TRUE);
}
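/* Append the symbolic name of a stats slot to buffer; returns the number of bytes written. */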
static int
print_test_mtx_stats_string_name(
	int type_num,
	char* buffer,
	int size)
{
	char* type = "";

	switch (type_num) {
	case TEST_MTX_LOCK_STATS:
		type = "TEST_MTX_LOCK_STATS";
		break;
	case TEST_MTX_TRY_LOCK_STATS:
		type = "TEST_MTX_TRY_LOCK_STATS";
		break;
	case TEST_MTX_LOCK_SPIN_STATS:
		type = "TEST_MTX_LOCK_SPIN_STATS";
		break;
	case TEST_MTX_LOCK_SPIN_ALWAYS_STATS:
		type = "TEST_MTX_LOCK_SPIN_ALWAYS_STATS";
		break;
	case TEST_MTX_TRY_LOCK_SPIN_STATS:
		type = "TEST_MTX_TRY_LOCK_SPIN_STATS";
		break;
	case TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS:
		type = "TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS";
		break;
	case TEST_MTX_UNLOCK_MTX_STATS:
		type = "TEST_MTX_UNLOCK_MTX_STATS";
		break;
	case TEST_MTX_UNLOCK_SPIN_STATS:
		type = "TEST_MTX_UNLOCK_SPIN_STATS";
		break;
	default:
		break;
	}

	return snprintf(buffer, size, "%s ", type);
}
static int
get_test_mtx_stats_string(
	char* buffer,
	int size)
{
	int string_off = 0;
	int ret = 0;

	ret = snprintf(&buffer[string_off], size, "\n");
	size -= ret;
	string_off += ret;

	int i;
	for (i = 0; i < TEST_MTX_MAX_STATS; i++) {
		struct lck_mtx_test_stats_elem* stat = &lck_mtx_test_stats[i];

		ret = snprintf(&buffer[string_off], size, "{ ");
		size -= ret;
		string_off += ret;

		lck_spin_lock(&stat->lock);
		uint64_t time;

		ret = snprintf(&buffer[string_off], size, "samples %llu, ", stat->samples);
		size -= ret;
		string_off += ret;

		absolutetime_to_nanoseconds(stat->tot, &time);
		ret = snprintf(&buffer[string_off], size, "tot %llu ns, ", time);
		size -= ret;
		string_off += ret;

		absolutetime_to_nanoseconds(stat->avg, &time);
		ret = snprintf(&buffer[string_off], size, "avg %llu ns, ", time);
		size -= ret;
		string_off += ret;

		absolutetime_to_nanoseconds(stat->max, &time);
		ret = snprintf(&buffer[string_off], size, "max %llu ns, ", time);
		size -= ret;
		string_off += ret;

		absolutetime_to_nanoseconds(stat->min, &time);
		ret = snprintf(&buffer[string_off], size, "min %llu ns", time);
		size -= ret;
		string_off += ret;

		lck_spin_unlock(&stat->lock);

		ret = snprintf(&buffer[string_off], size, " } ");
		size -= ret;
		string_off += ret;

		ret = print_test_mtx_stats_string_name(i, &buffer[string_off], size);
		size -= ret;
		string_off += ret;

		ret = snprintf(&buffer[string_off], size, "\n");
		size -= ret;
		string_off += ret;
	}

	return string_off;
}
void
lck_mtx_test_init(void)
{
	static int first = 0;

	/*
	 * This should be substituted with a version
	 * of dispatch_once for kernel (rdar:39537874)
	 */
	if (os_atomic_load(&first, acquire) >= 2)
		return;

	if (os_atomic_cmpxchg(&first, 0, 1, relaxed)) {
		lck_grp_attr_setdefault(&test_mtx_grp_attr);
		lck_grp_init(&test_mtx_grp, "testlck_mtx", &test_mtx_grp_attr);
		lck_attr_setdefault(&test_mtx_attr);
		lck_mtx_init(&test_mtx, &test_mtx_grp, &test_mtx_attr);

		init_test_mtx_stats();

		os_atomic_inc(&first, release);
	}

	while (os_atomic_load(&first, acquire) < 2);
}
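/*
 * Each lck_mtx_test_* wrapper below brackets a single lock or
 * unlock call with mach_absolute_time() and records the elapsed
 * time in the matching stats slot.
 */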
void
lck_mtx_test_lock(void)
{
	uint64_t start;

	start = mach_absolute_time();

	lck_mtx_lock(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_LOCK_STATS);
}

static void
lck_mtx_test_try_lock(void)
{
	uint64_t start;

	start = mach_absolute_time();

	lck_mtx_try_lock(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_TRY_LOCK_STATS);
}

static void
lck_mtx_test_lock_spin(void)
{
	uint64_t start;

	start = mach_absolute_time();

	lck_mtx_lock_spin(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_LOCK_SPIN_STATS);
}

static void
lck_mtx_test_lock_spin_always(void)
{
	uint64_t start;

	start = mach_absolute_time();

	lck_mtx_lock_spin_always(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_LOCK_SPIN_ALWAYS_STATS);
}

static void
lck_mtx_test_try_lock_spin(void)
{
	uint64_t start;

	start = mach_absolute_time();

	lck_mtx_try_lock_spin(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_TRY_LOCK_SPIN_STATS);
}

static void
lck_mtx_test_try_lock_spin_always(void)
{
	uint64_t start;

	start = mach_absolute_time();

	lck_mtx_try_lock_spin_always(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS);
}

void
lck_mtx_test_unlock(void)
{
	uint64_t start;

	start = mach_absolute_time();

	lck_mtx_unlock(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_UNLOCK_MTX_STATS);
}

static void
lck_mtx_test_unlock_mtx(void)
{
	uint64_t start;

	start = mach_absolute_time();

	lck_mtx_unlock(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_UNLOCK_MTX_STATS);
}

static void
lck_mtx_test_unlock_spin(void)
{
	uint64_t start;

	start = mach_absolute_time();

	lck_mtx_unlock(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_UNLOCK_SPIN_STATS);
}
#define WARMUP_ITER 1000
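/*
 * Each measured loop is preceded by WARMUP_ITER untimed
 * iterations so the lock paths are hot before sampling starts.
 */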
int
lck_mtx_test_mtx_uncontended_loop_time(
	int iter, char *buffer, int size)
{
	uint64_t tot_time[TEST_MTX_MAX_STATS];
	uint64_t run_time[TEST_MTX_MAX_STATS];
	uint64_t start;
	uint64_t start_run;

	int i;

	//warming up the test
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_lock(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	start_run = thread_get_runtime_self();
	start = mach_absolute_time();

	for (i = 0; i < iter; i++) {
		lck_mtx_lock(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	absolutetime_to_nanoseconds(mach_absolute_time() - start, &tot_time[TEST_MTX_LOCK_STATS]);
	absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run, &run_time[TEST_MTX_LOCK_STATS]);

	//warming up the test
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_try_lock(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	start_run = thread_get_runtime_self();
	start = mach_absolute_time();

	for (i = 0; i < iter; i++) {
		lck_mtx_try_lock(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	absolutetime_to_nanoseconds(mach_absolute_time() - start, &tot_time[TEST_MTX_TRY_LOCK_STATS]);
	absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run, &run_time[TEST_MTX_TRY_LOCK_STATS]);

	//warming up the test
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_lock_spin(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	start_run = thread_get_runtime_self();
	start = mach_absolute_time();

	for (i = 0; i < iter; i++) {
		lck_mtx_lock_spin(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	absolutetime_to_nanoseconds(mach_absolute_time() - start, &tot_time[TEST_MTX_LOCK_SPIN_STATS]);
	absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run, &run_time[TEST_MTX_LOCK_SPIN_STATS]);

	//warming up the test
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_lock_spin_always(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	start_run = thread_get_runtime_self();
	start = mach_absolute_time();

	for (i = 0; i < iter; i++) {
		lck_mtx_lock_spin_always(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	absolutetime_to_nanoseconds(mach_absolute_time() - start, &tot_time[TEST_MTX_LOCK_SPIN_ALWAYS_STATS]);
	absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run, &run_time[TEST_MTX_LOCK_SPIN_ALWAYS_STATS]);

	//warming up the test
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_try_lock_spin(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	start_run = thread_get_runtime_self();
	start = mach_absolute_time();

	for (i = 0; i < iter; i++) {
		lck_mtx_try_lock_spin(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	absolutetime_to_nanoseconds(mach_absolute_time() - start, &tot_time[TEST_MTX_TRY_LOCK_SPIN_STATS]);
	absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run, &run_time[TEST_MTX_TRY_LOCK_SPIN_STATS]);

	//warming up the test
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_try_lock_spin_always(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	start_run = thread_get_runtime_self();
	start = mach_absolute_time();

	for (i = 0; i < iter; i++) {
		lck_mtx_try_lock_spin_always(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	absolutetime_to_nanoseconds(mach_absolute_time() - start, &tot_time[TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS]);
	absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run, &run_time[TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS]);

	int string_off = 0;
	int ret = 0;

	ret = snprintf(&buffer[string_off], size, "\n");
	size -= ret;
	string_off += ret;

	/* the loop stops before the two unlock slots, which are not timed here */
	for (i = 0; i < TEST_MTX_MAX_STATS - 2; i++) {
		ret = snprintf(&buffer[string_off], size, "total time %llu ns total run time %llu ns ", tot_time[i], run_time[i]);
		size -= ret;
		string_off += ret;

		ret = print_test_mtx_stats_string_name(i, &buffer[string_off], size);
		size -= ret;
		string_off += ret;

		ret = snprintf(&buffer[string_off], size, "\n");
		size -= ret;
		string_off += ret;
	}

	return string_off;
}
static void
lck_mtx_test_mtx_lock_uncontended(
	int iter)
{
	int i;

	disable_all_test_mtx_stats();

	//warming up the test for lock
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_test_lock();
		lck_mtx_test_unlock_mtx();
	}

	enable_all_test_mtx_stats();

	for (i = 0; i < iter; i++) {
		lck_mtx_test_lock();
		lck_mtx_test_unlock_mtx();
	}

	disable_all_test_mtx_stats();

	//warming up the test for try_lock
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_test_try_lock();
		lck_mtx_test_unlock_mtx();
	}

	enable_all_test_mtx_stats();

	for (i = 0; i < iter; i++) {
		lck_mtx_test_try_lock();
		lck_mtx_test_unlock_mtx();
	}
}
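/* Same pattern for the spin variants; a spin-held mutex is released through lck_mtx_test_unlock_spin(). */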
static void
lck_mtx_test_mtx_spin_uncontended(
	int iter)
{
	int i;

	disable_all_test_mtx_stats();

	//warming up the test for lock_spin
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_test_lock_spin();
		lck_mtx_test_unlock_spin();
	}

	enable_all_test_mtx_stats();

	for (i = 0; i < iter; i++) {
		lck_mtx_test_lock_spin();
		lck_mtx_test_unlock_spin();
	}

	disable_all_test_mtx_stats();

	//warming up the test for try_lock_spin
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_test_try_lock_spin();
		lck_mtx_test_unlock_spin();
	}

	enable_all_test_mtx_stats();

	for (i = 0; i < iter; i++) {
		lck_mtx_test_try_lock_spin();
		lck_mtx_test_unlock_spin();
	}

	disable_all_test_mtx_stats();

	//warming up the test for lock_spin_always
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_test_lock_spin_always();
		lck_mtx_test_unlock_spin();
	}

	enable_all_test_mtx_stats();

	for (i = 0; i < iter; i++) {
		lck_mtx_test_lock_spin_always();
		lck_mtx_test_unlock_spin();
	}

	disable_all_test_mtx_stats();

	//warming up the test for try_lock_spin_always
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_test_try_lock_spin_always();
		lck_mtx_test_unlock_spin();
	}

	enable_all_test_mtx_stats();

	for (i = 0; i < iter; i++) {
		lck_mtx_test_try_lock_spin_always();
		lck_mtx_test_unlock_spin();
	}
}
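/* Public entry point: reset the stats, run both uncontended suites, and render the results. */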
int
lck_mtx_test_mtx_uncontended(
	int iter,
	char *buffer,
	int size)
{
	erase_all_test_mtx_stats();
	lck_mtx_test_mtx_lock_uncontended(iter);
	lck_mtx_test_mtx_spin_uncontended(iter);

	return get_test_mtx_stats_string(buffer, size);
}
static int synch;
static int wait_barrier;
static int iterations;

static uint64_t start_loop_time;
static uint64_t start_loop_time_run;
static uint64_t end_loop_time;
static uint64_t end_loop_time_run;
struct lck_mtx_thread_arg {
	int		my_locked;
	int*		other_locked;
	thread_t	other_thread;
};
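/*
 * Contended test protocol: the two threads alternate on test_mtx.
 * After taking the lock each thread publishes my_locked, waits for
 * the peer to go off-core (TH_RUN clear) so the next acquisition
 * really contends, then unlocks and spins until the peer reports
 * it has taken the lock.
 */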
static void
test_mtx_lock_unlock_contended_thread(
	void *arg,
	__unused wait_result_t wr)
{
	int i, val;
	struct lck_mtx_thread_arg *info = (struct lck_mtx_thread_arg *) arg;
	thread_t other_thread;
	int* my_locked;
	int* other_locked;

	printf("Starting thread %p\n", current_thread());

	while (os_atomic_load(&info->other_thread, acquire) == NULL);
	other_thread = info->other_thread;

	printf("Other thread %p\n", other_thread);

	my_locked = &info->my_locked;
	other_locked = info->other_locked;

	val = os_atomic_inc(&synch, relaxed);
	while (os_atomic_load(&synch, relaxed) < 2);

	//warming up the test
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_test_lock();

		os_atomic_xchg(my_locked, 1, relaxed);
		if (i != WARMUP_ITER - 1) {
			while (os_atomic_load(&other_thread->state, relaxed) & TH_RUN);
			os_atomic_xchg(my_locked, 0, relaxed);
		}

		lck_mtx_test_unlock();

		if (i != WARMUP_ITER - 1)
			while (os_atomic_load(other_locked, relaxed) == 0);
	}

	printf("warmup done %p\n", current_thread());
	os_atomic_inc(&synch, relaxed);
	while (os_atomic_load(&synch, relaxed) < 4);

	/* erase the warmup samples; only the first thread needs to do it */
	if (val == 1)
		erase_all_test_mtx_stats();

	/*
	 * synch the threads so they start
	 * measuring at the same time
	 */
	os_atomic_inc(&synch, relaxed);
	while (os_atomic_load(&synch, relaxed) < 6);

	for (i = 0; i < iterations; i++) {
		lck_mtx_test_lock();

		os_atomic_xchg(my_locked, 1, relaxed);
		if (i != iterations - 1) {
			while (os_atomic_load(&other_thread->state, relaxed) & TH_RUN);
			os_atomic_xchg(my_locked, 0, relaxed);
		}

		lck_mtx_test_unlock_mtx();

		if (i != iterations - 1)
			while (os_atomic_load(other_locked, relaxed) == 0);
	}

	os_atomic_inc(&wait_barrier, relaxed);
	thread_wakeup((event_t) &wait_barrier);
	thread_terminate_self();
}
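/*
 * Spawn the two contending threads, point each one at the other,
 * then sleep on wait_barrier until both have finished their loops.
 */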
int
lck_mtx_test_mtx_contended(
	int iter,
	char* buffer,
	int buffer_size)
{
	thread_t thread1, thread2;
	kern_return_t result;
	struct lck_mtx_thread_arg targs[2] = {};
	synch = 0;
	wait_barrier = 0;
	iterations = iter;

	erase_all_test_mtx_stats();

	targs[0].other_thread = NULL;
	targs[1].other_thread = NULL;

	result = kernel_thread_start((thread_continue_t)test_mtx_lock_unlock_contended_thread, &targs[0], &thread1);
	if (result != KERN_SUCCESS) {
		return 0;
	}

	result = kernel_thread_start((thread_continue_t)test_mtx_lock_unlock_contended_thread, &targs[1], &thread2);
	if (result != KERN_SUCCESS) {
		thread_deallocate(thread1);
		return 0;
	}

	/* these are t1 args */
	targs[0].my_locked = 0;
	targs[0].other_locked = &targs[1].my_locked;

	os_atomic_xchg(&targs[0].other_thread, thread2, release);

	/* these are t2 args */
	targs[1].my_locked = 0;
	targs[1].other_locked = &targs[0].my_locked;

	os_atomic_xchg(&targs[1].other_thread, thread1, release);

	while (os_atomic_load(&wait_barrier, relaxed) != 2) {
		assert_wait((event_t) &wait_barrier, THREAD_UNINT);
		if (os_atomic_load(&wait_barrier, relaxed) != 2) {
			(void) thread_block(THREAD_CONTINUE_NULL);
		} else {
			clear_wait(current_thread(), THREAD_AWAKENED);
		}
	}

	thread_deallocate(thread1);
	thread_deallocate(thread2);

	return get_test_mtx_stats_string(buffer, buffer_size);
}
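/*
 * Loop-time variant of the contended test: instead of per-operation
 * stats it times the whole handoff loop, both wall time and per
 * thread run time.
 */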
static void
test_mtx_lck_unlock_contended_loop_time_thread(
	void *arg,
	__unused wait_result_t wr)
{
	int i, val;
	struct lck_mtx_thread_arg *info = (struct lck_mtx_thread_arg *) arg;
	thread_t other_thread;
	int* my_locked;
	int* other_locked;

	printf("Starting thread %p\n", current_thread());

	while (os_atomic_load(&info->other_thread, acquire) == NULL);
	other_thread = info->other_thread;

	printf("Other thread %p\n", other_thread);

	my_locked = &info->my_locked;
	other_locked = info->other_locked;

	val = os_atomic_inc(&synch, relaxed);
	while (os_atomic_load(&synch, relaxed) < 2);

	//warming up the test
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_lock(&test_mtx);

		os_atomic_xchg(my_locked, 1, relaxed);
		if (i != WARMUP_ITER - 1) {
			while (os_atomic_load(&other_thread->state, relaxed) & TH_RUN);
			os_atomic_xchg(my_locked, 0, relaxed);
		}

		lck_mtx_unlock(&test_mtx);

		if (i != WARMUP_ITER - 1)
			while (os_atomic_load(other_locked, relaxed) == 0);
	}

	printf("warmup done %p\n", current_thread());

	os_atomic_inc(&synch, relaxed);
	while (os_atomic_load(&synch, relaxed) < 4);

	/*
	 * synch the threads so they start
	 * measuring at the same time
	 */
	os_atomic_inc(&synch, relaxed);
	while (os_atomic_load(&synch, relaxed) < 6);

	start_loop_time_run = thread_get_runtime_self();
	start_loop_time = mach_absolute_time();

	for (i = 0; i < iterations; i++) {
		lck_mtx_lock(&test_mtx);

		os_atomic_xchg(my_locked, 1, relaxed);
		if (i != iterations - 1) {
			while (os_atomic_load(&other_thread->state, relaxed) & TH_RUN);
			os_atomic_xchg(my_locked, 0, relaxed);
		}

		lck_mtx_unlock(&test_mtx);

		if (i != iterations - 1)
			while (os_atomic_load(other_locked, relaxed) == 0);
	}

	end_loop_time = mach_absolute_time();
	end_loop_time_run = thread_get_runtime_self();

	os_atomic_inc(&wait_barrier, relaxed);
	thread_wakeup((event_t) &wait_barrier);
	thread_terminate_self();
}
int
lck_mtx_test_mtx_contended_loop_time(
	int iter,
	char *buffer,
	int buffer_size)
{
	thread_t thread1, thread2;
	kern_return_t result;
	int ret;
	struct lck_mtx_thread_arg targs[2] = {};
	synch = 0;
	wait_barrier = 0;
	iterations = iter;
	uint64_t time, time_run;

	targs[0].other_thread = NULL;
	targs[1].other_thread = NULL;

	result = kernel_thread_start((thread_continue_t)test_mtx_lck_unlock_contended_loop_time_thread, &targs[0], &thread1);
	if (result != KERN_SUCCESS) {
		return 0;
	}

	result = kernel_thread_start((thread_continue_t)test_mtx_lck_unlock_contended_loop_time_thread, &targs[1], &thread2);
	if (result != KERN_SUCCESS) {
		thread_deallocate(thread1);
		return 0;
	}

	/* these are t1 args */
	targs[0].my_locked = 0;
	targs[0].other_locked = &targs[1].my_locked;

	os_atomic_xchg(&targs[0].other_thread, thread2, release);

	/* these are t2 args */
	targs[1].my_locked = 0;
	targs[1].other_locked = &targs[0].my_locked;

	os_atomic_xchg(&targs[1].other_thread, thread1, release);

	while (os_atomic_load(&wait_barrier, acquire) != 2) {
		assert_wait((event_t) &wait_barrier, THREAD_UNINT);
		if (os_atomic_load(&wait_barrier, acquire) != 2) {
			(void) thread_block(THREAD_CONTINUE_NULL);
		} else {
			clear_wait(current_thread(), THREAD_AWAKENED);
		}
	}

	thread_deallocate(thread1);
	thread_deallocate(thread2);

	absolutetime_to_nanoseconds(end_loop_time - start_loop_time, &time);
	absolutetime_to_nanoseconds(end_loop_time_run - start_loop_time_run, &time_run);

	ret = snprintf(buffer, buffer_size, "\n");
	ret += snprintf(&buffer[ret], buffer_size - ret, "total time %llu ns total run time %llu ns ", time, time_run);
	ret += print_test_mtx_stats_string_name(TEST_MTX_LOCK_STATS, &buffer[ret], buffer_size - ret);
	ret += snprintf(&buffer[ret], buffer_size - ret, "\n");

	return ret;
}