1 #include <mach_ldebug.h>
4 #include <mach/kern_return.h>
5 #include <mach/mach_host_server.h>
6 #include <mach_debug/lockgroup_info.h>
8 #include <kern/locks.h>
9 #include <kern/misc_protos.h>
10 #include <kern/kalloc.h>
11 #include <kern/thread.h>
12 #include <kern/processor.h>
13 #include <kern/sched_prim.h>
14 #include <kern/debug.h>
15 #include <libkern/section_keywords.h>
16 #include <machine/atomic.h>
17 #include <machine/machine_cpu.h>
18 #include <machine/atomic.h>
20 #include <kern/kalloc.h>
22 #include <sys/kdebug.h>
24 static lck_mtx_t test_mtx
;
25 static lck_grp_t test_mtx_grp
;
26 static lck_grp_attr_t test_mtx_grp_attr
;
27 static lck_attr_t test_mtx_attr
;
29 static lck_grp_t test_mtx_stats_grp
;
30 static lck_grp_attr_t test_mtx_stats_grp_attr
;
31 static lck_attr_t test_mtx_stats_attr
;
33 struct lck_mtx_test_stats_elem
{
42 #define TEST_MTX_LOCK_STATS 0
43 #define TEST_MTX_TRY_LOCK_STATS 1
44 #define TEST_MTX_LOCK_SPIN_STATS 2
45 #define TEST_MTX_LOCK_SPIN_ALWAYS_STATS 3
46 #define TEST_MTX_TRY_LOCK_SPIN_STATS 4
47 #define TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS 5
48 #define TEST_MTX_UNLOCK_MTX_STATS 6
49 #define TEST_MTX_UNLOCK_SPIN_STATS 7
50 #define TEST_MTX_MAX_STATS 8
52 struct lck_mtx_test_stats_elem lck_mtx_test_stats
[TEST_MTX_MAX_STATS
];
53 atomic_bool enabled
= TRUE
;
56 init_test_mtx_stats(void)
60 lck_grp_attr_setdefault(&test_mtx_stats_grp_attr
);
61 lck_grp_init(&test_mtx_stats_grp
, "testlck_stats_mtx", &test_mtx_stats_grp_attr
);
62 lck_attr_setdefault(&test_mtx_stats_attr
);
64 atomic_store(&enabled
, TRUE
);
65 for (i
= 0; i
< TEST_MTX_MAX_STATS
; i
++) {
66 memset(&lck_mtx_test_stats
[i
], 0, sizeof(struct lck_mtx_test_stats_elem
));
67 lck_mtx_test_stats
[i
].min
= ~0;
68 lck_spin_init(&lck_mtx_test_stats
[i
].lock
, &test_mtx_stats_grp
, &test_mtx_stats_attr
);
73 update_test_mtx_stats(
78 if (atomic_load(&enabled
) == TRUE
) {
79 assert(type
< TEST_MTX_MAX_STATS
);
82 uint64_t elapsed
= end
- start
;
83 struct lck_mtx_test_stats_elem
* stat
= &lck_mtx_test_stats
[type
];
85 lck_spin_lock(&stat
->lock
);
89 stat
->avg
= stat
->tot
/ stat
->samples
;
90 if (stat
->max
< elapsed
) {
93 if (stat
->min
> elapsed
) {
96 lck_spin_unlock(&stat
->lock
);
101 erase_test_mtx_stats(
104 assert(type
< TEST_MTX_MAX_STATS
);
105 struct lck_mtx_test_stats_elem
* stat
= &lck_mtx_test_stats
[type
];
107 lck_spin_lock(&stat
->lock
);
115 lck_spin_unlock(&stat
->lock
);
119 erase_all_test_mtx_stats(void)
122 for (i
= 0; i
< TEST_MTX_MAX_STATS
; i
++) {
123 erase_test_mtx_stats(i
);
128 disable_all_test_mtx_stats(void)
130 atomic_store(&enabled
, FALSE
);
134 enable_all_test_mtx_stats(void)
136 atomic_store(&enabled
, TRUE
);
140 print_test_mtx_stats_string_name(
147 case TEST_MTX_LOCK_STATS
:
148 type
= "TEST_MTX_LOCK_STATS";
150 case TEST_MTX_TRY_LOCK_STATS
:
151 type
= "TEST_MTX_TRY_LOCK_STATS";
153 case TEST_MTX_LOCK_SPIN_STATS
:
154 type
= "TEST_MTX_LOCK_SPIN_STATS";
156 case TEST_MTX_LOCK_SPIN_ALWAYS_STATS
:
157 type
= "TEST_MTX_LOCK_SPIN_ALWAYS_STATS";
159 case TEST_MTX_TRY_LOCK_SPIN_STATS
:
160 type
= "TEST_MTX_TRY_LOCK_SPIN_STATS";
162 case TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS
:
163 type
= "TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS";
165 case TEST_MTX_UNLOCK_MTX_STATS
:
166 type
= "TEST_MTX_UNLOCK_MTX_STATS";
168 case TEST_MTX_UNLOCK_SPIN_STATS
:
169 type
= "TEST_MTX_UNLOCK_SPIN_STATS";
175 return snprintf(buffer
, size
, "%s ", type
);
179 get_test_mtx_stats_string(
186 ret
= snprintf(&buffer
[string_off
], size
, "\n");
191 for (i
= 0; i
< TEST_MTX_MAX_STATS
; i
++) {
192 struct lck_mtx_test_stats_elem
* stat
= &lck_mtx_test_stats
[i
];
194 ret
= snprintf(&buffer
[string_off
], size
, "{ ");
198 lck_spin_lock(&stat
->lock
);
201 ret
= snprintf(&buffer
[string_off
], size
, "samples %llu, ", stat
->samples
);
205 absolutetime_to_nanoseconds(stat
->tot
, &time
);
206 ret
= snprintf(&buffer
[string_off
], size
, "tot %llu ns, ", time
);
210 absolutetime_to_nanoseconds(stat
->avg
, &time
);
211 ret
= snprintf(&buffer
[string_off
], size
, "avg %llu ns, ", time
);
215 absolutetime_to_nanoseconds(stat
->max
, &time
);
216 ret
= snprintf(&buffer
[string_off
], size
, "max %llu ns, ", time
);
220 absolutetime_to_nanoseconds(stat
->min
, &time
);
221 ret
= snprintf(&buffer
[string_off
], size
, "min %llu ns", time
);
225 lck_spin_unlock(&stat
->lock
);
227 ret
= snprintf(&buffer
[string_off
], size
, " } ");
231 ret
= print_test_mtx_stats_string_name(i
, &buffer
[string_off
], size
);
235 ret
= snprintf(&buffer
[string_off
], size
, "\n");
244 lck_mtx_test_init(void)
246 static int first
= 0;
249 * This should be substituted with a version
250 * of dispatch_once for kernel (rdar:39537874)
252 if (os_atomic_load(&first
, acquire
) >= 2) {
256 if (os_atomic_cmpxchg(&first
, 0, 1, relaxed
)) {
257 lck_grp_attr_setdefault(&test_mtx_grp_attr
);
258 lck_grp_init(&test_mtx_grp
, "testlck_mtx", &test_mtx_grp_attr
);
259 lck_attr_setdefault(&test_mtx_attr
);
260 lck_mtx_init(&test_mtx
, &test_mtx_grp
, &test_mtx_attr
);
262 init_test_mtx_stats();
264 os_atomic_inc(&first
, release
);
267 while (os_atomic_load(&first
, acquire
) < 2) {
273 lck_mtx_test_lock(void)
277 start
= mach_absolute_time();
279 lck_mtx_lock(&test_mtx
);
281 update_test_mtx_stats(start
, mach_absolute_time(), TEST_MTX_LOCK_STATS
);
285 lck_mtx_test_try_lock(void)
289 start
= mach_absolute_time();
291 lck_mtx_try_lock(&test_mtx
);
293 update_test_mtx_stats(start
, mach_absolute_time(), TEST_MTX_TRY_LOCK_STATS
);
297 lck_mtx_test_lock_spin(void)
301 start
= mach_absolute_time();
303 lck_mtx_lock_spin(&test_mtx
);
305 update_test_mtx_stats(start
, mach_absolute_time(), TEST_MTX_LOCK_SPIN_STATS
);
309 lck_mtx_test_lock_spin_always(void)
313 start
= mach_absolute_time();
315 lck_mtx_lock_spin_always(&test_mtx
);
317 update_test_mtx_stats(start
, mach_absolute_time(), TEST_MTX_LOCK_SPIN_ALWAYS_STATS
);
321 lck_mtx_test_try_lock_spin(void)
325 start
= mach_absolute_time();
327 lck_mtx_try_lock_spin(&test_mtx
);
329 update_test_mtx_stats(start
, mach_absolute_time(), TEST_MTX_TRY_LOCK_SPIN_STATS
);
333 lck_mtx_test_try_lock_spin_always(void)
337 start
= mach_absolute_time();
339 lck_mtx_try_lock_spin_always(&test_mtx
);
341 update_test_mtx_stats(start
, mach_absolute_time(), TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS
);
345 lck_mtx_test_unlock(void)
349 start
= mach_absolute_time();
351 lck_mtx_unlock(&test_mtx
);
353 update_test_mtx_stats(start
, mach_absolute_time(), TEST_MTX_UNLOCK_MTX_STATS
);
357 lck_mtx_test_unlock_mtx(void)
361 start
= mach_absolute_time();
363 lck_mtx_unlock(&test_mtx
);
365 update_test_mtx_stats(start
, mach_absolute_time(), TEST_MTX_UNLOCK_MTX_STATS
);
369 lck_mtx_test_unlock_spin(void)
373 start
= mach_absolute_time();
375 lck_mtx_unlock(&test_mtx
);
377 update_test_mtx_stats(start
, mach_absolute_time(), TEST_MTX_UNLOCK_SPIN_STATS
);
380 #define WARMUP_ITER 1000
383 lck_mtx_test_mtx_uncontended_loop_time(
384 int iter
, char *buffer
, int size
)
387 uint64_t tot_time
[TEST_MTX_MAX_STATS
];
388 uint64_t run_time
[TEST_MTX_MAX_STATS
];
392 //warming up the test
393 for (i
= 0; i
< WARMUP_ITER
; i
++) {
394 lck_mtx_lock(&test_mtx
);
395 lck_mtx_unlock(&test_mtx
);
398 start_run
= thread_get_runtime_self();
399 start
= mach_absolute_time();
401 for (i
= 0; i
< iter
; i
++) {
402 lck_mtx_lock(&test_mtx
);
403 lck_mtx_unlock(&test_mtx
);
406 absolutetime_to_nanoseconds(mach_absolute_time() - start
, &tot_time
[TEST_MTX_LOCK_STATS
]);
407 absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run
, &run_time
[TEST_MTX_LOCK_STATS
]);
409 //warming up the test
410 for (i
= 0; i
< WARMUP_ITER
; i
++) {
411 lck_mtx_try_lock(&test_mtx
);
412 lck_mtx_unlock(&test_mtx
);
415 start_run
= thread_get_runtime_self();
416 start
= mach_absolute_time();
418 for (i
= 0; i
< iter
; i
++) {
419 lck_mtx_try_lock(&test_mtx
);
420 lck_mtx_unlock(&test_mtx
);
423 absolutetime_to_nanoseconds(mach_absolute_time() - start
, &tot_time
[TEST_MTX_TRY_LOCK_STATS
]);
424 absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run
, &run_time
[TEST_MTX_TRY_LOCK_STATS
]);
426 //warming up the test
427 for (i
= 0; i
< WARMUP_ITER
; i
++) {
428 lck_mtx_lock_spin(&test_mtx
);
429 lck_mtx_unlock(&test_mtx
);
432 start_run
= thread_get_runtime_self();
433 start
= mach_absolute_time();
435 for (i
= 0; i
< iter
; i
++) {
436 lck_mtx_lock_spin(&test_mtx
);
437 lck_mtx_unlock(&test_mtx
);
440 absolutetime_to_nanoseconds(mach_absolute_time() - start
, &tot_time
[TEST_MTX_LOCK_SPIN_STATS
]);
441 absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run
, &run_time
[TEST_MTX_LOCK_SPIN_STATS
]);
443 //warming up the test
444 for (i
= 0; i
< WARMUP_ITER
; i
++) {
445 lck_mtx_lock_spin_always(&test_mtx
);
446 lck_mtx_unlock(&test_mtx
);
449 start_run
= thread_get_runtime_self();
450 start
= mach_absolute_time();
452 for (i
= 0; i
< iter
; i
++) {
453 lck_mtx_lock_spin_always(&test_mtx
);
454 lck_mtx_unlock(&test_mtx
);
457 absolutetime_to_nanoseconds(mach_absolute_time() - start
, &tot_time
[TEST_MTX_LOCK_SPIN_ALWAYS_STATS
]);
458 absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run
, &run_time
[TEST_MTX_LOCK_SPIN_ALWAYS_STATS
]);
460 //warming up the test
461 for (i
= 0; i
< WARMUP_ITER
; i
++) {
462 lck_mtx_try_lock_spin(&test_mtx
);
463 lck_mtx_unlock(&test_mtx
);
466 start_run
= thread_get_runtime_self();
467 start
= mach_absolute_time();
469 for (i
= 0; i
< iter
; i
++) {
470 lck_mtx_try_lock_spin(&test_mtx
);
471 lck_mtx_unlock(&test_mtx
);
474 absolutetime_to_nanoseconds(mach_absolute_time() - start
, &tot_time
[TEST_MTX_TRY_LOCK_SPIN_STATS
]);
475 absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run
, &run_time
[TEST_MTX_TRY_LOCK_SPIN_STATS
]);
477 //warming up the test
478 for (i
= 0; i
< WARMUP_ITER
; i
++) {
479 lck_mtx_try_lock_spin_always(&test_mtx
);
480 lck_mtx_unlock(&test_mtx
);
483 start_run
= thread_get_runtime_self();
484 start
= mach_absolute_time();
486 for (i
= 0; i
< iter
; i
++) {
487 lck_mtx_try_lock_spin_always(&test_mtx
);
488 lck_mtx_unlock(&test_mtx
);
491 absolutetime_to_nanoseconds(mach_absolute_time() - start
, &tot_time
[TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS
]);
492 absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run
, &run_time
[TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS
]);
497 ret
= snprintf(&buffer
[string_off
], size
, "\n");
501 for (i
= 0; i
< TEST_MTX_MAX_STATS
- 2; i
++) {
502 ret
= snprintf(&buffer
[string_off
], size
, "total time %llu ns total run time %llu ns ", tot_time
[i
], run_time
[i
]);
506 ret
= print_test_mtx_stats_string_name(i
, &buffer
[string_off
], size
);
510 ret
= snprintf(&buffer
[string_off
], size
, "\n");
519 lck_mtx_test_mtx_lock_uncontended(
524 disable_all_test_mtx_stats();
526 //warming up the test for lock
527 for (i
= 0; i
< WARMUP_ITER
; i
++) {
529 lck_mtx_test_unlock_mtx();
532 enable_all_test_mtx_stats();
534 for (i
= 0; i
< iter
; i
++) {
536 lck_mtx_test_unlock_mtx();
539 disable_all_test_mtx_stats();
541 //warming up the test for try_lock
542 for (i
= 0; i
< WARMUP_ITER
; i
++) {
543 lck_mtx_test_try_lock();
544 lck_mtx_test_unlock_mtx();
547 enable_all_test_mtx_stats();
549 for (i
= 0; i
< iter
; i
++) {
550 lck_mtx_test_try_lock();
551 lck_mtx_test_unlock_mtx();
558 lck_mtx_test_mtx_spin_uncontended(
563 disable_all_test_mtx_stats();
565 //warming up the test for lock_spin
566 for (i
= 0; i
< WARMUP_ITER
; i
++) {
567 lck_mtx_test_lock_spin();
568 lck_mtx_test_unlock_spin();
571 enable_all_test_mtx_stats();
573 for (i
= 0; i
< iter
; i
++) {
574 lck_mtx_test_lock_spin();
575 lck_mtx_test_unlock_spin();
578 disable_all_test_mtx_stats();
580 //warming up the test for try_lock_spin
581 for (i
= 0; i
< WARMUP_ITER
; i
++) {
582 lck_mtx_test_try_lock_spin();
583 lck_mtx_test_unlock_spin();
586 enable_all_test_mtx_stats();
588 for (i
= 0; i
< iter
; i
++) {
589 lck_mtx_test_try_lock_spin();
590 lck_mtx_test_unlock_spin();
593 disable_all_test_mtx_stats();
595 //warming up the test for lock_spin_always
596 for (i
= 0; i
< WARMUP_ITER
; i
++) {
597 lck_mtx_test_lock_spin_always();
598 lck_mtx_test_unlock_spin();
601 enable_all_test_mtx_stats();
603 for (i
= 0; i
< iter
; i
++) {
604 lck_mtx_test_lock_spin_always();
605 lck_mtx_test_unlock_spin();
608 disable_all_test_mtx_stats();
610 //warming up the test for try_lock_spin_always
611 for (i
= 0; i
< WARMUP_ITER
; i
++) {
612 lck_mtx_test_try_lock_spin_always();
613 lck_mtx_test_unlock_spin();
616 enable_all_test_mtx_stats();
618 for (i
= 0; i
< iter
; i
++) {
619 lck_mtx_test_try_lock_spin_always();
620 lck_mtx_test_unlock_spin();
/*
 * Entry point for the uncontended test: erase all stats, run the
 * mutex and spin variants for `iter` iterations each, and format the
 * collected statistics into buffer. Returns chars written.
 */
int
lck_mtx_test_mtx_uncontended(
    int iter,
    char *buffer,
    int size)
{
    erase_all_test_mtx_stats();
    lck_mtx_test_mtx_lock_uncontended(iter);
    lck_mtx_test_mtx_spin_uncontended(iter);

    return get_test_mtx_stats_string(buffer, size);
}
640 static int wait_barrier
;
641 static int iterations
;
642 static uint64_t start_loop_time
;
643 static uint64_t start_loop_time_run
;
644 static uint64_t end_loop_time
;
645 static uint64_t end_loop_time_run
;
647 struct lck_mtx_thread_arg
{
650 thread_t other_thread
;
654 test_mtx_lock_unlock_contended_thread(
656 __unused wait_result_t wr
)
659 struct lck_mtx_thread_arg
*info
= (struct lck_mtx_thread_arg
*) arg
;
660 thread_t other_thread
;
664 printf("Starting thread %p\n", current_thread());
666 while (os_atomic_load(&info
->other_thread
, acquire
) == NULL
) {
669 other_thread
= info
->other_thread
;
671 printf("Other thread %p\n", other_thread
);
673 my_locked
= &info
->my_locked
;
674 other_locked
= info
->other_locked
;
677 val
= os_atomic_inc(&synch
, relaxed
);
678 while (os_atomic_load(&synch
, relaxed
) < 2) {
682 //warming up the test
683 for (i
= 0; i
< WARMUP_ITER
; i
++) {
686 os_atomic_xchg(my_locked
, 1, relaxed
);
687 if (i
!= WARMUP_ITER
- 1) {
688 while (os_atomic_load(&other_thread
->state
, relaxed
) & TH_RUN
) {
691 os_atomic_xchg(my_locked
, 0, relaxed
);
694 lck_mtx_test_unlock();
696 if (i
!= WARMUP_ITER
- 1) {
697 while (os_atomic_load(other_locked
, relaxed
) == 0) {
703 printf("warmup done %p\n", current_thread());
704 os_atomic_inc(&synch
, relaxed
);
705 while (os_atomic_load(&synch
, relaxed
) < 4) {
711 erase_all_test_mtx_stats();
716 * synch the threads so they start
719 os_atomic_inc(&synch
, relaxed
);
720 while (os_atomic_load(&synch
, relaxed
) < 6) {
724 for (i
= 0; i
< iterations
; i
++) {
727 os_atomic_xchg(my_locked
, 1, relaxed
);
728 if (i
!= iterations
- 1) {
729 while (os_atomic_load(&other_thread
->state
, relaxed
) & TH_RUN
) {
732 os_atomic_xchg(my_locked
, 0, relaxed
);
734 lck_mtx_test_unlock_mtx();
736 if (i
!= iterations
- 1) {
737 while (os_atomic_load(other_locked
, relaxed
) == 0) {
743 os_atomic_inc(&wait_barrier
, relaxed
);
744 thread_wakeup((event_t
) &wait_barrier
);
745 thread_terminate_self();
750 lck_mtx_test_mtx_contended(
755 thread_t thread1
, thread2
;
756 kern_return_t result
;
757 struct lck_mtx_thread_arg targs
[2] = {};
762 erase_all_test_mtx_stats();
764 targs
[0].other_thread
= NULL
;
765 targs
[1].other_thread
= NULL
;
767 result
= kernel_thread_start((thread_continue_t
)test_mtx_lock_unlock_contended_thread
, &targs
[0], &thread1
);
768 if (result
!= KERN_SUCCESS
) {
772 result
= kernel_thread_start((thread_continue_t
)test_mtx_lock_unlock_contended_thread
, &targs
[1], &thread2
);
773 if (result
!= KERN_SUCCESS
) {
774 thread_deallocate(thread1
);
778 /* this are t1 args */
779 targs
[0].my_locked
= 0;
780 targs
[0].other_locked
= &targs
[1].my_locked
;
782 os_atomic_xchg(&targs
[0].other_thread
, thread2
, release
);
784 /* this are t2 args */
785 targs
[1].my_locked
= 0;
786 targs
[1].other_locked
= &targs
[0].my_locked
;
788 os_atomic_xchg(&targs
[1].other_thread
, thread1
, release
);
790 while (os_atomic_load(&wait_barrier
, relaxed
) != 2) {
791 assert_wait((event_t
) &wait_barrier
, THREAD_UNINT
);
792 if (os_atomic_load(&wait_barrier
, relaxed
) != 2) {
793 (void) thread_block(THREAD_CONTINUE_NULL
);
795 clear_wait(current_thread(), THREAD_AWAKENED
);
799 thread_deallocate(thread1
);
800 thread_deallocate(thread2
);
802 return get_test_mtx_stats_string(buffer
, buffer_size
);
806 test_mtx_lck_unlock_contended_loop_time_thread(
808 __unused wait_result_t wr
)
811 struct lck_mtx_thread_arg
*info
= (struct lck_mtx_thread_arg
*) arg
;
812 thread_t other_thread
;
816 printf("Starting thread %p\n", current_thread());
818 while (os_atomic_load(&info
->other_thread
, acquire
) == NULL
) {
821 other_thread
= info
->other_thread
;
823 printf("Other thread %p\n", other_thread
);
825 my_locked
= &info
->my_locked
;
826 other_locked
= info
->other_locked
;
829 val
= os_atomic_inc(&synch
, relaxed
);
830 while (os_atomic_load(&synch
, relaxed
) < 2) {
834 //warming up the test
835 for (i
= 0; i
< WARMUP_ITER
; i
++) {
836 lck_mtx_lock(&test_mtx
);
838 os_atomic_xchg(my_locked
, 1, relaxed
);
839 if (i
!= WARMUP_ITER
- 1) {
840 while (os_atomic_load(&other_thread
->state
, relaxed
) & TH_RUN
) {
843 os_atomic_xchg(my_locked
, 0, relaxed
);
846 lck_mtx_unlock(&test_mtx
);
848 if (i
!= WARMUP_ITER
- 1) {
849 while (os_atomic_load(other_locked
, relaxed
) == 0) {
855 printf("warmup done %p\n", current_thread());
857 os_atomic_inc(&synch
, relaxed
);
858 while (os_atomic_load(&synch
, relaxed
) < 4) {
865 * synch the threads so they start
868 os_atomic_inc(&synch
, relaxed
);
869 while (os_atomic_load(&synch
, relaxed
) < 6) {
874 start_loop_time_run
= thread_get_runtime_self();
875 start_loop_time
= mach_absolute_time();
878 for (i
= 0; i
< iterations
; i
++) {
879 lck_mtx_lock(&test_mtx
);
881 os_atomic_xchg(my_locked
, 1, relaxed
);
882 if (i
!= iterations
- 1) {
883 while (os_atomic_load(&other_thread
->state
, relaxed
) & TH_RUN
) {
886 os_atomic_xchg(my_locked
, 0, relaxed
);
889 lck_mtx_unlock(&test_mtx
);
891 if (i
!= iterations
- 1) {
892 while (os_atomic_load(other_locked
, relaxed
) == 0) {
899 end_loop_time
= mach_absolute_time();
900 end_loop_time_run
= thread_get_runtime_self();
903 os_atomic_inc(&wait_barrier
, relaxed
);
904 thread_wakeup((event_t
) &wait_barrier
);
905 thread_terminate_self();
910 lck_mtx_test_mtx_contended_loop_time(
915 thread_t thread1
, thread2
;
916 kern_return_t result
;
918 struct lck_mtx_thread_arg targs
[2] = {};
922 uint64_t time
, time_run
;
924 targs
[0].other_thread
= NULL
;
925 targs
[1].other_thread
= NULL
;
927 result
= kernel_thread_start((thread_continue_t
)test_mtx_lck_unlock_contended_loop_time_thread
, &targs
[0], &thread1
);
928 if (result
!= KERN_SUCCESS
) {
932 result
= kernel_thread_start((thread_continue_t
)test_mtx_lck_unlock_contended_loop_time_thread
, &targs
[1], &thread2
);
933 if (result
!= KERN_SUCCESS
) {
934 thread_deallocate(thread1
);
938 /* this are t1 args */
939 targs
[0].my_locked
= 0;
940 targs
[0].other_locked
= &targs
[1].my_locked
;
942 os_atomic_xchg(&targs
[0].other_thread
, thread2
, release
);
944 /* this are t2 args */
945 targs
[1].my_locked
= 0;
946 targs
[1].other_locked
= &targs
[0].my_locked
;
948 os_atomic_xchg(&targs
[1].other_thread
, thread1
, release
);
950 while (os_atomic_load(&wait_barrier
, acquire
) != 2) {
951 assert_wait((event_t
) &wait_barrier
, THREAD_UNINT
);
952 if (os_atomic_load(&wait_barrier
, acquire
) != 2) {
953 (void) thread_block(THREAD_CONTINUE_NULL
);
955 clear_wait(current_thread(), THREAD_AWAKENED
);
959 thread_deallocate(thread1
);
960 thread_deallocate(thread2
);
962 absolutetime_to_nanoseconds(end_loop_time
- start_loop_time
, &time
);
963 absolutetime_to_nanoseconds(end_loop_time_run
- start_loop_time_run
, &time_run
);
965 ret
= snprintf(buffer
, buffer_size
, "\n");
966 ret
+= snprintf(&buffer
[ret
], buffer_size
- ret
, "total time %llu ns total run time %llu ns ", time
, time_run
);
967 ret
+= print_test_mtx_stats_string_name(TEST_MTX_LOCK_STATS
, &buffer
[ret
], buffer_size
- ret
);
968 ret
+= snprintf(&buffer
[ret
], buffer_size
- ret
, "\n");