1 #include <mach_ldebug.h>
4 #include <mach/kern_return.h>
5 #include <mach/mach_host_server.h>
6 #include <mach_debug/lockgroup_info.h>
8 #include <kern/locks.h>
9 #include <kern/misc_protos.h>
10 #include <kern/kalloc.h>
11 #include <kern/thread.h>
12 #include <kern/processor.h>
13 #include <kern/sched_prim.h>
14 #include <kern/debug.h>
15 #include <libkern/section_keywords.h>
16 #include <machine/atomic.h>
17 #include <machine/machine_cpu.h>
18 #include <machine/atomic.h>
20 #include <kern/kalloc.h>
22 #include <sys/kdebug.h>
24 static lck_mtx_t test_mtx
;
25 static lck_grp_t test_mtx_grp
;
26 static lck_grp_attr_t test_mtx_grp_attr
;
27 static lck_attr_t test_mtx_attr
;
29 static lck_grp_t test_mtx_stats_grp
;
30 static lck_grp_attr_t test_mtx_stats_grp_attr
;
31 static lck_attr_t test_mtx_stats_attr
;
33 struct lck_mtx_test_stats_elem
{
42 #define TEST_MTX_LOCK_STATS 0
43 #define TEST_MTX_TRY_LOCK_STATS 1
44 #define TEST_MTX_LOCK_SPIN_STATS 2
45 #define TEST_MTX_LOCK_SPIN_ALWAYS_STATS 3
46 #define TEST_MTX_TRY_LOCK_SPIN_STATS 4
47 #define TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS 5
48 #define TEST_MTX_UNLOCK_MTX_STATS 6
49 #define TEST_MTX_UNLOCK_SPIN_STATS 7
50 #define TEST_MTX_MAX_STATS 8
52 struct lck_mtx_test_stats_elem lck_mtx_test_stats
[TEST_MTX_MAX_STATS
];
53 atomic_bool enabled
= TRUE
;
56 init_test_mtx_stats(void)
60 lck_grp_attr_setdefault(&test_mtx_stats_grp_attr
);
61 lck_grp_init(&test_mtx_stats_grp
, "testlck_stats_mtx", &test_mtx_stats_grp_attr
);
62 lck_attr_setdefault(&test_mtx_stats_attr
);
64 atomic_store(&enabled
, TRUE
);
65 for (i
= 0; i
< TEST_MTX_MAX_STATS
; i
++) {
66 memset(&lck_mtx_test_stats
[i
], 0, sizeof(struct lck_mtx_test_stats_elem
));
67 lck_mtx_test_stats
[i
].min
= ~0;
68 lck_spin_init(&lck_mtx_test_stats
[i
].lock
, &test_mtx_stats_grp
, &test_mtx_stats_attr
);
73 update_test_mtx_stats(
78 if (atomic_load(&enabled
) == TRUE
) {
79 assert(type
< TEST_MTX_MAX_STATS
);
82 uint64_t elapsed
= end
- start
;
83 struct lck_mtx_test_stats_elem
* stat
= &lck_mtx_test_stats
[type
];
85 lck_spin_lock(&stat
->lock
);
89 stat
->avg
= stat
->tot
/ stat
->samples
;
90 if (stat
->max
< elapsed
) {
93 if (stat
->min
> elapsed
) {
96 lck_spin_unlock(&stat
->lock
);
101 erase_test_mtx_stats(
104 assert(type
< TEST_MTX_MAX_STATS
);
105 struct lck_mtx_test_stats_elem
* stat
= &lck_mtx_test_stats
[type
];
107 lck_spin_lock(&stat
->lock
);
115 lck_spin_unlock(&stat
->lock
);
119 erase_all_test_mtx_stats(void)
122 for (i
= 0; i
< TEST_MTX_MAX_STATS
; i
++) {
123 erase_test_mtx_stats(i
);
128 disable_all_test_mtx_stats(void)
130 atomic_store(&enabled
, FALSE
);
134 enable_all_test_mtx_stats(void)
136 atomic_store(&enabled
, TRUE
);
140 print_test_mtx_stats_string_name(
147 case TEST_MTX_LOCK_STATS
:
148 type
= "TEST_MTX_LOCK_STATS";
150 case TEST_MTX_TRY_LOCK_STATS
:
151 type
= "TEST_MTX_TRY_LOCK_STATS";
153 case TEST_MTX_LOCK_SPIN_STATS
:
154 type
= "TEST_MTX_LOCK_SPIN_STATS";
156 case TEST_MTX_LOCK_SPIN_ALWAYS_STATS
:
157 type
= "TEST_MTX_LOCK_SPIN_ALWAYS_STATS";
159 case TEST_MTX_TRY_LOCK_SPIN_STATS
:
160 type
= "TEST_MTX_TRY_LOCK_SPIN_STATS";
162 case TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS
:
163 type
= "TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS";
165 case TEST_MTX_UNLOCK_MTX_STATS
:
166 type
= "TEST_MTX_UNLOCK_MTX_STATS";
168 case TEST_MTX_UNLOCK_SPIN_STATS
:
169 type
= "TEST_MTX_UNLOCK_SPIN_STATS";
175 return scnprintf(buffer
, size
, "%s ", type
);
179 get_test_mtx_stats_string(
186 ret
= scnprintf(&buffer
[string_off
], size
, "\n");
191 for (i
= 0; i
< TEST_MTX_MAX_STATS
; i
++) {
192 struct lck_mtx_test_stats_elem
* stat
= &lck_mtx_test_stats
[i
];
194 ret
= scnprintf(&buffer
[string_off
], size
, "{ ");
198 lck_spin_lock(&stat
->lock
);
201 ret
= scnprintf(&buffer
[string_off
], size
, "samples %llu, ", stat
->samples
);
205 absolutetime_to_nanoseconds(stat
->tot
, &time
);
206 ret
= scnprintf(&buffer
[string_off
], size
, "tot %llu ns, ", time
);
210 absolutetime_to_nanoseconds(stat
->avg
, &time
);
211 ret
= scnprintf(&buffer
[string_off
], size
, "avg %llu ns, ", time
);
215 absolutetime_to_nanoseconds(stat
->max
, &time
);
216 ret
= scnprintf(&buffer
[string_off
], size
, "max %llu ns, ", time
);
220 absolutetime_to_nanoseconds(stat
->min
, &time
);
221 ret
= scnprintf(&buffer
[string_off
], size
, "min %llu ns", time
);
225 lck_spin_unlock(&stat
->lock
);
227 ret
= scnprintf(&buffer
[string_off
], size
, " } ");
231 ret
= print_test_mtx_stats_string_name(i
, &buffer
[string_off
], size
);
235 ret
= scnprintf(&buffer
[string_off
], size
, "\n");
244 lck_mtx_test_init(void)
246 static int first
= 0;
249 * This should be substituted with a version
250 * of dispatch_once for kernel (rdar:39537874)
252 if (os_atomic_load(&first
, acquire
) >= 2) {
256 if (os_atomic_cmpxchg(&first
, 0, 1, relaxed
)) {
257 lck_grp_attr_setdefault(&test_mtx_grp_attr
);
258 lck_grp_init(&test_mtx_grp
, "testlck_mtx", &test_mtx_grp_attr
);
259 lck_attr_setdefault(&test_mtx_attr
);
260 lck_mtx_init(&test_mtx
, &test_mtx_grp
, &test_mtx_attr
);
262 init_test_mtx_stats();
264 os_atomic_inc(&first
, release
);
267 while (os_atomic_load(&first
, acquire
) < 2) {
273 lck_mtx_test_lock(void)
277 start
= mach_absolute_time();
279 lck_mtx_lock(&test_mtx
);
281 update_test_mtx_stats(start
, mach_absolute_time(), TEST_MTX_LOCK_STATS
);
285 lck_mtx_test_try_lock(void)
289 start
= mach_absolute_time();
291 lck_mtx_try_lock(&test_mtx
);
293 update_test_mtx_stats(start
, mach_absolute_time(), TEST_MTX_TRY_LOCK_STATS
);
297 lck_mtx_test_lock_spin(void)
301 start
= mach_absolute_time();
303 lck_mtx_lock_spin(&test_mtx
);
305 update_test_mtx_stats(start
, mach_absolute_time(), TEST_MTX_LOCK_SPIN_STATS
);
309 lck_mtx_test_lock_spin_always(void)
313 start
= mach_absolute_time();
315 lck_mtx_lock_spin_always(&test_mtx
);
317 update_test_mtx_stats(start
, mach_absolute_time(), TEST_MTX_LOCK_SPIN_ALWAYS_STATS
);
321 lck_mtx_test_try_lock_spin(void)
325 start
= mach_absolute_time();
327 lck_mtx_try_lock_spin(&test_mtx
);
329 update_test_mtx_stats(start
, mach_absolute_time(), TEST_MTX_TRY_LOCK_SPIN_STATS
);
333 lck_mtx_test_try_lock_spin_always(void)
337 start
= mach_absolute_time();
339 lck_mtx_try_lock_spin_always(&test_mtx
);
341 update_test_mtx_stats(start
, mach_absolute_time(), TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS
);
345 lck_mtx_test_unlock(void)
349 start
= mach_absolute_time();
351 lck_mtx_unlock(&test_mtx
);
353 update_test_mtx_stats(start
, mach_absolute_time(), TEST_MTX_UNLOCK_MTX_STATS
);
357 lck_mtx_test_unlock_mtx(void)
361 start
= mach_absolute_time();
363 lck_mtx_unlock(&test_mtx
);
365 update_test_mtx_stats(start
, mach_absolute_time(), TEST_MTX_UNLOCK_MTX_STATS
);
369 lck_mtx_test_unlock_spin(void)
373 start
= mach_absolute_time();
375 lck_mtx_unlock(&test_mtx
);
377 update_test_mtx_stats(start
, mach_absolute_time(), TEST_MTX_UNLOCK_SPIN_STATS
);
380 #define WARMUP_ITER 1000
383 lck_mtx_test_mtx_uncontended_loop_time(
384 int iter
, char *buffer
, int size
)
387 uint64_t tot_time
[TEST_MTX_MAX_STATS
];
388 uint64_t run_time
[TEST_MTX_MAX_STATS
];
392 //warming up the test
393 for (i
= 0; i
< WARMUP_ITER
; i
++) {
394 lck_mtx_lock(&test_mtx
);
395 lck_mtx_unlock(&test_mtx
);
398 start_run
= thread_get_runtime_self();
399 start
= mach_absolute_time();
401 for (i
= 0; i
< iter
; i
++) {
402 lck_mtx_lock(&test_mtx
);
403 lck_mtx_unlock(&test_mtx
);
406 absolutetime_to_nanoseconds(mach_absolute_time() - start
, &tot_time
[TEST_MTX_LOCK_STATS
]);
407 absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run
, &run_time
[TEST_MTX_LOCK_STATS
]);
409 //warming up the test
410 for (i
= 0; i
< WARMUP_ITER
; i
++) {
411 lck_mtx_try_lock(&test_mtx
);
412 lck_mtx_unlock(&test_mtx
);
415 start_run
= thread_get_runtime_self();
416 start
= mach_absolute_time();
418 for (i
= 0; i
< iter
; i
++) {
419 lck_mtx_try_lock(&test_mtx
);
420 lck_mtx_unlock(&test_mtx
);
423 absolutetime_to_nanoseconds(mach_absolute_time() - start
, &tot_time
[TEST_MTX_TRY_LOCK_STATS
]);
424 absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run
, &run_time
[TEST_MTX_TRY_LOCK_STATS
]);
426 //warming up the test
427 for (i
= 0; i
< WARMUP_ITER
; i
++) {
428 lck_mtx_lock_spin(&test_mtx
);
429 lck_mtx_unlock(&test_mtx
);
432 start_run
= thread_get_runtime_self();
433 start
= mach_absolute_time();
435 for (i
= 0; i
< iter
; i
++) {
436 lck_mtx_lock_spin(&test_mtx
);
437 lck_mtx_unlock(&test_mtx
);
440 absolutetime_to_nanoseconds(mach_absolute_time() - start
, &tot_time
[TEST_MTX_LOCK_SPIN_STATS
]);
441 absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run
, &run_time
[TEST_MTX_LOCK_SPIN_STATS
]);
443 //warming up the test
444 for (i
= 0; i
< WARMUP_ITER
; i
++) {
445 lck_mtx_lock_spin_always(&test_mtx
);
446 lck_mtx_unlock(&test_mtx
);
449 start_run
= thread_get_runtime_self();
450 start
= mach_absolute_time();
452 for (i
= 0; i
< iter
; i
++) {
453 lck_mtx_lock_spin_always(&test_mtx
);
454 lck_mtx_unlock(&test_mtx
);
457 absolutetime_to_nanoseconds(mach_absolute_time() - start
, &tot_time
[TEST_MTX_LOCK_SPIN_ALWAYS_STATS
]);
458 absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run
, &run_time
[TEST_MTX_LOCK_SPIN_ALWAYS_STATS
]);
460 //warming up the test
461 for (i
= 0; i
< WARMUP_ITER
; i
++) {
462 lck_mtx_try_lock_spin(&test_mtx
);
463 lck_mtx_unlock(&test_mtx
);
466 start_run
= thread_get_runtime_self();
467 start
= mach_absolute_time();
469 for (i
= 0; i
< iter
; i
++) {
470 lck_mtx_try_lock_spin(&test_mtx
);
471 lck_mtx_unlock(&test_mtx
);
474 absolutetime_to_nanoseconds(mach_absolute_time() - start
, &tot_time
[TEST_MTX_TRY_LOCK_SPIN_STATS
]);
475 absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run
, &run_time
[TEST_MTX_TRY_LOCK_SPIN_STATS
]);
477 //warming up the test
478 for (i
= 0; i
< WARMUP_ITER
; i
++) {
479 lck_mtx_try_lock_spin_always(&test_mtx
);
480 lck_mtx_unlock(&test_mtx
);
483 start_run
= thread_get_runtime_self();
484 start
= mach_absolute_time();
486 for (i
= 0; i
< iter
; i
++) {
487 lck_mtx_try_lock_spin_always(&test_mtx
);
488 lck_mtx_unlock(&test_mtx
);
491 absolutetime_to_nanoseconds(mach_absolute_time() - start
, &tot_time
[TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS
]);
492 absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run
, &run_time
[TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS
]);
497 ret
= scnprintf(&buffer
[string_off
], size
, "\n");
501 for (i
= 0; i
< TEST_MTX_MAX_STATS
- 2; i
++) {
502 ret
= scnprintf(&buffer
[string_off
], size
, "total time %llu ns total run time %llu ns ", tot_time
[i
], run_time
[i
]);
506 ret
= print_test_mtx_stats_string_name(i
, &buffer
[string_off
], size
);
510 ret
= scnprintf(&buffer
[string_off
], size
, "\n");
519 lck_mtx_test_mtx_lock_uncontended(
524 disable_all_test_mtx_stats();
526 //warming up the test for lock
527 for (i
= 0; i
< WARMUP_ITER
; i
++) {
529 lck_mtx_test_unlock_mtx();
532 enable_all_test_mtx_stats();
534 for (i
= 0; i
< iter
; i
++) {
536 lck_mtx_test_unlock_mtx();
539 disable_all_test_mtx_stats();
541 //warming up the test for try_lock
542 for (i
= 0; i
< WARMUP_ITER
; i
++) {
543 lck_mtx_test_try_lock();
544 lck_mtx_test_unlock_mtx();
547 enable_all_test_mtx_stats();
549 for (i
= 0; i
< iter
; i
++) {
550 lck_mtx_test_try_lock();
551 lck_mtx_test_unlock_mtx();
558 lck_mtx_test_mtx_spin_uncontended(
563 disable_all_test_mtx_stats();
565 //warming up the test for lock_spin
566 for (i
= 0; i
< WARMUP_ITER
; i
++) {
567 lck_mtx_test_lock_spin();
568 lck_mtx_test_unlock_spin();
571 enable_all_test_mtx_stats();
573 for (i
= 0; i
< iter
; i
++) {
574 lck_mtx_test_lock_spin();
575 lck_mtx_test_unlock_spin();
578 disable_all_test_mtx_stats();
580 //warming up the test for try_lock_spin
581 for (i
= 0; i
< WARMUP_ITER
; i
++) {
582 lck_mtx_test_try_lock_spin();
583 lck_mtx_test_unlock_spin();
586 enable_all_test_mtx_stats();
588 for (i
= 0; i
< iter
; i
++) {
589 lck_mtx_test_try_lock_spin();
590 lck_mtx_test_unlock_spin();
593 disable_all_test_mtx_stats();
595 //warming up the test for lock_spin_always
596 for (i
= 0; i
< WARMUP_ITER
; i
++) {
597 lck_mtx_test_lock_spin_always();
598 lck_mtx_test_unlock_spin();
601 enable_all_test_mtx_stats();
603 for (i
= 0; i
< iter
; i
++) {
604 lck_mtx_test_lock_spin_always();
605 lck_mtx_test_unlock_spin();
608 disable_all_test_mtx_stats();
610 //warming up the test for try_lock_spin_always
611 for (i
= 0; i
< WARMUP_ITER
; i
++) {
612 lck_mtx_test_try_lock_spin_always();
613 lck_mtx_test_unlock_spin();
616 enable_all_test_mtx_stats();
618 for (i
= 0; i
< iter
; i
++) {
619 lck_mtx_test_try_lock_spin_always();
620 lck_mtx_test_unlock_spin();
/*
 * Entry point for the uncontended benchmarks: reset stats, run the mutex
 * and spin variants, then render the collected statistics into `buffer`.
 * Returns the number of characters written.
 */
int
lck_mtx_test_mtx_uncontended(
	int iter,
	char *buffer,
	int size)
{
	erase_all_test_mtx_stats();
	lck_mtx_test_mtx_lock_uncontended(iter);
	lck_mtx_test_mtx_spin_uncontended(iter);

	return get_test_mtx_stats_string(buffer, size);
}
640 static int wait_barrier
;
641 static int iterations
;
642 static uint64_t start_loop_time
;
643 static uint64_t start_loop_time_run
;
644 static uint64_t end_loop_time
;
645 static uint64_t end_loop_time_run
;
647 struct lck_mtx_thread_arg
{
650 thread_t other_thread
;
655 test_mtx_lock_unlock_contended_thread(
657 __unused wait_result_t wr
)
660 struct lck_mtx_thread_arg
*info
= (struct lck_mtx_thread_arg
*) arg
;
661 thread_t other_thread
;
665 uint64_t start
, stop
;
667 printf("Starting thread %p\n", current_thread());
669 while (os_atomic_load(&info
->other_thread
, acquire
) == NULL
) {
672 other_thread
= info
->other_thread
;
674 printf("Other thread %p\n", other_thread
);
676 my_locked
= &info
->my_locked
;
677 other_locked
= info
->other_locked
;
681 val
= os_atomic_inc(&synch
, relaxed
);
682 while (os_atomic_load(&synch
, relaxed
) < 2) {
686 //warming up the test
687 for (i
= 0; i
< WARMUP_ITER
; i
++) {
689 int prev
= os_atomic_load(other_locked
, relaxed
);
690 os_atomic_add(my_locked
, 1, relaxed
);
691 if (i
!= WARMUP_ITER
- 1) {
692 if (type
== FULL_CONTENDED
) {
693 while (os_atomic_load(&other_thread
->state
, relaxed
) & TH_RUN
) {
697 start
= mach_absolute_time();
698 stop
= start
+ (MutexSpin
/ 2);
699 while (mach_absolute_time() < stop
) {
705 lck_mtx_test_unlock();
707 if (i
!= WARMUP_ITER
- 1) {
708 while (os_atomic_load(other_locked
, relaxed
) == prev
) {
714 printf("warmup done %p\n", current_thread());
715 os_atomic_inc(&synch
, relaxed
);
716 while (os_atomic_load(&synch
, relaxed
) < 4) {
722 erase_all_test_mtx_stats();
727 * synch the threads so they start
730 os_atomic_inc(&synch
, relaxed
);
731 while (os_atomic_load(&synch
, relaxed
) < 6) {
735 for (i
= 0; i
< iterations
; i
++) {
737 int prev
= os_atomic_load(other_locked
, relaxed
);
738 os_atomic_add(my_locked
, 1, relaxed
);
739 if (i
!= iterations
- 1) {
740 if (type
== FULL_CONTENDED
) {
741 while (os_atomic_load(&other_thread
->state
, relaxed
) & TH_RUN
) {
745 start
= mach_absolute_time();
746 stop
= start
+ (MutexSpin
/ 2);
747 while (mach_absolute_time() < stop
) {
752 lck_mtx_test_unlock_mtx();
754 if (i
!= iterations
- 1) {
755 while (os_atomic_load(other_locked
, relaxed
) == prev
) {
761 os_atomic_inc(&wait_barrier
, relaxed
);
762 thread_wakeup((event_t
) &wait_barrier
);
763 thread_terminate_self();
768 lck_mtx_test_mtx_contended(
774 thread_t thread1
, thread2
;
775 kern_return_t result
;
776 struct lck_mtx_thread_arg targs
[2] = {};
781 if (type
< 0 || type
> MAX_CONDENDED
) {
782 printf("%s invalid type %d\n", __func__
, type
);
786 erase_all_test_mtx_stats();
788 targs
[0].other_thread
= NULL
;
789 targs
[1].other_thread
= NULL
;
790 targs
[0].type
= type
;
791 targs
[1].type
= type
;
793 result
= kernel_thread_start((thread_continue_t
)test_mtx_lock_unlock_contended_thread
, &targs
[0], &thread1
);
794 if (result
!= KERN_SUCCESS
) {
798 result
= kernel_thread_start((thread_continue_t
)test_mtx_lock_unlock_contended_thread
, &targs
[1], &thread2
);
799 if (result
!= KERN_SUCCESS
) {
800 thread_deallocate(thread1
);
804 /* this are t1 args */
805 targs
[0].my_locked
= 0;
806 targs
[0].other_locked
= &targs
[1].my_locked
;
808 os_atomic_xchg(&targs
[0].other_thread
, thread2
, release
);
810 /* this are t2 args */
811 targs
[1].my_locked
= 0;
812 targs
[1].other_locked
= &targs
[0].my_locked
;
814 os_atomic_xchg(&targs
[1].other_thread
, thread1
, release
);
816 while (os_atomic_load(&wait_barrier
, relaxed
) != 2) {
817 assert_wait((event_t
) &wait_barrier
, THREAD_UNINT
);
818 if (os_atomic_load(&wait_barrier
, relaxed
) != 2) {
819 (void) thread_block(THREAD_CONTINUE_NULL
);
821 clear_wait(current_thread(), THREAD_AWAKENED
);
825 thread_deallocate(thread1
);
826 thread_deallocate(thread2
);
828 return get_test_mtx_stats_string(buffer
, buffer_size
);
832 test_mtx_lck_unlock_contended_loop_time_thread(
834 __unused wait_result_t wr
)
837 struct lck_mtx_thread_arg
*info
= (struct lck_mtx_thread_arg
*) arg
;
838 thread_t other_thread
;
842 uint64_t start
, stop
;
844 printf("Starting thread %p\n", current_thread());
846 while (os_atomic_load(&info
->other_thread
, acquire
) == NULL
) {
849 other_thread
= info
->other_thread
;
851 printf("Other thread %p\n", other_thread
);
853 my_locked
= &info
->my_locked
;
854 other_locked
= info
->other_locked
;
858 val
= os_atomic_inc(&synch
, relaxed
);
859 while (os_atomic_load(&synch
, relaxed
) < 2) {
863 //warming up the test
864 for (i
= 0; i
< WARMUP_ITER
; i
++) {
865 lck_mtx_lock(&test_mtx
);
867 int prev
= os_atomic_load(other_locked
, relaxed
);
868 os_atomic_add(my_locked
, 1, relaxed
);
869 if (i
!= WARMUP_ITER
- 1) {
870 if (type
== FULL_CONTENDED
) {
871 while (os_atomic_load(&other_thread
->state
, relaxed
) & TH_RUN
) {
875 start
= mach_absolute_time();
876 stop
= start
+ (MutexSpin
/ 2);
877 while (mach_absolute_time() < stop
) {
883 lck_mtx_unlock(&test_mtx
);
885 if (i
!= WARMUP_ITER
- 1) {
886 while (os_atomic_load(other_locked
, relaxed
) == prev
) {
892 printf("warmup done %p\n", current_thread());
894 os_atomic_inc(&synch
, relaxed
);
895 while (os_atomic_load(&synch
, relaxed
) < 4) {
902 * synch the threads so they start
905 os_atomic_inc(&synch
, relaxed
);
906 while (os_atomic_load(&synch
, relaxed
) < 6) {
911 start_loop_time_run
= thread_get_runtime_self();
912 start_loop_time
= mach_absolute_time();
915 for (i
= 0; i
< iterations
; i
++) {
916 lck_mtx_lock(&test_mtx
);
918 int prev
= os_atomic_load(other_locked
, relaxed
);
919 os_atomic_add(my_locked
, 1, relaxed
);
920 if (i
!= iterations
- 1) {
921 if (type
== FULL_CONTENDED
) {
922 while (os_atomic_load(&other_thread
->state
, relaxed
) & TH_RUN
) {
926 start
= mach_absolute_time();
927 stop
= start
+ (MutexSpin
/ 2);
928 while (mach_absolute_time() < stop
) {
934 lck_mtx_unlock(&test_mtx
);
936 if (i
!= iterations
- 1) {
937 while (os_atomic_load(other_locked
, relaxed
) == prev
) {
944 end_loop_time
= mach_absolute_time();
945 end_loop_time_run
= thread_get_runtime_self();
948 os_atomic_inc(&wait_barrier
, relaxed
);
949 thread_wakeup((event_t
) &wait_barrier
);
950 thread_terminate_self();
955 lck_mtx_test_mtx_contended_loop_time(
961 thread_t thread1
, thread2
;
962 kern_return_t result
;
964 struct lck_mtx_thread_arg targs
[2] = {};
968 uint64_t time
, time_run
;
970 if (type
< 0 || type
> MAX_CONDENDED
) {
971 printf("%s invalid type %d\n", __func__
, type
);
975 targs
[0].other_thread
= NULL
;
976 targs
[1].other_thread
= NULL
;
978 result
= kernel_thread_start((thread_continue_t
)test_mtx_lck_unlock_contended_loop_time_thread
, &targs
[0], &thread1
);
979 if (result
!= KERN_SUCCESS
) {
983 result
= kernel_thread_start((thread_continue_t
)test_mtx_lck_unlock_contended_loop_time_thread
, &targs
[1], &thread2
);
984 if (result
!= KERN_SUCCESS
) {
985 thread_deallocate(thread1
);
989 /* this are t1 args */
990 targs
[0].my_locked
= 0;
991 targs
[0].other_locked
= &targs
[1].my_locked
;
992 targs
[0].type
= type
;
993 targs
[1].type
= type
;
995 os_atomic_xchg(&targs
[0].other_thread
, thread2
, release
);
997 /* this are t2 args */
998 targs
[1].my_locked
= 0;
999 targs
[1].other_locked
= &targs
[0].my_locked
;
1001 os_atomic_xchg(&targs
[1].other_thread
, thread1
, release
);
1003 while (os_atomic_load(&wait_barrier
, acquire
) != 2) {
1004 assert_wait((event_t
) &wait_barrier
, THREAD_UNINT
);
1005 if (os_atomic_load(&wait_barrier
, acquire
) != 2) {
1006 (void) thread_block(THREAD_CONTINUE_NULL
);
1008 clear_wait(current_thread(), THREAD_AWAKENED
);
1012 thread_deallocate(thread1
);
1013 thread_deallocate(thread2
);
1015 absolutetime_to_nanoseconds(end_loop_time
- start_loop_time
, &time
);
1016 absolutetime_to_nanoseconds(end_loop_time_run
- start_loop_time_run
, &time_run
);
1018 ret
= scnprintf(buffer
, buffer_size
, "\n");
1019 ret
+= scnprintf(&buffer
[ret
], buffer_size
- ret
, "total time %llu ns total run time %llu ns ", time
, time_run
);
1020 ret
+= print_test_mtx_stats_string_name(TEST_MTX_LOCK_STATS
, &buffer
[ret
], buffer_size
- ret
);
1021 ret
+= scnprintf(&buffer
[ret
], buffer_size
- ret
, "\n");