/*
 * osfmk/kern/test_lock.c
 *
 * lck_mtx micro-benchmarks: uncontended and contended lock/unlock
 * timing tests with per-operation statistics.
 * (From the xnu-4903.241.1 source drop.)
 */
#include <mach_ldebug.h>
#include <debug.h>

#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach_debug/lockgroup_info.h>

#include <kern/debug.h>
#include <kern/kalloc.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>

#include <libkern/section_keywords.h>

#include <machine/atomic.h>
#include <machine/machine_cpu.h>

#include <string.h>

#include <sys/kdebug.h>
/* The mutex exercised by every test, plus its lock group/attributes. */
static lck_mtx_t test_mtx;
static lck_grp_t test_mtx_grp;
static lck_grp_attr_t test_mtx_grp_attr;
static lck_attr_t test_mtx_attr;

/* Separate group/attributes for the per-slot statistics spinlocks. */
static lck_grp_t test_mtx_stats_grp;
static lck_grp_attr_t test_mtx_stats_grp_attr;
static lck_attr_t test_mtx_stats_attr;

/*
 * One statistics slot per timed operation.  All fields are in
 * mach absolute time units and are protected by `lock`.
 */
struct lck_mtx_test_stats_elem {
	lck_spin_t lock;	/* guards the counters below */
	uint64_t samples;	/* number of recorded intervals */
	uint64_t avg;		/* tot / samples */
	uint64_t max;		/* largest single interval */
	uint64_t min;		/* smallest single interval (primed to ~0) */
	uint64_t tot;		/* sum of all intervals */
};

/* Index of each timed operation into lck_mtx_test_stats[]. */
#define TEST_MTX_LOCK_STATS 0
#define TEST_MTX_TRY_LOCK_STATS 1
#define TEST_MTX_LOCK_SPIN_STATS 2
#define TEST_MTX_LOCK_SPIN_ALWAYS_STATS 3
#define TEST_MTX_TRY_LOCK_SPIN_STATS 4
#define TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS 5
#define TEST_MTX_UNLOCK_MTX_STATS 6
#define TEST_MTX_UNLOCK_SPIN_STATS 7
#define TEST_MTX_MAX_STATS 8

struct lck_mtx_test_stats_elem lck_mtx_test_stats[TEST_MTX_MAX_STATS];
/* Global on/off switch for sample recording (see update_test_mtx_stats). */
atomic_bool enabled = TRUE;
54
55static void
56init_test_mtx_stats(void)
57{
58 int i;
59
60 lck_grp_attr_setdefault(&test_mtx_stats_grp_attr);
61 lck_grp_init(&test_mtx_stats_grp, "testlck_stats_mtx", &test_mtx_stats_grp_attr);
62 lck_attr_setdefault(&test_mtx_stats_attr);
63
64 atomic_store(&enabled, TRUE);
65 for(i = 0; i < TEST_MTX_MAX_STATS; i++){
66 memset(&lck_mtx_test_stats[i], 0 , sizeof(struct lck_mtx_test_stats_elem));
67 lck_mtx_test_stats[i].min = ~0;
68 lck_spin_init(&lck_mtx_test_stats[i].lock, &test_mtx_stats_grp, &test_mtx_stats_attr);
69 }
70}
71
72static void
73update_test_mtx_stats(
74 uint64_t start,
75 uint64_t end,
76 uint type)
77{
78 if (atomic_load(&enabled) == TRUE) {
79 assert(type < TEST_MTX_MAX_STATS);
80 assert(start <= end);
81
82 uint64_t elapsed = end - start;
83 struct lck_mtx_test_stats_elem* stat = &lck_mtx_test_stats[type];
84
85 lck_spin_lock(&stat->lock);
86
87 stat->samples++;
88 stat->tot += elapsed;
89 stat->avg = stat->tot / stat->samples;
90 if (stat->max < elapsed)
91 stat->max = elapsed;
92 if (stat->min > elapsed)
93 stat->min = elapsed;
94 lck_spin_unlock(&stat->lock);
95 }
96}
97
98static void
99erase_test_mtx_stats(
100 uint type)
101{
102 assert(type < TEST_MTX_MAX_STATS);
103 struct lck_mtx_test_stats_elem* stat = &lck_mtx_test_stats[type];
104
105 lck_spin_lock(&stat->lock);
106
107 stat->samples = 0;
108 stat->tot = 0;
109 stat->avg = 0;
110 stat->max = 0;
111 stat->min = ~0;
112
113 lck_spin_unlock(&stat->lock);
114}
115
116void
117erase_all_test_mtx_stats(void)
118{
119 int i;
120 for (i = 0; i < TEST_MTX_MAX_STATS; i++) {
121 erase_test_mtx_stats(i);
122 }
123}
124
125static void
126disable_all_test_mtx_stats(void)
127{
128 atomic_store(&enabled, FALSE);
129}
130
131static void
132enable_all_test_mtx_stats(void)
133{
134 atomic_store(&enabled, TRUE);
135}
136
137static int
138print_test_mtx_stats_string_name(
139 int type_num,
140 char* buffer,
141 int size)
142{
143 char* type = "";
144 switch (type_num) {
145 case TEST_MTX_LOCK_STATS:
146 type = "TEST_MTX_LOCK_STATS";
147 break;
148 case TEST_MTX_TRY_LOCK_STATS:
149 type = "TEST_MTX_TRY_LOCK_STATS";
150 break;
151 case TEST_MTX_LOCK_SPIN_STATS:
152 type = "TEST_MTX_LOCK_SPIN_STATS";
153 break;
154 case TEST_MTX_LOCK_SPIN_ALWAYS_STATS:
155 type = "TEST_MTX_LOCK_SPIN_ALWAYS_STATS";
156 break;
157 case TEST_MTX_TRY_LOCK_SPIN_STATS:
158 type = "TEST_MTX_TRY_LOCK_SPIN_STATS";
159 break;
160 case TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS:
161 type = "TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS";
162 break;
163 case TEST_MTX_UNLOCK_MTX_STATS:
164 type = "TEST_MTX_UNLOCK_MTX_STATS";
165 break;
166 case TEST_MTX_UNLOCK_SPIN_STATS:
167 type = "TEST_MTX_UNLOCK_SPIN_STATS";
168 break;
169 default:
170 break;
171 }
172
173 return snprintf(buffer, size, "%s ", type);
174}
175
176int
177get_test_mtx_stats_string(
178 char* buffer,
179 int size)
180{
181 int string_off = 0;
182 int ret = 0;
183
184 ret = snprintf(&buffer[string_off], size, "\n");
185 size -= ret;
186 string_off += ret;
187
188 int i;
189 for (i = 0; i < TEST_MTX_MAX_STATS; i++) {
190 struct lck_mtx_test_stats_elem* stat = &lck_mtx_test_stats[i];
191
192 ret = snprintf(&buffer[string_off], size, "{ ");
193 size -= ret;
194 string_off += ret;
195
196 lck_spin_lock(&stat->lock);
197 uint64_t time;
198
199 ret = snprintf(&buffer[string_off], size, "samples %llu, ", stat->samples);
200 size -= ret;
201 string_off += ret;
202
203 absolutetime_to_nanoseconds(stat->tot, &time);
204 ret = snprintf(&buffer[string_off], size, "tot %llu ns, ", time);
205 size -= ret;
206 string_off += ret;
207
208 absolutetime_to_nanoseconds(stat->avg, &time);
209 ret = snprintf(&buffer[string_off], size, "avg %llu ns, ", time);
210 size -= ret;
211 string_off += ret;
212
213 absolutetime_to_nanoseconds(stat->max, &time);
214 ret = snprintf(&buffer[string_off], size, "max %llu ns, ", time);
215 size -= ret;
216 string_off += ret;
217
218 absolutetime_to_nanoseconds(stat->min, &time);
219 ret = snprintf(&buffer[string_off], size, "min %llu ns", time);
220 size -= ret;
221 string_off += ret;
222
223 lck_spin_unlock(&stat->lock);
224
225 ret = snprintf(&buffer[string_off], size, " } ");
226 size -= ret;
227 string_off += ret;
228
229 ret = print_test_mtx_stats_string_name(i, &buffer[string_off], size);
230 size -= ret;
231 string_off += ret;
232
233 ret = snprintf(&buffer[string_off], size, "\n");
234 size -= ret;
235 string_off += ret;
236 }
237
238 return string_off;
239}
240
/*
 * One-time initialization of the test mutex, its lock group/attributes
 * and the statistics slots.  Poor man's dispatch_once: `first` goes
 * 0 -> 1 (one winner is initializing) -> 2 (initialization published);
 * all other callers spin until it reaches 2.
 */
void
lck_mtx_test_init(void)
{
	static int first = 0;

	/*
	 * This should be substituted with a version
	 * of dispatch_once for kernel (rdar:39537874)
	 */
	if (os_atomic_load(&first, acquire) >= 2)
		return;

	if (os_atomic_cmpxchg(&first, 0, 1, relaxed)){
		/* we won the 0 -> 1 race: do the setup exactly once */
		lck_grp_attr_setdefault(&test_mtx_grp_attr);
		lck_grp_init(&test_mtx_grp, "testlck_mtx", &test_mtx_grp_attr);
		lck_attr_setdefault(&test_mtx_attr);
		lck_mtx_init(&test_mtx, &test_mtx_grp, &test_mtx_attr);

		init_test_mtx_stats();

		/* publish: this release pairs with the acquire loads above/below */
		os_atomic_inc(&first, release);
	}

	/* losers (and the winner) wait here until setup is visible */
	while(os_atomic_load(&first, acquire) < 2);
}
266
267void
268lck_mtx_test_lock(void)
269{
270 uint64_t start;
271
272 start = mach_absolute_time();
273
274 lck_mtx_lock(&test_mtx);
275
276 update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_LOCK_STATS);
277}
278
279static void
280lck_mtx_test_try_lock(void)
281{
282 uint64_t start;
283
284 start = mach_absolute_time();
285
286 lck_mtx_try_lock(&test_mtx);
287
288 update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_TRY_LOCK_STATS);
289}
290
291static void
292lck_mtx_test_lock_spin(void)
293{
294 uint64_t start;
295
296 start = mach_absolute_time();
297
298 lck_mtx_lock_spin(&test_mtx);
299
300 update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_LOCK_SPIN_STATS);
301}
302
303static void
304lck_mtx_test_lock_spin_always(void)
305{
306 uint64_t start;
307
308 start = mach_absolute_time();
309
310 lck_mtx_lock_spin_always(&test_mtx);
311
312 update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_LOCK_SPIN_ALWAYS_STATS);
313}
314
315static void
316lck_mtx_test_try_lock_spin(void)
317{
318 uint64_t start;
319
320 start = mach_absolute_time();
321
322 lck_mtx_try_lock_spin(&test_mtx);
323
324 update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_TRY_LOCK_SPIN_STATS);
325}
326
327static void
328lck_mtx_test_try_lock_spin_always(void)
329{
330 uint64_t start;
331
332 start = mach_absolute_time();
333
334 lck_mtx_try_lock_spin_always(&test_mtx);
335
336 update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS);
337}
338
339void
340lck_mtx_test_unlock(void)
341{
342 uint64_t start;
343
344 start = mach_absolute_time();
345
346 lck_mtx_unlock(&test_mtx);
347
348 update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_UNLOCK_MTX_STATS);
349}
350
351static void
352lck_mtx_test_unlock_mtx(void)
353{
354 uint64_t start;
355
356 start = mach_absolute_time();
357
358 lck_mtx_unlock(&test_mtx);
359
360 update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_UNLOCK_MTX_STATS);
361}
362
363static void
364lck_mtx_test_unlock_spin(void)
365{
366 uint64_t start;
367
368 start = mach_absolute_time();
369
370 lck_mtx_unlock(&test_mtx);
371
372 update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_UNLOCK_SPIN_STATS);
373}
374
375#define WARMUP_ITER 1000
376
377int
378lck_mtx_test_mtx_uncontended_loop_time(
379 int iter, char *buffer, int size)
380{
381 int i;
382 uint64_t tot_time[TEST_MTX_MAX_STATS];
383 uint64_t run_time[TEST_MTX_MAX_STATS];
384 uint64_t start;
385 uint64_t start_run;
386
387 //warming up the test
388 for (i = 0; i < WARMUP_ITER; i++) {
389 lck_mtx_lock(&test_mtx);
390 lck_mtx_unlock(&test_mtx);
391 }
392
393 start_run = thread_get_runtime_self();
394 start = mach_absolute_time();
395
396 for (i = 0; i < iter; i++) {
397 lck_mtx_lock(&test_mtx);
398 lck_mtx_unlock(&test_mtx);
399 }
400
401 absolutetime_to_nanoseconds(mach_absolute_time() - start, &tot_time[TEST_MTX_LOCK_STATS]);
402 absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run, &run_time[TEST_MTX_LOCK_STATS]);
403
404 //warming up the test
405 for (i = 0; i < WARMUP_ITER; i++) {
406 lck_mtx_try_lock(&test_mtx);
407 lck_mtx_unlock(&test_mtx);
408 }
409
410 start_run = thread_get_runtime_self();
411 start = mach_absolute_time();
412
413 for (i = 0; i < iter; i++) {
414 lck_mtx_try_lock(&test_mtx);
415 lck_mtx_unlock(&test_mtx);
416 }
417
418 absolutetime_to_nanoseconds(mach_absolute_time() - start, &tot_time[TEST_MTX_TRY_LOCK_STATS]);
419 absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run, &run_time[TEST_MTX_TRY_LOCK_STATS]);
420
421 //warming up the test
422 for (i = 0; i < WARMUP_ITER; i++) {
423 lck_mtx_lock_spin(&test_mtx);
424 lck_mtx_unlock(&test_mtx);
425 }
426
427 start_run = thread_get_runtime_self();
428 start = mach_absolute_time();
429
430 for (i = 0; i < iter; i++) {
431 lck_mtx_lock_spin(&test_mtx);
432 lck_mtx_unlock(&test_mtx);
433 }
434
435 absolutetime_to_nanoseconds(mach_absolute_time() - start, &tot_time[TEST_MTX_LOCK_SPIN_STATS]);
436 absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run, &run_time[TEST_MTX_LOCK_SPIN_STATS]);
437
438 //warming up the test
439 for (i = 0; i < WARMUP_ITER; i++) {
440 lck_mtx_lock_spin_always(&test_mtx);
441 lck_mtx_unlock(&test_mtx);
442 }
443
444 start_run = thread_get_runtime_self();
445 start = mach_absolute_time();
446
447 for (i = 0; i < iter; i++) {
448 lck_mtx_lock_spin_always(&test_mtx);
449 lck_mtx_unlock(&test_mtx);
450 }
451
452 absolutetime_to_nanoseconds(mach_absolute_time() - start, &tot_time[TEST_MTX_LOCK_SPIN_ALWAYS_STATS]);
453 absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run, &run_time[TEST_MTX_LOCK_SPIN_ALWAYS_STATS]);
454
455 //warming up the test
456 for (i = 0; i < WARMUP_ITER; i++) {
457 lck_mtx_try_lock_spin(&test_mtx);
458 lck_mtx_unlock(&test_mtx);
459 }
460
461 start_run = thread_get_runtime_self();
462 start = mach_absolute_time();
463
464 for (i = 0; i < iter; i++) {
465 lck_mtx_try_lock_spin(&test_mtx);
466 lck_mtx_unlock(&test_mtx);
467 }
468
469 absolutetime_to_nanoseconds(mach_absolute_time() - start, &tot_time[TEST_MTX_TRY_LOCK_SPIN_STATS]);
470 absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run, &run_time[TEST_MTX_TRY_LOCK_SPIN_STATS]);
471
472 //warming up the test
473 for (i = 0; i < WARMUP_ITER; i++) {
474 lck_mtx_try_lock_spin_always(&test_mtx);
475 lck_mtx_unlock(&test_mtx);
476 }
477
478 start_run = thread_get_runtime_self();
479 start = mach_absolute_time();
480
481 for (i = 0; i < iter; i++) {
482 lck_mtx_try_lock_spin_always(&test_mtx);
483 lck_mtx_unlock(&test_mtx);
484 }
485
486 absolutetime_to_nanoseconds(mach_absolute_time() - start, &tot_time[TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS]);
487 absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run, &run_time[TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS]);
488
489 int string_off = 0;
490 int ret = 0;
491
492 ret = snprintf(&buffer[string_off], size, "\n");
493 size -= ret;
494 string_off += ret;
495
496 for (i = 0; i < TEST_MTX_MAX_STATS - 2; i++) {
497
498 ret = snprintf(&buffer[string_off], size, "total time %llu ns total run time %llu ns ", tot_time[i], run_time[i]);
499 size -= ret;
500 string_off += ret;
501
502 ret = print_test_mtx_stats_string_name(i, &buffer[string_off], size);
503 size -= ret;
504 string_off += ret;
505
506 ret = snprintf(&buffer[string_off], size, "\n");
507 size -= ret;
508 string_off += ret;
509 }
510
511 return string_off;
512}
513
514static kern_return_t
515lck_mtx_test_mtx_lock_uncontended(
516 int iter)
517{
518 int i;
519
520 disable_all_test_mtx_stats();
521
522 //warming up the test for lock
523 for (i = 0; i < WARMUP_ITER; i++) {
524 lck_mtx_test_lock();
525 lck_mtx_test_unlock_mtx();
526 }
527
528 enable_all_test_mtx_stats();
529
530 for (i = 0; i < iter; i++) {
531 lck_mtx_test_lock();
532 lck_mtx_test_unlock_mtx();
533 }
534
535 disable_all_test_mtx_stats();
536
537 //warming up the test for try_lock
538 for (i = 0; i < WARMUP_ITER; i++) {
539 lck_mtx_test_try_lock();
540 lck_mtx_test_unlock_mtx();
541 }
542
543 enable_all_test_mtx_stats();
544
545 for (i = 0; i < iter; i++) {
546 lck_mtx_test_try_lock();
547 lck_mtx_test_unlock_mtx();
548 }
549
550 return KERN_SUCCESS;
551}
552
553static kern_return_t
554lck_mtx_test_mtx_spin_uncontended(
555 int iter)
556{
557 int i;
558
559 disable_all_test_mtx_stats();
560
561 //warming up the test for lock_spin
562 for (i = 0; i < WARMUP_ITER; i++) {
563 lck_mtx_test_lock_spin();
564 lck_mtx_test_unlock_spin();
565 }
566
567 enable_all_test_mtx_stats();
568
569 for (i = 0; i < iter; i++) {
570 lck_mtx_test_lock_spin();
571 lck_mtx_test_unlock_spin();
572 }
573
574 disable_all_test_mtx_stats();
575
576 //warming up the test for try_lock_spin
577 for (i = 0; i < WARMUP_ITER; i++) {
578 lck_mtx_test_try_lock_spin();
579 lck_mtx_test_unlock_spin();
580 }
581
582 enable_all_test_mtx_stats();
583
584 for (i = 0; i < iter; i++) {
585 lck_mtx_test_try_lock_spin();
586 lck_mtx_test_unlock_spin();
587 }
588
589 disable_all_test_mtx_stats();
590
591 //warming up the test for lock_spin_always
592 for (i = 0; i < WARMUP_ITER; i++) {
593 lck_mtx_test_lock_spin_always();
594 lck_mtx_test_unlock_spin();
595 }
596
597 enable_all_test_mtx_stats();
598
599 for (i = 0; i < iter; i++) {
600 lck_mtx_test_lock_spin_always();
601 lck_mtx_test_unlock_spin();
602 }
603
604 disable_all_test_mtx_stats();
605
606 //warming up the test for try_lock_spin_always
607 for (i = 0; i < WARMUP_ITER; i++) {
608 lck_mtx_test_try_lock_spin_always();
609 lck_mtx_test_unlock_spin();
610 }
611
612 enable_all_test_mtx_stats();
613
614 for (i = 0; i < iter; i++) {
615 lck_mtx_test_try_lock_spin_always();
616 lck_mtx_test_unlock_spin();
617 }
618
619 return KERN_SUCCESS;
620}
621
/*
 * Run every uncontended lock/unlock statistics test and render the
 * collected statistics into `buffer`.  Returns the string length.
 */
int
lck_mtx_test_mtx_uncontended(
	int iter,
	char *buffer,
	int size)
{
	erase_all_test_mtx_stats();

	lck_mtx_test_mtx_lock_uncontended(iter);
	lck_mtx_test_mtx_spin_uncontended(iter);

	return get_test_mtx_stats_string(buffer, size);
}
634
/* Rendezvous counter for the two contended-test threads (see thread fns). */
static int synch;
/* Incremented by each worker when done; the driver waits for it to hit 2. */
static int wait_barrier;
/* Number of measured iterations each worker runs. */
static int iterations;
/* Loop-time bookkeeping, written by the val == 1 worker only. */
static uint64_t start_loop_time;
static uint64_t start_loop_time_run;
static uint64_t end_loop_time;
static uint64_t end_loop_time_run;

/* Per-worker argument block for the contended tests. */
struct lck_mtx_thread_arg {
	int my_locked;		/* flag this worker raises while it holds the lock */
	int* other_locked;	/* peer's my_locked, for the handoff wait */
	thread_t other_thread;	/* peer thread, published by the driver */
};
648
/*
 * Worker for the contended statistics test.  Two instances run this:
 * each repeatedly takes the test mutex (through the stats-recording
 * wrappers) and hands it off to its peer, forcing every acquisition
 * after the first to contend.
 *
 * Rendezvous protocol on `synch`: each worker increments and spins, so
 * both reach 2 before the warm-up, 4 after it, and 6 before the
 * measured loop starts.
 *
 * Handoff inside each loop: after locking, raise my_locked, wait for
 * the peer to block on the mutex (its TH_RUN bit clears), drop
 * my_locked, unlock, then wait for the peer's locked flag before
 * looping — guaranteeing strict alternation.
 */
static void
test_mtx_lock_unlock_contended_thread(
	void *arg,
	__unused wait_result_t wr)
{
	int i, val;
	struct lck_mtx_thread_arg *info = (struct lck_mtx_thread_arg *) arg;
	thread_t other_thread;
	int* my_locked;
	int* other_locked;

	printf("Starting thread %p\n", current_thread());

	/* wait for the driver to publish the peer thread pointer */
	while(os_atomic_load(&info->other_thread, acquire) == NULL);
	other_thread = info->other_thread;

	printf("Other thread %p\n", other_thread);

	my_locked = &info->my_locked;
	other_locked = info->other_locked;

	*my_locked = 0;
	/* val == 1 identifies the first worker through the rendezvous */
	val = os_atomic_inc(&synch, relaxed);
	while(os_atomic_load(&synch, relaxed) < 2);

	//warming up the test
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_test_lock();

		os_atomic_xchg(my_locked, 1 , relaxed);
		if (i != WARMUP_ITER - 1) {
			/* wait for the peer to block on the mutex */
			while(os_atomic_load(&other_thread->state, relaxed) & TH_RUN);
			os_atomic_xchg(my_locked, 0 , relaxed);
		}

		lck_mtx_test_unlock();

		if (i != WARMUP_ITER - 1)
			/* wait for the peer to take its turn holding the lock */
			while(os_atomic_load(other_locked, relaxed) == 0);
	}

	printf("warmup done %p\n", current_thread());
	os_atomic_inc(&synch, relaxed);
	while(os_atomic_load(&synch, relaxed) < 4);

	//erase statistics
	if (val == 1)
		erase_all_test_mtx_stats();

	*my_locked = 0;
	/*
	 * synch the threads so they start
	 * concurrently.
	 */
	os_atomic_inc(&synch, relaxed);
	while(os_atomic_load(&synch, relaxed) < 6);

	/* measured loop: same handoff dance, stats recording enabled */
	for (i = 0; i < iterations; i++) {
		lck_mtx_test_lock();

		os_atomic_xchg(my_locked, 1 , relaxed);
		if (i != iterations - 1) {
			while(os_atomic_load(&other_thread->state, relaxed) & TH_RUN);
			os_atomic_xchg(my_locked, 0 , relaxed);
		}
		lck_mtx_test_unlock_mtx();

		if (i != iterations - 1)
			while(os_atomic_load(other_locked, relaxed) == 0);

	}

	/* signal the driver, wake it, and exit */
	os_atomic_inc(&wait_barrier, relaxed);
	thread_wakeup((event_t) &wait_barrier);
	thread_terminate_self();
}
725
726
727kern_return_t
728lck_mtx_test_mtx_contended(
729 int iter,
730 char* buffer,
731 int buffer_size)
732{
733 thread_t thread1, thread2;
734 kern_return_t result;
735 struct lck_mtx_thread_arg targs[2] = {};
736 synch = 0;
737 wait_barrier = 0;
738 iterations = iter;
739
740 erase_all_test_mtx_stats();
741
742 targs[0].other_thread = NULL;
743 targs[1].other_thread = NULL;
744
745 result = kernel_thread_start((thread_continue_t)test_mtx_lock_unlock_contended_thread, &targs[0], &thread1);
746 if (result != KERN_SUCCESS) {
747 return 0;
748 }
749
750 result = kernel_thread_start((thread_continue_t)test_mtx_lock_unlock_contended_thread, &targs[1], &thread2);
751 if (result != KERN_SUCCESS) {
752 thread_deallocate(thread1);
753 return 0;
754 }
755
756 /* this are t1 args */
757 targs[0].my_locked = 0;
758 targs[0].other_locked = &targs[1].my_locked;
759
760 os_atomic_xchg(&targs[0].other_thread, thread2, release);
761
762 /* this are t2 args */
763 targs[1].my_locked = 0;
764 targs[1].other_locked = &targs[0].my_locked;
765
766 os_atomic_xchg(&targs[1].other_thread, thread1, release);
767
768 while (os_atomic_load(&wait_barrier, relaxed) != 2) {
769 assert_wait((event_t) &wait_barrier, THREAD_UNINT);
770 if (os_atomic_load(&wait_barrier, relaxed) != 2) {
771 (void) thread_block(THREAD_CONTINUE_NULL);
772 } else {
773 clear_wait(current_thread(), THREAD_AWAKENED);
774 }
775 }
776
777 thread_deallocate(thread1);
778 thread_deallocate(thread2);
779
780 return get_test_mtx_stats_string(buffer, buffer_size);
781}
782
783static void
784test_mtx_lck_unlock_contended_loop_time_thread(
785 __unused void *arg,
786 __unused wait_result_t wr)
787{
788 int i, val;
789 struct lck_mtx_thread_arg *info = (struct lck_mtx_thread_arg *) arg;
790 thread_t other_thread;
791 int* my_locked;
792 int* other_locked;
793
794 printf("Starting thread %p\n", current_thread());
795
796 while(os_atomic_load(&info->other_thread, acquire) == NULL);
797 other_thread = info->other_thread;
798
799 printf("Other thread %p\n", other_thread);
800
801 my_locked = &info->my_locked;
802 other_locked = info->other_locked;
803
804 *my_locked = 0;
805 val = os_atomic_inc(&synch, relaxed);
806 while(os_atomic_load(&synch, relaxed) < 2);
807
808 //warming up the test
809 for (i = 0; i < WARMUP_ITER; i++) {
810 lck_mtx_lock(&test_mtx);
811
812 os_atomic_xchg(my_locked, 1 , relaxed);
813 if (i != WARMUP_ITER - 1) {
814 while(os_atomic_load(&other_thread->state, relaxed) & TH_RUN);
815 os_atomic_xchg(my_locked, 0 , relaxed);
816 }
817
818 lck_mtx_unlock(&test_mtx);
819
820 if (i != WARMUP_ITER - 1)
821 while(os_atomic_load(other_locked, relaxed) == 0);
822 }
823
824 printf("warmup done %p\n", current_thread());
825
826 os_atomic_inc(&synch, relaxed);
827 while(os_atomic_load(&synch, relaxed) < 4);
828
829 *my_locked = 0;
830
831 /*
832 * synch the threads so they start
833 * concurrently.
834 */
835 os_atomic_inc(&synch, relaxed);
836 while(os_atomic_load(&synch, relaxed) < 6);
837
838 if (val == 1) {
839 start_loop_time_run = thread_get_runtime_self();
840 start_loop_time = mach_absolute_time();
841 }
842
843 for (i = 0; i < iterations; i++) {
844 lck_mtx_lock(&test_mtx);
845
846 os_atomic_xchg(my_locked, 1 , relaxed);
847 if (i != iterations - 1) {
848 while(os_atomic_load(&other_thread->state, relaxed) & TH_RUN);
849 os_atomic_xchg(my_locked, 0 , relaxed);
850 }
851
852 lck_mtx_unlock(&test_mtx);
853
854 if (i != iterations - 1)
855 while(os_atomic_load(other_locked, relaxed) == 0);
856 }
857
858 if (val == 1) {
859 end_loop_time = mach_absolute_time();
860 end_loop_time_run = thread_get_runtime_self();
861 }
862
863 os_atomic_inc(&wait_barrier, relaxed);
864 thread_wakeup((event_t) &wait_barrier);
865 thread_terminate_self();
866}
867
868
869int
870lck_mtx_test_mtx_contended_loop_time(
871 int iter,
872 char *buffer,
873 int buffer_size)
874{
875 thread_t thread1, thread2;
876 kern_return_t result;
877 int ret;
878 struct lck_mtx_thread_arg targs[2] = {};
879 synch = 0;
880 wait_barrier = 0;
881 iterations = iter;
882 uint64_t time, time_run;
883
884 targs[0].other_thread = NULL;
885 targs[1].other_thread = NULL;
886
887 result = kernel_thread_start((thread_continue_t)test_mtx_lck_unlock_contended_loop_time_thread, &targs[0], &thread1);
888 if (result != KERN_SUCCESS) {
889 return 0;
890 }
891
892 result = kernel_thread_start((thread_continue_t)test_mtx_lck_unlock_contended_loop_time_thread, &targs[1], &thread2);
893 if (result != KERN_SUCCESS) {
894 thread_deallocate(thread1);
895 return 0;
896 }
897
898 /* this are t1 args */
899 targs[0].my_locked = 0;
900 targs[0].other_locked = &targs[1].my_locked;
901
902 os_atomic_xchg(&targs[0].other_thread, thread2, release);
903
904 /* this are t2 args */
905 targs[1].my_locked = 0;
906 targs[1].other_locked = &targs[0].my_locked;
907
908 os_atomic_xchg(&targs[1].other_thread, thread1, release);
909
910 while (os_atomic_load(&wait_barrier, acquire) != 2) {
911 assert_wait((event_t) &wait_barrier, THREAD_UNINT);
912 if (os_atomic_load(&wait_barrier, acquire) != 2) {
913 (void) thread_block(THREAD_CONTINUE_NULL);
914 } else {
915 clear_wait(current_thread(), THREAD_AWAKENED);
916 }
917 }
918
919 thread_deallocate(thread1);
920 thread_deallocate(thread2);
921
922 absolutetime_to_nanoseconds(end_loop_time - start_loop_time, &time);
923 absolutetime_to_nanoseconds(end_loop_time_run - start_loop_time_run, &time_run);
924
925 ret = snprintf(buffer, buffer_size, "\n");
926 ret += snprintf(&buffer[ret], buffer_size - ret, "total time %llu ns total run time %llu ns ", time, time_run);
927 ret += print_test_mtx_stats_string_name(TEST_MTX_LOCK_STATS, &buffer[ret], buffer_size - ret);
928 ret += snprintf(&buffer[ret], buffer_size - ret, "\n");
929
930 return ret;
931}
932