/* osfmk/kern/test_lock.c (apple/xnu, xnu-4903.270.47) */
#include <mach_ldebug.h>
#include <debug.h>

#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach_debug/lockgroup_info.h>

#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>
#include <libkern/section_keywords.h>
#include <machine/atomic.h>
#include <machine/machine_cpu.h>
#include <string.h>

#include <sys/kdebug.h>
static lck_mtx_t test_mtx;
static lck_grp_t test_mtx_grp;
static lck_grp_attr_t test_mtx_grp_attr;
static lck_attr_t test_mtx_attr;

static lck_grp_t test_mtx_stats_grp;
static lck_grp_attr_t test_mtx_stats_grp_attr;
static lck_attr_t test_mtx_stats_attr;

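/*
 * Per-operation timing aggregate; each element carries its own spinlock
 * so concurrent updaters cannot corrupt the running totals.
 */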
struct lck_mtx_test_stats_elem {
	lck_spin_t lock;
	uint64_t samples;
	uint64_t avg;
	uint64_t max;
	uint64_t min;
	uint64_t tot;
};

#define TEST_MTX_LOCK_STATS                     0
#define TEST_MTX_TRY_LOCK_STATS                 1
#define TEST_MTX_LOCK_SPIN_STATS                2
#define TEST_MTX_LOCK_SPIN_ALWAYS_STATS         3
#define TEST_MTX_TRY_LOCK_SPIN_STATS            4
#define TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS     5
#define TEST_MTX_UNLOCK_MTX_STATS               6
#define TEST_MTX_UNLOCK_SPIN_STATS              7
#define TEST_MTX_MAX_STATS                      8

struct lck_mtx_test_stats_elem lck_mtx_test_stats[TEST_MTX_MAX_STATS];
atomic_bool enabled = TRUE;

static void
init_test_mtx_stats(void)
{
	int i;

	lck_grp_attr_setdefault(&test_mtx_stats_grp_attr);
	lck_grp_init(&test_mtx_stats_grp, "testlck_stats_mtx", &test_mtx_stats_grp_attr);
	lck_attr_setdefault(&test_mtx_stats_attr);

	atomic_store(&enabled, TRUE);
	for (i = 0; i < TEST_MTX_MAX_STATS; i++) {
		memset(&lck_mtx_test_stats[i], 0, sizeof(struct lck_mtx_test_stats_elem));
		lck_mtx_test_stats[i].min = ~0;
		lck_spin_init(&lck_mtx_test_stats[i].lock, &test_mtx_stats_grp, &test_mtx_stats_attr);
	}
}

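/*
 * Fold one elapsed-time sample (in mach absolute time units) into the
 * aggregate for the given operation type; a no-op while stats are
 * disabled, e.g. during the warmup loops.
 */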
static void
update_test_mtx_stats(
	uint64_t start,
	uint64_t end,
	uint type)
{
	if (atomic_load(&enabled) == TRUE) {
		assert(type < TEST_MTX_MAX_STATS);
		assert(start <= end);

		uint64_t elapsed = end - start;
		struct lck_mtx_test_stats_elem* stat = &lck_mtx_test_stats[type];

		lck_spin_lock(&stat->lock);

		stat->samples++;
		stat->tot += elapsed;
		stat->avg = stat->tot / stat->samples;
		if (stat->max < elapsed) {
			stat->max = elapsed;
		}
		if (stat->min > elapsed) {
			stat->min = elapsed;
		}
		lck_spin_unlock(&stat->lock);
	}
}

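/* Reset a single aggregate back to its freshly initialized state. */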
static void
erase_test_mtx_stats(
	uint type)
{
	assert(type < TEST_MTX_MAX_STATS);
	struct lck_mtx_test_stats_elem* stat = &lck_mtx_test_stats[type];

	lck_spin_lock(&stat->lock);

	stat->samples = 0;
	stat->tot = 0;
	stat->avg = 0;
	stat->max = 0;
	stat->min = ~0;

	lck_spin_unlock(&stat->lock);
}

void
erase_all_test_mtx_stats(void)
{
	int i;
	for (i = 0; i < TEST_MTX_MAX_STATS; i++) {
		erase_test_mtx_stats(i);
	}
}

static void
disable_all_test_mtx_stats(void)
{
	atomic_store(&enabled, FALSE);
}

static void
enable_all_test_mtx_stats(void)
{
	atomic_store(&enabled, TRUE);
}

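/*
 * Append the printable name of a stat type to buffer; returns the length
 * snprintf reports, i.e. the number of characters it wanted to write.
 */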
static int
print_test_mtx_stats_string_name(
	int type_num,
	char* buffer,
	int size)
{
	char* type = "";
	switch (type_num) {
	case TEST_MTX_LOCK_STATS:
		type = "TEST_MTX_LOCK_STATS";
		break;
	case TEST_MTX_TRY_LOCK_STATS:
		type = "TEST_MTX_TRY_LOCK_STATS";
		break;
	case TEST_MTX_LOCK_SPIN_STATS:
		type = "TEST_MTX_LOCK_SPIN_STATS";
		break;
	case TEST_MTX_LOCK_SPIN_ALWAYS_STATS:
		type = "TEST_MTX_LOCK_SPIN_ALWAYS_STATS";
		break;
	case TEST_MTX_TRY_LOCK_SPIN_STATS:
		type = "TEST_MTX_TRY_LOCK_SPIN_STATS";
		break;
	case TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS:
		type = "TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS";
		break;
	case TEST_MTX_UNLOCK_MTX_STATS:
		type = "TEST_MTX_UNLOCK_MTX_STATS";
		break;
	case TEST_MTX_UNLOCK_SPIN_STATS:
		type = "TEST_MTX_UNLOCK_SPIN_STATS";
		break;
	default:
		break;
	}

	return snprintf(buffer, size, "%s ", type);
}

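/*
 * Render every aggregate into buffer, converting times to nanoseconds.
 * Each snprintf return value is the length the call wanted to write, so
 * callers are expected to pass a buffer large enough to avoid truncation.
 */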
int
get_test_mtx_stats_string(
	char* buffer,
	int size)
{
	int string_off = 0;
	int ret = 0;

	ret = snprintf(&buffer[string_off], size, "\n");
	size -= ret;
	string_off += ret;

	int i;
	for (i = 0; i < TEST_MTX_MAX_STATS; i++) {
		struct lck_mtx_test_stats_elem* stat = &lck_mtx_test_stats[i];

		ret = snprintf(&buffer[string_off], size, "{ ");
		size -= ret;
		string_off += ret;

		lck_spin_lock(&stat->lock);
		uint64_t time;

		ret = snprintf(&buffer[string_off], size, "samples %llu, ", stat->samples);
		size -= ret;
		string_off += ret;

		absolutetime_to_nanoseconds(stat->tot, &time);
		ret = snprintf(&buffer[string_off], size, "tot %llu ns, ", time);
		size -= ret;
		string_off += ret;

		absolutetime_to_nanoseconds(stat->avg, &time);
		ret = snprintf(&buffer[string_off], size, "avg %llu ns, ", time);
		size -= ret;
		string_off += ret;

		absolutetime_to_nanoseconds(stat->max, &time);
		ret = snprintf(&buffer[string_off], size, "max %llu ns, ", time);
		size -= ret;
		string_off += ret;

		absolutetime_to_nanoseconds(stat->min, &time);
		ret = snprintf(&buffer[string_off], size, "min %llu ns", time);
		size -= ret;
		string_off += ret;

		lck_spin_unlock(&stat->lock);

		ret = snprintf(&buffer[string_off], size, " } ");
		size -= ret;
		string_off += ret;

		ret = print_test_mtx_stats_string_name(i, &buffer[string_off], size);
		size -= ret;
		string_off += ret;

		ret = snprintf(&buffer[string_off], size, "\n");
		size -= ret;
		string_off += ret;
	}

	return string_off;
}

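/*
 * One-time setup. 'first' acts as a tiny state machine (0 = uninitialized,
 * 1 = initialization in progress, 2 = done): concurrent callers either win
 * the cmpxchg and perform the init, or spin until the winner finishes.
 */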
void
lck_mtx_test_init(void)
{
	static int first = 0;

	/*
	 * This should be substituted with a version
	 * of dispatch_once for kernel (rdar:39537874)
	 */
	if (os_atomic_load(&first, acquire) >= 2) {
		return;
	}

	if (os_atomic_cmpxchg(&first, 0, 1, relaxed)) {
		lck_grp_attr_setdefault(&test_mtx_grp_attr);
		lck_grp_init(&test_mtx_grp, "testlck_mtx", &test_mtx_grp_attr);
		lck_attr_setdefault(&test_mtx_attr);
		lck_mtx_init(&test_mtx, &test_mtx_grp, &test_mtx_attr);

		init_test_mtx_stats();

		os_atomic_inc(&first, release);
	}

	while (os_atomic_load(&first, acquire) < 2) {
		;
	}
}

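/*
 * Timed wrappers: each samples mach_absolute_time() around a single lock
 * primitive and records the elapsed time under the matching stat type.
 * The try variants ignore the acquisition result; in these tests the
 * mutex is expected to be available when they run.
 */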
void
lck_mtx_test_lock(void)
{
	uint64_t start;

	start = mach_absolute_time();

	lck_mtx_lock(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_LOCK_STATS);
}

static void
lck_mtx_test_try_lock(void)
{
	uint64_t start;

	start = mach_absolute_time();

	lck_mtx_try_lock(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_TRY_LOCK_STATS);
}

static void
lck_mtx_test_lock_spin(void)
{
	uint64_t start;

	start = mach_absolute_time();

	lck_mtx_lock_spin(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_LOCK_SPIN_STATS);
}

static void
lck_mtx_test_lock_spin_always(void)
{
	uint64_t start;

	start = mach_absolute_time();

	lck_mtx_lock_spin_always(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_LOCK_SPIN_ALWAYS_STATS);
}

static void
lck_mtx_test_try_lock_spin(void)
{
	uint64_t start;

	start = mach_absolute_time();

	lck_mtx_try_lock_spin(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_TRY_LOCK_SPIN_STATS);
}

static void
lck_mtx_test_try_lock_spin_always(void)
{
	uint64_t start;

	start = mach_absolute_time();

	lck_mtx_try_lock_spin_always(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS);
}

void
lck_mtx_test_unlock(void)
{
	uint64_t start;

	start = mach_absolute_time();

	lck_mtx_unlock(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_UNLOCK_MTX_STATS);
}

static void
lck_mtx_test_unlock_mtx(void)
{
	uint64_t start;

	start = mach_absolute_time();

	lck_mtx_unlock(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_UNLOCK_MTX_STATS);
}

static void
lck_mtx_test_unlock_spin(void)
{
	uint64_t start;

	start = mach_absolute_time();

	lck_mtx_unlock(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_UNLOCK_SPIN_STATS);
}

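/*
 * Untimed iterations run before each measured loop so caches, branch
 * predictors, and the lock itself are warm when measurement starts.
 */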
#define WARMUP_ITER 1000

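/*
 * Uncontended single-thread measurement: for each lock variant, time a
 * tight lock/unlock loop both in wall-clock terms (mach_absolute_time)
 * and in on-CPU terms (thread_get_runtime_self).
 */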
int
lck_mtx_test_mtx_uncontended_loop_time(
	int iter, char *buffer, int size)
{
	int i;
	uint64_t tot_time[TEST_MTX_MAX_STATS];
	uint64_t run_time[TEST_MTX_MAX_STATS];
	uint64_t start;
	uint64_t start_run;

	//warming up the test
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_lock(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	start_run = thread_get_runtime_self();
	start = mach_absolute_time();

	for (i = 0; i < iter; i++) {
		lck_mtx_lock(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	absolutetime_to_nanoseconds(mach_absolute_time() - start, &tot_time[TEST_MTX_LOCK_STATS]);
	absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run, &run_time[TEST_MTX_LOCK_STATS]);

	//warming up the test
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_try_lock(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	start_run = thread_get_runtime_self();
	start = mach_absolute_time();

	for (i = 0; i < iter; i++) {
		lck_mtx_try_lock(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	absolutetime_to_nanoseconds(mach_absolute_time() - start, &tot_time[TEST_MTX_TRY_LOCK_STATS]);
	absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run, &run_time[TEST_MTX_TRY_LOCK_STATS]);

	//warming up the test
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_lock_spin(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	start_run = thread_get_runtime_self();
	start = mach_absolute_time();

	for (i = 0; i < iter; i++) {
		lck_mtx_lock_spin(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	absolutetime_to_nanoseconds(mach_absolute_time() - start, &tot_time[TEST_MTX_LOCK_SPIN_STATS]);
	absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run, &run_time[TEST_MTX_LOCK_SPIN_STATS]);

	//warming up the test
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_lock_spin_always(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	start_run = thread_get_runtime_self();
	start = mach_absolute_time();

	for (i = 0; i < iter; i++) {
		lck_mtx_lock_spin_always(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	absolutetime_to_nanoseconds(mach_absolute_time() - start, &tot_time[TEST_MTX_LOCK_SPIN_ALWAYS_STATS]);
	absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run, &run_time[TEST_MTX_LOCK_SPIN_ALWAYS_STATS]);

	//warming up the test
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_try_lock_spin(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	start_run = thread_get_runtime_self();
	start = mach_absolute_time();

	for (i = 0; i < iter; i++) {
		lck_mtx_try_lock_spin(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	absolutetime_to_nanoseconds(mach_absolute_time() - start, &tot_time[TEST_MTX_TRY_LOCK_SPIN_STATS]);
	absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run, &run_time[TEST_MTX_TRY_LOCK_SPIN_STATS]);

	//warming up the test
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_try_lock_spin_always(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	start_run = thread_get_runtime_self();
	start = mach_absolute_time();

	for (i = 0; i < iter; i++) {
		lck_mtx_try_lock_spin_always(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	absolutetime_to_nanoseconds(mach_absolute_time() - start, &tot_time[TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS]);
	absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run, &run_time[TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS]);

	int string_off = 0;
	int ret = 0;

	ret = snprintf(&buffer[string_off], size, "\n");
	size -= ret;
	string_off += ret;

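	/* Only the six lock variants above were timed; skip the two unlock slots. */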
	for (i = 0; i < TEST_MTX_MAX_STATS - 2; i++) {
		ret = snprintf(&buffer[string_off], size, "total time %llu ns total run time %llu ns ", tot_time[i], run_time[i]);
		size -= ret;
		string_off += ret;

		ret = print_test_mtx_stats_string_name(i, &buffer[string_off], size);
		size -= ret;
		string_off += ret;

		ret = snprintf(&buffer[string_off], size, "\n");
		size -= ret;
		string_off += ret;
	}

	return string_off;
}

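/*
 * Uncontended per-operation stats: collection is disabled during warmup
 * so only the measured iterations land in the aggregates.
 */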
static kern_return_t
lck_mtx_test_mtx_lock_uncontended(
	int iter)
{
	int i;

	disable_all_test_mtx_stats();

	//warming up the test for lock
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_test_lock();
		lck_mtx_test_unlock_mtx();
	}

	enable_all_test_mtx_stats();

	for (i = 0; i < iter; i++) {
		lck_mtx_test_lock();
		lck_mtx_test_unlock_mtx();
	}

	disable_all_test_mtx_stats();

	//warming up the test for try_lock
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_test_try_lock();
		lck_mtx_test_unlock_mtx();
	}

	enable_all_test_mtx_stats();

	for (i = 0; i < iter; i++) {
		lck_mtx_test_try_lock();
		lck_mtx_test_unlock_mtx();
	}

	return KERN_SUCCESS;
}

static kern_return_t
lck_mtx_test_mtx_spin_uncontended(
	int iter)
{
	int i;

	disable_all_test_mtx_stats();

	//warming up the test for lock_spin
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_test_lock_spin();
		lck_mtx_test_unlock_spin();
	}

	enable_all_test_mtx_stats();

	for (i = 0; i < iter; i++) {
		lck_mtx_test_lock_spin();
		lck_mtx_test_unlock_spin();
	}

	disable_all_test_mtx_stats();

	//warming up the test for try_lock_spin
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_test_try_lock_spin();
		lck_mtx_test_unlock_spin();
	}

	enable_all_test_mtx_stats();

	for (i = 0; i < iter; i++) {
		lck_mtx_test_try_lock_spin();
		lck_mtx_test_unlock_spin();
	}

	disable_all_test_mtx_stats();

	//warming up the test for lock_spin_always
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_test_lock_spin_always();
		lck_mtx_test_unlock_spin();
	}

	enable_all_test_mtx_stats();

	for (i = 0; i < iter; i++) {
		lck_mtx_test_lock_spin_always();
		lck_mtx_test_unlock_spin();
	}

	disable_all_test_mtx_stats();

	//warming up the test for try_lock_spin_always
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_test_try_lock_spin_always();
		lck_mtx_test_unlock_spin();
	}

	enable_all_test_mtx_stats();

	for (i = 0; i < iter; i++) {
		lck_mtx_test_try_lock_spin_always();
		lck_mtx_test_unlock_spin();
	}

	return KERN_SUCCESS;
}

int
lck_mtx_test_mtx_uncontended(
	int iter,
	char *buffer,
	int size)
{
	erase_all_test_mtx_stats();
	lck_mtx_test_mtx_lock_uncontended(iter);
	lck_mtx_test_mtx_spin_uncontended(iter);

	return get_test_mtx_stats_string(buffer, size);
}

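/* Shared state for the two-thread contended tests. */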
static int synch;
static int wait_barrier;
static int iterations;
static uint64_t start_loop_time;
static uint64_t start_loop_time_run;
static uint64_t end_loop_time;
static uint64_t end_loop_time_run;

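/*
 * Each contending thread advertises whether it currently holds the test
 * mutex through my_locked and watches its peer through other_locked.
 */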
struct lck_mtx_thread_arg {
	int my_locked;
	int* other_locked;
	thread_t other_thread;
};

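/*
 * Contended worker: after taking the lock, each thread spins until its
 * peer has gone off-core (TH_RUN clears once the peer blocks on the
 * mutex), so the next acquisition is guaranteed to be contended, then
 * unlocks and waits for the peer to take its turn.
 */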
static void
test_mtx_lock_unlock_contended_thread(
	void *arg,
	__unused wait_result_t wr)
{
	int i, val;
	struct lck_mtx_thread_arg *info = (struct lck_mtx_thread_arg *) arg;
	thread_t other_thread;
	int* my_locked;
	int* other_locked;

	printf("Starting thread %p\n", current_thread());

	while (os_atomic_load(&info->other_thread, acquire) == NULL) {
		;
	}
	other_thread = info->other_thread;

	printf("Other thread %p\n", other_thread);

	my_locked = &info->my_locked;
	other_locked = info->other_locked;

	*my_locked = 0;
	val = os_atomic_inc(&synch, relaxed);
	while (os_atomic_load(&synch, relaxed) < 2) {
		;
	}

	//warming up the test
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_test_lock();

		os_atomic_xchg(my_locked, 1, relaxed);
		if (i != WARMUP_ITER - 1) {
			while (os_atomic_load(&other_thread->state, relaxed) & TH_RUN) {
				;
			}
			os_atomic_xchg(my_locked, 0, relaxed);
		}

		lck_mtx_test_unlock();

		if (i != WARMUP_ITER - 1) {
			while (os_atomic_load(other_locked, relaxed) == 0) {
				;
			}
		}
	}

	printf("warmup done %p\n", current_thread());
	os_atomic_inc(&synch, relaxed);
	while (os_atomic_load(&synch, relaxed) < 4) {
		;
	}

	//erase statistics
	if (val == 1) {
		erase_all_test_mtx_stats();
	}

	*my_locked = 0;
	/*
	 * synch the threads so they start
	 * concurrently.
	 */
	os_atomic_inc(&synch, relaxed);
	while (os_atomic_load(&synch, relaxed) < 6) {
		;
	}

	for (i = 0; i < iterations; i++) {
		lck_mtx_test_lock();

		os_atomic_xchg(my_locked, 1, relaxed);
		if (i != iterations - 1) {
			while (os_atomic_load(&other_thread->state, relaxed) & TH_RUN) {
				;
			}
			os_atomic_xchg(my_locked, 0, relaxed);
		}
		lck_mtx_test_unlock_mtx();

		if (i != iterations - 1) {
			while (os_atomic_load(other_locked, relaxed) == 0) {
				;
			}
		}
	}

	os_atomic_inc(&wait_barrier, relaxed);
	thread_wakeup((event_t) &wait_barrier);
	thread_terminate_self();
}

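/*
 * Orchestrates the contended stats test: start two workers, publish each
 * thread pointer with release semantics (pairing with the workers'
 * acquire loads), then sleep on wait_barrier until both have finished.
 */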
kern_return_t
lck_mtx_test_mtx_contended(
	int iter,
	char* buffer,
	int buffer_size)
{
	thread_t thread1, thread2;
	kern_return_t result;
	struct lck_mtx_thread_arg targs[2] = {};
	synch = 0;
	wait_barrier = 0;
	iterations = iter;

	erase_all_test_mtx_stats();

	targs[0].other_thread = NULL;
	targs[1].other_thread = NULL;

	result = kernel_thread_start((thread_continue_t)test_mtx_lock_unlock_contended_thread, &targs[0], &thread1);
	if (result != KERN_SUCCESS) {
		return 0;
	}

	result = kernel_thread_start((thread_continue_t)test_mtx_lock_unlock_contended_thread, &targs[1], &thread2);
	if (result != KERN_SUCCESS) {
		thread_deallocate(thread1);
		return 0;
	}

	/* these are t1's args */
	targs[0].my_locked = 0;
	targs[0].other_locked = &targs[1].my_locked;

	os_atomic_xchg(&targs[0].other_thread, thread2, release);

	/* these are t2's args */
	targs[1].my_locked = 0;
	targs[1].other_locked = &targs[0].my_locked;

	os_atomic_xchg(&targs[1].other_thread, thread1, release);

	while (os_atomic_load(&wait_barrier, relaxed) != 2) {
		assert_wait((event_t) &wait_barrier, THREAD_UNINT);
		if (os_atomic_load(&wait_barrier, relaxed) != 2) {
			(void) thread_block(THREAD_CONTINUE_NULL);
		} else {
			clear_wait(current_thread(), THREAD_AWAKENED);
		}
	}

	thread_deallocate(thread1);
	thread_deallocate(thread2);

	return get_test_mtx_stats_string(buffer, buffer_size);
}

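/*
 * Loop-time variant of the contended worker: same handshake as above,
 * but the first thread to arrive (val == 1) brackets the measured loop
 * with wall-clock and on-CPU timestamps instead of per-op stats.
 */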
static void
test_mtx_lck_unlock_contended_loop_time_thread(
	void *arg,
	__unused wait_result_t wr)
{
	int i, val;
	struct lck_mtx_thread_arg *info = (struct lck_mtx_thread_arg *) arg;
	thread_t other_thread;
	int* my_locked;
	int* other_locked;

	printf("Starting thread %p\n", current_thread());

	while (os_atomic_load(&info->other_thread, acquire) == NULL) {
		;
	}
	other_thread = info->other_thread;

	printf("Other thread %p\n", other_thread);

	my_locked = &info->my_locked;
	other_locked = info->other_locked;

	*my_locked = 0;
	val = os_atomic_inc(&synch, relaxed);
	while (os_atomic_load(&synch, relaxed) < 2) {
		;
	}

	//warming up the test
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_lock(&test_mtx);

		os_atomic_xchg(my_locked, 1, relaxed);
		if (i != WARMUP_ITER - 1) {
			while (os_atomic_load(&other_thread->state, relaxed) & TH_RUN) {
				;
			}
			os_atomic_xchg(my_locked, 0, relaxed);
		}

		lck_mtx_unlock(&test_mtx);

		if (i != WARMUP_ITER - 1) {
			while (os_atomic_load(other_locked, relaxed) == 0) {
				;
			}
		}
	}

	printf("warmup done %p\n", current_thread());

	os_atomic_inc(&synch, relaxed);
	while (os_atomic_load(&synch, relaxed) < 4) {
		;
	}

	*my_locked = 0;

	/*
	 * synch the threads so they start
	 * concurrently.
	 */
	os_atomic_inc(&synch, relaxed);
	while (os_atomic_load(&synch, relaxed) < 6) {
		;
	}

	if (val == 1) {
		start_loop_time_run = thread_get_runtime_self();
		start_loop_time = mach_absolute_time();
	}

	for (i = 0; i < iterations; i++) {
		lck_mtx_lock(&test_mtx);

		os_atomic_xchg(my_locked, 1, relaxed);
		if (i != iterations - 1) {
			while (os_atomic_load(&other_thread->state, relaxed) & TH_RUN) {
				;
			}
			os_atomic_xchg(my_locked, 0, relaxed);
		}

		lck_mtx_unlock(&test_mtx);

		if (i != iterations - 1) {
			while (os_atomic_load(other_locked, relaxed) == 0) {
				;
			}
		}
	}

	if (val == 1) {
		end_loop_time = mach_absolute_time();
		end_loop_time_run = thread_get_runtime_self();
	}

	os_atomic_inc(&wait_barrier, relaxed);
	thread_wakeup((event_t) &wait_barrier);
	thread_terminate_self();
}

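/*
 * Entry point for the contended loop-time test: mirrors
 * lck_mtx_test_mtx_contended, then reports the loop totals recorded by
 * the first worker.
 */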
int
lck_mtx_test_mtx_contended_loop_time(
	int iter,
	char *buffer,
	int buffer_size)
{
	thread_t thread1, thread2;
	kern_return_t result;
	int ret;
	struct lck_mtx_thread_arg targs[2] = {};
	synch = 0;
	wait_barrier = 0;
	iterations = iter;
	uint64_t time, time_run;

	targs[0].other_thread = NULL;
	targs[1].other_thread = NULL;

	result = kernel_thread_start((thread_continue_t)test_mtx_lck_unlock_contended_loop_time_thread, &targs[0], &thread1);
	if (result != KERN_SUCCESS) {
		return 0;
	}

	result = kernel_thread_start((thread_continue_t)test_mtx_lck_unlock_contended_loop_time_thread, &targs[1], &thread2);
	if (result != KERN_SUCCESS) {
		thread_deallocate(thread1);
		return 0;
	}

	/* these are t1's args */
	targs[0].my_locked = 0;
	targs[0].other_locked = &targs[1].my_locked;

	os_atomic_xchg(&targs[0].other_thread, thread2, release);

	/* these are t2's args */
	targs[1].my_locked = 0;
	targs[1].other_locked = &targs[0].my_locked;

	os_atomic_xchg(&targs[1].other_thread, thread1, release);

	while (os_atomic_load(&wait_barrier, acquire) != 2) {
		assert_wait((event_t) &wait_barrier, THREAD_UNINT);
		if (os_atomic_load(&wait_barrier, acquire) != 2) {
			(void) thread_block(THREAD_CONTINUE_NULL);
		} else {
			clear_wait(current_thread(), THREAD_AWAKENED);
		}
	}

	thread_deallocate(thread1);
	thread_deallocate(thread2);

	absolutetime_to_nanoseconds(end_loop_time - start_loop_time, &time);
	absolutetime_to_nanoseconds(end_loop_time_run - start_loop_time_run, &time_run);

	ret = snprintf(buffer, buffer_size, "\n");
	ret += snprintf(&buffer[ret], buffer_size - ret, "total time %llu ns total run time %llu ns ", time, time_run);
	ret += print_test_mtx_stats_string_name(TEST_MTX_LOCK_STATS, &buffer[ret], buffer_size - ret);
	ret += snprintf(&buffer[ret], buffer_size - ret, "\n");

	return ret;
}