/* osfmk/kern/test_lock.c (from xnu-7195.101.1) */
#include <mach_ldebug.h>
#include <debug.h>

#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach_debug/lockgroup_info.h>

#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>
#include <libkern/section_keywords.h>
#include <machine/atomic.h>
#include <machine/machine_cpu.h>
#include <string.h>

#include <sys/kdebug.h>

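/*
 * Microbenchmarks for the lck_mtx_* primitives.
 *
 * Two kinds of measurements are collected:
 *  - per-call statistics (samples, total/avg/max/min elapsed time),
 *    kept in lck_mtx_test_stats and serialized by per-entry spinlocks;
 *  - whole-loop timings (wall time and on-CPU run time) for tight
 *    lock/unlock loops, both uncontended and contended by two
 *    kernel threads.
 */
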
static lck_mtx_t	test_mtx;
static lck_grp_t	test_mtx_grp;
static lck_grp_attr_t	test_mtx_grp_attr;
static lck_attr_t	test_mtx_attr;

static lck_grp_t	test_mtx_stats_grp;
static lck_grp_attr_t	test_mtx_stats_grp_attr;
static lck_attr_t	test_mtx_stats_attr;

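/*
 * One statistics bucket per measured operation. `lock` serializes
 * updates, `avg` is the running mean (tot / samples), and `min`
 * starts at ~0 so the first sample always replaces it.
 */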
struct lck_mtx_test_stats_elem {
	lck_spin_t	lock;
	uint64_t	samples;
	uint64_t	avg;
	uint64_t	max;
	uint64_t	min;
	uint64_t	tot;
};

#define TEST_MTX_LOCK_STATS                     0
#define TEST_MTX_TRY_LOCK_STATS                 1
#define TEST_MTX_LOCK_SPIN_STATS                2
#define TEST_MTX_LOCK_SPIN_ALWAYS_STATS         3
#define TEST_MTX_TRY_LOCK_SPIN_STATS            4
#define TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS     5
#define TEST_MTX_UNLOCK_MTX_STATS               6
#define TEST_MTX_UNLOCK_SPIN_STATS              7
#define TEST_MTX_MAX_STATS                      8

struct lck_mtx_test_stats_elem lck_mtx_test_stats[TEST_MTX_MAX_STATS];
atomic_bool enabled = TRUE;

static void
init_test_mtx_stats(void)
{
	int i;

	lck_grp_attr_setdefault(&test_mtx_stats_grp_attr);
	lck_grp_init(&test_mtx_stats_grp, "testlck_stats_mtx", &test_mtx_stats_grp_attr);
	lck_attr_setdefault(&test_mtx_stats_attr);

	atomic_store(&enabled, TRUE);
	for (i = 0; i < TEST_MTX_MAX_STATS; i++) {
		memset(&lck_mtx_test_stats[i], 0, sizeof(struct lck_mtx_test_stats_elem));
		lck_mtx_test_stats[i].min = ~0;
		lck_spin_init(&lck_mtx_test_stats[i].lock, &test_mtx_stats_grp, &test_mtx_stats_attr);
	}
}

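/*
 * Record one elapsed-time sample (in mach absolute time units) for
 * the given stat type. A cheap no-op while stats are disabled, so
 * the warm-up loops do not pollute the numbers.
 */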
static void
update_test_mtx_stats(
	uint64_t start,
	uint64_t end,
	uint type)
{
	if (atomic_load(&enabled) == TRUE) {
		assert(type < TEST_MTX_MAX_STATS);
		assert(start <= end);

		uint64_t elapsed = end - start;
		struct lck_mtx_test_stats_elem* stat = &lck_mtx_test_stats[type];

		lck_spin_lock(&stat->lock);

		stat->samples++;
		stat->tot += elapsed;
		stat->avg = stat->tot / stat->samples;
		if (stat->max < elapsed) {
			stat->max = elapsed;
		}
		if (stat->min > elapsed) {
			stat->min = elapsed;
		}
		lck_spin_unlock(&stat->lock);
	}
}

static void
erase_test_mtx_stats(
	uint type)
{
	assert(type < TEST_MTX_MAX_STATS);
	struct lck_mtx_test_stats_elem* stat = &lck_mtx_test_stats[type];

	lck_spin_lock(&stat->lock);

	stat->samples = 0;
	stat->tot = 0;
	stat->avg = 0;
	stat->max = 0;
	stat->min = ~0;

	lck_spin_unlock(&stat->lock);
}

void
erase_all_test_mtx_stats(void)
{
	int i;
	for (i = 0; i < TEST_MTX_MAX_STATS; i++) {
		erase_test_mtx_stats(i);
	}
}

static void
disable_all_test_mtx_stats(void)
{
	atomic_store(&enabled, FALSE);
}

static void
enable_all_test_mtx_stats(void)
{
	atomic_store(&enabled, TRUE);
}

static int
print_test_mtx_stats_string_name(
	int type_num,
	char* buffer,
	int size)
{
	const char* type = "";
	switch (type_num) {
	case TEST_MTX_LOCK_STATS:
		type = "TEST_MTX_LOCK_STATS";
		break;
	case TEST_MTX_TRY_LOCK_STATS:
		type = "TEST_MTX_TRY_LOCK_STATS";
		break;
	case TEST_MTX_LOCK_SPIN_STATS:
		type = "TEST_MTX_LOCK_SPIN_STATS";
		break;
	case TEST_MTX_LOCK_SPIN_ALWAYS_STATS:
		type = "TEST_MTX_LOCK_SPIN_ALWAYS_STATS";
		break;
	case TEST_MTX_TRY_LOCK_SPIN_STATS:
		type = "TEST_MTX_TRY_LOCK_SPIN_STATS";
		break;
	case TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS:
		type = "TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS";
		break;
	case TEST_MTX_UNLOCK_MTX_STATS:
		type = "TEST_MTX_UNLOCK_MTX_STATS";
		break;
	case TEST_MTX_UNLOCK_SPIN_STATS:
		type = "TEST_MTX_UNLOCK_SPIN_STATS";
		break;
	default:
		break;
	}

	return scnprintf(buffer, size, "%s ", type);
}

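/*
 * Render all statistics buckets into `buffer`, one line per stat
 * type, converting mach absolute time to nanoseconds. Returns the
 * number of characters written (scnprintf semantics, so output is
 * truncated rather than overflowed when `size` is too small).
 */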
int
get_test_mtx_stats_string(
	char* buffer,
	int size)
{
	int string_off = 0;
	int ret = 0;

	ret = scnprintf(&buffer[string_off], size, "\n");
	size -= ret;
	string_off += ret;

	int i;
	for (i = 0; i < TEST_MTX_MAX_STATS; i++) {
		struct lck_mtx_test_stats_elem* stat = &lck_mtx_test_stats[i];

		ret = scnprintf(&buffer[string_off], size, "{ ");
		size -= ret;
		string_off += ret;

		lck_spin_lock(&stat->lock);
		uint64_t time;

		ret = scnprintf(&buffer[string_off], size, "samples %llu, ", stat->samples);
		size -= ret;
		string_off += ret;

		absolutetime_to_nanoseconds(stat->tot, &time);
		ret = scnprintf(&buffer[string_off], size, "tot %llu ns, ", time);
		size -= ret;
		string_off += ret;

		absolutetime_to_nanoseconds(stat->avg, &time);
		ret = scnprintf(&buffer[string_off], size, "avg %llu ns, ", time);
		size -= ret;
		string_off += ret;

		absolutetime_to_nanoseconds(stat->max, &time);
		ret = scnprintf(&buffer[string_off], size, "max %llu ns, ", time);
		size -= ret;
		string_off += ret;

		absolutetime_to_nanoseconds(stat->min, &time);
		ret = scnprintf(&buffer[string_off], size, "min %llu ns", time);
		size -= ret;
		string_off += ret;

		lck_spin_unlock(&stat->lock);

		ret = scnprintf(&buffer[string_off], size, " } ");
		size -= ret;
		string_off += ret;

		ret = print_test_mtx_stats_string_name(i, &buffer[string_off], size);
		size -= ret;
		string_off += ret;

		ret = scnprintf(&buffer[string_off], size, "\n");
		size -= ret;
		string_off += ret;
	}

	return string_off;
}

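/*
 * One-time initialization of the test mutex and its statistics.
 * Safe to call from several threads: `first` goes 0 -> 1 (the winner
 * initializes) -> 2 (initialization visible); losers spin until it
 * reaches 2.
 */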
void
lck_mtx_test_init(void)
{
	static int first = 0;

	/*
	 * This should be substituted with a version
	 * of dispatch_once for kernel (rdar:39537874)
	 */
	if (os_atomic_load(&first, acquire) >= 2) {
		return;
	}

	if (os_atomic_cmpxchg(&first, 0, 1, relaxed)) {
		lck_grp_attr_setdefault(&test_mtx_grp_attr);
		lck_grp_init(&test_mtx_grp, "testlck_mtx", &test_mtx_grp_attr);
		lck_attr_setdefault(&test_mtx_attr);
		lck_mtx_init(&test_mtx, &test_mtx_grp, &test_mtx_attr);

		init_test_mtx_stats();

		os_atomic_inc(&first, release);
	}

	while (os_atomic_load(&first, acquire) < 2) {
		;
	}
}

void
lck_mtx_test_lock(void)
{
	uint64_t start;

	start = mach_absolute_time();

	lck_mtx_lock(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_LOCK_STATS);
}

static void
lck_mtx_test_try_lock(void)
{
	uint64_t start;

	start = mach_absolute_time();

	/* only used uncontended, so the try-lock always succeeds; the result is ignored */
	(void) lck_mtx_try_lock(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_TRY_LOCK_STATS);
}

static void
lck_mtx_test_lock_spin(void)
{
	uint64_t start;

	start = mach_absolute_time();

	lck_mtx_lock_spin(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_LOCK_SPIN_STATS);
}

static void
lck_mtx_test_lock_spin_always(void)
{
	uint64_t start;

	start = mach_absolute_time();

	lck_mtx_lock_spin_always(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_LOCK_SPIN_ALWAYS_STATS);
}

static void
lck_mtx_test_try_lock_spin(void)
{
	uint64_t start;

	start = mach_absolute_time();

	/* only used uncontended, so the try-lock always succeeds; the result is ignored */
	(void) lck_mtx_try_lock_spin(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_TRY_LOCK_SPIN_STATS);
}

static void
lck_mtx_test_try_lock_spin_always(void)
{
	uint64_t start;

	start = mach_absolute_time();

	(void) lck_mtx_try_lock_spin_always(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS);
}

void
lck_mtx_test_unlock(void)
{
	uint64_t start;

	start = mach_absolute_time();

	lck_mtx_unlock(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_UNLOCK_MTX_STATS);
}

/*
 * The two helpers below share the same unlock path; they differ only
 * in the stat slot they record, so mutex-mode and spin-mode
 * acquisitions can be accounted separately.
 */
static void
lck_mtx_test_unlock_mtx(void)
{
	uint64_t start;

	start = mach_absolute_time();

	lck_mtx_unlock(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_UNLOCK_MTX_STATS);
}

static void
lck_mtx_test_unlock_spin(void)
{
	uint64_t start;

	start = mach_absolute_time();

	lck_mtx_unlock(&test_mtx);

	update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_UNLOCK_SPIN_STATS);
}

#define WARMUP_ITER 1000

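/*
 * Time `iter` uncontended lock/unlock pairs for each lock variant,
 * reporting both wall time (mach_absolute_time) and on-CPU time
 * (thread_get_runtime_self) per variant. The two TEST_MTX_UNLOCK_*
 * slots are not exercised here, hence the TEST_MTX_MAX_STATS - 2
 * bound in the print loop at the end.
 */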
int
lck_mtx_test_mtx_uncontended_loop_time(
	int iter, char *buffer, int size)
{
	int i;
	uint64_t tot_time[TEST_MTX_MAX_STATS];
	uint64_t run_time[TEST_MTX_MAX_STATS];
	uint64_t start;
	uint64_t start_run;

	// warming up the test
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_lock(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	start_run = thread_get_runtime_self();
	start = mach_absolute_time();

	for (i = 0; i < iter; i++) {
		lck_mtx_lock(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	absolutetime_to_nanoseconds(mach_absolute_time() - start, &tot_time[TEST_MTX_LOCK_STATS]);
	absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run, &run_time[TEST_MTX_LOCK_STATS]);

	// warming up the test
	for (i = 0; i < WARMUP_ITER; i++) {
		(void) lck_mtx_try_lock(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	start_run = thread_get_runtime_self();
	start = mach_absolute_time();

	for (i = 0; i < iter; i++) {
		(void) lck_mtx_try_lock(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	absolutetime_to_nanoseconds(mach_absolute_time() - start, &tot_time[TEST_MTX_TRY_LOCK_STATS]);
	absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run, &run_time[TEST_MTX_TRY_LOCK_STATS]);

	// warming up the test
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_lock_spin(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	start_run = thread_get_runtime_self();
	start = mach_absolute_time();

	for (i = 0; i < iter; i++) {
		lck_mtx_lock_spin(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	absolutetime_to_nanoseconds(mach_absolute_time() - start, &tot_time[TEST_MTX_LOCK_SPIN_STATS]);
	absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run, &run_time[TEST_MTX_LOCK_SPIN_STATS]);

	// warming up the test
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_lock_spin_always(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	start_run = thread_get_runtime_self();
	start = mach_absolute_time();

	for (i = 0; i < iter; i++) {
		lck_mtx_lock_spin_always(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	absolutetime_to_nanoseconds(mach_absolute_time() - start, &tot_time[TEST_MTX_LOCK_SPIN_ALWAYS_STATS]);
	absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run, &run_time[TEST_MTX_LOCK_SPIN_ALWAYS_STATS]);

	// warming up the test
	for (i = 0; i < WARMUP_ITER; i++) {
		(void) lck_mtx_try_lock_spin(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	start_run = thread_get_runtime_self();
	start = mach_absolute_time();

	for (i = 0; i < iter; i++) {
		(void) lck_mtx_try_lock_spin(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	absolutetime_to_nanoseconds(mach_absolute_time() - start, &tot_time[TEST_MTX_TRY_LOCK_SPIN_STATS]);
	absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run, &run_time[TEST_MTX_TRY_LOCK_SPIN_STATS]);

	// warming up the test
	for (i = 0; i < WARMUP_ITER; i++) {
		(void) lck_mtx_try_lock_spin_always(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	start_run = thread_get_runtime_self();
	start = mach_absolute_time();

	for (i = 0; i < iter; i++) {
		(void) lck_mtx_try_lock_spin_always(&test_mtx);
		lck_mtx_unlock(&test_mtx);
	}

	absolutetime_to_nanoseconds(mach_absolute_time() - start, &tot_time[TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS]);
	absolutetime_to_nanoseconds(thread_get_runtime_self() - start_run, &run_time[TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS]);

	int string_off = 0;
	int ret = 0;

	ret = scnprintf(&buffer[string_off], size, "\n");
	size -= ret;
	string_off += ret;

	/* the two TEST_MTX_UNLOCK_* slots are not measured by this test */
	for (i = 0; i < TEST_MTX_MAX_STATS - 2; i++) {
		ret = scnprintf(&buffer[string_off], size, "total time %llu ns total run time %llu ns ", tot_time[i], run_time[i]);
		size -= ret;
		string_off += ret;

		ret = print_test_mtx_stats_string_name(i, &buffer[string_off], size);
		size -= ret;
		string_off += ret;

		ret = scnprintf(&buffer[string_off], size, "\n");
		size -= ret;
		string_off += ret;
	}

	return string_off;
}

static kern_return_t
lck_mtx_test_mtx_lock_uncontended(
	int iter)
{
	int i;

	disable_all_test_mtx_stats();

	// warming up the test for lock
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_test_lock();
		lck_mtx_test_unlock_mtx();
	}

	enable_all_test_mtx_stats();

	for (i = 0; i < iter; i++) {
		lck_mtx_test_lock();
		lck_mtx_test_unlock_mtx();
	}

	disable_all_test_mtx_stats();

	// warming up the test for try_lock
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_test_try_lock();
		lck_mtx_test_unlock_mtx();
	}

	enable_all_test_mtx_stats();

	for (i = 0; i < iter; i++) {
		lck_mtx_test_try_lock();
		lck_mtx_test_unlock_mtx();
	}

	return KERN_SUCCESS;
}

static kern_return_t
lck_mtx_test_mtx_spin_uncontended(
	int iter)
{
	int i;

	disable_all_test_mtx_stats();

	// warming up the test for lock_spin
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_test_lock_spin();
		lck_mtx_test_unlock_spin();
	}

	enable_all_test_mtx_stats();

	for (i = 0; i < iter; i++) {
		lck_mtx_test_lock_spin();
		lck_mtx_test_unlock_spin();
	}

	disable_all_test_mtx_stats();

	// warming up the test for try_lock_spin
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_test_try_lock_spin();
		lck_mtx_test_unlock_spin();
	}

	enable_all_test_mtx_stats();

	for (i = 0; i < iter; i++) {
		lck_mtx_test_try_lock_spin();
		lck_mtx_test_unlock_spin();
	}

	disable_all_test_mtx_stats();

	// warming up the test for lock_spin_always
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_test_lock_spin_always();
		lck_mtx_test_unlock_spin();
	}

	enable_all_test_mtx_stats();

	for (i = 0; i < iter; i++) {
		lck_mtx_test_lock_spin_always();
		lck_mtx_test_unlock_spin();
	}

	disable_all_test_mtx_stats();

	// warming up the test for try_lock_spin_always
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_test_try_lock_spin_always();
		lck_mtx_test_unlock_spin();
	}

	enable_all_test_mtx_stats();

	for (i = 0; i < iter; i++) {
		lck_mtx_test_try_lock_spin_always();
		lck_mtx_test_unlock_spin();
	}

	return KERN_SUCCESS;
}

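/*
 * Entry point for the uncontended statistics test: resets the stats,
 * runs the mutex and spin variants, and renders the results.
 *
 * A minimal caller sketch (hypothetical, e.g. from a sysctl or test
 * handler; the buffer size is an arbitrary choice):
 *
 *	char buf[2048];
 *	lck_mtx_test_init();
 *	int len = lck_mtx_test_mtx_uncontended(100000, buf, sizeof(buf));
 *	printf("%s", buf); // scnprintf NUL-terminates, len chars written
 */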
int
lck_mtx_test_mtx_uncontended(
	int iter,
	char *buffer,
	int size)
{
	erase_all_test_mtx_stats();
	lck_mtx_test_mtx_lock_uncontended(iter);
	lck_mtx_test_mtx_spin_uncontended(iter);

	return get_test_mtx_stats_string(buffer, size);
}

static int synch;
static int wait_barrier;
static int iterations;
static uint64_t start_loop_time;
static uint64_t start_loop_time_run;
static uint64_t end_loop_time;
static uint64_t end_loop_time_run;

struct lck_mtx_thread_arg {
	int		my_locked;
	int*		other_locked;
	thread_t	other_thread;
	int		type;
};

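/*
 * Body of the two contended-test threads. Each thread spins until it
 * sees its peer (other_thread), then rendezvous on `synch` (2 after
 * startup, 4 after warm-up, 6 before the measured loop). Inside the
 * loop a thread holds the lock until the peer has blocked
 * (FULL_CONTENDED) or busy-waits roughly MutexSpin/2 while holding it
 * (the half-contended case), then waits for the peer to take the lock
 * before retrying.
 */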
static void
test_mtx_lock_unlock_contended_thread(
	void *arg,
	__unused wait_result_t wr)
{
	int i, val;
	struct lck_mtx_thread_arg *info = (struct lck_mtx_thread_arg *) arg;
	thread_t other_thread;
	int* my_locked;
	int* other_locked;
	int type;
	uint64_t start, stop;

	printf("Starting thread %p\n", current_thread());

	while (os_atomic_load(&info->other_thread, acquire) == NULL) {
		;
	}
	other_thread = info->other_thread;

	printf("Other thread %p\n", other_thread);

	my_locked = &info->my_locked;
	other_locked = info->other_locked;
	type = info->type;

	*my_locked = 0;
	val = os_atomic_inc(&synch, relaxed);
	while (os_atomic_load(&synch, relaxed) < 2) {
		;
	}

	// warming up the test
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_test_lock();
		int prev = os_atomic_load(other_locked, relaxed);
		os_atomic_add(my_locked, 1, relaxed);
		if (i != WARMUP_ITER - 1) {
			if (type == FULL_CONTENDED) {
				while (os_atomic_load(&other_thread->state, relaxed) & TH_RUN) {
					;
				}
			} else {
				start = mach_absolute_time();
				stop = start + (MutexSpin / 2);
				while (mach_absolute_time() < stop) {
					;
				}
			}
		}

		lck_mtx_test_unlock();

		if (i != WARMUP_ITER - 1) {
			while (os_atomic_load(other_locked, relaxed) == prev) {
				;
			}
		}
	}

	printf("warmup done %p\n", current_thread());
	os_atomic_inc(&synch, relaxed);
	while (os_atomic_load(&synch, relaxed) < 4) {
		;
	}

	/* only the first thread to have arrived erases the statistics */
	if (val == 1) {
		erase_all_test_mtx_stats();
	}

	*my_locked = 0;
	/*
	 * synch the threads so they start
	 * concurrently.
	 */
	os_atomic_inc(&synch, relaxed);
	while (os_atomic_load(&synch, relaxed) < 6) {
		;
	}

	for (i = 0; i < iterations; i++) {
		lck_mtx_test_lock();
		int prev = os_atomic_load(other_locked, relaxed);
		os_atomic_add(my_locked, 1, relaxed);
		if (i != iterations - 1) {
			if (type == FULL_CONTENDED) {
				while (os_atomic_load(&other_thread->state, relaxed) & TH_RUN) {
					;
				}
			} else {
				start = mach_absolute_time();
				stop = start + (MutexSpin / 2);
				while (mach_absolute_time() < stop) {
					;
				}
			}
		}
		lck_mtx_test_unlock_mtx();

		if (i != iterations - 1) {
			while (os_atomic_load(other_locked, relaxed) == prev) {
				;
			}
		}
	}

	os_atomic_inc(&wait_barrier, relaxed);
	thread_wakeup((event_t) &wait_barrier);
	thread_terminate_self();
}

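/*
 * Run `iter` contended lock/unlock iterations on two kernel threads
 * and render the per-call statistics into `buffer`. `type` selects
 * FULL_CONTENDED or HALF_CONTENDED behavior. The caller blocks on
 * `wait_barrier` until both threads have finished.
 */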
kern_return_t
lck_mtx_test_mtx_contended(
	int iter,
	char* buffer,
	int buffer_size,
	int type)
{
	thread_t thread1, thread2;
	kern_return_t result;
	struct lck_mtx_thread_arg targs[2] = {};
	synch = 0;
	wait_barrier = 0;
	iterations = iter;

	if (type < 0 || type >= MAX_CONDENDED) {
		printf("%s invalid type %d\n", __func__, type);
		return 0;
	}

	erase_all_test_mtx_stats();

	targs[0].other_thread = NULL;
	targs[1].other_thread = NULL;
	targs[0].type = type;
	targs[1].type = type;

	result = kernel_thread_start((thread_continue_t)test_mtx_lock_unlock_contended_thread, &targs[0], &thread1);
	if (result != KERN_SUCCESS) {
		return 0;
	}

	result = kernel_thread_start((thread_continue_t)test_mtx_lock_unlock_contended_thread, &targs[1], &thread2);
	if (result != KERN_SUCCESS) {
		thread_deallocate(thread1);
		return 0;
	}

	/* these are t1's args */
	targs[0].my_locked = 0;
	targs[0].other_locked = &targs[1].my_locked;

	os_atomic_xchg(&targs[0].other_thread, thread2, release);

	/* these are t2's args */
	targs[1].my_locked = 0;
	targs[1].other_locked = &targs[0].my_locked;

	os_atomic_xchg(&targs[1].other_thread, thread1, release);

	while (os_atomic_load(&wait_barrier, relaxed) != 2) {
		assert_wait((event_t) &wait_barrier, THREAD_UNINT);
		if (os_atomic_load(&wait_barrier, relaxed) != 2) {
			(void) thread_block(THREAD_CONTINUE_NULL);
		} else {
			clear_wait(current_thread(), THREAD_AWAKENED);
		}
	}

	thread_deallocate(thread1);
	thread_deallocate(thread2);

	return get_test_mtx_stats_string(buffer, buffer_size);
}

static void
test_mtx_lck_unlock_contended_loop_time_thread(
	void *arg,
	__unused wait_result_t wr)
{
	int i, val;
	struct lck_mtx_thread_arg *info = (struct lck_mtx_thread_arg *) arg;
	thread_t other_thread;
	int* my_locked;
	int* other_locked;
	int type;
	uint64_t start, stop;

	printf("Starting thread %p\n", current_thread());

	while (os_atomic_load(&info->other_thread, acquire) == NULL) {
		;
	}
	other_thread = info->other_thread;

	printf("Other thread %p\n", other_thread);

	my_locked = &info->my_locked;
	other_locked = info->other_locked;
	type = info->type;

	*my_locked = 0;
	val = os_atomic_inc(&synch, relaxed);
	while (os_atomic_load(&synch, relaxed) < 2) {
		;
	}

	// warming up the test
	for (i = 0; i < WARMUP_ITER; i++) {
		lck_mtx_lock(&test_mtx);

		int prev = os_atomic_load(other_locked, relaxed);
		os_atomic_add(my_locked, 1, relaxed);
		if (i != WARMUP_ITER - 1) {
			if (type == FULL_CONTENDED) {
				while (os_atomic_load(&other_thread->state, relaxed) & TH_RUN) {
					;
				}
			} else {
				start = mach_absolute_time();
				stop = start + (MutexSpin / 2);
				while (mach_absolute_time() < stop) {
					;
				}
			}
		}

		lck_mtx_unlock(&test_mtx);

		if (i != WARMUP_ITER - 1) {
			while (os_atomic_load(other_locked, relaxed) == prev) {
				;
			}
		}
	}

	printf("warmup done %p\n", current_thread());

	os_atomic_inc(&synch, relaxed);
	while (os_atomic_load(&synch, relaxed) < 4) {
		;
	}

	*my_locked = 0;

	/*
	 * synch the threads so they start
	 * concurrently.
	 */
	os_atomic_inc(&synch, relaxed);
	while (os_atomic_load(&synch, relaxed) < 6) {
		;
	}

	/* the first thread to have incremented synch records the loop times */
	if (val == 1) {
		start_loop_time_run = thread_get_runtime_self();
		start_loop_time = mach_absolute_time();
	}

	for (i = 0; i < iterations; i++) {
		lck_mtx_lock(&test_mtx);

		int prev = os_atomic_load(other_locked, relaxed);
		os_atomic_add(my_locked, 1, relaxed);
		if (i != iterations - 1) {
			if (type == FULL_CONTENDED) {
				while (os_atomic_load(&other_thread->state, relaxed) & TH_RUN) {
					;
				}
			} else {
				start = mach_absolute_time();
				stop = start + (MutexSpin / 2);
				while (mach_absolute_time() < stop) {
					;
				}
			}
		}

		lck_mtx_unlock(&test_mtx);

		if (i != iterations - 1) {
			while (os_atomic_load(other_locked, relaxed) == prev) {
				;
			}
		}
	}

	if (val == 1) {
		end_loop_time = mach_absolute_time();
		end_loop_time_run = thread_get_runtime_self();
	}

	os_atomic_inc(&wait_barrier, relaxed);
	thread_wakeup((event_t) &wait_barrier);
	thread_terminate_self();
}

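/*
 * Same two-thread contended setup, but reports a single wall-time /
 * run-time figure for the whole measured loop (captured by whichever
 * thread incremented `synch` first) instead of per-call statistics.
 */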
int
lck_mtx_test_mtx_contended_loop_time(
	int iter,
	char *buffer,
	int buffer_size,
	int type)
{
	thread_t thread1, thread2;
	kern_return_t result;
	int ret;
	struct lck_mtx_thread_arg targs[2] = {};
	synch = 0;
	wait_barrier = 0;
	iterations = iter;
	uint64_t time, time_run;

	if (type < 0 || type >= MAX_CONDENDED) {
		printf("%s invalid type %d\n", __func__, type);
		return 0;
	}

	targs[0].other_thread = NULL;
	targs[1].other_thread = NULL;

	result = kernel_thread_start((thread_continue_t)test_mtx_lck_unlock_contended_loop_time_thread, &targs[0], &thread1);
	if (result != KERN_SUCCESS) {
		return 0;
	}

	result = kernel_thread_start((thread_continue_t)test_mtx_lck_unlock_contended_loop_time_thread, &targs[1], &thread2);
	if (result != KERN_SUCCESS) {
		thread_deallocate(thread1);
		return 0;
	}

	/* these are t1's args */
	targs[0].my_locked = 0;
	targs[0].other_locked = &targs[1].my_locked;
	targs[0].type = type;

	os_atomic_xchg(&targs[0].other_thread, thread2, release);

	/* these are t2's args */
	targs[1].my_locked = 0;
	targs[1].other_locked = &targs[0].my_locked;
	targs[1].type = type;

	os_atomic_xchg(&targs[1].other_thread, thread1, release);

	while (os_atomic_load(&wait_barrier, acquire) != 2) {
		assert_wait((event_t) &wait_barrier, THREAD_UNINT);
		if (os_atomic_load(&wait_barrier, acquire) != 2) {
			(void) thread_block(THREAD_CONTINUE_NULL);
		} else {
			clear_wait(current_thread(), THREAD_AWAKENED);
		}
	}

	thread_deallocate(thread1);
	thread_deallocate(thread2);

	absolutetime_to_nanoseconds(end_loop_time - start_loop_time, &time);
	absolutetime_to_nanoseconds(end_loop_time_run - start_loop_time_run, &time_run);

	ret = scnprintf(buffer, buffer_size, "\n");
	ret += scnprintf(&buffer[ret], buffer_size - ret, "total time %llu ns total run time %llu ns ", time, time_run);
	ret += print_test_mtx_stats_string_name(TEST_MTX_LOCK_STATS, &buffer[ret], buffer_size - ret);
	ret += scnprintf(&buffer[ret], buffer_size - ret, "\n");

	return ret;
}