/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <unistd.h>
#include <stdio.h>
#include <math.h>
#include <sys/kdebug.h>
#include <stdlib.h>
#include <pthread.h>
#include <errno.h>
#include <err.h>
#include <string.h>
#include <assert.h>
#include <sysexits.h>
#include <sys/sysctl.h>
#include <getopt.h>
#include <stdbool.h>
#include <time.h>

#include <spawn.h>
#include <spawn_private.h>
#include <sys/spawn_internal.h>
#include <mach-o/dyld.h>

#include <mach/mach_time.h>
#include <mach/mach.h>
#include <mach/task.h>
#include <mach/semaphore.h>

#include <pthread/qos_private.h>

#include <sys/resource.h>

#include <stdatomic.h>

#include <os/tsd.h>

typedef enum wake_type { WAKE_BROADCAST_ONESEM, WAKE_BROADCAST_PERTHREAD, WAKE_CHAIN, WAKE_HOP } wake_type_t;
typedef enum my_policy_type { MY_POLICY_REALTIME, MY_POLICY_TIMESHARE, MY_POLICY_FIXEDPRI } my_policy_type_t;

#define mach_assert_zero(error) do { if ((error) != 0) { fprintf(stderr, "[FAIL] error %d (%s) ", (error), mach_error_string(error)); assert(error == 0); } } while (0)
#define mach_assert_zero_t(tid, error) do { if ((error) != 0) { fprintf(stderr, "[FAIL] Thread %d error %d (%s) ", (tid), (error), mach_error_string(error)); assert(error == 0); } } while (0)
#define assert_zero_t(tid, error) do { if ((error) != 0) { fprintf(stderr, "[FAIL] Thread %d error %d ", (tid), (error)); assert(error == 0); } } while (0)

#define CONSTRAINT_NANOS (20000000ll) /* 20 ms */
#define COMPUTATION_NANOS (10000000ll) /* 10 ms */
#define TRACEWORTHY_NANOS (10000000ll) /* 10 ms */
#define TRACEWORTHY_NANOS_TEST ( 2000000ll) /* 2 ms */

#if DEBUG
#define debug_log(args ...) printf(args)
#else
#define debug_log(args ...) do { } while(0)
#endif

/* Declarations */
static void* worker_thread(void *arg);
static void usage();
static int thread_setup(uint32_t my_id);
static my_policy_type_t parse_thread_policy(const char *str);
static void selfexec_with_apptype(int argc, char *argv[]);
static void parse_args(int argc, char *argv[]);

static __attribute__((aligned(128))) _Atomic uint32_t g_done_threads;
static __attribute__((aligned(128))) _Atomic boolean_t g_churn_stop = FALSE;
static __attribute__((aligned(128))) _Atomic uint64_t g_churn_stopped_at = 0;

/* Global variables (general) */
static uint32_t g_numcpus;
static uint32_t g_nphysicalcpu;
static uint32_t g_nlogicalcpu;
static uint32_t g_numthreads;
static wake_type_t g_waketype;
static policy_t g_policy;
static uint32_t g_iterations;
static struct mach_timebase_info g_mti;
static semaphore_t g_main_sem;
static uint64_t *g_thread_endtimes_abs;
static boolean_t g_verbose = FALSE;
static boolean_t g_do_affinity = FALSE;
static uint64_t g_starttime_abs;
static uint32_t g_iteration_sleeptime_us = 0;
static uint32_t g_priority = 0;
static uint32_t g_churn_pri = 0;
static uint32_t g_churn_count = 0;

static pthread_t* g_churn_threads = NULL;

/* Threshold for dropping a 'bad run' tracepoint */
static uint64_t g_traceworthy_latency_ns = TRACEWORTHY_NANOS;

/* Have we re-execed to set apptype? */
static boolean_t g_seen_apptype = FALSE;

/* usleep between iterations */
static boolean_t g_do_sleep = TRUE;

/* Every thread spins until all threads have checked in */
static boolean_t g_do_all_spin = FALSE;

/* Every thread backgrounds temporarily before parking */
static boolean_t g_drop_priority = FALSE;

/* Test whether realtime threads are scheduled on separate CPUs */
static boolean_t g_test_rt = FALSE;

/* On SMT machines, test whether realtime threads are scheduled on the correct CPUs */
static boolean_t g_test_rt_smt = FALSE;

/* Test whether realtime threads successfully avoid CPU 0 on Intel */
static boolean_t g_test_rt_avoid0 = FALSE;

/* Print a histogram showing how many threads ran on each CPU */
static boolean_t g_histogram = FALSE;

/* One randomly chosen thread holds up the train for a certain duration. */
static boolean_t g_do_one_long_spin = FALSE;
static uint32_t g_one_long_spin_id = 0;
static uint64_t g_one_long_spin_length_abs = 0;
static uint64_t g_one_long_spin_length_ns = 0;

/* Each thread spins for a certain duration after waking up before blocking again. */
static boolean_t g_do_each_spin = FALSE;
static uint64_t g_each_spin_duration_abs = 0;
static uint64_t g_each_spin_duration_ns = 0;

/* Global variables (broadcast) */
static semaphore_t g_broadcastsem;
static semaphore_t g_leadersem;
static semaphore_t g_readysem;
static semaphore_t g_donesem;

/* Global variables (chain) */
static semaphore_t *g_semarr;

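/*
 * Per-CPU bookkeeping for the scheduling tests (at most 64 CPUs are supported):
 * 'current' is set when a worker wakes on that CPU during the current iteration
 * and cleared by the leader, 'accum' counts wakes across all iterations, and
 * g_cpu_map[i] is a bitmask (bit n == CPU n) of the CPUs observed during
 * iteration i.
 */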
typedef struct {
	__attribute__((aligned(128))) uint32_t current;
	uint32_t accum;
} histogram_t;

static histogram_t *g_cpu_histogram;
static _Atomic uint64_t *g_cpu_map;

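/*
 * mach_absolute_time() ticks are converted to nanoseconds by scaling with the
 * timebase ratio numer/denom (e.g. a 125/3 timebase means one tick is ~41.67 ns,
 * while a 1/1 timebase means ticks already are nanoseconds).
 */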
static uint64_t
abs_to_nanos(uint64_t abstime)
{
	return (uint64_t)(abstime * (((double)g_mti.numer) / ((double)g_mti.denom)));
}

static uint64_t
nanos_to_abs(uint64_t ns)
{
	return (uint64_t)(ns * (((double)g_mti.denom) / ((double)g_mti.numer)));
}

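/*
 * Architecture-specific spin-wait hint: ARM 'yield' and x86 'pause' tell the
 * CPU we are in a busy-wait loop, reducing power and pipeline contention
 * without giving up the processor.
 */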
inline static void
yield(void)
{
#if defined(__arm__) || defined(__arm64__)
	asm volatile ("yield");
#elif defined(__x86_64__) || defined(__i386__)
	asm volatile ("pause");
#else
#error Unrecognized architecture
#endif
}

static void *
churn_thread(__unused void *arg)
{
	uint64_t spin_count = 0;

	/*
	 * As a safety measure to avoid wedging, we will bail on the spin if
	 * it's been more than 1s after the most recent run start
	 */

	while (g_churn_stop == FALSE &&
	    mach_absolute_time() < (g_starttime_abs + NSEC_PER_SEC)) {
		spin_count++;
		yield();
	}

	/* This is totally racy, but only here to detect if anyone stops early */
	atomic_fetch_add_explicit(&g_churn_stopped_at, spin_count, memory_order_relaxed);

	return NULL;
}

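/*
 * Churn threads are optional spinners created at --churn-pri priority
 * (defaulting to one fewer than the number of CPUs) to keep the remaining
 * cores busy while the measured threads run.
 */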
static void
create_churn_threads()
{
	if (g_churn_count == 0) {
		g_churn_count = g_numcpus - 1;
	}

	errno_t err;

	struct sched_param param = { .sched_priority = (int)g_churn_pri };
	pthread_attr_t attr;

	/* Array for churn threads */
	g_churn_threads = (pthread_t*) valloc(sizeof(pthread_t) * g_churn_count);
	assert(g_churn_threads);

	if ((err = pthread_attr_init(&attr))) {
		errc(EX_OSERR, err, "pthread_attr_init");
	}

	if ((err = pthread_attr_setschedparam(&attr, &param))) {
		errc(EX_OSERR, err, "pthread_attr_setschedparam");
	}

	if ((err = pthread_attr_setschedpolicy(&attr, SCHED_RR))) {
		errc(EX_OSERR, err, "pthread_attr_setschedpolicy");
	}

	for (uint32_t i = 0; i < g_churn_count; i++) {
		pthread_t new_thread;

		if ((err = pthread_create(&new_thread, &attr, churn_thread, NULL))) {
			errc(EX_OSERR, err, "pthread_create");
		}
		g_churn_threads[i] = new_thread;
	}

	if ((err = pthread_attr_destroy(&attr))) {
		errc(EX_OSERR, err, "pthread_attr_destroy");
	}
}

static void
join_churn_threads(void)
{
	if (atomic_load_explicit(&g_churn_stopped_at, memory_order_seq_cst) != 0) {
		printf("Warning: Some of the churn threads may have stopped early: %lld\n",
		    g_churn_stopped_at);
	}

	atomic_store_explicit(&g_churn_stop, TRUE, memory_order_seq_cst);

	/* Rejoin churn threads */
	for (uint32_t i = 0; i < g_churn_count; i++) {
		errno_t err = pthread_join(g_churn_threads[i], NULL);
		if (err) {
			errc(EX_OSERR, err, "pthread_join %d", i);
		}
	}
}

/*
 * Figure out what thread policy to use
 */
static my_policy_type_t
parse_thread_policy(const char *str)
{
	if (strcmp(str, "timeshare") == 0) {
		return MY_POLICY_TIMESHARE;
	} else if (strcmp(str, "realtime") == 0) {
		return MY_POLICY_REALTIME;
	} else if (strcmp(str, "fixed") == 0) {
		return MY_POLICY_FIXEDPRI;
	} else {
		errx(EX_USAGE, "Invalid thread policy \"%s\"", str);
	}
}

/*
 * Figure out what wakeup pattern to use
 */
static wake_type_t
parse_wakeup_pattern(const char *str)
{
	if (strcmp(str, "chain") == 0) {
		return WAKE_CHAIN;
	} else if (strcmp(str, "hop") == 0) {
		return WAKE_HOP;
	} else if (strcmp(str, "broadcast-single-sem") == 0) {
		return WAKE_BROADCAST_ONESEM;
	} else if (strcmp(str, "broadcast-per-thread") == 0) {
		return WAKE_BROADCAST_PERTHREAD;
	} else {
		errx(EX_USAGE, "Invalid wakeup pattern \"%s\"", str);
	}
}

/*
 * Set policy
 */
static int
thread_setup(uint32_t my_id)
{
	kern_return_t kr;
	errno_t ret;
	thread_time_constraint_policy_data_t pol;

	if (g_priority) {
		int policy = SCHED_OTHER;
		if (g_policy == MY_POLICY_FIXEDPRI) {
			policy = SCHED_RR;
		}

		struct sched_param param = {.sched_priority = (int)g_priority};
		if ((ret = pthread_setschedparam(pthread_self(), policy, &param))) {
			errc(EX_OSERR, ret, "pthread_setschedparam: %d", my_id);
		}
	}

	switch (g_policy) {
	case MY_POLICY_TIMESHARE:
		break;
	case MY_POLICY_REALTIME:
		/* Hard-coded realtime parameters (similar to what Digi uses) */
		pol.period = 100000;
		pol.constraint = (uint32_t) nanos_to_abs(CONSTRAINT_NANOS);
		pol.computation = (uint32_t) nanos_to_abs(COMPUTATION_NANOS);
		pol.preemptible = 0; /* Ignored by OS */

		kr = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
		    (thread_policy_t) &pol, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
		mach_assert_zero_t(my_id, kr);
		break;
	case MY_POLICY_FIXEDPRI:
		ret = pthread_set_fixedpriority_self();
		if (ret) {
			errc(EX_OSERR, ret, "pthread_set_fixedpriority_self");
		}
		break;
	default:
		errx(EX_USAGE, "invalid policy type %d", g_policy);
	}

	if (g_do_affinity) {
		thread_affinity_policy_data_t affinity;

		affinity.affinity_tag = my_id % 2;

		kr = thread_policy_set(mach_thread_self(), THREAD_AFFINITY_POLICY,
		    (thread_policy_t)&affinity, THREAD_AFFINITY_POLICY_COUNT);
		mach_assert_zero_t(my_id, kr);
	}

	return 0;
}

/*
 * Wait for a wakeup, potentially wake up another of the "0-N" threads,
 * and notify the main thread when done.
 */
static void*
worker_thread(void *arg)
{
	uint32_t my_id = (uint32_t)(uintptr_t)arg;
	kern_return_t kr;

	volatile double x = 0.0;
	volatile double y = 0.0;

	/* Set policy and so forth */
	thread_setup(my_id);

	for (uint32_t i = 0; i < g_iterations; i++) {
		if (my_id == 0) {
			/*
			 * Leader thread either wakes everyone up or starts the chain going.
			 */

			/* Give the worker threads undisturbed time to finish before waiting on them */
			if (g_do_sleep) {
				usleep(g_iteration_sleeptime_us);
			}

			debug_log("%d Leader thread wait for ready\n", i);

			/*
			 * Wait for everyone else to declare ready
			 * Is there a better way to do this that won't interfere with the rest of the chain?
			 * TODO: Invent 'semaphore wait for N signals'
			 */

			for (uint32_t j = 0; j < g_numthreads - 1; j++) {
				kr = semaphore_wait(g_readysem);
				mach_assert_zero_t(my_id, kr);
			}

			debug_log("%d Leader thread wait\n", i);

			if (i > 0) {
				for (int cpuid = 0; cpuid < g_numcpus; cpuid++) {
					if (g_cpu_histogram[cpuid].current == 1) {
						atomic_fetch_or_explicit(&g_cpu_map[i - 1], (1UL << cpuid), memory_order_relaxed);
						g_cpu_histogram[cpuid].current = 0;
					}
				}
			}

			/* Signal main thread and wait for start of iteration */

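			/*
			 * semaphore_wait_signal() registers the wait on g_leadersem and
			 * signals g_main_sem in a single Mach trap, so the leader cannot
			 * miss the main thread's wakeup between the two operations.
			 */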
			kr = semaphore_wait_signal(g_leadersem, g_main_sem);
			mach_assert_zero_t(my_id, kr);

			g_thread_endtimes_abs[my_id] = mach_absolute_time();

			debug_log("%d Leader thread go\n", i);

			assert_zero_t(my_id, atomic_load_explicit(&g_done_threads, memory_order_relaxed));

			switch (g_waketype) {
			case WAKE_BROADCAST_ONESEM:
				kr = semaphore_signal_all(g_broadcastsem);
				mach_assert_zero_t(my_id, kr);
				break;
			case WAKE_BROADCAST_PERTHREAD:
				for (uint32_t j = 1; j < g_numthreads; j++) {
					kr = semaphore_signal(g_semarr[j]);
					mach_assert_zero_t(my_id, kr);
				}
				break;
			case WAKE_CHAIN:
				kr = semaphore_signal(g_semarr[my_id + 1]);
				mach_assert_zero_t(my_id, kr);
				break;
			case WAKE_HOP:
				kr = semaphore_wait_signal(g_donesem, g_semarr[my_id + 1]);
				mach_assert_zero_t(my_id, kr);
				break;
			}
		} else {
			/*
			 * Everyone else waits to be woken up,
			 * records when she wakes up, and possibly
			 * wakes up a friend.
			 */
			switch (g_waketype) {
			case WAKE_BROADCAST_ONESEM:
				kr = semaphore_wait_signal(g_broadcastsem, g_readysem);
				mach_assert_zero_t(my_id, kr);

				g_thread_endtimes_abs[my_id] = mach_absolute_time();
				break;

			case WAKE_BROADCAST_PERTHREAD:
				kr = semaphore_wait_signal(g_semarr[my_id], g_readysem);
				mach_assert_zero_t(my_id, kr);

				g_thread_endtimes_abs[my_id] = mach_absolute_time();
				break;

			case WAKE_CHAIN:
				kr = semaphore_wait_signal(g_semarr[my_id], g_readysem);
				mach_assert_zero_t(my_id, kr);

				/* Signal the next thread *after* recording wake time */

				g_thread_endtimes_abs[my_id] = mach_absolute_time();

				if (my_id < (g_numthreads - 1)) {
					kr = semaphore_signal(g_semarr[my_id + 1]);
					mach_assert_zero_t(my_id, kr);
				}

				break;

			case WAKE_HOP:
				kr = semaphore_wait_signal(g_semarr[my_id], g_readysem);
				mach_assert_zero_t(my_id, kr);

				/* Signal the next thread *after* recording wake time */

				g_thread_endtimes_abs[my_id] = mach_absolute_time();

				if (my_id < (g_numthreads - 1)) {
					kr = semaphore_wait_signal(g_donesem, g_semarr[my_id + 1]);
					mach_assert_zero_t(my_id, kr);
				} else {
					kr = semaphore_signal_all(g_donesem);
					mach_assert_zero_t(my_id, kr);
				}

				break;
			}
		}

		unsigned int cpuid = _os_cpu_number();
		assert(cpuid < g_numcpus);
		debug_log("Thread %p woke up on CPU %d for iteration %d.\n", pthread_self(), cpuid, i);
		g_cpu_histogram[cpuid].current = 1;
		g_cpu_histogram[cpuid].accum++;

		if (g_do_one_long_spin && g_one_long_spin_id == my_id) {
			/* One randomly chosen thread holds up the train for a while. */

			uint64_t endspin = g_starttime_abs + g_one_long_spin_length_abs;
			while (mach_absolute_time() < endspin) {
				y = y + 1.5 + x;
				x = sqrt(y);
			}
		}

		if (g_do_each_spin) {
			/* Each thread spins for a certain duration after waking up before blocking again. */

			uint64_t endspin = mach_absolute_time() + g_each_spin_duration_abs;
			while (mach_absolute_time() < endspin) {
				y = y + 1.5 + x;
				x = sqrt(y);
			}
		}

		uint32_t done_threads;
		done_threads = atomic_fetch_add_explicit(&g_done_threads, 1, memory_order_relaxed) + 1;

		debug_log("Thread %p new value is %d, iteration %d\n", pthread_self(), done_threads, i);

		if (g_drop_priority) {
			/* Drop priority to BG momentarily */
			errno_t ret = setpriority(PRIO_DARWIN_THREAD, 0, PRIO_DARWIN_BG);
			if (ret) {
				errc(EX_OSERR, ret, "setpriority PRIO_DARWIN_BG");
			}
		}

		if (g_do_all_spin) {
			/* Everyone spins until the last thread checks in. */

			while (atomic_load_explicit(&g_done_threads, memory_order_relaxed) < g_numthreads) {
				y = y + 1.5 + x;
				x = sqrt(y);
			}
		}

		if (g_drop_priority) {
			/* Restore normal priority */
			errno_t ret = setpriority(PRIO_DARWIN_THREAD, 0, 0);
			if (ret) {
				errc(EX_OSERR, ret, "setpriority 0");
			}
		}

		debug_log("Thread %p done spinning, iteration %d\n", pthread_self(), i);
	}

	if (my_id == 0) {
		/* Give the worker threads undisturbed time to finish before waiting on them */
		if (g_do_sleep) {
			usleep(g_iteration_sleeptime_us);
		}

		/* Wait for the worker threads to finish */
		for (uint32_t i = 0; i < g_numthreads - 1; i++) {
			kr = semaphore_wait(g_readysem);
			mach_assert_zero_t(my_id, kr);
		}

		/* Tell everyone and the main thread that the last iteration is done */
		debug_log("%d Leader thread done\n", g_iterations - 1);

		for (int cpuid = 0; cpuid < g_numcpus; cpuid++) {
			if (g_cpu_histogram[cpuid].current == 1) {
				atomic_fetch_or_explicit(&g_cpu_map[g_iterations - 1], (1UL << cpuid), memory_order_relaxed);
				g_cpu_histogram[cpuid].current = 0;
			}
		}

		kr = semaphore_signal_all(g_main_sem);
		mach_assert_zero_t(my_id, kr);
	} else {
		/* Hold up thread teardown so it doesn't affect the last iteration */
		kr = semaphore_wait_signal(g_main_sem, g_readysem);
		mach_assert_zero_t(my_id, kr);
	}

	return 0;
}

/*
 * Given an array of uint64_t values, compute average, max, min, and standard deviation
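 * (note: this is the population standard deviation, i.e. divided by count rather than count - 1)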
 */
static void
compute_stats(uint64_t *values, uint64_t count, float *averagep, uint64_t *maxp, uint64_t *minp, float *stddevp)
{
	uint32_t i;
	uint64_t _sum = 0;
	uint64_t _max = 0;
	uint64_t _min = UINT64_MAX;
	float _avg = 0;
	float _dev = 0;

	for (i = 0; i < count; i++) {
		_sum += values[i];
		_max = values[i] > _max ? values[i] : _max;
		_min = values[i] < _min ? values[i] : _min;
	}

	_avg = ((float)_sum) / ((float)count);

	_dev = 0;
	for (i = 0; i < count; i++) {
		_dev += powf((((float)values[i]) - _avg), 2);
	}

	_dev /= count;
	_dev = sqrtf(_dev);

	*averagep = _avg;
	*maxp = _max;
	*minp = _min;
	*stddevp = _dev;
}

int
main(int argc, char **argv)
{
	errno_t ret;
	kern_return_t kr;

	pthread_t *threads;
	uint64_t *worst_latencies_ns;
	uint64_t *worst_latencies_from_first_ns;
	uint64_t max, min;
	float avg, stddev;

	bool test_fail = false;

	for (int i = 0; i < argc; i++) {
		if (strcmp(argv[i], "--switched_apptype") == 0) {
			g_seen_apptype = TRUE;
		}
	}

	if (!g_seen_apptype) {
		selfexec_with_apptype(argc, argv);
	}

	parse_args(argc, argv);

	srand((unsigned int)time(NULL));

	mach_timebase_info(&g_mti);

	size_t ncpu_size = sizeof(g_numcpus);
	ret = sysctlbyname("hw.ncpu", &g_numcpus, &ncpu_size, NULL, 0);
	if (ret) {
		err(EX_OSERR, "Failed sysctlbyname(hw.ncpu)");
	}
	assert(g_numcpus <= 64); /* g_cpu_map needs to be extended for > 64 cpus */

	size_t physicalcpu_size = sizeof(g_nphysicalcpu);
	ret = sysctlbyname("hw.physicalcpu", &g_nphysicalcpu, &physicalcpu_size, NULL, 0);
	if (ret) {
		err(EX_OSERR, "Failed sysctlbyname(hw.physicalcpu)");
	}

	size_t logicalcpu_size = sizeof(g_nlogicalcpu);
	ret = sysctlbyname("hw.logicalcpu", &g_nlogicalcpu, &logicalcpu_size, NULL, 0);
	if (ret) {
		err(EX_OSERR, "Failed sysctlbyname(hw.logicalcpu)");
	}

	if (g_test_rt) {
		if (g_numthreads == 0) {
			g_numthreads = g_numcpus;
		}
		g_policy = MY_POLICY_REALTIME;
		g_do_all_spin = TRUE;
		g_histogram = true;
		/* Don't change g_traceworthy_latency_ns if it's explicitly been set to something other than the default */
		if (g_traceworthy_latency_ns == TRACEWORTHY_NANOS) {
			g_traceworthy_latency_ns = TRACEWORTHY_NANOS_TEST;
		}
	} else if (g_test_rt_smt) {
		if (g_nlogicalcpu != 2 * g_nphysicalcpu) {
			/* Not SMT */
			printf("Attempt to run --test-rt-smt on a non-SMT device\n");
			exit(0);
		}

		if (g_numthreads == 0) {
			g_numthreads = g_nphysicalcpu;
		}
		g_policy = MY_POLICY_REALTIME;
		g_do_all_spin = TRUE;
		g_histogram = true;
	} else if (g_test_rt_avoid0) {
#if defined(__x86_64__) || defined(__i386__)
		if (g_numthreads == 0) {
			g_numthreads = g_nphysicalcpu - 1;
		}
		if (g_numthreads == 0) {
			printf("Attempt to run --test-rt-avoid0 on a uniprocessor\n");
			exit(0);
		}
		g_policy = MY_POLICY_REALTIME;
		g_do_all_spin = TRUE;
		g_histogram = true;
#else
		printf("Attempt to run --test-rt-avoid0 on a non-Intel device\n");
		exit(0);
#endif
	} else if (g_numthreads == 0) {
		g_numthreads = g_numcpus;
	}

	if (g_do_each_spin) {
		g_each_spin_duration_abs = nanos_to_abs(g_each_spin_duration_ns);
	}

	/* Configure the long-spin thread to take up half of its computation */
	if (g_do_one_long_spin) {
		g_one_long_spin_length_ns = COMPUTATION_NANOS / 2;
		g_one_long_spin_length_abs = nanos_to_abs(g_one_long_spin_length_ns);
	}

	/* Estimate the amount of time the cleanup phase needs to back off */
	g_iteration_sleeptime_us = g_numthreads * 20;

	uint32_t threads_per_core = (g_numthreads / g_numcpus) + 1;
	if (g_do_each_spin) {
		g_iteration_sleeptime_us += threads_per_core * (g_each_spin_duration_ns / NSEC_PER_USEC);
	}
	if (g_do_one_long_spin) {
		g_iteration_sleeptime_us += g_one_long_spin_length_ns / NSEC_PER_USEC;
	}

	/* Arrays for threads and their wakeup times */
	threads = (pthread_t*) valloc(sizeof(pthread_t) * g_numthreads);
	assert(threads);

	size_t endtimes_size = sizeof(uint64_t) * g_numthreads;

	g_thread_endtimes_abs = (uint64_t*) valloc(endtimes_size);
	assert(g_thread_endtimes_abs);

	/* Ensure the allocation is pre-faulted */
	ret = memset_s(g_thread_endtimes_abs, endtimes_size, 0, endtimes_size);
	if (ret) {
		errc(EX_OSERR, ret, "memset_s endtimes");
	}

	size_t latencies_size = sizeof(uint64_t) * g_iterations;

	worst_latencies_ns = (uint64_t*) valloc(latencies_size);
	assert(worst_latencies_ns);

	/* Ensure the allocation is pre-faulted */
	ret = memset_s(worst_latencies_ns, latencies_size, 0, latencies_size);
	if (ret) {
		errc(EX_OSERR, ret, "memset_s latencies");
	}

	worst_latencies_from_first_ns = (uint64_t*) valloc(latencies_size);
	assert(worst_latencies_from_first_ns);

	/* Ensure the allocation is pre-faulted */
	ret = memset_s(worst_latencies_from_first_ns, latencies_size, 0, latencies_size);
	if (ret) {
		errc(EX_OSERR, ret, "memset_s latencies_from_first");
	}

	size_t histogram_size = sizeof(histogram_t) * g_numcpus;
	g_cpu_histogram = (histogram_t *)valloc(histogram_size);
	assert(g_cpu_histogram);
	/* Ensure the allocation is pre-faulted */
	ret = memset_s(g_cpu_histogram, histogram_size, 0, histogram_size);
	if (ret) {
		errc(EX_OSERR, ret, "memset_s g_cpu_histogram");
	}

	size_t map_size = sizeof(uint64_t) * g_iterations;
	g_cpu_map = (_Atomic uint64_t *)valloc(map_size);
	assert(g_cpu_map);
	/* Ensure the allocation is pre-faulted */
	ret = memset_s(g_cpu_map, map_size, 0, map_size);
	if (ret) {
		errc(EX_OSERR, ret, "memset_s g_cpu_map");
	}

	kr = semaphore_create(mach_task_self(), &g_main_sem, SYNC_POLICY_FIFO, 0);
	mach_assert_zero(kr);

	/* Either one big semaphore or one per thread */
	if (g_waketype == WAKE_CHAIN ||
	    g_waketype == WAKE_BROADCAST_PERTHREAD ||
	    g_waketype == WAKE_HOP) {
		g_semarr = valloc(sizeof(semaphore_t) * g_numthreads);
		assert(g_semarr);

		for (uint32_t i = 0; i < g_numthreads; i++) {
			kr = semaphore_create(mach_task_self(), &g_semarr[i], SYNC_POLICY_FIFO, 0);
			mach_assert_zero(kr);
		}

		g_leadersem = g_semarr[0];
	} else {
		kr = semaphore_create(mach_task_self(), &g_broadcastsem, SYNC_POLICY_FIFO, 0);
		mach_assert_zero(kr);
		kr = semaphore_create(mach_task_self(), &g_leadersem, SYNC_POLICY_FIFO, 0);
		mach_assert_zero(kr);
	}

	if (g_waketype == WAKE_HOP) {
		kr = semaphore_create(mach_task_self(), &g_donesem, SYNC_POLICY_FIFO, 0);
		mach_assert_zero(kr);
	}

	kr = semaphore_create(mach_task_self(), &g_readysem, SYNC_POLICY_FIFO, 0);
	mach_assert_zero(kr);

	atomic_store_explicit(&g_done_threads, 0, memory_order_relaxed);

	/* Create the threads */
	for (uint32_t i = 0; i < g_numthreads; i++) {
		ret = pthread_create(&threads[i], NULL, worker_thread, (void*)(uintptr_t)i);
		if (ret) {
			errc(EX_OSERR, ret, "pthread_create %d", i);
		}
	}

	ret = setpriority(PRIO_DARWIN_ROLE, 0, PRIO_DARWIN_ROLE_UI_FOCAL);
	if (ret) {
		errc(EX_OSERR, ret, "setpriority");
	}

	thread_setup(0);

	g_starttime_abs = mach_absolute_time();

	if (g_churn_pri) {
		create_churn_threads();
	}

	/* Let everyone get settled */
	kr = semaphore_wait(g_main_sem);
	mach_assert_zero(kr);

	/* Give the system a bit more time to settle */
	if (g_do_sleep) {
		usleep(g_iteration_sleeptime_us);
	}

	/* Go! */
	for (uint32_t i = 0; i < g_iterations; i++) {
		uint32_t j;
		uint64_t worst_abs = 0, best_abs = UINT64_MAX;

		if (g_do_one_long_spin) {
			g_one_long_spin_id = (uint32_t)rand() % g_numthreads;
		}

		debug_log("%d Main thread reset\n", i);

		atomic_store_explicit(&g_done_threads, 0, memory_order_seq_cst);

		g_starttime_abs = mach_absolute_time();

		/* Fire them off and wait for worker threads to finish */
		kr = semaphore_wait_signal(g_main_sem, g_leadersem);
		mach_assert_zero(kr);

		debug_log("%d Main thread return\n", i);

		assert(atomic_load_explicit(&g_done_threads, memory_order_relaxed) == g_numthreads);

		/*
		 * We report the worst latencies relative to start time
		 * and relative to the lead worker thread.
		 */
		for (j = 0; j < g_numthreads; j++) {
			uint64_t latency_abs;

			latency_abs = g_thread_endtimes_abs[j] - g_starttime_abs;
			worst_abs = worst_abs < latency_abs ? latency_abs : worst_abs;
		}

		worst_latencies_ns[i] = abs_to_nanos(worst_abs);

		worst_abs = 0;
		for (j = 1; j < g_numthreads; j++) {
			uint64_t latency_abs;

			latency_abs = g_thread_endtimes_abs[j] - g_thread_endtimes_abs[0];
			worst_abs = worst_abs < latency_abs ? latency_abs : worst_abs;
			best_abs = best_abs > latency_abs ? latency_abs : best_abs;
		}

		worst_latencies_from_first_ns[i] = abs_to_nanos(worst_abs);

		/*
		 * In the event of a bad run, cut a trace point.
		 */
		if (worst_latencies_from_first_ns[i] > g_traceworthy_latency_ns) {
			/* Ariadne's ad-hoc test signpost */
			kdebug_trace(ARIADNEDBG_CODE(0, 0), worst_latencies_from_first_ns[i], g_traceworthy_latency_ns, 0, 0);

			if (g_verbose) {
				printf("Worst on this round was %.2f us.\n", ((float)worst_latencies_from_first_ns[i]) / 1000.0);
			}
		}

		/* Give the system a bit more time to settle */
		if (g_do_sleep) {
			usleep(g_iteration_sleeptime_us);
		}
	}

	/* Rejoin threads */
	for (uint32_t i = 0; i < g_numthreads; i++) {
		ret = pthread_join(threads[i], NULL);
		if (ret) {
			errc(EX_OSERR, ret, "pthread_join %d", i);
		}
	}

	if (g_churn_pri) {
		join_churn_threads();
	}

	compute_stats(worst_latencies_ns, g_iterations, &avg, &max, &min, &stddev);
	printf("Results (from a stop):\n");
	printf("Max:\t\t%.2f us\n", ((float)max) / 1000.0);
	printf("Min:\t\t%.2f us\n", ((float)min) / 1000.0);
	printf("Avg:\t\t%.2f us\n", avg / 1000.0);
	printf("Stddev:\t\t%.2f us\n", stddev / 1000.0);

	putchar('\n');

	compute_stats(worst_latencies_from_first_ns, g_iterations, &avg, &max, &min, &stddev);
	printf("Results (relative to first thread):\n");
	printf("Max:\t\t%.2f us\n", ((float)max) / 1000.0);
	printf("Min:\t\t%.2f us\n", ((float)min) / 1000.0);
	printf("Avg:\t\t%.2f us\n", avg / 1000.0);
	printf("Stddev:\t\t%.2f us\n", stddev / 1000.0);

#if 0
	for (uint32_t i = 0; i < g_iterations; i++) {
		printf("Iteration %d: %f us\n", i, worst_latencies_ns[i] / 1000.0);
	}
#endif

	if (g_histogram) {
		putchar('\n');

		for (uint32_t i = 0; i < g_numcpus; i++) {
			printf("%d\t%d\n", i, g_cpu_histogram[i].accum);
		}
	}

	if (g_test_rt || g_test_rt_smt || g_test_rt_avoid0) {
#define PRIMARY 0x5555555555555555ULL
#define SECONDARY 0xaaaaaaaaaaaaaaaaULL
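		/*
		 * These masks encode the assumption that logical CPUs are numbered
		 * so each core's primary is an even CPU id and its SMT sibling is
		 * the following odd id: PRIMARY selects the even bits of the map
		 * and SECONDARY the odd bits. Shifting the secondary bits right by
		 * one below lines each sibling up with its primary, so a nonzero
		 * AND means both threads of some core ran in the same iteration.
		 */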

		int fail_count = 0;

		for (uint32_t i = 0; i < g_iterations; i++) {
			bool secondary = false;
			bool fail = false;
			uint64_t map = g_cpu_map[i];
			if (g_test_rt_smt) {
				/* Test for one or more threads running on secondary cores unexpectedly (WARNING) */
				secondary = (map & SECONDARY);
				/* Test for threads running on both primary and secondary cpus of the same core (FAIL) */
				fail = ((map & PRIMARY) & ((map & SECONDARY) >> 1));
			} else if (g_test_rt) {
				fail = (__builtin_popcountll(map) != g_numthreads) && (worst_latencies_ns[i] > g_traceworthy_latency_ns);
			} else if (g_test_rt_avoid0) {
				fail = ((map & 0x1) == 0x1);
			}
			if (secondary || fail) {
				printf("Iteration %d: 0x%llx%s%s\n", i, map,
				    secondary ? " SECONDARY" : "",
				    fail ? " FAIL" : "");
			}
			test_fail |= fail;
			fail_count += fail;
		}

		if (test_fail && (g_iterations >= 100) && (fail_count <= g_iterations / 100)) {
			printf("99%% or better success rate\n");
			test_fail = 0;
		}
	}

	free(threads);
	free(g_thread_endtimes_abs);
	free(worst_latencies_ns);
	free(worst_latencies_from_first_ns);
	free(g_cpu_histogram);
	free(g_cpu_map);

	return test_fail;
}

/*
 * WARNING: This is SPI specifically intended for use by launchd to start UI
 * apps. We use it here for a test tool only to opt into QoS using the same
 * policies. Do not use this outside xnu or libxpc/launchd.
 */
static void
selfexec_with_apptype(int argc, char *argv[])
{
	int ret;
	posix_spawnattr_t attr;
	extern char **environ;
	char *new_argv[argc + 1 + 1 /* NULL */];
	int i;
	char prog[PATH_MAX];
	uint32_t prog_size = PATH_MAX;

	ret = _NSGetExecutablePath(prog, &prog_size);
	if (ret) {
		err(EX_OSERR, "_NSGetExecutablePath");
	}

	for (i = 0; i < argc; i++) {
		new_argv[i] = argv[i];
	}

	new_argv[i] = "--switched_apptype";
	new_argv[i + 1] = NULL;

	ret = posix_spawnattr_init(&attr);
	if (ret) {
		errc(EX_OSERR, ret, "posix_spawnattr_init");
	}

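	/*
	 * POSIX_SPAWN_SETEXEC makes posix_spawn() replace the current process
	 * image in place (like exec), so on success the call below never returns.
	 */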
	ret = posix_spawnattr_setflags(&attr, POSIX_SPAWN_SETEXEC);
	if (ret) {
		errc(EX_OSERR, ret, "posix_spawnattr_setflags");
	}

	ret = posix_spawnattr_setprocesstype_np(&attr, POSIX_SPAWN_PROC_TYPE_APP_DEFAULT);
	if (ret) {
		errc(EX_OSERR, ret, "posix_spawnattr_setprocesstype_np");
	}

	ret = posix_spawn(NULL, prog, NULL, &attr, new_argv, environ);
	if (ret) {
		errc(EX_OSERR, ret, "posix_spawn");
	}
}

/*
 * Admittedly not very attractive.
 */
static void __attribute__((noreturn))
usage()
{
	errx(EX_USAGE, "Usage: %s <threads> <chain | hop | broadcast-single-sem | broadcast-per-thread> "
	    "<realtime | timeshare | fixed> <iterations>\n\t\t"
	    "[--trace <traceworthy latency in ns>] "
	    "[--verbose] [--spin-one] [--spin-all] [--spin-time <nanos>] [--affinity]\n\t\t"
	    "[--no-sleep] [--drop-priority] [--churn-pri <pri>] [--churn-count <n>]",
	    getprogname());
}

static struct option* g_longopts;
static int option_index;

static uint32_t
read_dec_arg()
{
	char *cp;
	/* char* optarg is a magic global */

	uint32_t arg_val = (uint32_t)strtoull(optarg, &cp, 10);

	if (cp == optarg || *cp) {
		errx(EX_USAGE, "arg --%s requires a decimal number, found \"%s\"",
		    g_longopts[option_index].name, optarg);
	}

	return arg_val;
}

static void
parse_args(int argc, char *argv[])
{
	enum {
		OPT_GETOPT = 0,
		OPT_SPIN_TIME,
		OPT_TRACE,
		OPT_PRIORITY,
		OPT_CHURN_PRI,
		OPT_CHURN_COUNT,
	};

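	/*
	 * Entries below with a non-NULL 'flag' pointer make getopt_long() store
	 * TRUE or FALSE directly into the corresponding global and return 0
	 * (OPT_GETOPT); only the value-taking options come back through the
	 * switch statement.
	 */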
	static struct option longopts[] = {
		/* BEGIN IGNORE CODESTYLE */
		{ "spin-time", required_argument, NULL, OPT_SPIN_TIME },
		{ "trace", required_argument, NULL, OPT_TRACE },
		{ "priority", required_argument, NULL, OPT_PRIORITY },
		{ "churn-pri", required_argument, NULL, OPT_CHURN_PRI },
		{ "churn-count", required_argument, NULL, OPT_CHURN_COUNT },
		{ "switched_apptype", no_argument, (int*)&g_seen_apptype, TRUE },
		{ "spin-one", no_argument, (int*)&g_do_one_long_spin, TRUE },
		{ "spin-all", no_argument, (int*)&g_do_all_spin, TRUE },
		{ "affinity", no_argument, (int*)&g_do_affinity, TRUE },
		{ "no-sleep", no_argument, (int*)&g_do_sleep, FALSE },
		{ "drop-priority", no_argument, (int*)&g_drop_priority, TRUE },
		{ "test-rt", no_argument, (int*)&g_test_rt, TRUE },
		{ "test-rt-smt", no_argument, (int*)&g_test_rt_smt, TRUE },
		{ "test-rt-avoid0", no_argument, (int*)&g_test_rt_avoid0, TRUE },
		{ "histogram", no_argument, (int*)&g_histogram, TRUE },
		{ "verbose", no_argument, (int*)&g_verbose, TRUE },
		{ "help", no_argument, NULL, 'h' },
		{ NULL, 0, NULL, 0 }
		/* END IGNORE CODESTYLE */
	};

	g_longopts = longopts;
	int ch = 0;

	while ((ch = getopt_long(argc, argv, "h", longopts, &option_index)) != -1) {
		switch (ch) {
		case OPT_GETOPT:
			/* getopt_long set a variable */
			break;
		case OPT_SPIN_TIME:
			g_do_each_spin = TRUE;
			g_each_spin_duration_ns = read_dec_arg();
			break;
		case OPT_TRACE:
			g_traceworthy_latency_ns = read_dec_arg();
			break;
		case OPT_PRIORITY:
			g_priority = read_dec_arg();
			break;
		case OPT_CHURN_PRI:
			g_churn_pri = read_dec_arg();
			break;
		case OPT_CHURN_COUNT:
			g_churn_count = read_dec_arg();
			break;
		case '?':
		case 'h':
		default:
			usage();
			/* NORETURN */
		}
	}

	/*
	 * getopt_long reorders all the options to the beginning of the argv array.
	 * Jump past them to the non-option arguments.
	 */

	argc -= optind;
	argv += optind;

	if (argc > 4) {
		warnx("Too many non-option arguments passed");
		usage();
	}

	if (argc != 4) {
		warnx("Missing required <threads> <waketype> <policy> <iterations> arguments");
		usage();
	}

	char *cp;

	/* How many threads? */
	g_numthreads = (uint32_t)strtoull(argv[0], &cp, 10);

	if (cp == argv[0] || *cp) {
		errx(EX_USAGE, "numthreads requires a decimal number, found \"%s\"", argv[0]);
	}

	/* What wakeup pattern? */
	g_waketype = parse_wakeup_pattern(argv[1]);

	/* Policy */
	g_policy = parse_thread_policy(argv[2]);

	/* Iterations */
	g_iterations = (uint32_t)strtoull(argv[3], &cp, 10);

	if (cp == argv[3] || *cp) {
		errx(EX_USAGE, "iterations requires a decimal number, found \"%s\"", argv[3]);
	}

	if (g_iterations < 1) {
		errx(EX_USAGE, "Must have at least one iteration");
	}

	if (g_numthreads == 1 && g_waketype == WAKE_CHAIN) {
		errx(EX_USAGE, "chain mode requires more than one thread");
	}

	if (g_numthreads == 1 && g_waketype == WAKE_HOP) {
		errx(EX_USAGE, "hop mode requires more than one thread");
	}
}