apple/xnu: tools/tests/zero-to-n/zero-to-n.c
/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <unistd.h>
#include <stdio.h>
#include <math.h>
#include <sys/kdebug.h>
#include <stdlib.h>
#include <pthread.h>
#include <errno.h>
#include <err.h>
#include <string.h>
#include <assert.h>
#include <sysexits.h>
#include <sys/sysctl.h>
#include <getopt.h>
#include <time.h>	/* time(), used to seed rand() in main() */

#include <spawn.h>
#include <spawn_private.h>
#include <sys/spawn_internal.h>
#include <mach-o/dyld.h>

#include <mach/mach_time.h>
#include <mach/mach.h>
#include <mach/task.h>
#include <mach/semaphore.h>

#include <pthread/qos_private.h>

#include <sys/resource.h>

#include <stdatomic.h>

#include <os/tsd.h>

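/*
 * Wakeup patterns exercised by the test (see worker_thread() below):
 * - WAKE_BROADCAST_ONESEM: the leader wakes every waiter at once by calling
 *   semaphore_signal_all() on one shared semaphore.
 * - WAKE_BROADCAST_PERTHREAD: the leader signals each thread's private
 *   semaphore in a loop.
 * - WAKE_CHAIN: each thread signals the next thread's semaphore after
 *   recording its own wake time.
 * - WAKE_HOP: like chain, but each thread also blocks on g_donesem before
 *   signaling onward, so the wakeup "hops" through one thread at a time.
 */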
typedef enum wake_type { WAKE_BROADCAST_ONESEM, WAKE_BROADCAST_PERTHREAD, WAKE_CHAIN, WAKE_HOP } wake_type_t;
typedef enum my_policy_type { MY_POLICY_REALTIME, MY_POLICY_TIMESHARE, MY_POLICY_FIXEDPRI } my_policy_type_t;

#define mach_assert_zero(error)        do { if ((error) != 0) { fprintf(stderr, "[FAIL] error %d (%s) ", (error), mach_error_string(error)); assert(error == 0); } } while (0)
#define mach_assert_zero_t(tid, error) do { if ((error) != 0) { fprintf(stderr, "[FAIL] Thread %d error %d (%s) ", (tid), (error), mach_error_string(error)); assert(error == 0); } } while (0)
#define assert_zero_t(tid, error)      do { if ((error) != 0) { fprintf(stderr, "[FAIL] Thread %d error %d ", (tid), (error)); assert(error == 0); } } while (0)

#define CONSTRAINT_NANOS	(20000000ll)	/* 20 ms */
#define COMPUTATION_NANOS	(10000000ll)	/* 10 ms */
#define TRACEWORTHY_NANOS	(10000000ll)	/* 10 ms */
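
/*
 * The first two constants feed the Mach THREAD_TIME_CONSTRAINT_POLICY set in
 * thread_setup(): the thread asks for up to COMPUTATION_NANOS of CPU time per
 * period and promises to finish within CONSTRAINT_NANOS of starting.
 * TRACEWORTHY_NANOS is unrelated to the policy; it is the default wakeup
 * latency above which a run is considered bad and a tracepoint is cut.
 */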

#if DEBUG
#define debug_log(args ...) printf(args)
#else
#define debug_log(args ...) do { } while(0)
#endif

/* Declarations */
static void* worker_thread(void *arg);
static void usage();
static int thread_setup(uint32_t my_id);
static my_policy_type_t parse_thread_policy(const char *str);
static void selfexec_with_apptype(int argc, char *argv[]);
static void parse_args(int argc, char *argv[]);

static __attribute__((aligned(128))) _Atomic uint32_t g_done_threads;
static __attribute__((aligned(128))) _Atomic boolean_t g_churn_stop = FALSE;
static __attribute__((aligned(128))) _Atomic uint64_t g_churn_stopped_at = 0;

/* Global variables (general) */
static uint32_t g_numcpus;
static uint32_t g_nphysicalcpu;
static uint32_t g_nlogicalcpu;
static uint32_t g_numthreads;
static wake_type_t g_waketype;
static policy_t g_policy;
static uint32_t g_iterations;
static struct mach_timebase_info g_mti;
static semaphore_t g_main_sem;
static uint64_t *g_thread_endtimes_abs;
static boolean_t g_verbose = FALSE;
static boolean_t g_do_affinity = FALSE;
static uint64_t g_starttime_abs;
static uint32_t g_iteration_sleeptime_us = 0;
static uint32_t g_priority = 0;
static uint32_t g_churn_pri = 0;
static uint32_t g_churn_count = 0;

static pthread_t* g_churn_threads = NULL;

/* Threshold for dropping a 'bad run' tracepoint */
static uint64_t g_traceworthy_latency_ns = TRACEWORTHY_NANOS;

/* Have we re-execed to set apptype? */
static boolean_t g_seen_apptype = FALSE;

/* usleep in between iterations */
static boolean_t g_do_sleep = TRUE;

/* Every thread spins until all threads have checked in */
static boolean_t g_do_all_spin = FALSE;

/* Every thread backgrounds temporarily before parking */
static boolean_t g_drop_priority = FALSE;

/* Test whether realtime threads are scheduled on separate CPUs */
static boolean_t g_test_rt = FALSE;

/* On SMT machines, test whether realtime threads are scheduled on the correct CPUs */
static boolean_t g_test_rt_smt = FALSE;

/* Test whether realtime threads successfully avoid CPU 0 on Intel */
static boolean_t g_test_rt_avoid0 = FALSE;

/* One randomly chosen thread holds up the train for a certain duration. */
static boolean_t g_do_one_long_spin = FALSE;
static uint32_t g_one_long_spin_id = 0;
static uint64_t g_one_long_spin_length_abs = 0;
static uint64_t g_one_long_spin_length_ns = 0;

/* Each thread spins for a certain duration after waking up before blocking again. */
static boolean_t g_do_each_spin = FALSE;
static uint64_t g_each_spin_duration_abs = 0;
static uint64_t g_each_spin_duration_ns = 0;

/* Global variables (broadcast) */
static semaphore_t g_broadcastsem;
static semaphore_t g_leadersem;
static semaphore_t g_readysem;
static semaphore_t g_donesem;

/* Global variables (chain) */
static semaphore_t *g_semarr;

typedef struct {
	__attribute__((aligned(128))) uint32_t current;
	uint32_t accum;
} histogram_t;

static histogram_t *g_cpu_histogram;
static _Atomic uint64_t *g_cpu_map;

static uint64_t
abs_to_nanos(uint64_t abstime)
{
	return (uint64_t)(abstime * (((double)g_mti.numer) / ((double)g_mti.denom)));
}

static uint64_t
nanos_to_abs(uint64_t ns)
{
	return (uint64_t)(ns * (((double)g_mti.denom) / ((double)g_mti.numer)));
}
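
/*
 * A worked example of the conversion above (values are illustrative, not
 * guaranteed): on Intel the timebase is 1/1, so abs ticks equal nanoseconds
 * and both functions are the identity. On a hypothetical 24 MHz ARM timebase,
 * mach_timebase_info reports numer/denom = 125/3, so 1 ms = 1000000 ns maps
 * to 1000000 * 3 / 125 = 24000 abs ticks.
 */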

inline static void
yield(void)
{
#if defined(__arm__) || defined(__arm64__)
	asm volatile ("yield");
#elif defined(__x86_64__) || defined(__i386__)
	asm volatile ("pause");
#else
#error Unrecognized architecture
#endif
}
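
/*
 * Both instructions are architectural spin-loop hints: they tell the core
 * that the thread is busy-waiting, which reduces power draw and, on SMT
 * parts, yields pipeline resources to the sibling hardware thread without
 * descheduling.
 */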

static void *
churn_thread(__unused void *arg)
{
	uint64_t spin_count = 0;

	/*
	 * As a safety measure to avoid wedging, we will bail on the spin if
	 * it's been more than 1s after the most recent run start
	 */

	while (g_churn_stop == FALSE &&
	    mach_absolute_time() < (g_starttime_abs + nanos_to_abs(NSEC_PER_SEC))) {
		spin_count++;
		yield();
	}

	/* This is totally racy, but only here to detect if anyone stops early */
	atomic_fetch_add_explicit(&g_churn_stopped_at, spin_count, memory_order_relaxed);

	return NULL;
}

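/*
 * Churn threads exist purely to generate competing load: each one spins at
 * the priority given by --churn-pri until the run finishes, so the wakeup
 * latencies are measured against a busy machine rather than an idle one.
 * By default, one churn thread is created for each CPU but one.
 */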
static void
create_churn_threads()
{
	if (g_churn_count == 0) {
		g_churn_count = g_numcpus - 1;
	}

	errno_t err;

	struct sched_param param = { .sched_priority = (int)g_churn_pri };
	pthread_attr_t attr;

	/* Array for churn threads */
	g_churn_threads = (pthread_t*) valloc(sizeof(pthread_t) * g_churn_count);
	assert(g_churn_threads);

	if ((err = pthread_attr_init(&attr))) {
		errc(EX_OSERR, err, "pthread_attr_init");
	}

	if ((err = pthread_attr_setschedparam(&attr, &param))) {
		errc(EX_OSERR, err, "pthread_attr_setschedparam");
	}

	if ((err = pthread_attr_setschedpolicy(&attr, SCHED_RR))) {
		errc(EX_OSERR, err, "pthread_attr_setschedpolicy");
	}

	for (uint32_t i = 0; i < g_churn_count; i++) {
		pthread_t new_thread;

		if ((err = pthread_create(&new_thread, &attr, churn_thread, NULL))) {
			errc(EX_OSERR, err, "pthread_create");
		}
		g_churn_threads[i] = new_thread;
	}

	if ((err = pthread_attr_destroy(&attr))) {
		errc(EX_OSERR, err, "pthread_attr_destroy");
	}
}

static void
join_churn_threads(void)
{
	if (atomic_load_explicit(&g_churn_stopped_at, memory_order_seq_cst) != 0) {
		printf("Warning: Some of the churn threads may have stopped early: %lld\n",
		    g_churn_stopped_at);
	}

	atomic_store_explicit(&g_churn_stop, TRUE, memory_order_seq_cst);

	/* Rejoin churn threads */
	for (uint32_t i = 0; i < g_churn_count; i++) {
		errno_t err = pthread_join(g_churn_threads[i], NULL);
		if (err) {
			errc(EX_OSERR, err, "pthread_join %d", i);
		}
	}
}

/*
 * Figure out what thread policy to use
 */
static my_policy_type_t
parse_thread_policy(const char *str)
{
	if (strcmp(str, "timeshare") == 0) {
		return MY_POLICY_TIMESHARE;
	} else if (strcmp(str, "realtime") == 0) {
		return MY_POLICY_REALTIME;
	} else if (strcmp(str, "fixed") == 0) {
		return MY_POLICY_FIXEDPRI;
	} else {
		errx(EX_USAGE, "Invalid thread policy \"%s\"", str);
	}
}

/*
 * Figure out what wakeup pattern to use
 */
static wake_type_t
parse_wakeup_pattern(const char *str)
{
	if (strcmp(str, "chain") == 0) {
		return WAKE_CHAIN;
	} else if (strcmp(str, "hop") == 0) {
		return WAKE_HOP;
	} else if (strcmp(str, "broadcast-single-sem") == 0) {
		return WAKE_BROADCAST_ONESEM;
	} else if (strcmp(str, "broadcast-per-thread") == 0) {
		return WAKE_BROADCAST_PERTHREAD;
	} else {
		errx(EX_USAGE, "Invalid wakeup pattern \"%s\"", str);
	}
}

/*
 * Set policy
 */
static int
thread_setup(uint32_t my_id)
{
	kern_return_t kr;
	errno_t ret;
	thread_time_constraint_policy_data_t pol;

	if (g_priority) {
		int policy = SCHED_OTHER;
		if (g_policy == MY_POLICY_FIXEDPRI) {
			policy = SCHED_RR;
		}

		struct sched_param param = {.sched_priority = (int)g_priority};
		if ((ret = pthread_setschedparam(pthread_self(), policy, &param))) {
			errc(EX_OSERR, ret, "pthread_setschedparam: %d", my_id);
		}
	}

	switch (g_policy) {
	case MY_POLICY_TIMESHARE:
		break;
	case MY_POLICY_REALTIME:
		/* Hard-coded realtime parameters (similar to what Digi uses) */
		pol.period = 100000;
		pol.constraint = (uint32_t) nanos_to_abs(CONSTRAINT_NANOS);
		pol.computation = (uint32_t) nanos_to_abs(COMPUTATION_NANOS);
		pol.preemptible = 0; /* Ignored by OS */

		kr = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
		    (thread_policy_t) &pol, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
		mach_assert_zero_t(my_id, kr);
		break;
	case MY_POLICY_FIXEDPRI:
		ret = pthread_set_fixedpriority_self();
		if (ret) {
			errc(EX_OSERR, ret, "pthread_set_fixedpriority_self");
		}
		break;
	default:
		errx(EX_USAGE, "invalid policy type %d", g_policy);
	}

	if (g_do_affinity) {
		thread_affinity_policy_data_t affinity;

		affinity.affinity_tag = my_id % 2;

		kr = thread_policy_set(mach_thread_self(), THREAD_AFFINITY_POLICY,
		    (thread_policy_t)&affinity, THREAD_AFFINITY_POLICY_COUNT);
		mach_assert_zero_t(my_id, kr);
	}

	return 0;
}

/*
 * Wait for a wakeup, potentially wake up another of the "0-N" threads,
 * and notify the main thread when done.
 */
static void*
worker_thread(void *arg)
{
	uint32_t my_id = (uint32_t)(uintptr_t)arg;
	kern_return_t kr;

	volatile double x = 0.0;
	volatile double y = 0.0;

	/* Set policy and so forth */
	thread_setup(my_id);

	for (uint32_t i = 0; i < g_iterations; i++) {
		if (my_id == 0) {
			/*
			 * Leader thread either wakes everyone up or starts the chain going.
			 */

			/* Give the worker threads undisturbed time to finish before waiting on them */
			if (g_do_sleep) {
				usleep(g_iteration_sleeptime_us);
			}

			debug_log("%d Leader thread wait for ready\n", i);

			/*
			 * Wait for everyone else to declare ready
			 * Is there a better way to do this that won't interfere with the rest of the chain?
			 * TODO: Invent 'semaphore wait for N signals'
			 */

			for (uint32_t j = 0; j < g_numthreads - 1; j++) {
				kr = semaphore_wait(g_readysem);
				mach_assert_zero_t(my_id, kr);
			}

			debug_log("%d Leader thread wait\n", i);

			if (i > 0) {
				for (int cpuid = 0; cpuid < g_numcpus; cpuid++) {
					if (g_cpu_histogram[cpuid].current == 1) {
						atomic_fetch_or_explicit(&g_cpu_map[i - 1], (1UL << cpuid), memory_order_relaxed);
						g_cpu_histogram[cpuid].current = 0;
					}
				}
			}

			/* Signal main thread and wait for start of iteration */

			kr = semaphore_wait_signal(g_leadersem, g_main_sem);
			mach_assert_zero_t(my_id, kr);

			g_thread_endtimes_abs[my_id] = mach_absolute_time();

			debug_log("%d Leader thread go\n", i);

			assert_zero_t(my_id, atomic_load_explicit(&g_done_threads, memory_order_relaxed));

			switch (g_waketype) {
			case WAKE_BROADCAST_ONESEM:
				kr = semaphore_signal_all(g_broadcastsem);
				mach_assert_zero_t(my_id, kr);
				break;
			case WAKE_BROADCAST_PERTHREAD:
				for (uint32_t j = 1; j < g_numthreads; j++) {
					kr = semaphore_signal(g_semarr[j]);
					mach_assert_zero_t(my_id, kr);
				}
				break;
			case WAKE_CHAIN:
				kr = semaphore_signal(g_semarr[my_id + 1]);
				mach_assert_zero_t(my_id, kr);
				break;
			case WAKE_HOP:
				kr = semaphore_wait_signal(g_donesem, g_semarr[my_id + 1]);
				mach_assert_zero_t(my_id, kr);
				break;
			}
		} else {
			/*
			 * Everyone else waits to be woken up,
			 * records when she wakes up, and possibly
			 * wakes up a friend.
			 */
			switch (g_waketype) {
			case WAKE_BROADCAST_ONESEM:
				kr = semaphore_wait_signal(g_broadcastsem, g_readysem);
				mach_assert_zero_t(my_id, kr);

				g_thread_endtimes_abs[my_id] = mach_absolute_time();
				break;

			case WAKE_BROADCAST_PERTHREAD:
				kr = semaphore_wait_signal(g_semarr[my_id], g_readysem);
				mach_assert_zero_t(my_id, kr);

				g_thread_endtimes_abs[my_id] = mach_absolute_time();
				break;

			case WAKE_CHAIN:
				kr = semaphore_wait_signal(g_semarr[my_id], g_readysem);
				mach_assert_zero_t(my_id, kr);

				/* Signal the next thread *after* recording wake time */

				g_thread_endtimes_abs[my_id] = mach_absolute_time();

				if (my_id < (g_numthreads - 1)) {
					kr = semaphore_signal(g_semarr[my_id + 1]);
					mach_assert_zero_t(my_id, kr);
				}

				break;

			case WAKE_HOP:
				kr = semaphore_wait_signal(g_semarr[my_id], g_readysem);
				mach_assert_zero_t(my_id, kr);

				/* Signal the next thread *after* recording wake time */

				g_thread_endtimes_abs[my_id] = mach_absolute_time();

				if (my_id < (g_numthreads - 1)) {
					kr = semaphore_wait_signal(g_donesem, g_semarr[my_id + 1]);
					mach_assert_zero_t(my_id, kr);
				} else {
					kr = semaphore_signal_all(g_donesem);
					mach_assert_zero_t(my_id, kr);
				}

				break;
			}
		}

		unsigned int cpuid = _os_cpu_number();
		assert(cpuid < g_numcpus);
		debug_log("Thread %p woke up on CPU %d for iteration %d.\n", pthread_self(), cpuid, i);
		g_cpu_histogram[cpuid].current = 1;
		g_cpu_histogram[cpuid].accum++;

		if (g_do_one_long_spin && g_one_long_spin_id == my_id) {
			/* One randomly chosen thread holds up the train for a while. */

			uint64_t endspin = g_starttime_abs + g_one_long_spin_length_abs;
			while (mach_absolute_time() < endspin) {
				y = y + 1.5 + x;
				x = sqrt(y);
			}
		}

		if (g_do_each_spin) {
			/* Each thread spins for a certain duration after waking up before blocking again. */

			uint64_t endspin = mach_absolute_time() + g_each_spin_duration_abs;
			while (mach_absolute_time() < endspin) {
				y = y + 1.5 + x;
				x = sqrt(y);
			}
		}

		uint32_t done_threads;
		done_threads = atomic_fetch_add_explicit(&g_done_threads, 1, memory_order_relaxed) + 1;

		debug_log("Thread %p new value is %d, iteration %d\n", pthread_self(), done_threads, i);

		if (g_drop_priority) {
			/* Drop priority to BG momentarily */
			errno_t ret = setpriority(PRIO_DARWIN_THREAD, 0, PRIO_DARWIN_BG);
			if (ret) {
				errc(EX_OSERR, ret, "setpriority PRIO_DARWIN_BG");
			}
		}

		if (g_do_all_spin) {
			/* Everyone spins until the last thread checks in. */

			while (atomic_load_explicit(&g_done_threads, memory_order_relaxed) < g_numthreads) {
				y = y + 1.5 + x;
				x = sqrt(y);
			}
		}

		if (g_drop_priority) {
			/* Restore normal priority */
			errno_t ret = setpriority(PRIO_DARWIN_THREAD, 0, 0);
			if (ret) {
				errc(EX_OSERR, ret, "setpriority 0");
			}
		}

		debug_log("Thread %p done spinning, iteration %d\n", pthread_self(), i);
	}

	if (my_id == 0) {
		/* Give the worker threads undisturbed time to finish before waiting on them */
		if (g_do_sleep) {
			usleep(g_iteration_sleeptime_us);
		}

		/* Wait for the worker threads to finish */
		for (uint32_t i = 0; i < g_numthreads - 1; i++) {
			kr = semaphore_wait(g_readysem);
			mach_assert_zero_t(my_id, kr);
		}

		/* Tell everyone and the main thread that the last iteration is done */
		debug_log("%d Leader thread done\n", g_iterations - 1);

		for (int cpuid = 0; cpuid < g_numcpus; cpuid++) {
			if (g_cpu_histogram[cpuid].current == 1) {
				atomic_fetch_or_explicit(&g_cpu_map[g_iterations - 1], (1UL << cpuid), memory_order_relaxed);
				g_cpu_histogram[cpuid].current = 0;
			}
		}

		kr = semaphore_signal_all(g_main_sem);
		mach_assert_zero_t(my_id, kr);
	} else {
		/* Hold up thread teardown so it doesn't affect the last iteration */
		kr = semaphore_wait_signal(g_main_sem, g_readysem);
		mach_assert_zero_t(my_id, kr);
	}

	return 0;
}

/*
 * Given an array of uint64_t values, compute average, max, min, and standard deviation
 */
static void
compute_stats(uint64_t *values, uint64_t count, float *averagep, uint64_t *maxp, uint64_t *minp, float *stddevp)
{
	uint32_t i;
	uint64_t _sum = 0;
	uint64_t _max = 0;
	uint64_t _min = UINT64_MAX;
	float _avg = 0;
	float _dev = 0;

	for (i = 0; i < count; i++) {
		_sum += values[i];
		_max = values[i] > _max ? values[i] : _max;
		_min = values[i] < _min ? values[i] : _min;
	}

	_avg = ((float)_sum) / ((float)count);

	_dev = 0;
	for (i = 0; i < count; i++) {
		_dev += powf((((float)values[i]) - _avg), 2);
	}

	_dev /= count;
	_dev = sqrtf(_dev);

	*averagep = _avg;
	*maxp = _max;
	*minp = _min;
	*stddevp = _dev;
}
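
/*
 * Note that compute_stats uses the population form of the standard
 * deviation, sigma = sqrt((1/N) * sum((x_i - mu)^2)), rather than the
 * sample (N-1) form; at the iteration counts this tool is typically run
 * with, the difference is negligible.
 */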

int
main(int argc, char **argv)
{
	errno_t ret;
	kern_return_t kr;

	pthread_t *threads;
	uint64_t *worst_latencies_ns;
	uint64_t *worst_latencies_from_first_ns;
	uint64_t max, min;
	float avg, stddev;

	bool test_fail = false;

	for (int i = 0; i < argc; i++) {
		if (strcmp(argv[i], "--switched_apptype") == 0) {
			g_seen_apptype = TRUE;
		}
	}

	if (!g_seen_apptype) {
		selfexec_with_apptype(argc, argv);
	}

	parse_args(argc, argv);

	srand((unsigned int)time(NULL));

	mach_timebase_info(&g_mti);

	size_t ncpu_size = sizeof(g_numcpus);
	ret = sysctlbyname("hw.ncpu", &g_numcpus, &ncpu_size, NULL, 0);
	if (ret) {
		err(EX_OSERR, "Failed sysctlbyname(hw.ncpu)");
	}
	assert(g_numcpus <= 64);	/* g_cpu_map needs to be extended for > 64 cpus */

	size_t physicalcpu_size = sizeof(g_nphysicalcpu);
	ret = sysctlbyname("hw.physicalcpu", &g_nphysicalcpu, &physicalcpu_size, NULL, 0);
	if (ret) {
		err(EX_OSERR, "Failed sysctlbyname(hw.physicalcpu)");
	}

	size_t logicalcpu_size = sizeof(g_nlogicalcpu);
	ret = sysctlbyname("hw.logicalcpu", &g_nlogicalcpu, &logicalcpu_size, NULL, 0);
	if (ret) {
		err(EX_OSERR, "Failed sysctlbyname(hw.logicalcpu)");
	}

	if (g_test_rt) {
		if (g_numthreads == 0) {
			g_numthreads = g_numcpus;
		}
		g_policy = MY_POLICY_REALTIME;
		g_do_all_spin = TRUE;
	} else if (g_test_rt_smt) {
		if (g_nlogicalcpu != 2 * g_nphysicalcpu) {
			/* Not SMT */
			printf("Attempt to run --test-rt-smt on a non-SMT device\n");
			exit(0);
		}

		if (g_numthreads == 0) {
			g_numthreads = g_nphysicalcpu;
		}
		g_policy = MY_POLICY_REALTIME;
		g_do_all_spin = TRUE;
	} else if (g_test_rt_avoid0) {
#if defined(__x86_64__) || defined(__i386__)
		if (g_numthreads == 0) {
			g_numthreads = g_nphysicalcpu - 1;
		}
		if (g_numthreads == 0) {
			printf("Attempt to run --test-rt-avoid0 on a uniprocessor\n");
			exit(0);
		}
		g_policy = MY_POLICY_REALTIME;
		g_do_all_spin = TRUE;
#else
		printf("Attempt to run --test-rt-avoid0 on a non-Intel device\n");
		exit(0);
#endif
	} else if (g_numthreads == 0) {
		g_numthreads = g_numcpus;
	}

	if (g_do_each_spin) {
		g_each_spin_duration_abs = nanos_to_abs(g_each_spin_duration_ns);
	}

	/* Configure the long-spin thread to take up half of its computation */
	if (g_do_one_long_spin) {
		g_one_long_spin_length_ns = COMPUTATION_NANOS / 2;
		g_one_long_spin_length_abs = nanos_to_abs(g_one_long_spin_length_ns);
	}

	/* Estimate the amount of time the cleanup phase needs to back off */
	g_iteration_sleeptime_us = g_numthreads * 20;

	uint32_t threads_per_core = (g_numthreads / g_numcpus) + 1;
	if (g_do_each_spin) {
		g_iteration_sleeptime_us += threads_per_core * (g_each_spin_duration_ns / NSEC_PER_USEC);
	}
	if (g_do_one_long_spin) {
		g_iteration_sleeptime_us += g_one_long_spin_length_ns / NSEC_PER_USEC;
	}
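
	/*
	 * Worked example with hypothetical numbers: 8 threads on 4 CPUs with
	 * --spin-time 1000000 (1 ms of each-spin) gives 8 * 20 = 160 us of
	 * base backoff, plus (8/4 + 1) = 3 threads-per-core times 1000 us of
	 * spin, for a total g_iteration_sleeptime_us of 3160 us.
	 */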

	/* Arrays for threads and their wakeup times */
	threads = (pthread_t*) valloc(sizeof(pthread_t) * g_numthreads);
	assert(threads);

	size_t endtimes_size = sizeof(uint64_t) * g_numthreads;

	g_thread_endtimes_abs = (uint64_t*) valloc(endtimes_size);
	assert(g_thread_endtimes_abs);

	/* Ensure the allocation is pre-faulted */
	ret = memset_s(g_thread_endtimes_abs, endtimes_size, 0, endtimes_size);
	if (ret) {
		errc(EX_OSERR, ret, "memset_s endtimes");
	}

	size_t latencies_size = sizeof(uint64_t) * g_iterations;

	worst_latencies_ns = (uint64_t*) valloc(latencies_size);
	assert(worst_latencies_ns);

	/* Ensure the allocation is pre-faulted */
	ret = memset_s(worst_latencies_ns, latencies_size, 0, latencies_size);
	if (ret) {
		errc(EX_OSERR, ret, "memset_s latencies");
	}

	worst_latencies_from_first_ns = (uint64_t*) valloc(latencies_size);
	assert(worst_latencies_from_first_ns);

	/* Ensure the allocation is pre-faulted */
	ret = memset_s(worst_latencies_from_first_ns, latencies_size, 0, latencies_size);
	if (ret) {
		errc(EX_OSERR, ret, "memset_s latencies_from_first");
	}

	size_t histogram_size = sizeof(histogram_t) * g_numcpus;
	g_cpu_histogram = (histogram_t *)valloc(histogram_size);
	assert(g_cpu_histogram);
	/* Ensure the allocation is pre-faulted */
	ret = memset_s(g_cpu_histogram, histogram_size, 0, histogram_size);
	if (ret) {
		errc(EX_OSERR, ret, "memset_s g_cpu_histogram");
	}

	size_t map_size = sizeof(uint64_t) * g_iterations;
	g_cpu_map = (_Atomic uint64_t *)valloc(map_size);
	assert(g_cpu_map);
	/* Ensure the allocation is pre-faulted */
	ret = memset_s(g_cpu_map, map_size, 0, map_size);
	if (ret) {
		errc(EX_OSERR, ret, "memset_s g_cpu_map");
	}

	kr = semaphore_create(mach_task_self(), &g_main_sem, SYNC_POLICY_FIFO, 0);
	mach_assert_zero(kr);

	/* Either one big semaphore or one per thread */
	if (g_waketype == WAKE_CHAIN ||
	    g_waketype == WAKE_BROADCAST_PERTHREAD ||
	    g_waketype == WAKE_HOP) {
		g_semarr = valloc(sizeof(semaphore_t) * g_numthreads);
		assert(g_semarr);

		for (uint32_t i = 0; i < g_numthreads; i++) {
			kr = semaphore_create(mach_task_self(), &g_semarr[i], SYNC_POLICY_FIFO, 0);
			mach_assert_zero(kr);
		}

		g_leadersem = g_semarr[0];
	} else {
		kr = semaphore_create(mach_task_self(), &g_broadcastsem, SYNC_POLICY_FIFO, 0);
		mach_assert_zero(kr);
		kr = semaphore_create(mach_task_self(), &g_leadersem, SYNC_POLICY_FIFO, 0);
		mach_assert_zero(kr);
	}

	if (g_waketype == WAKE_HOP) {
		kr = semaphore_create(mach_task_self(), &g_donesem, SYNC_POLICY_FIFO, 0);
		mach_assert_zero(kr);
	}

	kr = semaphore_create(mach_task_self(), &g_readysem, SYNC_POLICY_FIFO, 0);
	mach_assert_zero(kr);

	atomic_store_explicit(&g_done_threads, 0, memory_order_relaxed);

	/* Create the threads */
	for (uint32_t i = 0; i < g_numthreads; i++) {
		ret = pthread_create(&threads[i], NULL, worker_thread, (void*)(uintptr_t)i);
		if (ret) {
			errc(EX_OSERR, ret, "pthread_create %d", i);
		}
	}

	ret = setpriority(PRIO_DARWIN_ROLE, 0, PRIO_DARWIN_ROLE_UI_FOCAL);
	if (ret) {
		errc(EX_OSERR, ret, "setpriority");
	}

	thread_setup(0);

	g_starttime_abs = mach_absolute_time();

	if (g_churn_pri) {
		create_churn_threads();
	}

	/* Let everyone get settled */
	kr = semaphore_wait(g_main_sem);
	mach_assert_zero(kr);

	/* Give the system a bit more time to settle */
	if (g_do_sleep) {
		usleep(g_iteration_sleeptime_us);
	}

	/* Go! */
	for (uint32_t i = 0; i < g_iterations; i++) {
		uint32_t j;
		uint64_t worst_abs = 0, best_abs = UINT64_MAX;

		if (g_do_one_long_spin) {
			g_one_long_spin_id = (uint32_t)rand() % g_numthreads;
		}

		debug_log("%d Main thread reset\n", i);

		atomic_store_explicit(&g_done_threads, 0, memory_order_seq_cst);

		g_starttime_abs = mach_absolute_time();

		/* Fire them off and wait for worker threads to finish */
		kr = semaphore_wait_signal(g_main_sem, g_leadersem);
		mach_assert_zero(kr);

		debug_log("%d Main thread return\n", i);

		assert(atomic_load_explicit(&g_done_threads, memory_order_relaxed) == g_numthreads);

		/*
		 * We report the worst latencies relative to start time
		 * and relative to the lead worker thread.
		 */
		for (j = 0; j < g_numthreads; j++) {
			uint64_t latency_abs;

			latency_abs = g_thread_endtimes_abs[j] - g_starttime_abs;
			worst_abs = worst_abs < latency_abs ? latency_abs : worst_abs;
		}

		worst_latencies_ns[i] = abs_to_nanos(worst_abs);

		worst_abs = 0;
		for (j = 1; j < g_numthreads; j++) {
			uint64_t latency_abs;

			latency_abs = g_thread_endtimes_abs[j] - g_thread_endtimes_abs[0];
			worst_abs = worst_abs < latency_abs ? latency_abs : worst_abs;
			best_abs = best_abs > latency_abs ? latency_abs : best_abs;
		}

		worst_latencies_from_first_ns[i] = abs_to_nanos(worst_abs);

		/*
		 * In the event of a bad run, cut a trace point.
		 */
		if (worst_latencies_from_first_ns[i] > g_traceworthy_latency_ns) {
			/* Ariadne's ad-hoc test signpost */
			kdebug_trace(ARIADNEDBG_CODE(0, 0), worst_latencies_from_first_ns[i], g_traceworthy_latency_ns, 0, 0);

			if (g_verbose) {
				printf("Worst on this round was %.2f us.\n", ((float)worst_latencies_from_first_ns[i]) / 1000.0);
			}
		}

		/* Give the system a bit more time to settle */
		if (g_do_sleep) {
			usleep(g_iteration_sleeptime_us);
		}
	}

	/* Rejoin threads */
	for (uint32_t i = 0; i < g_numthreads; i++) {
		ret = pthread_join(threads[i], NULL);
		if (ret) {
			errc(EX_OSERR, ret, "pthread_join %d", i);
		}
	}

	if (g_churn_pri) {
		join_churn_threads();
	}

	compute_stats(worst_latencies_ns, g_iterations, &avg, &max, &min, &stddev);
	printf("Results (from a stop):\n");
	printf("Max:\t\t%.2f us\n", ((float)max) / 1000.0);
	printf("Min:\t\t%.2f us\n", ((float)min) / 1000.0);
	printf("Avg:\t\t%.2f us\n", avg / 1000.0);
	printf("Stddev:\t\t%.2f us\n", stddev / 1000.0);

	putchar('\n');

	compute_stats(worst_latencies_from_first_ns, g_iterations, &avg, &max, &min, &stddev);
	printf("Results (relative to first thread):\n");
	printf("Max:\t\t%.2f us\n", ((float)max) / 1000.0);
	printf("Min:\t\t%.2f us\n", ((float)min) / 1000.0);
	printf("Avg:\t\t%.2f us\n", avg / 1000.0);
	printf("Stddev:\t\t%.2f us\n", stddev / 1000.0);

#if 0
	for (uint32_t i = 0; i < g_iterations; i++) {
		printf("Iteration %d: %f us\n", i, worst_latencies_ns[i] / 1000.0);
	}
#endif

	if (g_test_rt || g_test_rt_smt || g_test_rt_avoid0) {
		putchar('\n');

		for (uint32_t i = 0; i < g_numcpus; i++) {
			printf("%d\t%d\n", i, g_cpu_histogram[i].accum);
		}

#define PRIMARY   0x5555555555555555ULL
#define SECONDARY 0xaaaaaaaaaaaaaaaaULL
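
		/*
		 * These masks assume the common Darwin numbering where logical
		 * CPUs alternate primary/secondary: even CPU ids (bits set in
		 * PRIMARY, 0b0101...) are primary cores and odd ids (SECONDARY,
		 * 0b1010...) are their SMT siblings. Shifting the SECONDARY
		 * bits right by one lines each sibling up under its primary, so
		 * the AND in the SMT test below detects both halves of one core
		 * in use at once.
		 */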

		int fail_count = 0;

		for (uint32_t i = 0; i < g_iterations; i++) {
			bool secondary = false;
			bool fail = false;
			uint64_t map = g_cpu_map[i];
			if (g_test_rt_smt) {
				/* Test for one or more threads running on secondary cores unexpectedly (WARNING) */
				secondary = (map & SECONDARY);
				/* Test for threads running on both primary and secondary cpus of the same core (FAIL) */
				fail = ((map & PRIMARY) & ((map & SECONDARY) >> 1));
			} else if (g_test_rt) {
				fail = __builtin_popcountll(map) != g_numthreads;
			} else if (g_test_rt_avoid0) {
				fail = ((map & 0x1) == 0x1);
			}
			if (secondary || fail) {
				printf("Iteration %d: 0x%llx%s%s\n", i, map,
				    secondary ? " SECONDARY" : "",
				    fail ? " FAIL" : "");
			}
			test_fail |= fail;
			fail_count += fail;
		}

		if (test_fail && (g_iterations >= 100) && (fail_count <= g_iterations / 100)) {
			printf("99%% or better success rate\n");
			test_fail = 0;
		}
	}

	free(threads);
	free(g_thread_endtimes_abs);
	free(worst_latencies_ns);
	free(worst_latencies_from_first_ns);
	free(g_cpu_histogram);
	free(g_cpu_map);

	return test_fail;
}

/*
 * WARNING: This is SPI specifically intended for use by launchd to start UI
 * apps. We use it here for a test tool only to opt into QoS using the same
 * policies. Do not use this outside xnu or libxpc/launchd.
 */
static void
selfexec_with_apptype(int argc, char *argv[])
{
	int ret;
	posix_spawnattr_t attr;
	extern char **environ;
	char *new_argv[argc + 1 + 1 /* NULL */];
	int i;
	char prog[PATH_MAX];
	uint32_t prog_size = PATH_MAX;

	ret = _NSGetExecutablePath(prog, &prog_size);
	if (ret) {
		err(EX_OSERR, "_NSGetExecutablePath");
	}

	for (i = 0; i < argc; i++) {
		new_argv[i] = argv[i];
	}

	new_argv[i] = "--switched_apptype";
	new_argv[i + 1] = NULL;

	ret = posix_spawnattr_init(&attr);
	if (ret) {
		errc(EX_OSERR, ret, "posix_spawnattr_init");
	}

	ret = posix_spawnattr_setflags(&attr, POSIX_SPAWN_SETEXEC);
	if (ret) {
		errc(EX_OSERR, ret, "posix_spawnattr_setflags");
	}

	ret = posix_spawnattr_setprocesstype_np(&attr, POSIX_SPAWN_PROC_TYPE_APP_DEFAULT);
	if (ret) {
		errc(EX_OSERR, ret, "posix_spawnattr_setprocesstype_np");
	}

	ret = posix_spawn(NULL, prog, NULL, &attr, new_argv, environ);
	if (ret) {
		errc(EX_OSERR, ret, "posix_spawn");
	}
}

/*
 * Admittedly not very attractive.
 */
static void __attribute__((noreturn))
usage()
{
	errx(EX_USAGE, "Usage: %s <threads> <chain | hop | broadcast-single-sem | broadcast-per-thread> "
	    "<realtime | timeshare | fixed> <iterations>\n\t\t"
	    "[--trace <traceworthy latency in ns>] [--priority <pri>]\n\t\t"
	    "[--verbose] [--spin-one] [--spin-all] [--spin-time <nanos>] [--affinity]\n\t\t"
	    "[--no-sleep] [--drop-priority] [--churn-pri <pri>] [--churn-count <n>]\n\t\t"
	    "[--test-rt] [--test-rt-smt] [--test-rt-avoid0]",
	    getprogname());
}
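
/*
 * A hypothetical invocation, for illustration: measure 100 iterations of
 * waking 4 realtime threads from a single broadcast semaphore, with every
 * thread spinning until the last one checks in:
 *
 *     zn 4 broadcast-single-sem realtime 100 --spin-all --verbose
 *
 * (The binary name "zn" is an assumption; use whatever this file builds as.)
 */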

static struct option* g_longopts;
static int option_index;

static uint32_t
read_dec_arg()
{
	char *cp;
	/* char* optarg is a magic global */

	uint32_t arg_val = (uint32_t)strtoull(optarg, &cp, 10);

	if (cp == optarg || *cp) {
		errx(EX_USAGE, "arg --%s requires a decimal number, found \"%s\"",
		    g_longopts[option_index].name, optarg);
	}

	return arg_val;
}

static void
parse_args(int argc, char *argv[])
{
	enum {
		OPT_GETOPT = 0,
		OPT_SPIN_TIME,
		OPT_TRACE,
		OPT_PRIORITY,
		OPT_CHURN_PRI,
		OPT_CHURN_COUNT,
	};

	static struct option longopts[] = {
		/* BEGIN IGNORE CODESTYLE */
		{ "spin-time",        required_argument, NULL,                      OPT_SPIN_TIME },
		{ "trace",            required_argument, NULL,                      OPT_TRACE },
		{ "priority",         required_argument, NULL,                      OPT_PRIORITY },
		{ "churn-pri",        required_argument, NULL,                      OPT_CHURN_PRI },
		{ "churn-count",      required_argument, NULL,                      OPT_CHURN_COUNT },
		{ "switched_apptype", no_argument,       (int*)&g_seen_apptype,     TRUE },
		{ "spin-one",         no_argument,       (int*)&g_do_one_long_spin, TRUE },
		{ "spin-all",         no_argument,       (int*)&g_do_all_spin,      TRUE },
		{ "affinity",         no_argument,       (int*)&g_do_affinity,      TRUE },
		{ "no-sleep",         no_argument,       (int*)&g_do_sleep,         FALSE },
		{ "drop-priority",    no_argument,       (int*)&g_drop_priority,    TRUE },
		{ "test-rt",          no_argument,       (int*)&g_test_rt,          TRUE },
		{ "test-rt-smt",      no_argument,       (int*)&g_test_rt_smt,      TRUE },
		{ "test-rt-avoid0",   no_argument,       (int*)&g_test_rt_avoid0,   TRUE },
		{ "verbose",          no_argument,       (int*)&g_verbose,          TRUE },
		{ "help",             no_argument,       NULL,                      'h' },
		{ NULL,               0,                 NULL,                      0 }
		/* END IGNORE CODESTYLE */
	};

	g_longopts = longopts;
	int ch = 0;

	while ((ch = getopt_long(argc, argv, "h", longopts, &option_index)) != -1) {
		switch (ch) {
		case OPT_GETOPT:
			/* getopt_long set a variable */
			break;
		case OPT_SPIN_TIME:
			g_do_each_spin = TRUE;
			g_each_spin_duration_ns = read_dec_arg();
			break;
		case OPT_TRACE:
			g_traceworthy_latency_ns = read_dec_arg();
			break;
		case OPT_PRIORITY:
			g_priority = read_dec_arg();
			break;
		case OPT_CHURN_PRI:
			g_churn_pri = read_dec_arg();
			break;
		case OPT_CHURN_COUNT:
			g_churn_count = read_dec_arg();
			break;
		case '?':
		case 'h':
		default:
			usage();
			/* NORETURN */
		}
	}

	/*
	 * getopt_long reorders all the options to the beginning of the argv array.
	 * Jump past them to the non-option arguments.
	 */

	argc -= optind;
	argv += optind;

	if (argc > 4) {
		warnx("Too many non-option arguments passed");
		usage();
	}

	if (argc != 4) {
		warnx("Missing required <threads> <waketype> <policy> <iterations> arguments");
		usage();
	}

	char *cp;

	/* How many threads? */
	g_numthreads = (uint32_t)strtoull(argv[0], &cp, 10);

	if (cp == argv[0] || *cp) {
		errx(EX_USAGE, "numthreads requires a decimal number, found \"%s\"", argv[0]);
	}

	/* What wakeup pattern? */
	g_waketype = parse_wakeup_pattern(argv[1]);

	/* Policy */
	g_policy = parse_thread_policy(argv[2]);

	/* Iterations */
	g_iterations = (uint32_t)strtoull(argv[3], &cp, 10);

	if (cp == argv[3] || *cp) {
		errx(EX_USAGE, "iterations requires a decimal number, found \"%s\"", argv[3]);
	}

	if (g_iterations < 1) {
		errx(EX_USAGE, "Must have at least one iteration");
	}

	if (g_numthreads == 1 && g_waketype == WAKE_CHAIN) {
		errx(EX_USAGE, "chain mode requires more than one thread");
	}

	if (g_numthreads == 1 && g_waketype == WAKE_HOP) {
		errx(EX_USAGE, "hop mode requires more than one thread");
	}
}