/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <unistd.h>
#include <stdio.h>
#include <math.h>
#include <sys/kdebug.h>
#include <stdlib.h>
#include <pthread.h>
#include <errno.h>
#include <err.h>
#include <string.h>
#include <time.h>   /* time(3), used below to seed rand() */
#include <limits.h> /* PATH_MAX */
#include <assert.h>
#include <sysexits.h>
#include <sys/sysctl.h>
#include <getopt.h>

#include <spawn.h>
#include <spawn_private.h>
#include <sys/spawn_internal.h>
#include <mach-o/dyld.h>

#include <mach/mach_time.h>
#include <mach/mach.h>
#include <mach/task.h>
#include <mach/semaphore.h>

#include <pthread/qos_private.h>

#include <sys/resource.h>

#include <stdatomic.h>

typedef enum wake_type { WAKE_BROADCAST_ONESEM, WAKE_BROADCAST_PERTHREAD, WAKE_CHAIN, WAKE_HOP } wake_type_t;
typedef enum my_policy_type { MY_POLICY_REALTIME, MY_POLICY_TIMESHARE, MY_POLICY_FIXEDPRI } my_policy_type_t;

#define mach_assert_zero(error) do { if ((error) != 0) { fprintf(stderr, "[FAIL] error %d (%s) ", (error), mach_error_string(error)); assert(error == 0); } } while (0)
#define mach_assert_zero_t(tid, error) do { if ((error) != 0) { fprintf(stderr, "[FAIL] Thread %d error %d (%s) ", (tid), (error), mach_error_string(error)); assert(error == 0); } } while (0)
#define assert_zero_t(tid, error) do { if ((error) != 0) { fprintf(stderr, "[FAIL] Thread %d error %d ", (tid), (error)); assert(error == 0); } } while (0)

#define CONSTRAINT_NANOS	(20000000ll)	/* 20 ms */
#define COMPUTATION_NANOS	(10000000ll)	/* 10 ms */
#define TRACEWORTHY_NANOS	(10000000ll)	/* 10 ms */

#define DEBUG 0

#if DEBUG
#define debug_log(args...) printf(args)
#else
#define debug_log(args...) do { } while (0)
#endif

/* Declarations */
static void* worker_thread(void *arg);
static void usage(void);
static int thread_setup(uint32_t my_id);
static my_policy_type_t parse_thread_policy(const char *str);
static void selfexec_with_apptype(int argc, char *argv[]);
static void parse_args(int argc, char *argv[]);

static __attribute__((aligned(128))) _Atomic uint32_t g_done_threads;
static __attribute__((aligned(128))) _Atomic boolean_t g_churn_stop = FALSE;
static __attribute__((aligned(128))) _Atomic uint64_t g_churn_stopped_at = 0;

/* Global variables (general) */
static uint32_t g_numcpus;
static uint32_t g_numthreads;
static wake_type_t g_waketype;
static my_policy_type_t g_policy;
static uint32_t g_iterations;
static struct mach_timebase_info g_mti;
static semaphore_t g_main_sem;
static uint64_t *g_thread_endtimes_abs;
static boolean_t g_verbose = FALSE;
static boolean_t g_do_affinity = FALSE;
static uint64_t g_starttime_abs;
static uint32_t g_iteration_sleeptime_us = 0;
static uint32_t g_priority = 0;
static uint32_t g_churn_pri = 0;
static uint32_t g_churn_count = 0;

static pthread_t* g_churn_threads = NULL;

/* Threshold for dropping a 'bad run' tracepoint */
static uint64_t g_traceworthy_latency_ns = TRACEWORTHY_NANOS;

/* Have we re-execed to set apptype? */
static boolean_t g_seen_apptype = FALSE;

/* usleep in between iterations */
static boolean_t g_do_sleep = TRUE;

/* Every thread spins until all threads have checked in */
static boolean_t g_do_all_spin = FALSE;

/* Every thread backgrounds temporarily before parking */
static boolean_t g_drop_priority = FALSE;

/* One randomly chosen thread holds up the train for a certain duration. */
static boolean_t g_do_one_long_spin = FALSE;
static uint32_t g_one_long_spin_id = 0;
static uint64_t g_one_long_spin_length_abs = 0;
static uint64_t g_one_long_spin_length_ns = 0;

/* Each thread spins for a certain duration after waking up before blocking again. */
static boolean_t g_do_each_spin = FALSE;
static uint64_t g_each_spin_duration_abs = 0;
static uint64_t g_each_spin_duration_ns = 0;

/* Global variables (broadcast) */
static semaphore_t g_broadcastsem;
static semaphore_t g_leadersem;
static semaphore_t g_readysem;
static semaphore_t g_donesem;

/* Global variables (chain) */
static semaphore_t *g_semarr;

static uint64_t
abs_to_nanos(uint64_t abstime)
{
	return (uint64_t)(abstime * (((double)g_mti.numer) / ((double)g_mti.denom)));
}

static uint64_t
nanos_to_abs(uint64_t ns)
{
	return (uint64_t)(ns * (((double)g_mti.denom) / ((double)g_mti.numer)));
}
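
/*
 * Example (illustrative numbers, not from this file): with a timebase of
 * numer = 125, denom = 3 -- as on some Apple ARM SoCs whose
 * mach_absolute_time() ticks at 24 MHz -- nanos_to_abs() maps 10 ms like so:
 *
 *     nanos_to_abs(10 * NSEC_PER_MSEC) == 10000000 * (3.0 / 125.0)
 *                                      == 240000 absolute-time ticks
 *
 * Where numer == denom == 1 (typical x86), abstime already is nanoseconds
 * and both helpers are identity conversions.
 */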

inline static void
yield(void)
{
#if defined(__arm__) || defined(__arm64__)
	asm volatile("yield");
#elif defined(__x86_64__) || defined(__i386__)
	asm volatile("pause");
#else
#error Unrecognized architecture
#endif
}
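
/*
 * Note: the instructions in yield() are architectural spin-wait hints
 * ("yield" on ARM, "pause" on x86). They ease pipeline and power pressure
 * in a tight spin but do not enter the kernel or give the CPU back to the
 * scheduler -- a spinning churn thread stays runnable the whole time.
 */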

static void *
churn_thread(__unused void *arg)
{
	uint64_t spin_count = 0;

	/*
	 * As a safety measure to avoid wedging, we will bail on the spin if
	 * it's been more than 1s after the most recent run start
	 */
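	/*
	 * (Note that NSEC_PER_SEC below is a nanosecond count, while
	 * g_starttime_abs and mach_absolute_time() are in mach absolute-time
	 * units; the two scales coincide only where the timebase is 1:1, so
	 * the cutoff is exact only there. nanos_to_abs(NSEC_PER_SEC) would
	 * be the exact bound everywhere.)
	 */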

	while (g_churn_stop == FALSE &&
	       mach_absolute_time() < (g_starttime_abs + NSEC_PER_SEC)) {
		spin_count++;
		yield();
	}

	/* This is totally racy, but only here to detect if anyone stops early */
	atomic_fetch_add_explicit(&g_churn_stopped_at, spin_count, memory_order_relaxed);

	return NULL;
}

static void
create_churn_threads(void)
{
	if (g_churn_count == 0)
		g_churn_count = g_numcpus - 1;

	errno_t err;

	struct sched_param param = { .sched_priority = (int)g_churn_pri };
	pthread_attr_t attr;

	/* Array for churn threads */
	g_churn_threads = (pthread_t*) valloc(sizeof(pthread_t) * g_churn_count);
	assert(g_churn_threads);

	if ((err = pthread_attr_init(&attr)))
		errc(EX_OSERR, err, "pthread_attr_init");

	if ((err = pthread_attr_setschedparam(&attr, &param)))
		errc(EX_OSERR, err, "pthread_attr_setschedparam");

	if ((err = pthread_attr_setschedpolicy(&attr, SCHED_RR)))
		errc(EX_OSERR, err, "pthread_attr_setschedpolicy");

	for (uint32_t i = 0; i < g_churn_count; i++) {
		pthread_t new_thread;

		if ((err = pthread_create(&new_thread, &attr, churn_thread, NULL)))
			errc(EX_OSERR, err, "pthread_create");
		g_churn_threads[i] = new_thread;
	}

	if ((err = pthread_attr_destroy(&attr)))
		errc(EX_OSERR, err, "pthread_attr_destroy");
}
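
/*
 * With no explicit --churn-count, (ncpu - 1) churn threads are created,
 * which (presumably) leaves one CPU's worth of capacity for the threads
 * being measured; --churn-count can over- or under-subscribe that.
 */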

static void
join_churn_threads(void)
{
	if (atomic_load_explicit(&g_churn_stopped_at, memory_order_seq_cst) != 0)
		printf("Warning: Some of the churn threads may have stopped early: %lld\n",
		       g_churn_stopped_at);

	atomic_store_explicit(&g_churn_stop, TRUE, memory_order_seq_cst);

	/* Rejoin churn threads */
	for (uint32_t i = 0; i < g_churn_count; i++) {
		errno_t err = pthread_join(g_churn_threads[i], NULL);
		if (err) errc(EX_OSERR, err, "pthread_join %d", i);
	}
}

/*
 * Figure out what thread policy to use
 */
static my_policy_type_t
parse_thread_policy(const char *str)
{
	if (strcmp(str, "timeshare") == 0) {
		return MY_POLICY_TIMESHARE;
	} else if (strcmp(str, "realtime") == 0) {
		return MY_POLICY_REALTIME;
	} else if (strcmp(str, "fixed") == 0) {
		return MY_POLICY_FIXEDPRI;
	} else {
		errx(EX_USAGE, "Invalid thread policy \"%s\"", str);
	}
}

/*
 * Figure out what wakeup pattern to use
 */
static wake_type_t
parse_wakeup_pattern(const char *str)
{
	if (strcmp(str, "chain") == 0) {
		return WAKE_CHAIN;
	} else if (strcmp(str, "hop") == 0) {
		return WAKE_HOP;
	} else if (strcmp(str, "broadcast-single-sem") == 0) {
		return WAKE_BROADCAST_ONESEM;
	} else if (strcmp(str, "broadcast-per-thread") == 0) {
		return WAKE_BROADCAST_PERTHREAD;
	} else {
		errx(EX_USAGE, "Invalid wakeup pattern \"%s\"", str);
	}
}

/*
 * Set policy
 */
static int
thread_setup(uint32_t my_id)
{
	kern_return_t kr;
	errno_t ret;
	thread_time_constraint_policy_data_t pol;

	if (g_priority) {
		int policy = SCHED_OTHER;
		if (g_policy == MY_POLICY_FIXEDPRI)
			policy = SCHED_RR;

		struct sched_param param = {.sched_priority = (int)g_priority};
		if ((ret = pthread_setschedparam(pthread_self(), policy, &param)))
			errc(EX_OSERR, ret, "pthread_setschedparam: %d", my_id);
	}

	switch (g_policy) {
	case MY_POLICY_TIMESHARE:
		break;
	case MY_POLICY_REALTIME:
		/* Hard-coded realtime parameters (similar to what Digi uses) */
		pol.period = 100000;
		pol.constraint = (uint32_t) nanos_to_abs(CONSTRAINT_NANOS);
		pol.computation = (uint32_t) nanos_to_abs(COMPUTATION_NANOS);
		pol.preemptible = 0; /* Ignored by OS */

		kr = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
		                       (thread_policy_t) &pol, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
		mach_assert_zero_t(my_id, kr);
		break;
	case MY_POLICY_FIXEDPRI:
		ret = pthread_set_fixedpriority_self();
		if (ret) errc(EX_OSERR, ret, "pthread_set_fixedpriority_self");
		break;
	default:
		errx(EX_USAGE, "invalid policy type %d", g_policy);
	}

	if (g_do_affinity) {
		thread_affinity_policy_data_t affinity;

		affinity.affinity_tag = my_id % 2;

		kr = thread_policy_set(mach_thread_self(), THREAD_AFFINITY_POLICY,
		                       (thread_policy_t)&affinity, THREAD_AFFINITY_POLICY_COUNT);
		mach_assert_zero_t(my_id, kr);
	}

	return 0;
}
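
/*
 * For reference: all three THREAD_TIME_CONSTRAINT_POLICY fields are in mach
 * absolute-time units. CONSTRAINT_NANOS (20 ms) and COMPUTATION_NANOS (10 ms)
 * go through nanos_to_abs(), while the period is the raw constant 100000
 * ticks (100 us only where the timebase is 1:1). The request reads roughly
 * as: "each period I may need up to 10 ms of CPU, and each computation must
 * complete within a 20 ms window of starting."
 */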

/*
 * Wait for a wakeup, potentially wake up another of the "0-N" threads,
 * and notify the main thread when done.
 */
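/*
 * Wakeup patterns, as implemented below (thread 0 is the leader):
 *
 *   broadcast-single-sem: leader signal_all()s one semaphore shared by all.
 *   broadcast-per-thread: leader signals each waiter's private semaphore.
 *   chain: each thread wakes the next and keeps running.
 *   hop:   each thread wakes the next, then blocks on g_donesem, so only
 *          one thread is runnable at a time until the last releases all.
 */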
static void*
worker_thread(void *arg)
{
	uint32_t my_id = (uint32_t)(uintptr_t)arg;
	kern_return_t kr;

	volatile double x = 0.0;
	volatile double y = 0.0;

	/* Set policy and so forth */
	thread_setup(my_id);

	for (uint32_t i = 0; i < g_iterations; i++) {
		if (my_id == 0) {
			/*
			 * Leader thread either wakes everyone up or starts the chain going.
			 */

			/* Give the worker threads undisturbed time to finish before waiting on them */
			if (g_do_sleep)
				usleep(g_iteration_sleeptime_us);

			debug_log("%d Leader thread wait for ready\n", i);

			/*
			 * Wait for everyone else to declare ready
			 * Is there a better way to do this that won't interfere with the rest of the chain?
			 * TODO: Invent 'semaphore wait for N signals'
			 */

			for (uint32_t j = 0; j < g_numthreads - 1; j++) {
				kr = semaphore_wait(g_readysem);
				mach_assert_zero_t(my_id, kr);
			}

			debug_log("%d Leader thread wait\n", i);

			/* Signal main thread and wait for start of iteration */

			kr = semaphore_wait_signal(g_leadersem, g_main_sem);
			mach_assert_zero_t(my_id, kr);

			g_thread_endtimes_abs[my_id] = mach_absolute_time();

			debug_log("%d Leader thread go\n", i);

			assert_zero_t(my_id, atomic_load_explicit(&g_done_threads, memory_order_relaxed));

			switch (g_waketype) {
			case WAKE_BROADCAST_ONESEM:
				kr = semaphore_signal_all(g_broadcastsem);
				mach_assert_zero_t(my_id, kr);
				break;
			case WAKE_BROADCAST_PERTHREAD:
				for (uint32_t j = 1; j < g_numthreads; j++) {
					kr = semaphore_signal(g_semarr[j]);
					mach_assert_zero_t(my_id, kr);
				}
				break;
			case WAKE_CHAIN:
				kr = semaphore_signal(g_semarr[my_id + 1]);
				mach_assert_zero_t(my_id, kr);
				break;
			case WAKE_HOP:
				kr = semaphore_wait_signal(g_donesem, g_semarr[my_id + 1]);
				mach_assert_zero_t(my_id, kr);
				break;
			}
		} else {
			/*
			 * Everyone else waits to be woken up,
			 * records when she wakes up, and possibly
			 * wakes up a friend.
			 */
			switch (g_waketype) {
			case WAKE_BROADCAST_ONESEM:
				kr = semaphore_wait_signal(g_broadcastsem, g_readysem);
				mach_assert_zero_t(my_id, kr);

				g_thread_endtimes_abs[my_id] = mach_absolute_time();
				break;

			case WAKE_BROADCAST_PERTHREAD:
				kr = semaphore_wait_signal(g_semarr[my_id], g_readysem);
				mach_assert_zero_t(my_id, kr);

				g_thread_endtimes_abs[my_id] = mach_absolute_time();
				break;

			case WAKE_CHAIN:
				kr = semaphore_wait_signal(g_semarr[my_id], g_readysem);
				mach_assert_zero_t(my_id, kr);

				/* Signal the next thread *after* recording wake time */

				g_thread_endtimes_abs[my_id] = mach_absolute_time();

				if (my_id < (g_numthreads - 1)) {
					kr = semaphore_signal(g_semarr[my_id + 1]);
					mach_assert_zero_t(my_id, kr);
				}

				break;

			case WAKE_HOP:
				kr = semaphore_wait_signal(g_semarr[my_id], g_readysem);
				mach_assert_zero_t(my_id, kr);

				/* Signal the next thread *after* recording wake time */

				g_thread_endtimes_abs[my_id] = mach_absolute_time();

				if (my_id < (g_numthreads - 1)) {
					kr = semaphore_wait_signal(g_donesem, g_semarr[my_id + 1]);
					mach_assert_zero_t(my_id, kr);
				} else {
					kr = semaphore_signal_all(g_donesem);
					mach_assert_zero_t(my_id, kr);
				}

				break;
			}
		}

		debug_log("Thread %p woke up for iteration %d.\n", pthread_self(), i);

		if (g_do_one_long_spin && g_one_long_spin_id == my_id) {
			/* One randomly chosen thread holds up the train for a while. */

			uint64_t endspin = g_starttime_abs + g_one_long_spin_length_abs;
			while (mach_absolute_time() < endspin) {
				y = y + 1.5 + x;
				x = sqrt(y);
			}
		}

		if (g_do_each_spin) {
			/* Each thread spins for a certain duration after waking up before blocking again. */

			uint64_t endspin = mach_absolute_time() + g_each_spin_duration_abs;
			while (mach_absolute_time() < endspin) {
				y = y + 1.5 + x;
				x = sqrt(y);
			}
		}

		uint32_t done_threads;
		done_threads = atomic_fetch_add_explicit(&g_done_threads, 1, memory_order_relaxed) + 1;

		debug_log("Thread %p new value is %d, iteration %d\n", pthread_self(), done_threads, i);

		if (g_drop_priority) {
			/* Drop priority to BG momentarily */
			/* setpriority(2) returns -1 and sets errno on failure, rather than returning an errno_t */
			int ret = setpriority(PRIO_DARWIN_THREAD, 0, PRIO_DARWIN_BG);
			if (ret == -1) err(EX_OSERR, "setpriority PRIO_DARWIN_BG");
		}

		if (g_do_all_spin) {
			/* Everyone spins until the last thread checks in. */

			while (atomic_load_explicit(&g_done_threads, memory_order_relaxed) < g_numthreads) {
				y = y + 1.5 + x;
				x = sqrt(y);
			}
		}

		if (g_drop_priority) {
			/* Restore normal priority */
			int ret = setpriority(PRIO_DARWIN_THREAD, 0, 0);
			if (ret == -1) err(EX_OSERR, "setpriority 0");
		}

		debug_log("Thread %p done spinning, iteration %d\n", pthread_self(), i);
	}

	if (my_id == 0) {
		/* Give the worker threads undisturbed time to finish before waiting on them */
		if (g_do_sleep)
			usleep(g_iteration_sleeptime_us);

		/* Wait for the worker threads to finish */
		for (uint32_t i = 0; i < g_numthreads - 1; i++) {
			kr = semaphore_wait(g_readysem);
			mach_assert_zero_t(my_id, kr);
		}

		/* Tell everyone and the main thread that the last iteration is done */
		debug_log("Leader thread done\n");

		kr = semaphore_signal_all(g_main_sem);
		mach_assert_zero_t(my_id, kr);
	} else {
		/* Hold up thread teardown so it doesn't affect the last iteration */
		kr = semaphore_wait_signal(g_main_sem, g_readysem);
		mach_assert_zero_t(my_id, kr);
	}

	return 0;
}

/*
 * Given an array of uint64_t values, compute average, max, min, and standard deviation
 */
static void
compute_stats(uint64_t *values, uint64_t count, float *averagep, uint64_t *maxp, uint64_t *minp, float *stddevp)
{
	uint32_t i;
	uint64_t _sum = 0;
	uint64_t _max = 0;
	uint64_t _min = UINT64_MAX;
	float _avg = 0;
	float _dev = 0;

	for (i = 0; i < count; i++) {
		_sum += values[i];
		_max = values[i] > _max ? values[i] : _max;
		_min = values[i] < _min ? values[i] : _min;
	}

	_avg = ((float)_sum) / ((float)count);

	_dev = 0;
	for (i = 0; i < count; i++) {
		_dev += powf((((float)values[i]) - _avg), 2);
	}

	_dev /= count;
	_dev = sqrtf(_dev);

	*averagep = _avg;
	*maxp = _max;
	*minp = _min;
	*stddevp = _dev;
}
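
/*
 * Note this is the population standard deviation (divide by count, not
 * count - 1). E.g. for values {1, 2, 3}: avg = 2, the squared deviations
 * sum to 2, and stddev = sqrtf(2.0 / 3) ~= 0.816.
 */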

int
main(int argc, char **argv)
{
	errno_t ret;
	kern_return_t kr;

	pthread_t *threads;
	uint64_t *worst_latencies_ns;
	uint64_t *worst_latencies_from_first_ns;
	uint64_t max, min;
	float avg, stddev;

	for (int i = 0; i < argc; i++)
		if (strcmp(argv[i], "--switched_apptype") == 0)
			g_seen_apptype = TRUE;

	if (!g_seen_apptype)
		selfexec_with_apptype(argc, argv);

	parse_args(argc, argv);

	srand((unsigned int)time(NULL));

	mach_timebase_info(&g_mti);

	size_t ncpu_size = sizeof(g_numcpus);
	ret = sysctlbyname("hw.ncpu", &g_numcpus, &ncpu_size, NULL, 0);
	if (ret) err(EX_OSERR, "Failed sysctlbyname(hw.ncpu)");

	if (g_do_each_spin)
		g_each_spin_duration_abs = nanos_to_abs(g_each_spin_duration_ns);

	/* Configure the long-spin thread to take up half of its computation */
	if (g_do_one_long_spin) {
		g_one_long_spin_length_ns = COMPUTATION_NANOS / 2;
		g_one_long_spin_length_abs = nanos_to_abs(g_one_long_spin_length_ns);
	}

	/* Estimate the amount of time the cleanup phase needs to back off */
	g_iteration_sleeptime_us = g_numthreads * 20;

	uint32_t threads_per_core = (g_numthreads / g_numcpus) + 1;
	if (g_do_each_spin)
		g_iteration_sleeptime_us += threads_per_core * (g_each_spin_duration_ns / NSEC_PER_USEC);
	if (g_do_one_long_spin)
		g_iteration_sleeptime_us += g_one_long_spin_length_ns / NSEC_PER_USEC;

	/* Arrays for threads and their wakeup times */
	threads = (pthread_t*) valloc(sizeof(pthread_t) * g_numthreads);
	assert(threads);

	size_t endtimes_size = sizeof(uint64_t) * g_numthreads;

	g_thread_endtimes_abs = (uint64_t*) valloc(endtimes_size);
	assert(g_thread_endtimes_abs);

	/* Ensure the allocation is pre-faulted */
	ret = memset_s(g_thread_endtimes_abs, endtimes_size, 0, endtimes_size);
	if (ret) errc(EX_OSERR, ret, "memset_s endtimes");

	size_t latencies_size = sizeof(uint64_t) * g_iterations;

	worst_latencies_ns = (uint64_t*) valloc(latencies_size);
	assert(worst_latencies_ns);

	/* Ensure the allocation is pre-faulted */
	ret = memset_s(worst_latencies_ns, latencies_size, 0, latencies_size);
	if (ret) errc(EX_OSERR, ret, "memset_s latencies");

	worst_latencies_from_first_ns = (uint64_t*) valloc(latencies_size);
	assert(worst_latencies_from_first_ns);

	/* Ensure the allocation is pre-faulted */
	ret = memset_s(worst_latencies_from_first_ns, latencies_size, 0, latencies_size);
	if (ret) errc(EX_OSERR, ret, "memset_s latencies_from_first");

	kr = semaphore_create(mach_task_self(), &g_main_sem, SYNC_POLICY_FIFO, 0);
	mach_assert_zero(kr);

	/* Either one big semaphore or one per thread */
	if (g_waketype == WAKE_CHAIN ||
	    g_waketype == WAKE_BROADCAST_PERTHREAD ||
	    g_waketype == WAKE_HOP) {

		g_semarr = valloc(sizeof(semaphore_t) * g_numthreads);
		assert(g_semarr);

		for (uint32_t i = 0; i < g_numthreads; i++) {
			kr = semaphore_create(mach_task_self(), &g_semarr[i], SYNC_POLICY_FIFO, 0);
			mach_assert_zero(kr);
		}

		g_leadersem = g_semarr[0];
	} else {
		kr = semaphore_create(mach_task_self(), &g_broadcastsem, SYNC_POLICY_FIFO, 0);
		mach_assert_zero(kr);
		kr = semaphore_create(mach_task_self(), &g_leadersem, SYNC_POLICY_FIFO, 0);
		mach_assert_zero(kr);
	}

	if (g_waketype == WAKE_HOP) {
		kr = semaphore_create(mach_task_self(), &g_donesem, SYNC_POLICY_FIFO, 0);
		mach_assert_zero(kr);
	}

	kr = semaphore_create(mach_task_self(), &g_readysem, SYNC_POLICY_FIFO, 0);
	mach_assert_zero(kr);

	atomic_store_explicit(&g_done_threads, 0, memory_order_relaxed);

	/* Create the threads */
	for (uint32_t i = 0; i < g_numthreads; i++) {
		ret = pthread_create(&threads[i], NULL, worker_thread, (void*)(uintptr_t)i);
		if (ret) errc(EX_OSERR, ret, "pthread_create %d", i);
	}

	ret = setpriority(PRIO_DARWIN_ROLE, 0, PRIO_DARWIN_ROLE_UI_FOCAL);
	if (ret == -1) err(EX_OSERR, "setpriority"); /* setpriority(2) sets errno rather than returning one */

	thread_setup(0);

	g_starttime_abs = mach_absolute_time();

	if (g_churn_pri)
		create_churn_threads();

	/* Let everyone get settled */
	kr = semaphore_wait(g_main_sem);
	mach_assert_zero(kr);

	/* Give the system a bit more time to settle */
	if (g_do_sleep)
		usleep(g_iteration_sleeptime_us);

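	/*
	 * Rendezvous protocol, pieced together from worker_thread(): each
	 * round, the workers park on their wake semaphores while signaling
	 * g_readysem; after collecting N-1 ready signals, the leader blocks
	 * on g_leadersem and atomically signals g_main_sem. The wait above
	 * and each semaphore_wait_signal(g_main_sem, g_leadersem) below
	 * therefore return only once every thread is parked and a fresh
	 * round can start.
	 */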
	/* Go! */
	for (uint32_t i = 0; i < g_iterations; i++) {
		uint32_t j;
		uint64_t worst_abs = 0, best_abs = UINT64_MAX;

		if (g_do_one_long_spin)
			g_one_long_spin_id = (uint32_t)rand() % g_numthreads;

		debug_log("%d Main thread reset\n", i);

		atomic_store_explicit(&g_done_threads, 0, memory_order_seq_cst);

		g_starttime_abs = mach_absolute_time();

		/* Fire them off and wait for worker threads to finish */
		kr = semaphore_wait_signal(g_main_sem, g_leadersem);
		mach_assert_zero(kr);

		debug_log("%d Main thread return\n", i);

		assert(atomic_load_explicit(&g_done_threads, memory_order_relaxed) == g_numthreads);

		/*
		 * We report the worst latencies relative to start time
		 * and relative to the lead worker thread.
		 */
		for (j = 0; j < g_numthreads; j++) {
			uint64_t latency_abs;

			latency_abs = g_thread_endtimes_abs[j] - g_starttime_abs;
			worst_abs = worst_abs < latency_abs ? latency_abs : worst_abs;
		}

		worst_latencies_ns[i] = abs_to_nanos(worst_abs);

		worst_abs = 0;
		for (j = 1; j < g_numthreads; j++) {
			uint64_t latency_abs;

			latency_abs = g_thread_endtimes_abs[j] - g_thread_endtimes_abs[0];
			worst_abs = worst_abs < latency_abs ? latency_abs : worst_abs;
			best_abs = best_abs > latency_abs ? latency_abs : best_abs;
		}

		worst_latencies_from_first_ns[i] = abs_to_nanos(worst_abs);

		/*
		 * In the event of a bad run, cut a trace point.
		 */
		if (worst_latencies_from_first_ns[i] > g_traceworthy_latency_ns) {
			/* Ariadne's ad-hoc test signpost */
			kdebug_trace(ARIADNEDBG_CODE(0, 0), worst_latencies_from_first_ns[i], g_traceworthy_latency_ns, 0, 0);

			if (g_verbose)
				printf("Worst on this round was %.2f us.\n", ((float)worst_latencies_from_first_ns[i]) / 1000.0);
		}

		/* Give the system a bit more time to settle */
		if (g_do_sleep)
			usleep(g_iteration_sleeptime_us);
	}

	/* Rejoin threads */
	for (uint32_t i = 0; i < g_numthreads; i++) {
		ret = pthread_join(threads[i], NULL);
		if (ret) errc(EX_OSERR, ret, "pthread_join %d", i);
	}

	if (g_churn_pri)
		join_churn_threads();

	compute_stats(worst_latencies_ns, g_iterations, &avg, &max, &min, &stddev);
	printf("Results (from a stop):\n");
	printf("Max:\t\t%.2f us\n", ((float)max) / 1000.0);
	printf("Min:\t\t%.2f us\n", ((float)min) / 1000.0);
	printf("Avg:\t\t%.2f us\n", avg / 1000.0);
	printf("Stddev:\t\t%.2f us\n", stddev / 1000.0);

	putchar('\n');

	compute_stats(worst_latencies_from_first_ns, g_iterations, &avg, &max, &min, &stddev);
	printf("Results (relative to first thread):\n");
	printf("Max:\t\t%.2f us\n", ((float)max) / 1000.0);
	printf("Min:\t\t%.2f us\n", ((float)min) / 1000.0);
	printf("Avg:\t\t%.2f us\n", avg / 1000.0);
	printf("Stddev:\t\t%.2f us\n", stddev / 1000.0);

#if 0
	for (uint32_t i = 0; i < g_iterations; i++) {
		printf("Iteration %d: %f us\n", i, worst_latencies_ns[i] / 1000.0);
	}
#endif

	free(threads);
	free(g_thread_endtimes_abs);
	free(worst_latencies_ns);
	free(worst_latencies_from_first_ns);

	return 0;
}

/*
 * WARNING: This is SPI specifically intended for use by launchd to start UI
 * apps. We use it here for a test tool only to opt into QoS using the same
 * policies. Do not use this outside xnu or libxpc/launchd.
 */
static void
selfexec_with_apptype(int argc, char *argv[])
{
	int ret;
	posix_spawnattr_t attr;
	extern char **environ;
	char *new_argv[argc + 1 + 1 /* NULL */];
	int i;
	char prog[PATH_MAX];
	uint32_t prog_size = PATH_MAX;

	ret = _NSGetExecutablePath(prog, &prog_size);
	if (ret) err(EX_OSERR, "_NSGetExecutablePath");

	for (i = 0; i < argc; i++) {
		new_argv[i] = argv[i];
	}

	new_argv[i] = "--switched_apptype";
	new_argv[i + 1] = NULL;

	ret = posix_spawnattr_init(&attr);
	if (ret) errc(EX_OSERR, ret, "posix_spawnattr_init");

	ret = posix_spawnattr_setflags(&attr, POSIX_SPAWN_SETEXEC);
	if (ret) errc(EX_OSERR, ret, "posix_spawnattr_setflags");

	ret = posix_spawnattr_setprocesstype_np(&attr, POSIX_SPAWN_PROC_TYPE_APP_DEFAULT);
	if (ret) errc(EX_OSERR, ret, "posix_spawnattr_setprocesstype_np");

	ret = posix_spawn(NULL, prog, NULL, &attr, new_argv, environ);
	if (ret) errc(EX_OSERR, ret, "posix_spawn");
}
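
/*
 * Because of POSIX_SPAWN_SETEXEC, the posix_spawn() above replaces the
 * current image in place, execve(2)-style, instead of creating a child;
 * on success it never returns. The tool simply restarts itself with
 * "--switched_apptype" appended, which makes main() skip the re-exec the
 * second time through.
 */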

/*
 * Admittedly not very attractive.
 */
static void __attribute__((noreturn))
usage(void)
{
	errx(EX_USAGE, "Usage: %s <threads> <chain | hop | broadcast-single-sem | broadcast-per-thread> "
	     "<realtime | timeshare | fixed> <iterations>\n\t\t"
	     "[--trace <traceworthy latency in ns>] "
	     "[--verbose] [--spin-one] [--spin-all] [--spin-time <nanos>] [--affinity]\n\t\t"
	     "[--no-sleep] [--drop-priority] [--churn-pri <pri>] [--churn-count <n>]",
	     getprogname());
}
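
/*
 * An illustrative invocation (binary name and numbers chosen arbitrarily,
 * not taken from this file):
 *
 *     zero-to-n 4 broadcast-single-sem realtime 1000 --spin-all
 *
 * wakes 4 threads per iteration through a single shared semaphore, runs
 * them under the realtime policy, repeats 1000 times, and has every thread
 * spin until the last one checks in.
 */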

static struct option* g_longopts;
static int option_index;

static uint32_t
read_dec_arg(void)
{
	char *cp;
	/* char* optarg is a magic global */

	uint32_t arg_val = (uint32_t)strtoull(optarg, &cp, 10);

	if (cp == optarg || *cp)
		errx(EX_USAGE, "arg --%s requires a decimal number, found \"%s\"",
		     g_longopts[option_index].name, optarg);

	return arg_val;
}

static void
parse_args(int argc, char *argv[])
{
	enum {
		OPT_GETOPT = 0,
		OPT_SPIN_TIME,
		OPT_TRACE,
		OPT_PRIORITY,
		OPT_CHURN_PRI,
		OPT_CHURN_COUNT,
	};

	static struct option longopts[] = {
		{ "spin-time", required_argument, NULL, OPT_SPIN_TIME },
		{ "trace", required_argument, NULL, OPT_TRACE },
		{ "priority", required_argument, NULL, OPT_PRIORITY },
		{ "churn-pri", required_argument, NULL, OPT_CHURN_PRI },
		{ "churn-count", required_argument, NULL, OPT_CHURN_COUNT },
		{ "switched_apptype", no_argument, (int*)&g_seen_apptype, TRUE },
		{ "spin-one", no_argument, (int*)&g_do_one_long_spin, TRUE },
		{ "spin-all", no_argument, (int*)&g_do_all_spin, TRUE },
		{ "affinity", no_argument, (int*)&g_do_affinity, TRUE },
		{ "no-sleep", no_argument, (int*)&g_do_sleep, FALSE },
		{ "drop-priority", no_argument, (int*)&g_drop_priority, TRUE },
		{ "verbose", no_argument, (int*)&g_verbose, TRUE },
		{ "help", no_argument, NULL, 'h' },
		{ NULL, 0, NULL, 0 }
	};
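
	/*
	 * For the boolean options above, getopt_long() itself stores TRUE or
	 * FALSE through the flag pointer (cast from boolean_t*, an int-sized
	 * type) and returns 0 -- hence the no-op OPT_GETOPT (0) case in the
	 * switch below.
	 */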

	g_longopts = longopts;
	int ch = 0;

	while ((ch = getopt_long(argc, argv, "h", longopts, &option_index)) != -1) {
		switch (ch) {
		case OPT_GETOPT:
			/* getopt_long set a variable */
			break;
		case OPT_SPIN_TIME:
			g_do_each_spin = TRUE;
			g_each_spin_duration_ns = read_dec_arg();
			break;
		case OPT_TRACE:
			g_traceworthy_latency_ns = read_dec_arg();
			break;
		case OPT_PRIORITY:
			g_priority = read_dec_arg();
			break;
		case OPT_CHURN_PRI:
			g_churn_pri = read_dec_arg();
			break;
		case OPT_CHURN_COUNT:
			g_churn_count = read_dec_arg();
			break;
		case '?':
		case 'h':
		default:
			usage();
			/* NORETURN */
		}
	}

	/*
	 * getopt_long reorders all the options to the beginning of the argv array.
	 * Jump past them to the non-option arguments.
	 */

	argc -= optind;
	argv += optind;

	if (argc > 4) {
		warnx("Too many non-option arguments passed");
		usage();
	}

	if (argc != 4) {
		warnx("Missing required <threads> <waketype> <policy> <iterations> arguments");
		usage();
	}

	char *cp;

	/* How many threads? */
	g_numthreads = (uint32_t)strtoull(argv[0], &cp, 10);

	if (cp == argv[0] || *cp)
		errx(EX_USAGE, "numthreads requires a decimal number, found \"%s\"", argv[0]);

	if (g_numthreads < 1)
		errx(EX_USAGE, "Must use at least one thread");

	/* What wakeup pattern? */
	g_waketype = parse_wakeup_pattern(argv[1]);

	/* Policy */
	g_policy = parse_thread_policy(argv[2]);

	/* Iterations */
	g_iterations = (uint32_t)strtoull(argv[3], &cp, 10);

	if (cp == argv[3] || *cp)
977 errx(EX_USAGE, "numthreads requires a decimal number, found \"%s\"", argv[3]);

	if (g_iterations < 1)
		errx(EX_USAGE, "Must have at least one iteration");

	if (g_numthreads == 1 && g_waketype == WAKE_CHAIN)
		errx(EX_USAGE, "chain mode requires more than one thread");

	if (g_numthreads == 1 && g_waketype == WAKE_HOP)
		errx(EX_USAGE, "hop mode requires more than one thread");
}