/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <unistd.h>
#include <stdio.h>
#include <math.h>
#include <sys/kdebug.h>
#include <stdlib.h>
#include <pthread.h>
#include <errno.h>
#include <err.h>
#include <string.h>
#include <time.h>       /* time(), used to seed srand() in main() */
#include <assert.h>
#include <sysexits.h>
#include <sys/sysctl.h>
#include <getopt.h>

#include <spawn.h>
#include <spawn_private.h>
#include <sys/spawn_internal.h>
#include <mach-o/dyld.h>

#include <mach/mach_time.h>
#include <mach/mach.h>
#include <mach/task.h>
#include <mach/semaphore.h>

#include <pthread/qos_private.h>

#include <sys/resource.h>

#include <stdatomic.h>

#include <os/tsd.h>

typedef enum wake_type { WAKE_BROADCAST_ONESEM, WAKE_BROADCAST_PERTHREAD, WAKE_CHAIN, WAKE_HOP } wake_type_t;
typedef enum my_policy_type { MY_POLICY_REALTIME, MY_POLICY_TIMESHARE, MY_POLICY_FIXEDPRI } my_policy_type_t;

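/*
 * Wakeup patterns, as implemented in worker_thread() below:
 * broadcast-single-sem wakes every waiter from one semaphore_signal_all(),
 * broadcast-per-thread has the leader signal each waiter's private
 * semaphore, chain has each thread wake its successor, and hop does the
 * same handoff via semaphore_wait_signal() so each waker blocks on a
 * shared "done" semaphore until the end of the chain is reached.
 */
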
#define mach_assert_zero(error)        do { if ((error) != 0) { fprintf(stderr, "[FAIL] error %d (%s) ", (error), mach_error_string(error)); assert(error == 0); } } while (0)
#define mach_assert_zero_t(tid, error) do { if ((error) != 0) { fprintf(stderr, "[FAIL] Thread %d error %d (%s) ", (tid), (error), mach_error_string(error)); assert(error == 0); } } while (0)
#define assert_zero_t(tid, error)      do { if ((error) != 0) { fprintf(stderr, "[FAIL] Thread %d error %d ", (tid), (error)); assert(error == 0); } } while (0)

#define CONSTRAINT_NANOS        (20000000ll)    /* 20 ms */
#define COMPUTATION_NANOS       (10000000ll)    /* 10 ms */
#define RT_CHURN_COMP_NANOS     ( 1000000ll)    /* 1 ms */
#define TRACEWORTHY_NANOS       (10000000ll)    /* 10 ms */
#define TRACEWORTHY_NANOS_TEST  ( 2000000ll)    /* 2 ms */

#if DEBUG
#define debug_log(args ...) printf(args)
#else
#define debug_log(args ...) do { } while(0)
#endif

/* Declarations */
static void* worker_thread(void *arg);
static void usage();
static int thread_setup(uint32_t my_id);
static my_policy_type_t parse_thread_policy(const char *str);
static void selfexec_with_apptype(int argc, char *argv[]);
static void parse_args(int argc, char *argv[]);

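/*
 * The 128-byte alignment below is presumably there to give each atomic its
 * own cache line, so threads hammering one counter don't false-share with
 * readers of the others.
 */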
static __attribute__((aligned(128))) _Atomic uint32_t g_done_threads;
static __attribute__((aligned(128))) _Atomic boolean_t g_churn_stop = FALSE;
static __attribute__((aligned(128))) _Atomic uint64_t g_churn_stopped_at = 0;

/* Global variables (general) */
static uint32_t g_numcpus;
static uint32_t g_nphysicalcpu;
static uint32_t g_nlogicalcpu;
static uint32_t g_numthreads;
static wake_type_t g_waketype;
static policy_t g_policy;
static uint32_t g_iterations;
static struct mach_timebase_info g_mti;
static semaphore_t g_main_sem;
static uint64_t *g_thread_endtimes_abs;
static boolean_t g_verbose = FALSE;
static boolean_t g_do_affinity = FALSE;
static uint64_t g_starttime_abs;
static uint32_t g_iteration_sleeptime_us = 0;
static uint32_t g_priority = 0;
static uint32_t g_churn_pri = 0;
static uint32_t g_churn_count = 0;
static uint32_t g_rt_churn_count = 0;

static pthread_t* g_churn_threads = NULL;
static pthread_t* g_rt_churn_threads = NULL;

/* Threshold for dropping a 'bad run' tracepoint */
static uint64_t g_traceworthy_latency_ns = TRACEWORTHY_NANOS;

/* Have we re-execed to set apptype? */
static boolean_t g_seen_apptype = FALSE;

/* usleep in between iterations */
static boolean_t g_do_sleep = TRUE;

/* Every thread spins until all threads have checked in */
static boolean_t g_do_all_spin = FALSE;

/* Every thread backgrounds temporarily before parking */
static boolean_t g_drop_priority = FALSE;

/* Test whether realtime threads are scheduled on separate CPUs */
static boolean_t g_test_rt = FALSE;

static boolean_t g_rt_churn = FALSE;

/* On SMT machines, test whether realtime threads are scheduled on the correct CPUs */
static boolean_t g_test_rt_smt = FALSE;

/* Test whether realtime threads are successfully avoiding CPU 0 on Intel */
static boolean_t g_test_rt_avoid0 = FALSE;

/* Print a histogram showing how many threads ran on each CPU */
static boolean_t g_histogram = FALSE;

/* One randomly chosen thread holds up the train for a certain duration. */
static boolean_t g_do_one_long_spin = FALSE;
static uint32_t g_one_long_spin_id = 0;
static uint64_t g_one_long_spin_length_abs = 0;
static uint64_t g_one_long_spin_length_ns = 0;

/* Each thread spins for a certain duration after waking up before blocking again. */
static boolean_t g_do_each_spin = FALSE;
static uint64_t g_each_spin_duration_abs = 0;
static uint64_t g_each_spin_duration_ns = 0;

/* Global variables (broadcast) */
static semaphore_t g_broadcastsem;
static semaphore_t g_leadersem;
static semaphore_t g_readysem;
static semaphore_t g_donesem;
static semaphore_t g_rt_churn_sem;
static semaphore_t g_rt_churn_start_sem;

/* Global variables (chain) */
static semaphore_t *g_semarr;

typedef struct {
	__attribute__((aligned(128))) uint32_t current;
	uint32_t accum;
} histogram_t;

static histogram_t *g_cpu_histogram;
static _Atomic uint64_t *g_cpu_map;

static uint64_t
abs_to_nanos(uint64_t abstime)
{
	return (uint64_t)(abstime * (((double)g_mti.numer) / ((double)g_mti.denom)));
}

static uint64_t
nanos_to_abs(uint64_t ns)
{
	return (uint64_t)(ns * (((double)g_mti.denom) / ((double)g_mti.numer)));
}

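/*
 * Worked example (assumed, machine-dependent timebase): with numer = 125
 * and denom = 3, one abstime tick is 125/3 ns, so nanos_to_abs(1000) == 24
 * ticks and abs_to_nanos(24) == 1000 ns. On machines with a 1/1 timebase
 * both conversions are identities.
 */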
inline static void
yield(void)
{
#if defined(__arm__) || defined(__arm64__)
	asm volatile ("yield");
#elif defined(__x86_64__) || defined(__i386__)
	asm volatile ("pause");
#else
#error Unrecognized architecture
#endif
}
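
/*
 * "yield" (arm) and "pause" (x86) are spin-loop hint instructions: they
 * briefly de-prioritize the executing hardware thread so a busy-wait
 * consumes less pipeline and power without actually blocking in the
 * scheduler.
 */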

static void *
churn_thread(__unused void *arg)
{
	uint64_t spin_count = 0;

	/*
	 * As a safety measure to avoid wedging, we will bail on the spin if
	 * it's been more than 1s after the most recent run start
	 */

	while (g_churn_stop == FALSE &&
	    mach_absolute_time() < (g_starttime_abs + NSEC_PER_SEC)) {
		spin_count++;
		yield();
	}

	/* This is totally racy, but only here to detect if anyone stops early */
	atomic_fetch_add_explicit(&g_churn_stopped_at, spin_count, memory_order_relaxed);

	return NULL;
}

static void
create_churn_threads()
{
	if (g_churn_count == 0) {
		g_churn_count = g_numcpus - 1;
	}

	errno_t err;

	struct sched_param param = { .sched_priority = (int)g_churn_pri };
	pthread_attr_t attr;

	/* Array for churn threads */
	g_churn_threads = (pthread_t*) valloc(sizeof(pthread_t) * g_churn_count);
	assert(g_churn_threads);

	if ((err = pthread_attr_init(&attr))) {
		errc(EX_OSERR, err, "pthread_attr_init");
	}

	if ((err = pthread_attr_setschedparam(&attr, &param))) {
		errc(EX_OSERR, err, "pthread_attr_setschedparam");
	}

	if ((err = pthread_attr_setschedpolicy(&attr, SCHED_RR))) {
		errc(EX_OSERR, err, "pthread_attr_setschedpolicy");
	}

	for (uint32_t i = 0; i < g_churn_count; i++) {
		pthread_t new_thread;

		if ((err = pthread_create(&new_thread, &attr, churn_thread, NULL))) {
			errc(EX_OSERR, err, "pthread_create");
		}
		g_churn_threads[i] = new_thread;
	}

	if ((err = pthread_attr_destroy(&attr))) {
		errc(EX_OSERR, err, "pthread_attr_destroy");
	}
}
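
/*
 * The churn threads exist to provide competing CPU load at --churn-pri
 * while the measured threads run; by default there is one spinner per CPU
 * minus one, leaving a CPU on which the main thread can always make
 * progress.
 */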

static void
join_churn_threads(void)
{
	if (atomic_load_explicit(&g_churn_stopped_at, memory_order_seq_cst) != 0) {
		printf("Warning: Some of the churn threads may have stopped early: %lld\n",
		    g_churn_stopped_at);
	}

	atomic_store_explicit(&g_churn_stop, TRUE, memory_order_seq_cst);

	/* Rejoin churn threads */
	for (uint32_t i = 0; i < g_churn_count; i++) {
		errno_t err = pthread_join(g_churn_threads[i], NULL);
		if (err) {
			errc(EX_OSERR, err, "pthread_join %d", i);
		}
	}
}

/*
 * Set policy
 */
static int
rt_churn_thread_setup(void)
{
	kern_return_t kr;
	thread_time_constraint_policy_data_t pol;

	/* Hard-coded realtime parameters (similar to what Digi uses) */
	pol.period = 100000;
	pol.constraint = (uint32_t) nanos_to_abs(CONSTRAINT_NANOS * 2);
	pol.computation = (uint32_t) nanos_to_abs(RT_CHURN_COMP_NANOS * 2);
	pol.preemptible = 0; /* Ignored by OS */

	kr = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
	    (thread_policy_t) &pol, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
	mach_assert_zero_t(0, kr);

	return 0;
}

static void *
rt_churn_thread(__unused void *arg)
{
	rt_churn_thread_setup();

	for (uint32_t i = 0; i < g_iterations; i++) {
		kern_return_t kr = semaphore_wait_signal(g_rt_churn_start_sem, g_rt_churn_sem);
		mach_assert_zero_t(0, kr);

		volatile double x = 0.0;
		volatile double y = 0.0;

		uint64_t endspin = mach_absolute_time() + nanos_to_abs(RT_CHURN_COMP_NANOS);
		while (mach_absolute_time() < endspin) {
			y = y + 1.5 + x;
			x = sqrt(y);
		}
	}

	kern_return_t kr = semaphore_signal(g_rt_churn_sem);
	mach_assert_zero_t(0, kr);

	return NULL;
}

static void
wait_for_rt_churn_threads(void)
{
	for (uint32_t i = 0; i < g_rt_churn_count; i++) {
		kern_return_t kr = semaphore_wait(g_rt_churn_sem);
		mach_assert_zero_t(0, kr);
	}
}

static void
start_rt_churn_threads(void)
{
	for (uint32_t i = 0; i < g_rt_churn_count; i++) {
		kern_return_t kr = semaphore_signal(g_rt_churn_start_sem);
		mach_assert_zero_t(0, kr);
	}
}
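
/*
 * The rt-churn handshake: at the top of each iteration a churn thread
 * atomically signals g_rt_churn_sem (checking in) and blocks on
 * g_rt_churn_start_sem. start_rt_churn_threads() releases one round of
 * spinners, and wait_for_rt_churn_threads() harvests one check-in per
 * thread.
 */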

static void
create_rt_churn_threads(void)
{
	if (g_rt_churn_count == 0) {
		/* Leave 1 CPU to ensure that the main thread can make progress */
		g_rt_churn_count = g_numcpus - 1;
	}

	errno_t err;

	struct sched_param param = { .sched_priority = (int)g_churn_pri };
	pthread_attr_t attr;

	/* Array for churn threads */
	g_rt_churn_threads = (pthread_t*) valloc(sizeof(pthread_t) * g_rt_churn_count);
	assert(g_rt_churn_threads);

	if ((err = pthread_attr_init(&attr))) {
		errc(EX_OSERR, err, "pthread_attr_init");
	}

	if ((err = pthread_attr_setschedparam(&attr, &param))) {
		errc(EX_OSERR, err, "pthread_attr_setschedparam");
	}

	if ((err = pthread_attr_setschedpolicy(&attr, SCHED_RR))) {
		errc(EX_OSERR, err, "pthread_attr_setschedpolicy");
	}

	for (uint32_t i = 0; i < g_rt_churn_count; i++) {
		pthread_t new_thread;

		if ((err = pthread_create(&new_thread, &attr, rt_churn_thread, NULL))) {
			errc(EX_OSERR, err, "pthread_create");
		}
		g_rt_churn_threads[i] = new_thread;
	}

	if ((err = pthread_attr_destroy(&attr))) {
		errc(EX_OSERR, err, "pthread_attr_destroy");
	}

	/* Wait until all threads have checked in */
	wait_for_rt_churn_threads();
}

static void
join_rt_churn_threads(void)
{
	/* Rejoin rt churn threads */
	for (uint32_t i = 0; i < g_rt_churn_count; i++) {
		errno_t err = pthread_join(g_rt_churn_threads[i], NULL);
		if (err) {
			errc(EX_OSERR, err, "pthread_join %d", i);
		}
	}
}

/*
 * Figure out what thread policy to use
 */
static my_policy_type_t
parse_thread_policy(const char *str)
{
	if (strcmp(str, "timeshare") == 0) {
		return MY_POLICY_TIMESHARE;
	} else if (strcmp(str, "realtime") == 0) {
		return MY_POLICY_REALTIME;
	} else if (strcmp(str, "fixed") == 0) {
		return MY_POLICY_FIXEDPRI;
	} else {
		errx(EX_USAGE, "Invalid thread policy \"%s\"", str);
	}
}

/*
 * Figure out what wakeup pattern to use
 */
static wake_type_t
parse_wakeup_pattern(const char *str)
{
	if (strcmp(str, "chain") == 0) {
		return WAKE_CHAIN;
	} else if (strcmp(str, "hop") == 0) {
		return WAKE_HOP;
	} else if (strcmp(str, "broadcast-single-sem") == 0) {
		return WAKE_BROADCAST_ONESEM;
	} else if (strcmp(str, "broadcast-per-thread") == 0) {
		return WAKE_BROADCAST_PERTHREAD;
	} else {
		errx(EX_USAGE, "Invalid wakeup pattern \"%s\"", str);
	}
}

/*
 * Set policy
 */
static int
thread_setup(uint32_t my_id)
{
	kern_return_t kr;
	errno_t ret;
	thread_time_constraint_policy_data_t pol;

	if (g_priority) {
		int policy = SCHED_OTHER;
		if (g_policy == MY_POLICY_FIXEDPRI) {
			policy = SCHED_RR;
		}

		struct sched_param param = {.sched_priority = (int)g_priority};
		if ((ret = pthread_setschedparam(pthread_self(), policy, &param))) {
			errc(EX_OSERR, ret, "pthread_setschedparam: %d", my_id);
		}
	}

	switch (g_policy) {
	case MY_POLICY_TIMESHARE:
		break;
	case MY_POLICY_REALTIME:
		/* Hard-coded realtime parameters (similar to what Digi uses) */
		pol.period = 100000;
		pol.constraint = (uint32_t) nanos_to_abs(CONSTRAINT_NANOS);
		pol.computation = (uint32_t) nanos_to_abs(COMPUTATION_NANOS);
		pol.preemptible = 0; /* Ignored by OS */

		kr = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
		    (thread_policy_t) &pol, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
		mach_assert_zero_t(my_id, kr);
		break;
	case MY_POLICY_FIXEDPRI:
		ret = pthread_set_fixedpriority_self();
		if (ret) {
			errc(EX_OSERR, ret, "pthread_set_fixedpriority_self");
		}
		break;
	default:
		errx(EX_USAGE, "invalid policy type %d", g_policy);
	}

	if (g_do_affinity) {
		thread_affinity_policy_data_t affinity;

		affinity.affinity_tag = my_id % 2;

		kr = thread_policy_set(mach_thread_self(), THREAD_AFFINITY_POLICY,
		    (thread_policy_t)&affinity, THREAD_AFFINITY_POLICY_COUNT);
		mach_assert_zero_t(my_id, kr);
	}

	return 0;
}
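
/*
 * Note: THREAD_TIME_CONSTRAINT_POLICY parameters are in mach absolute time
 * units, hence the nanos_to_abs() conversions above. The realtime case
 * asks for COMPUTATION_NANOS of CPU time out of every CONSTRAINT_NANOS
 * window (10 ms out of 20 ms).
 */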

/*
 * Wait for a wakeup, potentially wake up another of the "0-N" threads,
 * and notify the main thread when done.
 */
static void*
worker_thread(void *arg)
{
	uint32_t my_id = (uint32_t)(uintptr_t)arg;
	kern_return_t kr;

	volatile double x = 0.0;
	volatile double y = 0.0;

	/* Set policy and so forth */
	thread_setup(my_id);

	for (uint32_t i = 0; i < g_iterations; i++) {
		if (my_id == 0) {
			/*
			 * Leader thread either wakes everyone up or starts the chain going.
			 */

			/* Give the worker threads undisturbed time to finish before waiting on them */
			if (g_do_sleep) {
				usleep(g_iteration_sleeptime_us);
			}

			debug_log("%d Leader thread wait for ready\n", i);

			/*
			 * Wait for everyone else to declare ready
			 * Is there a better way to do this that won't interfere with the rest of the chain?
			 * TODO: Invent 'semaphore wait for N signals'
			 */

			for (uint32_t j = 0; j < g_numthreads - 1; j++) {
				kr = semaphore_wait(g_readysem);
				mach_assert_zero_t(my_id, kr);
			}

			debug_log("%d Leader thread wait\n", i);

			if (i > 0) {
				for (int cpuid = 0; cpuid < g_numcpus; cpuid++) {
					if (g_cpu_histogram[cpuid].current == 1) {
						atomic_fetch_or_explicit(&g_cpu_map[i - 1], (1UL << cpuid), memory_order_relaxed);
						g_cpu_histogram[cpuid].current = 0;
					}
				}
			}

			/* Signal main thread and wait for start of iteration */

			kr = semaphore_wait_signal(g_leadersem, g_main_sem);
			mach_assert_zero_t(my_id, kr);

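			/*
			 * The leader's wake time below is the baseline for the
			 * "relative to first thread" latencies reported by main().
			 */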
			g_thread_endtimes_abs[my_id] = mach_absolute_time();

			debug_log("%d Leader thread go\n", i);

			assert_zero_t(my_id, atomic_load_explicit(&g_done_threads, memory_order_relaxed));

			switch (g_waketype) {
			case WAKE_BROADCAST_ONESEM:
				kr = semaphore_signal_all(g_broadcastsem);
				mach_assert_zero_t(my_id, kr);
				break;
			case WAKE_BROADCAST_PERTHREAD:
				for (uint32_t j = 1; j < g_numthreads; j++) {
					kr = semaphore_signal(g_semarr[j]);
					mach_assert_zero_t(my_id, kr);
				}
				break;
			case WAKE_CHAIN:
				kr = semaphore_signal(g_semarr[my_id + 1]);
				mach_assert_zero_t(my_id, kr);
				break;
			case WAKE_HOP:
				kr = semaphore_wait_signal(g_donesem, g_semarr[my_id + 1]);
				mach_assert_zero_t(my_id, kr);
				break;
			}
		} else {
			/*
			 * Everyone else waits to be woken up,
			 * records when she wakes up, and possibly
			 * wakes up a friend.
			 */
			switch (g_waketype) {
			case WAKE_BROADCAST_ONESEM:
				kr = semaphore_wait_signal(g_broadcastsem, g_readysem);
				mach_assert_zero_t(my_id, kr);

				g_thread_endtimes_abs[my_id] = mach_absolute_time();
				break;

			case WAKE_BROADCAST_PERTHREAD:
				kr = semaphore_wait_signal(g_semarr[my_id], g_readysem);
				mach_assert_zero_t(my_id, kr);

				g_thread_endtimes_abs[my_id] = mach_absolute_time();
				break;

			case WAKE_CHAIN:
				kr = semaphore_wait_signal(g_semarr[my_id], g_readysem);
				mach_assert_zero_t(my_id, kr);

				/* Signal the next thread *after* recording wake time */

				g_thread_endtimes_abs[my_id] = mach_absolute_time();

				if (my_id < (g_numthreads - 1)) {
					kr = semaphore_signal(g_semarr[my_id + 1]);
					mach_assert_zero_t(my_id, kr);
				}

				break;

			case WAKE_HOP:
				kr = semaphore_wait_signal(g_semarr[my_id], g_readysem);
				mach_assert_zero_t(my_id, kr);

				/* Signal the next thread *after* recording wake time */

				g_thread_endtimes_abs[my_id] = mach_absolute_time();

				if (my_id < (g_numthreads - 1)) {
					kr = semaphore_wait_signal(g_donesem, g_semarr[my_id + 1]);
					mach_assert_zero_t(my_id, kr);
				} else {
					kr = semaphore_signal_all(g_donesem);
					mach_assert_zero_t(my_id, kr);
				}

				break;
			}
		}
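
		/*
		 * Record which CPU this thread woke on; the leader folds each
		 * round's bits into g_cpu_map at the start of the next iteration.
		 */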

		unsigned int cpuid = _os_cpu_number();
		assert(cpuid < g_numcpus);
		debug_log("Thread %p woke up on CPU %d for iteration %d.\n", pthread_self(), cpuid, i);
		g_cpu_histogram[cpuid].current = 1;
		g_cpu_histogram[cpuid].accum++;

		if (g_do_one_long_spin && g_one_long_spin_id == my_id) {
			/* One randomly chosen thread holds up the train for a while. */

			uint64_t endspin = g_starttime_abs + g_one_long_spin_length_abs;
			while (mach_absolute_time() < endspin) {
				y = y + 1.5 + x;
				x = sqrt(y);
			}
		}

		if (g_do_each_spin) {
			/* Each thread spins for a certain duration after waking up before blocking again. */

			uint64_t endspin = mach_absolute_time() + g_each_spin_duration_abs;
			while (mach_absolute_time() < endspin) {
				y = y + 1.5 + x;
				x = sqrt(y);
			}
		}

		uint32_t done_threads;
		done_threads = atomic_fetch_add_explicit(&g_done_threads, 1, memory_order_relaxed) + 1;

		debug_log("Thread %p new value is %d, iteration %d\n", pthread_self(), done_threads, i);

		if (g_drop_priority) {
			/* Drop priority to BG momentarily */
			errno_t ret = setpriority(PRIO_DARWIN_THREAD, 0, PRIO_DARWIN_BG);
			if (ret) {
				errc(EX_OSERR, ret, "setpriority PRIO_DARWIN_BG");
			}
		}

		if (g_do_all_spin) {
			/* Everyone spins until the last thread checks in. */

			while (atomic_load_explicit(&g_done_threads, memory_order_relaxed) < g_numthreads) {
				y = y + 1.5 + x;
				x = sqrt(y);
			}
		}

		if (g_drop_priority) {
			/* Restore normal priority */
			errno_t ret = setpriority(PRIO_DARWIN_THREAD, 0, 0);
			if (ret) {
				errc(EX_OSERR, ret, "setpriority 0");
			}
		}

		debug_log("Thread %p done spinning, iteration %d\n", pthread_self(), i);
	}

	if (my_id == 0) {
		/* Give the worker threads undisturbed time to finish before waiting on them */
		if (g_do_sleep) {
			usleep(g_iteration_sleeptime_us);
		}

		/* Wait for the worker threads to finish */
		for (uint32_t i = 0; i < g_numthreads - 1; i++) {
			kr = semaphore_wait(g_readysem);
			mach_assert_zero_t(my_id, kr);
		}

		/* Tell everyone and the main thread that the last iteration is done */
		debug_log("%d Leader thread done\n", g_iterations - 1);

		for (int cpuid = 0; cpuid < g_numcpus; cpuid++) {
			if (g_cpu_histogram[cpuid].current == 1) {
				atomic_fetch_or_explicit(&g_cpu_map[g_iterations - 1], (1UL << cpuid), memory_order_relaxed);
				g_cpu_histogram[cpuid].current = 0;
			}
		}

		kr = semaphore_signal_all(g_main_sem);
		mach_assert_zero_t(my_id, kr);
	} else {
		/* Hold up thread teardown so it doesn't affect the last iteration */
		kr = semaphore_wait_signal(g_main_sem, g_readysem);
		mach_assert_zero_t(my_id, kr);
	}

	return 0;
}

/*
 * Given an array of uint64_t values, compute average, max, min, and standard deviation
 */
static void
compute_stats(uint64_t *values, uint64_t count, float *averagep, uint64_t *maxp, uint64_t *minp, float *stddevp)
{
	uint32_t i;
	uint64_t _sum = 0;
	uint64_t _max = 0;
	uint64_t _min = UINT64_MAX;
	float _avg = 0;
	float _dev = 0;

	for (i = 0; i < count; i++) {
		_sum += values[i];
		_max = values[i] > _max ? values[i] : _max;
		_min = values[i] < _min ? values[i] : _min;
	}

	_avg = ((float)_sum) / ((float)count);

	_dev = 0;
	for (i = 0; i < count; i++) {
		_dev += powf((((float)values[i]) - _avg), 2);
	}

	_dev /= count;
	_dev = sqrtf(_dev);

	*averagep = _avg;
	*maxp = _max;
	*minp = _min;
	*stddevp = _dev;
}
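
/*
 * This is the population standard deviation, sqrt(sum((x - avg)^2) / n),
 * not the (n - 1) sample form.
 */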

int
main(int argc, char **argv)
{
	errno_t ret;
	kern_return_t kr;

	pthread_t *threads;
	uint64_t *worst_latencies_ns;
	uint64_t *worst_latencies_from_first_ns;
	uint64_t max, min;
	float avg, stddev;

	bool test_fail = false;

	for (int i = 0; i < argc; i++) {
		if (strcmp(argv[i], "--switched_apptype") == 0) {
			g_seen_apptype = TRUE;
		}
	}

	if (!g_seen_apptype) {
		selfexec_with_apptype(argc, argv);
	}

	parse_args(argc, argv);

	srand((unsigned int)time(NULL));

	mach_timebase_info(&g_mti);

	size_t ncpu_size = sizeof(g_numcpus);
	ret = sysctlbyname("hw.ncpu", &g_numcpus, &ncpu_size, NULL, 0);
	if (ret) {
		err(EX_OSERR, "Failed sysctlbyname(hw.ncpu)");
	}
	assert(g_numcpus <= 64);        /* g_cpu_map needs to be extended for > 64 cpus */

	size_t physicalcpu_size = sizeof(g_nphysicalcpu);
	ret = sysctlbyname("hw.physicalcpu", &g_nphysicalcpu, &physicalcpu_size, NULL, 0);
	if (ret) {
		err(EX_OSERR, "Failed sysctlbyname(hw.physicalcpu)");
	}

	size_t logicalcpu_size = sizeof(g_nlogicalcpu);
	ret = sysctlbyname("hw.logicalcpu", &g_nlogicalcpu, &logicalcpu_size, NULL, 0);
	if (ret) {
		err(EX_OSERR, "Failed sysctlbyname(hw.logicalcpu)");
	}

	if (g_test_rt) {
		if (g_numthreads == 0) {
			g_numthreads = g_numcpus;
		}
		g_policy = MY_POLICY_REALTIME;
		g_do_all_spin = TRUE;
		g_histogram = true;
		/* Don't change g_traceworthy_latency_ns if it's explicitly been set to something other than the default */
		if (g_traceworthy_latency_ns == TRACEWORTHY_NANOS) {
			g_traceworthy_latency_ns = TRACEWORTHY_NANOS_TEST;
		}
	} else if (g_test_rt_smt) {
		if (g_nlogicalcpu != 2 * g_nphysicalcpu) {
			/* Not SMT */
			printf("Attempt to run --test-rt-smt on a non-SMT device\n");
			exit(0);
		}

		if (g_numthreads == 0) {
			g_numthreads = g_nphysicalcpu;
		}
		g_policy = MY_POLICY_REALTIME;
		g_do_all_spin = TRUE;
		g_histogram = true;
	} else if (g_test_rt_avoid0) {
#if defined(__x86_64__) || defined(__i386__)
		if (g_numthreads == 0) {
			g_numthreads = g_nphysicalcpu - 1;
		}
		if (g_numthreads == 0) {
			printf("Attempt to run --test-rt-avoid0 on a uniprocessor\n");
			exit(0);
		}
		g_policy = MY_POLICY_REALTIME;
		g_do_all_spin = TRUE;
		g_histogram = true;
#else
		printf("Attempt to run --test-rt-avoid0 on a non-Intel device\n");
		exit(0);
#endif
	} else if (g_numthreads == 0) {
		g_numthreads = g_numcpus;
	}

	if (g_do_each_spin) {
		g_each_spin_duration_abs = nanos_to_abs(g_each_spin_duration_ns);
	}

	/* Configure the long-spin thread to take up half of its computation */
	if (g_do_one_long_spin) {
		g_one_long_spin_length_ns = COMPUTATION_NANOS / 2;
		g_one_long_spin_length_abs = nanos_to_abs(g_one_long_spin_length_ns);
	}

	/* Estimate the amount of time the cleanup phase needs to back off */
	g_iteration_sleeptime_us = g_numthreads * 20;

	uint32_t threads_per_core = (g_numthreads / g_numcpus) + 1;
	if (g_do_each_spin) {
		g_iteration_sleeptime_us += threads_per_core * (g_each_spin_duration_ns / NSEC_PER_USEC);
	}
	if (g_do_one_long_spin) {
		g_iteration_sleeptime_us += g_one_long_spin_length_ns / NSEC_PER_USEC;
	}

	/* Arrays for threads and their wakeup times */
	threads = (pthread_t*) valloc(sizeof(pthread_t) * g_numthreads);
	assert(threads);

	size_t endtimes_size = sizeof(uint64_t) * g_numthreads;

	g_thread_endtimes_abs = (uint64_t*) valloc(endtimes_size);
	assert(g_thread_endtimes_abs);

	/* Ensure the allocation is pre-faulted */
	ret = memset_s(g_thread_endtimes_abs, endtimes_size, 0, endtimes_size);
	if (ret) {
		errc(EX_OSERR, ret, "memset_s endtimes");
	}

	size_t latencies_size = sizeof(uint64_t) * g_iterations;

	worst_latencies_ns = (uint64_t*) valloc(latencies_size);
	assert(worst_latencies_ns);

	/* Ensure the allocation is pre-faulted */
	ret = memset_s(worst_latencies_ns, latencies_size, 0, latencies_size);
	if (ret) {
		errc(EX_OSERR, ret, "memset_s latencies");
	}

	worst_latencies_from_first_ns = (uint64_t*) valloc(latencies_size);
	assert(worst_latencies_from_first_ns);

	/* Ensure the allocation is pre-faulted */
	ret = memset_s(worst_latencies_from_first_ns, latencies_size, 0, latencies_size);
	if (ret) {
		errc(EX_OSERR, ret, "memset_s latencies_from_first");
	}

	size_t histogram_size = sizeof(histogram_t) * g_numcpus;
	g_cpu_histogram = (histogram_t *)valloc(histogram_size);
	assert(g_cpu_histogram);
	/* Ensure the allocation is pre-faulted */
	ret = memset_s(g_cpu_histogram, histogram_size, 0, histogram_size);
	if (ret) {
		errc(EX_OSERR, ret, "memset_s g_cpu_histogram");
	}

	size_t map_size = sizeof(uint64_t) * g_iterations;
	g_cpu_map = (_Atomic uint64_t *)valloc(map_size);
	assert(g_cpu_map);
	/* Ensure the allocation is pre-faulted */
	ret = memset_s(g_cpu_map, map_size, 0, map_size);
	if (ret) {
		errc(EX_OSERR, ret, "memset_s g_cpu_map");
	}
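
	/*
	 * The valloc() + memset_s() pattern above page-aligns and pre-faults
	 * every measurement buffer, so first-touch page faults don't land in
	 * the timed wakeup path.
	 */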

	kr = semaphore_create(mach_task_self(), &g_main_sem, SYNC_POLICY_FIFO, 0);
	mach_assert_zero(kr);

	/* Either one big semaphore or one per thread */
	if (g_waketype == WAKE_CHAIN ||
	    g_waketype == WAKE_BROADCAST_PERTHREAD ||
	    g_waketype == WAKE_HOP) {
		g_semarr = valloc(sizeof(semaphore_t) * g_numthreads);
		assert(g_semarr);

		for (uint32_t i = 0; i < g_numthreads; i++) {
			kr = semaphore_create(mach_task_self(), &g_semarr[i], SYNC_POLICY_FIFO, 0);
			mach_assert_zero(kr);
		}

		g_leadersem = g_semarr[0];
	} else {
		kr = semaphore_create(mach_task_self(), &g_broadcastsem, SYNC_POLICY_FIFO, 0);
		mach_assert_zero(kr);
		kr = semaphore_create(mach_task_self(), &g_leadersem, SYNC_POLICY_FIFO, 0);
		mach_assert_zero(kr);
	}

	if (g_waketype == WAKE_HOP) {
		kr = semaphore_create(mach_task_self(), &g_donesem, SYNC_POLICY_FIFO, 0);
		mach_assert_zero(kr);
	}

	kr = semaphore_create(mach_task_self(), &g_readysem, SYNC_POLICY_FIFO, 0);
	mach_assert_zero(kr);

	kr = semaphore_create(mach_task_self(), &g_rt_churn_sem, SYNC_POLICY_FIFO, 0);
	mach_assert_zero(kr);

	kr = semaphore_create(mach_task_self(), &g_rt_churn_start_sem, SYNC_POLICY_FIFO, 0);
	mach_assert_zero(kr);

	atomic_store_explicit(&g_done_threads, 0, memory_order_relaxed);

	/* Create the threads */
	for (uint32_t i = 0; i < g_numthreads; i++) {
		ret = pthread_create(&threads[i], NULL, worker_thread, (void*)(uintptr_t)i);
		if (ret) {
			errc(EX_OSERR, ret, "pthread_create %d", i);
		}
	}

	ret = setpriority(PRIO_DARWIN_ROLE, 0, PRIO_DARWIN_ROLE_UI_FOCAL);
	if (ret) {
		errc(EX_OSERR, ret, "setpriority");
	}

	thread_setup(0);

	g_starttime_abs = mach_absolute_time();

	if (g_churn_pri) {
		create_churn_threads();
	}
	if (g_rt_churn) {
		create_rt_churn_threads();
	}

	/* Let everyone get settled */
	kr = semaphore_wait(g_main_sem);
	mach_assert_zero(kr);

	/* Give the system a bit more time to settle */
	if (g_do_sleep) {
		usleep(g_iteration_sleeptime_us);
	}

	/* Go! */
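	/*
	 * Each round is a handshake: semaphore_wait_signal() atomically
	 * releases the leader (g_leadersem) and blocks here on g_main_sem
	 * until the leader has collected every worker's ready signal and
	 * declares the round complete.
	 */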
	for (uint32_t i = 0; i < g_iterations; i++) {
		uint32_t j;
		uint64_t worst_abs = 0, best_abs = UINT64_MAX;

		if (g_do_one_long_spin) {
			g_one_long_spin_id = (uint32_t)rand() % g_numthreads;
		}

		if (g_rt_churn) {
			start_rt_churn_threads();
			usleep(100);
		}

		debug_log("%d Main thread reset\n", i);

		atomic_store_explicit(&g_done_threads, 0, memory_order_seq_cst);

		g_starttime_abs = mach_absolute_time();

		/* Fire them off and wait for worker threads to finish */
		kr = semaphore_wait_signal(g_main_sem, g_leadersem);
		mach_assert_zero(kr);

		debug_log("%d Main thread return\n", i);

		assert(atomic_load_explicit(&g_done_threads, memory_order_relaxed) == g_numthreads);

		if (g_rt_churn) {
			wait_for_rt_churn_threads();
		}

		/*
		 * We report the worst latencies relative to start time
		 * and relative to the lead worker thread.
		 */
		for (j = 0; j < g_numthreads; j++) {
			uint64_t latency_abs;

			latency_abs = g_thread_endtimes_abs[j] - g_starttime_abs;
			worst_abs = worst_abs < latency_abs ? latency_abs : worst_abs;
		}

		worst_latencies_ns[i] = abs_to_nanos(worst_abs);

		worst_abs = 0;
		for (j = 1; j < g_numthreads; j++) {
			uint64_t latency_abs;

			latency_abs = g_thread_endtimes_abs[j] - g_thread_endtimes_abs[0];
			worst_abs = worst_abs < latency_abs ? latency_abs : worst_abs;
			best_abs = best_abs > latency_abs ? latency_abs : best_abs;
		}

		worst_latencies_from_first_ns[i] = abs_to_nanos(worst_abs);

		/*
		 * In the event of a bad run, cut a trace point.
		 */
		if (worst_latencies_from_first_ns[i] > g_traceworthy_latency_ns) {
			/* Ariadne's ad-hoc test signpost */
			kdebug_trace(ARIADNEDBG_CODE(0, 0), worst_latencies_from_first_ns[i], g_traceworthy_latency_ns, 0, 0);

			if (g_verbose) {
				printf("Worst on this round was %.2f us.\n", ((float)worst_latencies_from_first_ns[i]) / 1000.0);
			}
		}

		/* Give the system a bit more time to settle */
		if (g_do_sleep) {
			usleep(g_iteration_sleeptime_us);
		}
	}

	/* Rejoin threads */
	for (uint32_t i = 0; i < g_numthreads; i++) {
		ret = pthread_join(threads[i], NULL);
		if (ret) {
			errc(EX_OSERR, ret, "pthread_join %d", i);
		}
	}

	if (g_rt_churn) {
		join_rt_churn_threads();
	}

	if (g_churn_pri) {
		join_churn_threads();
	}

	compute_stats(worst_latencies_ns, g_iterations, &avg, &max, &min, &stddev);
	printf("Results (from a stop):\n");
	printf("Max:\t\t%.2f us\n", ((float)max) / 1000.0);
	printf("Min:\t\t%.2f us\n", ((float)min) / 1000.0);
	printf("Avg:\t\t%.2f us\n", avg / 1000.0);
	printf("Stddev:\t\t%.2f us\n", stddev / 1000.0);

	putchar('\n');

	compute_stats(worst_latencies_from_first_ns, g_iterations, &avg, &max, &min, &stddev);
	printf("Results (relative to first thread):\n");
	printf("Max:\t\t%.2f us\n", ((float)max) / 1000.0);
	printf("Min:\t\t%.2f us\n", ((float)min) / 1000.0);
	printf("Avg:\t\t%.2f us\n", avg / 1000.0);
	printf("Stddev:\t\t%.2f us\n", stddev / 1000.0);

#if 0
	for (uint32_t i = 0; i < g_iterations; i++) {
		printf("Iteration %d: %f us\n", i, worst_latencies_ns[i] / 1000.0);
	}
#endif

	if (g_histogram) {
		putchar('\n');

		for (uint32_t i = 0; i < g_numcpus; i++) {
			printf("%d\t%d\n", i, g_cpu_histogram[i].accum);
		}
	}

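	/*
	 * The masks below assume logical CPUs are numbered with SMT siblings
	 * adjacent: even bits (PRIMARY) are primary cores and odd bits
	 * (SECONDARY) their hyperthread twins, so
	 * (map & PRIMARY) & ((map & SECONDARY) >> 1) is nonzero when both
	 * siblings of one core ran a realtime thread in the same iteration.
	 */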
	if (g_test_rt || g_test_rt_smt || g_test_rt_avoid0) {
#define PRIMARY   0x5555555555555555ULL
#define SECONDARY 0xaaaaaaaaaaaaaaaaULL

		int fail_count = 0;

		for (uint32_t i = 0; i < g_iterations; i++) {
			bool secondary = false;
			bool fail = false;
			uint64_t map = g_cpu_map[i];
			if (g_test_rt_smt) {
				/* Test for one or more threads running on secondary cores unexpectedly (WARNING) */
				secondary = (map & SECONDARY);
				/* Test for threads running on both primary and secondary cpus of the same core (FAIL) */
				fail = ((map & PRIMARY) & ((map & SECONDARY) >> 1));
			} else if (g_test_rt) {
				fail = (__builtin_popcountll(map) != g_numthreads) && (worst_latencies_ns[i] > g_traceworthy_latency_ns);
			} else if (g_test_rt_avoid0) {
				fail = ((map & 0x1) == 0x1);
			}
			if (secondary || fail) {
				printf("Iteration %d: 0x%llx%s%s\n", i, map,
				    secondary ? " SECONDARY" : "",
				    fail ? " FAIL" : "");
			}
			test_fail |= fail;
			fail_count += fail;
		}

		if (test_fail && (g_iterations >= 100) && (fail_count <= g_iterations / 100)) {
			printf("99%% or better success rate\n");
			test_fail = 0;
		}
	}

	free(threads);
	free(g_thread_endtimes_abs);
	free(worst_latencies_ns);
	free(worst_latencies_from_first_ns);
	free(g_cpu_histogram);
	free(g_cpu_map);

	return test_fail;
}

/*
 * WARNING: This is SPI specifically intended for use by launchd to start UI
 * apps. We use it here for a test tool only to opt into QoS using the same
 * policies. Do not use this outside xnu or libxpc/launchd.
 */
static void
selfexec_with_apptype(int argc, char *argv[])
{
	int ret;
	posix_spawnattr_t attr;
	extern char **environ;
	char *new_argv[argc + 1 + 1 /* NULL */];
	int i;
	char prog[PATH_MAX];
	uint32_t prog_size = PATH_MAX;

	ret = _NSGetExecutablePath(prog, &prog_size);
	if (ret) {
		err(EX_OSERR, "_NSGetExecutablePath");
	}

	for (i = 0; i < argc; i++) {
		new_argv[i] = argv[i];
	}

	new_argv[i] = "--switched_apptype";
	new_argv[i + 1] = NULL;

	ret = posix_spawnattr_init(&attr);
	if (ret) {
		errc(EX_OSERR, ret, "posix_spawnattr_init");
	}

	ret = posix_spawnattr_setflags(&attr, POSIX_SPAWN_SETEXEC);
	if (ret) {
		errc(EX_OSERR, ret, "posix_spawnattr_setflags");
	}

	ret = posix_spawnattr_setprocesstype_np(&attr, POSIX_SPAWN_PROC_TYPE_APP_DEFAULT);
	if (ret) {
		errc(EX_OSERR, ret, "posix_spawnattr_setprocesstype_np");
	}

	ret = posix_spawn(NULL, prog, NULL, &attr, new_argv, environ);
	if (ret) {
		errc(EX_OSERR, ret, "posix_spawn");
	}
}
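
/*
 * POSIX_SPAWN_SETEXEC makes posix_spawn() replace the current process
 * image, execve()-style, rather than fork a child, so the test re-runs
 * itself with the APP_DEFAULT process type (and --switched_apptype set to
 * stop the recursion).
 */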

/*
 * Admittedly not very attractive.
 */
static void __attribute__((noreturn))
usage()
{
	errx(EX_USAGE, "Usage: %s <threads> <chain | hop | broadcast-single-sem | broadcast-per-thread> "
	    "<realtime | timeshare | fixed> <iterations>\n\t\t"
	    "[--trace <traceworthy latency in ns>] "
	    "[--verbose] [--spin-one] [--spin-all] [--spin-time <nanos>] [--affinity]\n\t\t"
	    "[--no-sleep] [--drop-priority] [--churn-pri <pri>] [--churn-count <n>]",
	    getprogname());
}
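
/*
 * Example invocation (illustrative parameters): wake 4 realtime threads
 * from a single semaphore for 100 iterations, with everyone spinning until
 * the last thread checks in:
 *
 *     zero-to-n 4 broadcast-single-sem realtime 100 --spin-all
 */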

static struct option* g_longopts;
static int option_index;

static uint32_t
read_dec_arg()
{
	char *cp;
	/* char* optarg is a magic global */

	uint32_t arg_val = (uint32_t)strtoull(optarg, &cp, 10);

	if (cp == optarg || *cp) {
		errx(EX_USAGE, "arg --%s requires a decimal number, found \"%s\"",
		    g_longopts[option_index].name, optarg);
	}

	return arg_val;
}

static void
parse_args(int argc, char *argv[])
{
	enum {
		OPT_GETOPT = 0,
		OPT_SPIN_TIME,
		OPT_TRACE,
		OPT_PRIORITY,
		OPT_CHURN_PRI,
		OPT_CHURN_COUNT,
		OPT_RT_CHURN_COUNT,
	};

	static struct option longopts[] = {
		/* BEGIN IGNORE CODESTYLE */
		{ "spin-time",          required_argument,      NULL,                           OPT_SPIN_TIME },
		{ "trace",              required_argument,      NULL,                           OPT_TRACE },
		{ "priority",           required_argument,      NULL,                           OPT_PRIORITY },
		{ "churn-pri",          required_argument,      NULL,                           OPT_CHURN_PRI },
		{ "churn-count",        required_argument,      NULL,                           OPT_CHURN_COUNT },
		{ "rt-churn-count",     required_argument,      NULL,                           OPT_RT_CHURN_COUNT },
		{ "switched_apptype",   no_argument,            (int*)&g_seen_apptype,          TRUE },
		{ "spin-one",           no_argument,            (int*)&g_do_one_long_spin,      TRUE },
		{ "spin-all",           no_argument,            (int*)&g_do_all_spin,           TRUE },
		{ "affinity",           no_argument,            (int*)&g_do_affinity,           TRUE },
		{ "no-sleep",           no_argument,            (int*)&g_do_sleep,              FALSE },
		{ "drop-priority",      no_argument,            (int*)&g_drop_priority,         TRUE },
		{ "test-rt",            no_argument,            (int*)&g_test_rt,               TRUE },
		{ "test-rt-smt",        no_argument,            (int*)&g_test_rt_smt,           TRUE },
		{ "test-rt-avoid0",     no_argument,            (int*)&g_test_rt_avoid0,        TRUE },
		{ "rt-churn",           no_argument,            (int*)&g_rt_churn,              TRUE },
		{ "histogram",          no_argument,            (int*)&g_histogram,             TRUE },
		{ "verbose",            no_argument,            (int*)&g_verbose,               TRUE },
		{ "help",               no_argument,            NULL,                           'h' },
		{ NULL,                 0,                      NULL,                           0 }
		/* END IGNORE CODESTYLE */
	};
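
	/*
	 * Entries with a non-NULL flag pointer make getopt_long() store
	 * TRUE/FALSE into the flag and return 0, which is why OPT_GETOPT == 0
	 * is a no-op case below. The (int*) casts rely on boolean_t being
	 * int-sized.
	 */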

	g_longopts = longopts;
	int ch = 0;

	while ((ch = getopt_long(argc, argv, "h", longopts, &option_index)) != -1) {
		switch (ch) {
		case OPT_GETOPT:
			/* getopt_long set a variable */
			break;
		case OPT_SPIN_TIME:
			g_do_each_spin = TRUE;
			g_each_spin_duration_ns = read_dec_arg();
			break;
		case OPT_TRACE:
			g_traceworthy_latency_ns = read_dec_arg();
			break;
		case OPT_PRIORITY:
			g_priority = read_dec_arg();
			break;
		case OPT_CHURN_PRI:
			g_churn_pri = read_dec_arg();
			break;
		case OPT_CHURN_COUNT:
			g_churn_count = read_dec_arg();
			break;
		case OPT_RT_CHURN_COUNT:
			g_rt_churn_count = read_dec_arg();
			break;
		case '?':
		case 'h':
		default:
			usage();
			/* NORETURN */
		}
	}

	/*
	 * getopt_long reorders all the options to the beginning of the argv array.
	 * Jump past them to the non-option arguments.
	 */

	argc -= optind;
	argv += optind;

	if (argc > 4) {
		warnx("Too many non-option arguments passed");
		usage();
	}

	if (argc != 4) {
		warnx("Missing required <threads> <waketype> <policy> <iterations> arguments");
		usage();
	}

	char *cp;

	/* How many threads? */
	g_numthreads = (uint32_t)strtoull(argv[0], &cp, 10);

	if (cp == argv[0] || *cp) {
		errx(EX_USAGE, "numthreads requires a decimal number, found \"%s\"", argv[0]);
	}

	/* What wakeup pattern? */
	g_waketype = parse_wakeup_pattern(argv[1]);

	/* Policy */
	g_policy = parse_thread_policy(argv[2]);

	/* Iterations */
	g_iterations = (uint32_t)strtoull(argv[3], &cp, 10);

	if (cp == argv[3] || *cp) {
		errx(EX_USAGE, "iterations requires a decimal number, found \"%s\"", argv[3]);
	}

	if (g_iterations < 1) {
		errx(EX_USAGE, "Must have at least one iteration");
	}

	if (g_numthreads == 1 && g_waketype == WAKE_CHAIN) {
		errx(EX_USAGE, "chain mode requires more than one thread");
	}

	if (g_numthreads == 1 && g_waketype == WAKE_HOP) {
		errx(EX_USAGE, "hop mode requires more than one thread");
	}
}