/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <assert.h>
#include <err.h>
#include <errno.h>
#include <getopt.h>
#include <limits.h>
#include <math.h>
#include <pthread.h>
#include <spawn.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sysexits.h>
#include <time.h>
#include <unistd.h>

#include <sys/types.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>

#include <spawn_private.h>
#include <sys/spawn_internal.h>
#include <mach-o/dyld.h>
#include <libkern/OSAtomic.h>
#include <mach/mach_time.h>
#include <mach/mach.h>
#include <mach/task.h>
#include <mach/semaphore.h>
#include <pthread/qos_private.h>
/*
 * How the leader (thread 0) wakes the remaining worker threads
 * each iteration.
 */
typedef enum wake_type {
	WAKE_BROADCAST_ONESEM,		/* semaphore_signal_all() on one shared semaphore */
	WAKE_BROADCAST_PERTHREAD,	/* leader signals each thread's private semaphore */
	WAKE_CHAIN,			/* each thread signals the next thread's semaphore */
	WAKE_HOP			/* chain variant that hops through a 'done' semaphore */
} wake_type_t;
/* Scheduling policy requested for the worker threads. */
typedef enum my_policy_type {
	MY_POLICY_REALTIME,	/* THREAD_TIME_CONSTRAINT_POLICY */
	MY_POLICY_TIMESHARE,	/* default timeshare scheduling */
	MY_POLICY_FIXEDPRI	/* fixed priority via pthread_set_fixedpriority_self() */
} my_policy_type_t;
/*
 * Fail-fast checks: report a nonzero (Mach) error code on stderr and then
 * abort via assert().  The *_t variants tag the message with the worker
 * thread id that observed the failure.
 */
#define mach_assert_zero(error) \
	do { \
		if ((error) != 0) { \
			fprintf(stderr, "[FAIL] error %d (%s) ", (error), mach_error_string(error)); \
			assert(error == 0); \
		} \
	} while (0)

#define mach_assert_zero_t(tid, error) \
	do { \
		if ((error) != 0) { \
			fprintf(stderr, "[FAIL] Thread %d error %d (%s) ", (tid), (error), mach_error_string(error)); \
			assert(error == 0); \
		} \
	} while (0)

#define assert_zero_t(tid, error) \
	do { \
		if ((error) != 0) { \
			fprintf(stderr, "[FAIL] Thread %d error %d ", (tid), (error)); \
			assert(error == 0); \
		} \
	} while (0)
/* Realtime policy parameters and the 'bad run' tracepoint threshold. */
#define CONSTRAINT_NANOS	(20000000ll)	/* 20 ms */
#define COMPUTATION_NANOS	(10000000ll)	/* 10 ms */
#define TRACEWORTHY_NANOS	(10000000ll)	/* 10 ms */

/*
 * Chatty per-iteration tracing.  Compiled to a no-op in non-DEBUG builds
 * so the printf cost cannot perturb the latency measurements.
 * (The #if/#else guard is required: without it debug_log is defined twice.)
 */
#if DEBUG
#define debug_log(args...) printf(args)
#else
#define debug_log(args...) do { } while(0)
#endif
74 static void* worker_thread(void *arg
);
76 static int thread_setup(uint32_t my_id
);
77 static my_policy_type_t
parse_thread_policy(const char *str
);
78 static void selfexec_with_apptype(int argc
, char *argv
[]);
79 static void parse_args(int argc
, char *argv
[]);
81 /* Global variables (general) */
82 static uint32_t g_numcpus
;
83 static uint32_t g_numthreads
;
84 static wake_type_t g_waketype
;
85 static policy_t g_policy
;
86 static uint32_t g_iterations
;
87 static struct mach_timebase_info g_mti
;
88 static semaphore_t g_main_sem
;
89 static uint64_t *g_thread_endtimes_abs
;
90 static volatile uint32_t g_done_threads
;
91 static boolean_t g_verbose
= FALSE
;
92 static boolean_t g_do_affinity
= FALSE
;
93 static uint64_t g_starttime_abs
;
94 static uint32_t g_iteration_sleeptime_us
= 0;
96 /* Threshold for dropping a 'bad run' tracepoint */
97 static uint64_t g_traceworthy_latency_ns
= TRACEWORTHY_NANOS
;
99 /* Have we re-execed to set apptype? */
100 static boolean_t g_seen_apptype
= FALSE
;
102 /* usleep in betweeen iterations */
103 static boolean_t g_do_sleep
= TRUE
;
105 /* Every thread spins until all threads have checked in */
106 static boolean_t g_do_all_spin
= FALSE
;
108 /* One randomly chosen thread holds up the train for a certain duration. */
109 static boolean_t g_do_one_long_spin
= FALSE
;
110 static uint32_t g_one_long_spin_id
= 0;
111 static uint64_t g_one_long_spin_length_abs
= 0;
112 static uint64_t g_one_long_spin_length_ns
= 0;
114 /* Each thread spins for a certain duration after waking up before blocking again. */
115 static boolean_t g_do_each_spin
= FALSE
;
116 static uint64_t g_each_spin_duration_abs
= 0;
117 static uint64_t g_each_spin_duration_ns
= 0;
119 /* Global variables (broadcast) */
120 static semaphore_t g_broadcastsem
;
121 static semaphore_t g_leadersem
;
122 static semaphore_t g_readysem
;
123 static semaphore_t g_donesem
;
125 /* Global variables (chain) */
126 static semaphore_t
*g_semarr
;
129 abs_to_nanos(uint64_t abstime
)
131 return (uint64_t)(abstime
* (((double)g_mti
.numer
) / ((double)g_mti
.denom
)));
135 nanos_to_abs(uint64_t ns
)
137 return (uint64_t)(ns
* (((double)g_mti
.denom
) / ((double)g_mti
.numer
)));
141 * Figure out what thread policy to use
143 static my_policy_type_t
144 parse_thread_policy(const char *str
)
146 if (strcmp(str
, "timeshare") == 0) {
147 return MY_POLICY_TIMESHARE
;
148 } else if (strcmp(str
, "realtime") == 0) {
149 return MY_POLICY_REALTIME
;
150 } else if (strcmp(str
, "fixed") == 0) {
151 return MY_POLICY_FIXEDPRI
;
153 errx(EX_USAGE
, "Invalid thread policy \"%s\"", str
);
158 * Figure out what wakeup pattern to use
161 parse_wakeup_pattern(const char *str
)
163 if (strcmp(str
, "chain") == 0) {
165 } else if (strcmp(str
, "hop") == 0) {
167 } else if (strcmp(str
, "broadcast-single-sem") == 0) {
168 return WAKE_BROADCAST_ONESEM
;
169 } else if (strcmp(str
, "broadcast-per-thread") == 0) {
170 return WAKE_BROADCAST_PERTHREAD
;
172 errx(EX_USAGE
, "Invalid wakeup pattern \"%s\"", str
);
180 thread_setup(uint32_t my_id
)
184 thread_time_constraint_policy_data_t pol
;
187 case MY_POLICY_TIMESHARE
:
189 case MY_POLICY_REALTIME
:
190 /* Hard-coded realtime parameters (similar to what Digi uses) */
192 pol
.constraint
= (uint32_t) nanos_to_abs(CONSTRAINT_NANOS
);
193 pol
.computation
= (uint32_t) nanos_to_abs(COMPUTATION_NANOS
);
194 pol
.preemptible
= 0; /* Ignored by OS */
196 kr
= thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY
,
197 (thread_policy_t
) &pol
, THREAD_TIME_CONSTRAINT_POLICY_COUNT
);
198 mach_assert_zero_t(my_id
, kr
);
200 case MY_POLICY_FIXEDPRI
:
201 ret
= pthread_set_fixedpriority_self();
202 if (ret
) errc(EX_OSERR
, ret
, "pthread_set_fixedpriority_self");
205 errx(EX_USAGE
, "invalid policy type %d", g_policy
);
209 thread_affinity_policy_data_t affinity
;
211 affinity
.affinity_tag
= my_id
% 2;
213 kr
= thread_policy_set(mach_thread_self(), THREAD_AFFINITY_POLICY
,
214 (thread_policy_t
)&affinity
, THREAD_AFFINITY_POLICY_COUNT
);
215 mach_assert_zero_t(my_id
, kr
);
222 * Wait for a wakeup, potentially wake up another of the "0-N" threads,
223 * and notify the main thread when done.
226 worker_thread(void *arg
)
228 uint32_t my_id
= (uint32_t)(uintptr_t)arg
;
231 volatile double x
= 0.0;
232 volatile double y
= 0.0;
234 /* Set policy and so forth */
237 for (uint32_t i
= 0; i
< g_iterations
; i
++) {
240 * Leader thread either wakes everyone up or starts the chain going.
243 /* Give the worker threads undisturbed time to finish before waiting on them */
245 usleep(g_iteration_sleeptime_us
);
247 debug_log("%d Leader thread wait for ready\n", i
);
250 * Wait for everyone else to declare ready
251 * Is there a better way to do this that won't interfere with the rest of the chain?
252 * TODO: Invent 'semaphore wait for N signals'
255 for (uint32_t j
= 0 ; j
< g_numthreads
- 1; j
++) {
256 kr
= semaphore_wait(g_readysem
);
257 mach_assert_zero_t(my_id
, kr
);
260 debug_log("%d Leader thread wait\n", i
);
262 /* Signal main thread and wait for start of iteration */
264 kr
= semaphore_wait_signal(g_leadersem
, g_main_sem
);
265 mach_assert_zero_t(my_id
, kr
);
267 g_thread_endtimes_abs
[my_id
] = mach_absolute_time();
269 debug_log("%d Leader thread go\n", i
);
271 assert_zero_t(my_id
, g_done_threads
);
273 switch (g_waketype
) {
274 case WAKE_BROADCAST_ONESEM
:
275 kr
= semaphore_signal_all(g_broadcastsem
);
276 mach_assert_zero_t(my_id
, kr
);
278 case WAKE_BROADCAST_PERTHREAD
:
279 for (uint32_t j
= 1; j
< g_numthreads
; j
++) {
280 kr
= semaphore_signal(g_semarr
[j
]);
281 mach_assert_zero_t(my_id
, kr
);
285 kr
= semaphore_signal(g_semarr
[my_id
+ 1]);
286 mach_assert_zero_t(my_id
, kr
);
289 kr
= semaphore_wait_signal(g_donesem
, g_semarr
[my_id
+ 1]);
290 mach_assert_zero_t(my_id
, kr
);
295 * Everyone else waits to be woken up,
296 * records when she wakes up, and possibly
300 case WAKE_BROADCAST_ONESEM
:
301 kr
= semaphore_wait_signal(g_broadcastsem
, g_readysem
);
302 mach_assert_zero_t(my_id
, kr
);
304 g_thread_endtimes_abs
[my_id
] = mach_absolute_time();
307 case WAKE_BROADCAST_PERTHREAD
:
308 kr
= semaphore_wait_signal(g_semarr
[my_id
], g_readysem
);
309 mach_assert_zero_t(my_id
, kr
);
311 g_thread_endtimes_abs
[my_id
] = mach_absolute_time();
315 kr
= semaphore_wait_signal(g_semarr
[my_id
], g_readysem
);
316 mach_assert_zero_t(my_id
, kr
);
318 /* Signal the next thread *after* recording wake time */
320 g_thread_endtimes_abs
[my_id
] = mach_absolute_time();
322 if (my_id
< (g_numthreads
- 1)) {
323 kr
= semaphore_signal(g_semarr
[my_id
+ 1]);
324 mach_assert_zero_t(my_id
, kr
);
330 kr
= semaphore_wait_signal(g_semarr
[my_id
], g_readysem
);
331 mach_assert_zero_t(my_id
, kr
);
333 /* Signal the next thread *after* recording wake time */
335 g_thread_endtimes_abs
[my_id
] = mach_absolute_time();
337 if (my_id
< (g_numthreads
- 1)) {
338 kr
= semaphore_wait_signal(g_donesem
, g_semarr
[my_id
+ 1]);
339 mach_assert_zero_t(my_id
, kr
);
341 kr
= semaphore_signal_all(g_donesem
);
342 mach_assert_zero_t(my_id
, kr
);
349 debug_log("Thread %p woke up for iteration %d.\n", pthread_self(), i
);
351 if (g_do_one_long_spin
&& g_one_long_spin_id
== my_id
) {
352 /* One randomly chosen thread holds up the train for a while. */
354 uint64_t endspin
= g_starttime_abs
+ g_one_long_spin_length_abs
;
355 while (mach_absolute_time() < endspin
) {
361 if (g_do_each_spin
) {
362 /* Each thread spins for a certain duration after waking up before blocking again. */
364 uint64_t endspin
= mach_absolute_time() + g_each_spin_duration_abs
;
365 while (mach_absolute_time() < endspin
) {
371 int32_t new = OSAtomicIncrement32((volatile int32_t *)&g_done_threads
);
374 debug_log("Thread %p new value is %d, iteration %d\n", pthread_self(), new, i
);
377 /* Everyone spins until the last thread checks in. */
379 while (g_done_threads
< g_numthreads
) {
385 debug_log("Thread %p done spinning, iteration %d\n", pthread_self(), i
);
389 /* Give the worker threads undisturbed time to finish before waiting on them */
391 usleep(g_iteration_sleeptime_us
);
393 /* Wait for the worker threads to finish */
394 for (uint32_t i
= 0 ; i
< g_numthreads
- 1; i
++) {
395 kr
= semaphore_wait(g_readysem
);
396 mach_assert_zero_t(my_id
, kr
);
399 /* Tell everyone and the main thread that the last iteration is done */
400 debug_log("%d Leader thread done\n", i
);
402 kr
= semaphore_signal_all(g_main_sem
);
403 mach_assert_zero_t(my_id
, kr
);
405 /* Hold up thread teardown so it doesn't affect the last iteration */
406 kr
= semaphore_wait_signal(g_main_sem
, g_readysem
);
407 mach_assert_zero_t(my_id
, kr
);
414 * Given an array of uint64_t values, compute average, max, min, and standard deviation
417 compute_stats(uint64_t *values
, uint64_t count
, float *averagep
, uint64_t *maxp
, uint64_t *minp
, float *stddevp
)
422 uint64_t _min
= UINT64_MAX
;
426 for (i
= 0; i
< count
; i
++) {
428 _max
= values
[i
] > _max
? values
[i
] : _max
;
429 _min
= values
[i
] < _min
? values
[i
] : _min
;
432 _avg
= ((float)_sum
) / ((float)count
);
435 for (i
= 0; i
< count
; i
++) {
436 _dev
+= powf((((float)values
[i
]) - _avg
), 2);
449 main(int argc
, char **argv
)
455 uint64_t *worst_latencies_ns
;
456 uint64_t *worst_latencies_from_first_ns
;
460 for (int i
= 0; i
< argc
; i
++)
461 if (strcmp(argv
[i
], "--switched_apptype") == 0)
462 g_seen_apptype
= TRUE
;
465 selfexec_with_apptype(argc
, argv
);
467 parse_args(argc
, argv
);
469 srand((unsigned int)time(NULL
));
471 mach_timebase_info(&g_mti
);
473 size_t ncpu_size
= sizeof(g_numcpus
);
474 ret
= sysctlbyname("hw.ncpu", &g_numcpus
, &ncpu_size
, NULL
, 0);
475 if (ret
) err(EX_OSERR
, "Failed sysctlbyname(hw.ncpu)");
478 g_each_spin_duration_abs
= nanos_to_abs(g_each_spin_duration_ns
);
480 /* Configure the long-spin thread to take up half of its computation */
481 if (g_do_one_long_spin
) {
482 g_one_long_spin_length_ns
= COMPUTATION_NANOS
/ 2;
483 g_one_long_spin_length_abs
= nanos_to_abs(g_one_long_spin_length_ns
);
486 /* Estimate the amount of time the cleanup phase needs to back off */
487 g_iteration_sleeptime_us
= g_numthreads
* 20;
489 uint32_t threads_per_core
= (g_numthreads
/ g_numcpus
) + 1;
491 g_iteration_sleeptime_us
+= threads_per_core
* (g_each_spin_duration_ns
/ NSEC_PER_USEC
);
492 if (g_do_one_long_spin
)
493 g_iteration_sleeptime_us
+= g_one_long_spin_length_ns
/ NSEC_PER_USEC
;
495 /* Arrays for threads and their wakeup times */
496 threads
= (pthread_t
*) valloc(sizeof(pthread_t
) * g_numthreads
);
499 size_t endtimes_size
= sizeof(uint64_t) * g_numthreads
;
501 g_thread_endtimes_abs
= (uint64_t*) valloc(endtimes_size
);
502 assert(g_thread_endtimes_abs
);
504 /* Ensure the allocation is pre-faulted */
505 ret
= memset_s(g_thread_endtimes_abs
, endtimes_size
, 0, endtimes_size
);
506 if (ret
) errc(EX_OSERR
, ret
, "memset_s endtimes");
508 size_t latencies_size
= sizeof(uint64_t) * g_iterations
;
510 worst_latencies_ns
= (uint64_t*) valloc(latencies_size
);
511 assert(worst_latencies_ns
);
513 /* Ensure the allocation is pre-faulted */
514 ret
= memset_s(worst_latencies_ns
, latencies_size
, 0, latencies_size
);
515 if (ret
) errc(EX_OSERR
, ret
, "memset_s latencies");
517 worst_latencies_from_first_ns
= (uint64_t*) valloc(latencies_size
);
518 assert(worst_latencies_from_first_ns
);
520 /* Ensure the allocation is pre-faulted */
521 ret
= memset_s(worst_latencies_from_first_ns
, latencies_size
, 0, latencies_size
);
522 if (ret
) errc(EX_OSERR
, ret
, "memset_s latencies_from_first");
524 kr
= semaphore_create(mach_task_self(), &g_main_sem
, SYNC_POLICY_FIFO
, 0);
525 mach_assert_zero(kr
);
527 /* Either one big semaphore or one per thread */
528 if (g_waketype
== WAKE_CHAIN
||
529 g_waketype
== WAKE_BROADCAST_PERTHREAD
||
530 g_waketype
== WAKE_HOP
) {
532 g_semarr
= valloc(sizeof(semaphore_t
) * g_numthreads
);
535 for (uint32_t i
= 0; i
< g_numthreads
; i
++) {
536 kr
= semaphore_create(mach_task_self(), &g_semarr
[i
], SYNC_POLICY_FIFO
, 0);
537 mach_assert_zero(kr
);
540 g_leadersem
= g_semarr
[0];
542 kr
= semaphore_create(mach_task_self(), &g_broadcastsem
, SYNC_POLICY_FIFO
, 0);
543 mach_assert_zero(kr
);
544 kr
= semaphore_create(mach_task_self(), &g_leadersem
, SYNC_POLICY_FIFO
, 0);
545 mach_assert_zero(kr
);
548 if (g_waketype
== WAKE_HOP
) {
549 kr
= semaphore_create(mach_task_self(), &g_donesem
, SYNC_POLICY_FIFO
, 0);
550 mach_assert_zero(kr
);
553 kr
= semaphore_create(mach_task_self(), &g_readysem
, SYNC_POLICY_FIFO
, 0);
554 mach_assert_zero(kr
);
556 /* Create the threads */
558 for (uint32_t i
= 0; i
< g_numthreads
; i
++) {
559 ret
= pthread_create(&threads
[i
], NULL
, worker_thread
, (void*)(uintptr_t)i
);
560 if (ret
) errc(EX_OSERR
, ret
, "pthread_create %d", i
);
563 ret
= setpriority(PRIO_DARWIN_ROLE
, 0, PRIO_DARWIN_ROLE_UI_FOCAL
);
564 if (ret
) errc(EX_OSERR
, ret
, "setpriority");
568 /* Let everyone get settled */
569 kr
= semaphore_wait(g_main_sem
);
570 mach_assert_zero(kr
);
572 /* Give the system a bit more time to settle */
574 usleep(g_iteration_sleeptime_us
);
577 for (uint32_t i
= 0; i
< g_iterations
; i
++) {
579 uint64_t worst_abs
= 0, best_abs
= UINT64_MAX
;
581 if (g_do_one_long_spin
)
582 g_one_long_spin_id
= (uint32_t)rand() % g_numthreads
;
584 debug_log("%d Main thread reset\n", i
);
589 g_starttime_abs
= mach_absolute_time();
591 /* Fire them off and wait for worker threads to finish */
592 kr
= semaphore_wait_signal(g_main_sem
, g_leadersem
);
593 mach_assert_zero(kr
);
595 debug_log("%d Main thread return\n", i
);
598 * We report the worst latencies relative to start time
599 * and relative to the lead worker thread.
601 for (j
= 0; j
< g_numthreads
; j
++) {
602 uint64_t latency_abs
;
604 latency_abs
= g_thread_endtimes_abs
[j
] - g_starttime_abs
;
605 worst_abs
= worst_abs
< latency_abs
? latency_abs
: worst_abs
;
608 worst_latencies_ns
[i
] = abs_to_nanos(worst_abs
);
611 for (j
= 1; j
< g_numthreads
; j
++) {
612 uint64_t latency_abs
;
614 latency_abs
= g_thread_endtimes_abs
[j
] - g_thread_endtimes_abs
[0];
615 worst_abs
= worst_abs
< latency_abs
? latency_abs
: worst_abs
;
616 best_abs
= best_abs
> latency_abs
? latency_abs
: best_abs
;
619 worst_latencies_from_first_ns
[i
] = abs_to_nanos(worst_abs
);
622 * In the event of a bad run, cut a trace point.
624 if (worst_latencies_from_first_ns
[i
] > g_traceworthy_latency_ns
) {
625 /* Ariadne's ad-hoc test signpost */
626 kdebug_trace(ARIADNEDBG_CODE(0, 0), worst_latencies_from_first_ns
[i
], g_traceworthy_latency_ns
, 0, 0);
629 printf("Worst on this round was %.2f us.\n", ((float)worst_latencies_from_first_ns
[i
]) / 1000.0);
632 /* Give the system a bit more time to settle */
634 usleep(g_iteration_sleeptime_us
);
638 for (uint32_t i
= 0; i
< g_numthreads
; i
++) {
639 ret
= pthread_join(threads
[i
], NULL
);
640 if (ret
) errc(EX_OSERR
, ret
, "pthread_join %d", i
);
643 compute_stats(worst_latencies_ns
, g_iterations
, &avg
, &max
, &min
, &stddev
);
644 printf("Results (from a stop):\n");
645 printf("Max:\t\t%.2f us\n", ((float)max
) / 1000.0);
646 printf("Min:\t\t%.2f us\n", ((float)min
) / 1000.0);
647 printf("Avg:\t\t%.2f us\n", avg
/ 1000.0);
648 printf("Stddev:\t\t%.2f us\n", stddev
/ 1000.0);
652 compute_stats(worst_latencies_from_first_ns
, g_iterations
, &avg
, &max
, &min
, &stddev
);
653 printf("Results (relative to first thread):\n");
654 printf("Max:\t\t%.2f us\n", ((float)max
) / 1000.0);
655 printf("Min:\t\t%.2f us\n", ((float)min
) / 1000.0);
656 printf("Avg:\t\t%.2f us\n", avg
/ 1000.0);
657 printf("Stddev:\t\t%.2f us\n", stddev
/ 1000.0);
660 for (uint32_t i
= 0; i
< g_iterations
; i
++) {
661 printf("Iteration %d: %f us\n", i
, worst_latencies_ns
[i
] / 1000.0);
666 free(g_thread_endtimes_abs
);
667 free(worst_latencies_ns
);
668 free(worst_latencies_from_first_ns
);
674 * WARNING: This is SPI specifically intended for use by launchd to start UI
675 * apps. We use it here for a test tool only to opt into QoS using the same
676 * policies. Do not use this outside xnu or libxpc/launchd.
679 selfexec_with_apptype(int argc
, char *argv
[])
682 posix_spawnattr_t attr
;
683 extern char **environ
;
684 char *new_argv
[argc
+ 1 + 1 /* NULL */];
687 uint32_t prog_size
= PATH_MAX
;
689 ret
= _NSGetExecutablePath(prog
, &prog_size
);
690 if (ret
) err(EX_OSERR
, "_NSGetExecutablePath");
692 for (i
=0; i
< argc
; i
++) {
693 new_argv
[i
] = argv
[i
];
696 new_argv
[i
] = "--switched_apptype";
697 new_argv
[i
+1] = NULL
;
699 ret
= posix_spawnattr_init(&attr
);
700 if (ret
) errc(EX_OSERR
, ret
, "posix_spawnattr_init");
702 ret
= posix_spawnattr_setflags(&attr
, POSIX_SPAWN_SETEXEC
);
703 if (ret
) errc(EX_OSERR
, ret
, "posix_spawnattr_setflags");
705 ret
= posix_spawnattr_setprocesstype_np(&attr
, POSIX_SPAWN_PROC_TYPE_APP_DEFAULT
);
706 if (ret
) errc(EX_OSERR
, ret
, "posix_spawnattr_setprocesstype_np");
708 ret
= posix_spawn(NULL
, prog
, NULL
, &attr
, new_argv
, environ
);
709 if (ret
) errc(EX_OSERR
, ret
, "posix_spawn");
/*
 * Admittedly not very attractive.
 * Prints the usage string and exits with EX_USAGE; never returns.
 */
static void __attribute__((noreturn))
print_usage()
{
	errx(EX_USAGE, "Usage: zn <threads> <chain | hop | broadcast-single-sem | broadcast-per-thread> "
	     "<realtime | timeshare | fixed> <iterations> [--trace <traceworthy latency in ns>] "
	     "[--spin-one] [--spin-all] [--spin-time <nanos>] [--affinity] [--no-sleep] [--verbose]");
}
724 parse_args(int argc
, char *argv
[])
726 int ch
, option_index
= 0;
729 static struct option longopts
[] = {
730 { "spin-time", required_argument
, NULL
, 2 },
731 { "trace", required_argument
, NULL
, 3 },
732 { "switched_apptype", no_argument
, (int*)&g_seen_apptype
, TRUE
},
733 { "spin-one", no_argument
, (int*)&g_do_one_long_spin
, TRUE
},
734 { "spin-all", no_argument
, (int*)&g_do_all_spin
, TRUE
},
735 { "affinity", no_argument
, (int*)&g_do_affinity
, TRUE
},
736 { "no-sleep", no_argument
, (int*)&g_do_sleep
, FALSE
},
737 { "verbose", no_argument
, (int*)&g_verbose
, TRUE
},
738 { "help", no_argument
, NULL
, 'h' },
742 while ((ch
= getopt_long(argc
, argv
, "h", longopts
, &option_index
)) != -1) {
745 /* getopt_long set a variable */
749 g_do_each_spin
= TRUE
;
750 g_each_spin_duration_ns
= strtoull(optarg
, &cp
, 10);
752 if (cp
== optarg
|| *cp
)
753 errx(EX_USAGE
, "arg --%s requires a decimal number, found \"%s\"",
754 longopts
[option_index
].name
, optarg
);
758 g_traceworthy_latency_ns
= strtoull(optarg
, &cp
, 10);
760 if (cp
== optarg
|| *cp
)
761 errx(EX_USAGE
, "arg --%s requires a decimal number, found \"%s\"",
762 longopts
[option_index
].name
, optarg
);
773 * getopt_long reorders all the options to the beginning of the argv array.
774 * Jump past them to the non-option arguments.
781 warnx("Too many non-option arguments passed");
786 warnx("Missing required <threads> <waketype> <policy> <iterations> arguments");
790 /* How many threads? */
791 g_numthreads
= (uint32_t)strtoull(argv
[0], &cp
, 10);
793 if (cp
== argv
[0] || *cp
)
794 errx(EX_USAGE
, "numthreads requires a decimal number, found \"%s\"", argv
[0]);
796 if (g_numthreads
< 1)
797 errx(EX_USAGE
, "Must use at least one thread");
799 /* What wakeup pattern? */
800 g_waketype
= parse_wakeup_pattern(argv
[1]);
803 g_policy
= parse_thread_policy(argv
[2]);
806 g_iterations
= (uint32_t)strtoull(argv
[3], &cp
, 10);
808 if (cp
== argv
[3] || *cp
)
809 errx(EX_USAGE
, "numthreads requires a decimal number, found \"%s\"", argv
[3]);
811 if (g_iterations
< 1)
812 errx(EX_USAGE
, "Must have at least one iteration");
814 if (g_numthreads
== 1 && g_waketype
== WAKE_CHAIN
)
815 errx(EX_USAGE
, "chain mode requires more than one thread");
817 if (g_numthreads
== 1 && g_waketype
== WAKE_HOP
)
818 errx(EX_USAGE
, "hop mode requires more than one thread");