/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <errno.h>
#include <err.h>
#include <assert.h>
#include <sysexits.h>
#include <getopt.h>
#include <limits.h>
#include <pthread.h>

#include <sys/kdebug.h>
#include <sys/sysctl.h>

#include <spawn.h>
#include <spawn_private.h>
#include <sys/spawn_internal.h>
#include <mach-o/dyld.h>

#include <mach/mach_time.h>
#include <mach/mach.h>
#include <mach/task.h>
#include <mach/semaphore.h>

#include <pthread/qos_private.h>

#include <sys/resource.h>

#include <stdatomic.h>

#include <os/tsd.h>
#include <os/lock.h>

#include <TargetConditionals.h>
typedef enum wake_type {
	WAKE_BROADCAST_ONESEM,
	WAKE_BROADCAST_PERTHREAD,
	WAKE_CHAIN,
	WAKE_HOP
} wake_type_t;

typedef enum my_policy_type {
	MY_POLICY_REALTIME,
	MY_POLICY_TIMESHARE,
	MY_POLICY_TIMESHARE_NO_SMT,
	MY_POLICY_FIXEDPRI
} my_policy_type_t;
#define mach_assert_zero(error)        do { if ((error) != 0) { fprintf(stderr, "[FAIL] error %d (%s) ", (error), mach_error_string(error)); assert(error == 0); } } while (0)
#define mach_assert_zero_t(tid, error) do { if ((error) != 0) { fprintf(stderr, "[FAIL] Thread %d error %d (%s) ", (tid), (error), mach_error_string(error)); assert(error == 0); } } while (0)
#define assert_zero_t(tid, error)      do { if ((error) != 0) { fprintf(stderr, "[FAIL] Thread %d error %d ", (tid), (error)); assert(error == 0); } } while (0)
#define CONSTRAINT_NANOS        (20000000ll)    /* 20 ms */
#define COMPUTATION_NANOS       (10000000ll)    /* 10 ms */
#define LL_CONSTRAINT_NANOS     ( 2000000ll)    /* 2 ms */
#define LL_COMPUTATION_NANOS    ( 1000000ll)    /* 1 ms */
#define RT_CHURN_COMP_NANOS     ( 1000000ll)    /* 1 ms */
#define TRACEWORTHY_NANOS       (10000000ll)    /* 10 ms */
#define TRACEWORTHY_NANOS_TEST  ( 2000000ll)    /* 2 ms */
#if DEBUG
#define debug_log(args ...) printf(args)
#else
#define debug_log(args ...) do { } while(0)
#endif
static void*            worker_thread(void *arg);
static int              thread_setup(uint32_t my_id);
static my_policy_type_t parse_thread_policy(const char *str);
static void             selfexec_with_apptype(int argc, char *argv[]);
static void             parse_args(int argc, char *argv[]);
static __attribute__((aligned(128))) _Atomic uint32_t   g_done_threads;
static __attribute__((aligned(128))) _Atomic boolean_t  g_churn_stop = FALSE;
static __attribute__((aligned(128))) _Atomic uint64_t   g_churn_stopped_at = 0;
/* Global variables (general) */
static uint32_t g_numcpus;
static uint32_t g_nphysicalcpu;
static uint32_t g_nlogicalcpu;
static uint32_t g_numthreads;
static wake_type_t g_waketype;
static policy_t g_policy;
static uint32_t g_iterations;
static struct mach_timebase_info g_mti;
static semaphore_t g_main_sem;
static uint64_t *g_thread_endtimes_abs;
static boolean_t g_verbose = FALSE;
static boolean_t g_do_affinity = FALSE;
static uint64_t g_starttime_abs;
static uint32_t g_iteration_sleeptime_us = 0;
static uint32_t g_priority = 0;
static uint32_t g_churn_pri = 0;
static uint32_t g_churn_count = 0;
static uint32_t g_rt_churn_count = 0;

static pthread_t* g_churn_threads = NULL;
static pthread_t* g_rt_churn_threads = NULL;

/* should we skip test if run on non-intel */
static boolean_t g_run_on_intel_only = FALSE;

/* Threshold for dropping a 'bad run' tracepoint */
static uint64_t g_traceworthy_latency_ns = TRACEWORTHY_NANOS;

/* Have we re-execed to set apptype? */
static boolean_t g_seen_apptype = FALSE;
/* usleep in between iterations */
static boolean_t g_do_sleep = TRUE;
/* Every thread spins until all threads have checked in */
static boolean_t g_do_all_spin = FALSE;

/* Every thread backgrounds temporarily before parking */
static boolean_t g_drop_priority = FALSE;

/* Use low-latency (sub 4ms deadline) realtime threads */
static boolean_t g_rt_ll = FALSE;

/* Test whether realtime threads are scheduled on the separate CPUs */
static boolean_t g_test_rt = FALSE;

static boolean_t g_rt_churn = FALSE;

/* On SMT machines, test whether realtime threads are scheduled on the correct CPUs */
static boolean_t g_test_rt_smt = FALSE;

/* Test whether realtime threads are successfully avoiding CPU 0 on Intel */
static boolean_t g_test_rt_avoid0 = FALSE;
/* Print a histogram showing how many threads ran on each CPU */
static boolean_t g_histogram = FALSE;
/* One randomly chosen thread holds up the train for a certain duration. */
static boolean_t g_do_one_long_spin = FALSE;
static uint32_t g_one_long_spin_id = 0;
static uint64_t g_one_long_spin_length_abs = 0;
static uint64_t g_one_long_spin_length_ns = 0;

/* Each thread spins for a certain duration after waking up before blocking again. */
static boolean_t g_do_each_spin = FALSE;
static uint64_t g_each_spin_duration_abs = 0;
static uint64_t g_each_spin_duration_ns = 0;

/* Global variables (broadcast) */
static semaphore_t g_broadcastsem;
static semaphore_t g_leadersem;
static semaphore_t g_readysem;
static semaphore_t g_donesem;
static semaphore_t g_rt_churn_sem;
static semaphore_t g_rt_churn_start_sem;

/* Global variables (chain) */
static semaphore_t *g_semarr;
typedef struct {
	__attribute__((aligned(128))) uint32_t current;
	uint32_t accum;
} histogram_t;

static histogram_t *g_cpu_histogram;
static _Atomic uint64_t *g_cpu_map;
static uint64_t
abs_to_nanos(uint64_t abstime)
{
	return (uint64_t)(abstime * (((double)g_mti.numer) / ((double)g_mti.denom)));
}

static uint64_t
nanos_to_abs(uint64_t ns)
{
	return (uint64_t)(ns * (((double)g_mti.denom) / ((double)g_mti.numer)));
}
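
/*
 * Note on the conversions above: mach_absolute_time() counts in ticks of
 * (numer/denom) nanoseconds, as reported by mach_timebase_info(). For
 * example, a timebase of numer=1, denom=1 (typical on Intel Macs) makes
 * abs time equal to nanoseconds, while numer=125, denom=3 (seen on some
 * Apple Silicon machines) means one tick is 125/3 ns, so abs_to_nanos(24)
 * would return 1000. The exact ratio is hardware-dependent; these figures
 * are only illustrative.
 */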
static void
yield(void)
{
#if defined(__arm__) || defined(__arm64__)
	asm volatile ("yield");
#elif defined(__x86_64__) || defined(__i386__)
	asm volatile ("pause");
#else
#error Unrecognized architecture
#endif
}
static void *
churn_thread(__unused void *arg)
{
	uint64_t spin_count = 0;

	/*
	 * As a safety measure to avoid wedging, we will bail on the spin if
	 * it's been more than 1s after the most recent run start
	 */
	while (g_churn_stop == FALSE &&
	    mach_absolute_time() < (g_starttime_abs + NSEC_PER_SEC)) {
		spin_count++;
		yield();
	}

	/* This is totally racy, but only here to detect if anyone stops early */
	atomic_fetch_add_explicit(&g_churn_stopped_at, spin_count, memory_order_relaxed);

	return NULL;
}
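
/*
 * The churn threads created below are plain SCHED_RR pthreads at
 * g_churn_pri whose only job is to keep CPUs busy while the measured
 * wakeups happen, so the worker threads have to compete for cores. The
 * count defaults to g_numcpus when --test-rt-smt is set, otherwise to
 * g_numcpus - 1 (leaving one CPU for the main thread).
 */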
static void
create_churn_threads()
{
	if (g_churn_count == 0) {
		g_churn_count = g_test_rt_smt ? g_numcpus : g_numcpus - 1;
	}

	errno_t err;

	struct sched_param param = { .sched_priority = (int)g_churn_pri };
	pthread_attr_t attr;

	/* Array for churn threads */
	g_churn_threads = (pthread_t*) valloc(sizeof(pthread_t) * g_churn_count);
	assert(g_churn_threads);

	if ((err = pthread_attr_init(&attr))) {
		errc(EX_OSERR, err, "pthread_attr_init");
	}

	if ((err = pthread_attr_setschedparam(&attr, &param))) {
		errc(EX_OSERR, err, "pthread_attr_setschedparam");
	}

	if ((err = pthread_attr_setschedpolicy(&attr, SCHED_RR))) {
		errc(EX_OSERR, err, "pthread_attr_setschedpolicy");
	}

	for (uint32_t i = 0; i < g_churn_count; i++) {
		pthread_t new_thread;

		if ((err = pthread_create(&new_thread, &attr, churn_thread, NULL))) {
			errc(EX_OSERR, err, "pthread_create");
		}
		g_churn_threads[i] = new_thread;
	}

	if ((err = pthread_attr_destroy(&attr))) {
		errc(EX_OSERR, err, "pthread_attr_destroy");
	}
}
static void
join_churn_threads(void)
{
	if (atomic_load_explicit(&g_churn_stopped_at, memory_order_seq_cst) != 0) {
		printf("Warning: Some of the churn threads may have stopped early: %lld\n",
		    g_churn_stopped_at);
	}

	atomic_store_explicit(&g_churn_stop, TRUE, memory_order_seq_cst);

	/* Rejoin churn threads */
	for (uint32_t i = 0; i < g_churn_count; i++) {
		errno_t err = pthread_join(g_churn_threads[i], NULL);
		if (err) {
			errc(EX_OSERR, err, "pthread_join %d", i);
		}
	}
}
static void
rt_churn_thread_setup(void)
{
	kern_return_t kr;
	thread_time_constraint_policy_data_t pol;

	/* Hard-coded realtime parameters (similar to what Digi uses) */
	pol.period      = 100000;
	pol.constraint  = (uint32_t) nanos_to_abs(CONSTRAINT_NANOS * 2);
	pol.computation = (uint32_t) nanos_to_abs(RT_CHURN_COMP_NANOS * 2);
	pol.preemptible = 0; /* Ignored by OS */

	kr = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
	    (thread_policy_t) &pol, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
	mach_assert_zero_t(0, kr);
}
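
/*
 * For THREAD_TIME_CONSTRAINT_POLICY the period, computation and constraint
 * fields are expressed in mach absolute time units, which is why the
 * nanosecond constants are run through nanos_to_abs() first. Roughly:
 * every period ticks the thread wants computation ticks of CPU, and must
 * receive them within constraint ticks of becoming runnable. The
 * preemptible field is ignored by the current scheduler, as the inline
 * comment notes.
 */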
static void *
rt_churn_thread(__unused void *arg)
{
	rt_churn_thread_setup();

	for (uint32_t i = 0; i < g_iterations; i++) {
		kern_return_t kr = semaphore_wait_signal(g_rt_churn_start_sem, g_rt_churn_sem);
		mach_assert_zero_t(0, kr);

		volatile double x = 0.0;
		volatile double y = 0.0;

		uint64_t endspin = mach_absolute_time() + nanos_to_abs(RT_CHURN_COMP_NANOS);
		while (mach_absolute_time() < endspin) {
			y = y + 1.5 + x;
			x = sqrt(y);
		}
	}

	kern_return_t kr = semaphore_signal(g_rt_churn_sem);
	mach_assert_zero_t(0, kr);

	return NULL;
}
static void
wait_for_rt_churn_threads(void)
{
	for (uint32_t i = 0; i < g_rt_churn_count; i++) {
		kern_return_t kr = semaphore_wait(g_rt_churn_sem);
		mach_assert_zero_t(0, kr);
	}
}
static void
start_rt_churn_threads(void)
{
	for (uint32_t i = 0; i < g_rt_churn_count; i++) {
		kern_return_t kr = semaphore_signal(g_rt_churn_start_sem);
		mach_assert_zero_t(0, kr);
	}
}
static void
create_rt_churn_threads(void)
{
	if (g_rt_churn_count == 0) {
		/* Leave 1 CPU to ensure that the main thread can make progress */
		g_rt_churn_count = g_numcpus - 1;
	}

	errno_t err;

	struct sched_param param = { .sched_priority = (int)g_churn_pri };
	pthread_attr_t attr;

	/* Array for churn threads */
	g_rt_churn_threads = (pthread_t*) valloc(sizeof(pthread_t) * g_rt_churn_count);
	assert(g_rt_churn_threads);

	if ((err = pthread_attr_init(&attr))) {
		errc(EX_OSERR, err, "pthread_attr_init");
	}

	if ((err = pthread_attr_setschedparam(&attr, &param))) {
		errc(EX_OSERR, err, "pthread_attr_setschedparam");
	}

	if ((err = pthread_attr_setschedpolicy(&attr, SCHED_RR))) {
		errc(EX_OSERR, err, "pthread_attr_setschedpolicy");
	}

	for (uint32_t i = 0; i < g_rt_churn_count; i++) {
		pthread_t new_thread;

		if ((err = pthread_create(&new_thread, &attr, rt_churn_thread, NULL))) {
			errc(EX_OSERR, err, "pthread_create");
		}
		g_rt_churn_threads[i] = new_thread;
	}

	if ((err = pthread_attr_destroy(&attr))) {
		errc(EX_OSERR, err, "pthread_attr_destroy");
	}

	/* Wait until all threads have checked in */
	wait_for_rt_churn_threads();
}
static void
join_rt_churn_threads(void)
{
	/* Rejoin rt churn threads */
	for (uint32_t i = 0; i < g_rt_churn_count; i++) {
		errno_t err = pthread_join(g_rt_churn_threads[i], NULL);
		if (err) {
			errc(EX_OSERR, err, "pthread_join %d", i);
		}
	}
}
/*
 * Figure out what thread policy to use
 */
static my_policy_type_t
parse_thread_policy(const char *str)
{
	if (strcmp(str, "timeshare") == 0) {
		return MY_POLICY_TIMESHARE;
	} else if (strcmp(str, "timeshare_no_smt") == 0) {
		return MY_POLICY_TIMESHARE_NO_SMT;
	} else if (strcmp(str, "realtime") == 0) {
		return MY_POLICY_REALTIME;
	} else if (strcmp(str, "fixed") == 0) {
		return MY_POLICY_FIXEDPRI;
	} else {
		errx(EX_USAGE, "Invalid thread policy \"%s\"", str);
	}
}
/*
 * Figure out what wakeup pattern to use
 */
static wake_type_t
parse_wakeup_pattern(const char *str)
{
	if (strcmp(str, "chain") == 0) {
		return WAKE_CHAIN;
	} else if (strcmp(str, "hop") == 0) {
		return WAKE_HOP;
	} else if (strcmp(str, "broadcast-single-sem") == 0) {
		return WAKE_BROADCAST_ONESEM;
	} else if (strcmp(str, "broadcast-per-thread") == 0) {
		return WAKE_BROADCAST_PERTHREAD;
	} else {
		errx(EX_USAGE, "Invalid wakeup pattern \"%s\"", str);
	}
}
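
/*
 * The four wakeup patterns map onto worker_thread() as follows:
 *   broadcast-single-sem: the leader does one semaphore_signal_all() on a
 *       shared semaphore that every waiter is blocked on.
 *   broadcast-per-thread: the leader signals each waiter's private
 *       semaphore in a loop.
 *   chain: the leader wakes thread 1; each thread records its wake time
 *       and then signals the next thread's semaphore.
 *   hop: like chain, but each thread hands off while atomically blocking
 *       on g_donesem, and the last thread releases everyone with
 *       semaphore_signal_all(g_donesem).
 */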
/*
 * Set policy
 */
static int
thread_setup(uint32_t my_id)
{
	kern_return_t kr;
	errno_t ret;
	thread_time_constraint_policy_data_t pol;

	if (g_priority) {
		int policy = SCHED_OTHER;
		if (g_policy == MY_POLICY_FIXEDPRI) {
			policy = SCHED_RR;
		}

		struct sched_param param = {.sched_priority = (int)g_priority};
		if ((ret = pthread_setschedparam(pthread_self(), policy, &param))) {
			errc(EX_OSERR, ret, "pthread_setschedparam: %d", my_id);
		}
	}

	switch (g_policy) {
	case MY_POLICY_TIMESHARE:
		break;
	case MY_POLICY_TIMESHARE_NO_SMT:
		proc_setthread_no_smt();
		break;
	case MY_POLICY_REALTIME:
		/* Hard-coded realtime parameters (similar to what Digi uses) */
		pol.period = 100000;
		if (g_rt_ll) {
			pol.constraint  = (uint32_t) nanos_to_abs(LL_CONSTRAINT_NANOS);
			pol.computation = (uint32_t) nanos_to_abs(LL_COMPUTATION_NANOS);
		} else {
			pol.constraint  = (uint32_t) nanos_to_abs(CONSTRAINT_NANOS);
			pol.computation = (uint32_t) nanos_to_abs(COMPUTATION_NANOS);
		}
		pol.preemptible = 0; /* Ignored by OS */

		kr = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
		    (thread_policy_t) &pol, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
		mach_assert_zero_t(my_id, kr);
		break;
	case MY_POLICY_FIXEDPRI:
		ret = pthread_set_fixedpriority_self();
		if (ret) {
			errc(EX_OSERR, ret, "pthread_set_fixedpriority_self");
		}
		break;
	default:
		errx(EX_USAGE, "invalid policy type %d", g_policy);
	}

	if (g_do_affinity) {
		thread_affinity_policy_data_t affinity;

		affinity.affinity_tag = my_id % 2;

		kr = thread_policy_set(mach_thread_self(), THREAD_AFFINITY_POLICY,
		    (thread_policy_t)&affinity, THREAD_AFFINITY_POLICY_COUNT);
		mach_assert_zero_t(my_id, kr);
	}

	return 0;
}
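
/*
 * If you need to confirm that the time constraint policy actually took
 * effect (e.g. when debugging a variant of this test), one way is to read
 * it back with thread_policy_get(); a sketch, not part of the test:
 *
 *   thread_time_constraint_policy_data_t cur;
 *   mach_msg_type_number_t cnt = THREAD_TIME_CONSTRAINT_POLICY_COUNT;
 *   boolean_t get_default = FALSE;
 *   kern_return_t kr2 = thread_policy_get(mach_thread_self(),
 *       THREAD_TIME_CONSTRAINT_POLICY, (thread_policy_t)&cur, &cnt,
 *       &get_default);
 *
 * get_default comes back TRUE if the thread is still on the default
 * (non-realtime) policy.
 */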
static time_value_t
get_thread_runtime(void)
{
	thread_basic_info_data_t info;
	mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;
	thread_info(pthread_mach_thread_np(pthread_self()), THREAD_BASIC_INFO, (thread_info_t)&info, &info_count);

	time_value_add(&info.user_time, &info.system_time);

	return info.user_time;
}

time_value_t worker_threads_total_runtime = {};
/*
 * Wait for a wakeup, potentially wake up another of the "0-N" threads,
 * and notify the main thread when done.
 */
static void*
worker_thread(void *arg)
{
	static os_unfair_lock runtime_lock = OS_UNFAIR_LOCK_INIT;

	uint32_t my_id = (uint32_t)(uintptr_t)arg;
	kern_return_t kr;

	volatile double x = 0.0;
	volatile double y = 0.0;

	/* Set policy and so forth */
	thread_setup(my_id);

	for (uint32_t i = 0; i < g_iterations; i++) {
		if (my_id == 0) {
			/*
			 * Leader thread either wakes everyone up or starts the chain going.
			 */

			/* Give the worker threads undisturbed time to finish before waiting on them */
			if (g_do_sleep) {
				usleep(g_iteration_sleeptime_us);
			}

			debug_log("%d Leader thread wait for ready\n", i);

			/*
			 * Wait for everyone else to declare ready
			 * Is there a better way to do this that won't interfere with the rest of the chain?
			 * TODO: Invent 'semaphore wait for N signals'
			 */

			for (uint32_t j = 0; j < g_numthreads - 1; j++) {
				kr = semaphore_wait(g_readysem);
				mach_assert_zero_t(my_id, kr);
			}

			debug_log("%d Leader thread wait\n", i);

			if (i > 0) {
				for (int cpuid = 0; cpuid < g_numcpus; cpuid++) {
					if (g_cpu_histogram[cpuid].current == 1) {
						atomic_fetch_or_explicit(&g_cpu_map[i - 1], (1UL << cpuid), memory_order_relaxed);
						g_cpu_histogram[cpuid].current = 0;
					}
				}
			}

			/* Signal main thread and wait for start of iteration */

			kr = semaphore_wait_signal(g_leadersem, g_main_sem);
			mach_assert_zero_t(my_id, kr);

			g_thread_endtimes_abs[my_id] = mach_absolute_time();

			debug_log("%d Leader thread go\n", i);

			assert_zero_t(my_id, atomic_load_explicit(&g_done_threads, memory_order_relaxed));

			switch (g_waketype) {
			case WAKE_BROADCAST_ONESEM:
				kr = semaphore_signal_all(g_broadcastsem);
				mach_assert_zero_t(my_id, kr);
				break;
			case WAKE_BROADCAST_PERTHREAD:
				for (uint32_t j = 1; j < g_numthreads; j++) {
					kr = semaphore_signal(g_semarr[j]);
					mach_assert_zero_t(my_id, kr);
				}
				break;
			case WAKE_CHAIN:
				kr = semaphore_signal(g_semarr[my_id + 1]);
				mach_assert_zero_t(my_id, kr);
				break;
			case WAKE_HOP:
				kr = semaphore_wait_signal(g_donesem, g_semarr[my_id + 1]);
				mach_assert_zero_t(my_id, kr);
				break;
			}
		} else {
			/*
			 * Everyone else waits to be woken up,
			 * records when she wakes up, and possibly
			 * wakes up a friend.
			 */
			switch (g_waketype) {
			case WAKE_BROADCAST_ONESEM:
				kr = semaphore_wait_signal(g_broadcastsem, g_readysem);
				mach_assert_zero_t(my_id, kr);

				g_thread_endtimes_abs[my_id] = mach_absolute_time();
				break;

			case WAKE_BROADCAST_PERTHREAD:
				kr = semaphore_wait_signal(g_semarr[my_id], g_readysem);
				mach_assert_zero_t(my_id, kr);

				g_thread_endtimes_abs[my_id] = mach_absolute_time();
				break;

			case WAKE_CHAIN:
				kr = semaphore_wait_signal(g_semarr[my_id], g_readysem);
				mach_assert_zero_t(my_id, kr);

				/* Signal the next thread *after* recording wake time */

				g_thread_endtimes_abs[my_id] = mach_absolute_time();

				if (my_id < (g_numthreads - 1)) {
					kr = semaphore_signal(g_semarr[my_id + 1]);
					mach_assert_zero_t(my_id, kr);
				}

				break;

			case WAKE_HOP:
				kr = semaphore_wait_signal(g_semarr[my_id], g_readysem);
				mach_assert_zero_t(my_id, kr);

				/* Signal the next thread *after* recording wake time */

				g_thread_endtimes_abs[my_id] = mach_absolute_time();

				if (my_id < (g_numthreads - 1)) {
					kr = semaphore_wait_signal(g_donesem, g_semarr[my_id + 1]);
					mach_assert_zero_t(my_id, kr);
				} else {
					kr = semaphore_signal_all(g_donesem);
					mach_assert_zero_t(my_id, kr);
				}

				break;
			}
		}

		unsigned int cpuid = _os_cpu_number();
		assert(cpuid < g_numcpus);
		debug_log("Thread %p woke up on CPU %d for iteration %d.\n", pthread_self(), cpuid, i);
		g_cpu_histogram[cpuid].current = 1;
		g_cpu_histogram[cpuid].accum++;

		if (g_do_one_long_spin && g_one_long_spin_id == my_id) {
			/* One randomly chosen thread holds up the train for a while. */

			uint64_t endspin = g_starttime_abs + g_one_long_spin_length_abs;
			while (mach_absolute_time() < endspin) {
				y = y + 1.5 + x;
				x = sqrt(y);
			}
		}

		if (g_do_each_spin) {
			/* Each thread spins for a certain duration after waking up before blocking again. */

			uint64_t endspin = mach_absolute_time() + g_each_spin_duration_abs;
			while (mach_absolute_time() < endspin) {
				y = y + 1.5 + x;
				x = sqrt(y);
			}
		}

		uint32_t done_threads;
		done_threads = atomic_fetch_add_explicit(&g_done_threads, 1, memory_order_relaxed) + 1;

		debug_log("Thread %p new value is %d, iteration %d\n", pthread_self(), done_threads, i);

		if (g_drop_priority) {
			/* Drop priority to BG momentarily */
			errno_t ret = setpriority(PRIO_DARWIN_THREAD, 0, PRIO_DARWIN_BG);
			if (ret) {
				errc(EX_OSERR, ret, "setpriority PRIO_DARWIN_BG");
			}
		}

		if (g_do_all_spin) {
			/* Everyone spins until the last thread checks in. */

			while (atomic_load_explicit(&g_done_threads, memory_order_relaxed) < g_numthreads) {
				y = y + 1.5 + x;
				x = sqrt(y);
			}
		}

		if (g_drop_priority) {
			/* Restore normal priority */
			errno_t ret = setpriority(PRIO_DARWIN_THREAD, 0, 0);
			if (ret) {
				errc(EX_OSERR, ret, "setpriority 0");
			}
		}

		debug_log("Thread %p done spinning, iteration %d\n", pthread_self(), i);
	}

	if (my_id == 0) {
		/* Give the worker threads undisturbed time to finish before waiting on them */
		if (g_do_sleep) {
			usleep(g_iteration_sleeptime_us);
		}

		/* Wait for the worker threads to finish */
		for (uint32_t i = 0; i < g_numthreads - 1; i++) {
			kr = semaphore_wait(g_readysem);
			mach_assert_zero_t(my_id, kr);
		}

		/* Tell everyone and the main thread that the last iteration is done */
		debug_log("%d Leader thread done\n", g_iterations - 1);

		for (int cpuid = 0; cpuid < g_numcpus; cpuid++) {
			if (g_cpu_histogram[cpuid].current == 1) {
				atomic_fetch_or_explicit(&g_cpu_map[g_iterations - 1], (1UL << cpuid), memory_order_relaxed);
				g_cpu_histogram[cpuid].current = 0;
			}
		}

		kr = semaphore_signal_all(g_main_sem);
		mach_assert_zero_t(my_id, kr);
	} else {
		/* Hold up thread teardown so it doesn't affect the last iteration */
		kr = semaphore_wait_signal(g_main_sem, g_readysem);
		mach_assert_zero_t(my_id, kr);
	}

	time_value_t runtime = get_thread_runtime();
	os_unfair_lock_lock(&runtime_lock);
	time_value_add(&worker_threads_total_runtime, &runtime);
	os_unfair_lock_unlock(&runtime_lock);

	return 0;
}
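
/*
 * Latency bookkeeping: each worker stores exactly one mach_absolute_time()
 * sample per iteration in g_thread_endtimes_abs[my_id] at the moment it
 * wakes, and the main thread later turns those into "worst latency from
 * g_starttime_abs" and "worst latency relative to thread 0" figures. The
 * g_cpu_histogram/g_cpu_map updates above are what the --test-rt* checks
 * and --histogram output consume after the run.
 */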
/*
 * Given an array of uint64_t values, compute average, max, min, and standard deviation
 */
static void
compute_stats(uint64_t *values, uint64_t count, float *averagep, uint64_t *maxp, uint64_t *minp, float *stddevp)
{
	uint32_t i;
	uint64_t _sum = 0;
	uint64_t _max = 0;
	uint64_t _min = UINT64_MAX;
	float    _avg = 0;
	float    _dev = 0;

	for (i = 0; i < count; i++) {
		_sum += values[i];
		_max = values[i] > _max ? values[i] : _max;
		_min = values[i] < _min ? values[i] : _min;
	}

	_avg = ((float)_sum) / ((float)count);

	_dev = 0;
	for (i = 0; i < count; i++) {
		_dev += powf((((float)values[i]) - _avg), 2);
	}

	_dev /= count;
	_dev = sqrtf(_dev);

	*averagep = _avg;
	*maxp = _max;
	*minp = _min;
	*stddevp = _dev;
}
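
/*
 * Note: the loop above computes the population standard deviation,
 * stddev = sqrt((1/N) * sum((x_i - avg)^2)), in single precision.
 */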
typedef struct {
	natural_t sys;
	natural_t user;
	natural_t idle;
} cpu_time_t;

static void
record_cpu_time(cpu_time_t *cpu_time)
{
	host_cpu_load_info_data_t load;
	mach_msg_type_number_t count = HOST_CPU_LOAD_INFO_COUNT;
	kern_return_t kr = host_statistics(mach_host_self(), HOST_CPU_LOAD_INFO, (int *)&load, &count);
	mach_assert_zero_t(0, kr);

	natural_t total_system_time = load.cpu_ticks[CPU_STATE_SYSTEM];
	natural_t total_user_time = load.cpu_ticks[CPU_STATE_USER] + load.cpu_ticks[CPU_STATE_NICE];
	natural_t total_idle_time = load.cpu_ticks[CPU_STATE_IDLE];

	cpu_time->sys  = total_system_time;
	cpu_time->user = total_user_time;
	cpu_time->idle = total_idle_time;
}
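
/*
 * host_statistics() reports cpu_ticks in units of the scheduler tick
 * (typically 100 per second on macOS). main() below converts the idle
 * delta to milliseconds by multiplying by 10, which assumes that 100 Hz
 * rate.
 */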
int
main(int argc, char **argv)
{
	errno_t ret;
	kern_return_t kr;

	pthread_t *threads;
	uint64_t *worst_latencies_ns;
	uint64_t *worst_latencies_from_first_ns;
	uint64_t max, min;
	float avg, stddev;

	bool test_fail = false;
	bool test_warn = false;

	for (int i = 0; i < argc; i++) {
		if (strcmp(argv[i], "--switched_apptype") == 0) {
			g_seen_apptype = TRUE;
		}
	}

	if (!g_seen_apptype) {
		selfexec_with_apptype(argc, argv);
	}

	parse_args(argc, argv);

	srand((unsigned int)time(NULL));

	mach_timebase_info(&g_mti);

#if TARGET_OS_OSX
	/* SKIP test if running on arm platform */
	if (g_run_on_intel_only) {
		int is_arm = 0;
		size_t is_arm_size = sizeof(is_arm);
		ret = sysctlbyname("hw.optional.arm64", &is_arm, &is_arm_size, NULL, 0);
		if (ret == 0 && is_arm) {
			printf("Unsupported platform. Skipping test.\n");
			exit(0);
		}
	}
#endif /* TARGET_OS_OSX */
	size_t ncpu_size = sizeof(g_numcpus);
	ret = sysctlbyname("hw.ncpu", &g_numcpus, &ncpu_size, NULL, 0);
	if (ret) {
		err(EX_OSERR, "Failed sysctlbyname(hw.ncpu)");
	}
	assert(g_numcpus <= 64);        /* g_cpu_map needs to be extended for > 64 cpus */

	size_t physicalcpu_size = sizeof(g_nphysicalcpu);
	ret = sysctlbyname("hw.physicalcpu", &g_nphysicalcpu, &physicalcpu_size, NULL, 0);
	if (ret) {
		err(EX_OSERR, "Failed sysctlbyname(hw.physicalcpu)");
	}

	size_t logicalcpu_size = sizeof(g_nlogicalcpu);
	ret = sysctlbyname("hw.logicalcpu", &g_nlogicalcpu, &logicalcpu_size, NULL, 0);
	if (ret) {
		err(EX_OSERR, "Failed sysctlbyname(hw.logicalcpu)");
	}
	if (g_test_rt) {
		if (g_numthreads == 0) {
			g_numthreads = g_numcpus;
		}
		g_policy = MY_POLICY_REALTIME;
		g_do_all_spin = TRUE;
		/* Don't change g_traceworthy_latency_ns if it's explicitly been set to something other than the default */
		if (g_traceworthy_latency_ns == TRACEWORTHY_NANOS) {
			g_traceworthy_latency_ns = TRACEWORTHY_NANOS_TEST;
		}
	} else if (g_test_rt_smt) {
		if (g_nlogicalcpu != 2 * g_nphysicalcpu) {
			printf("Attempt to run --test-rt-smt on a non-SMT device\n");
			exit(0);
		}

		if (g_numthreads == 0) {
			g_numthreads = g_nphysicalcpu;
		}
		g_policy = MY_POLICY_REALTIME;
		g_do_all_spin = TRUE;
	} else if (g_test_rt_avoid0) {
#if defined(__x86_64__) || defined(__i386__)
		if (g_numthreads == 0) {
			g_numthreads = g_nphysicalcpu - 1;
		}
		if (g_numthreads == 0) {
			printf("Attempt to run --test-rt-avoid0 on a uniprocessor\n");
			exit(0);
		}
		g_policy = MY_POLICY_REALTIME;
		g_do_all_spin = TRUE;
#else
		printf("Attempt to run --test-rt-avoid0 on a non-Intel device\n");
		exit(0);
#endif
	} else if (g_numthreads == 0) {
		g_numthreads = g_numcpus;
	}
	if (g_do_each_spin) {
		g_each_spin_duration_abs = nanos_to_abs(g_each_spin_duration_ns);
	}

	/* Configure the long-spin thread to take up half of its computation */
	if (g_do_one_long_spin) {
		g_one_long_spin_length_ns = COMPUTATION_NANOS / 2;
		g_one_long_spin_length_abs = nanos_to_abs(g_one_long_spin_length_ns);
	}

	/* Estimate the amount of time the cleanup phase needs to back off */
	g_iteration_sleeptime_us = g_numthreads * 20;

	uint32_t threads_per_core = (g_numthreads / g_numcpus) + 1;
	if (g_do_each_spin) {
		g_iteration_sleeptime_us += threads_per_core * (g_each_spin_duration_ns / NSEC_PER_USEC);
	}
	if (g_do_one_long_spin) {
		g_iteration_sleeptime_us += g_one_long_spin_length_ns / NSEC_PER_USEC;
	}
	/* Arrays for threads and their wakeup times */
	threads = (pthread_t*) valloc(sizeof(pthread_t) * g_numthreads);
	assert(threads);

	size_t endtimes_size = sizeof(uint64_t) * g_numthreads;

	g_thread_endtimes_abs = (uint64_t*) valloc(endtimes_size);
	assert(g_thread_endtimes_abs);

	/* Ensure the allocation is pre-faulted */
	ret = memset_s(g_thread_endtimes_abs, endtimes_size, 0, endtimes_size);
	if (ret) {
		errc(EX_OSERR, ret, "memset_s endtimes");
	}

	size_t latencies_size = sizeof(uint64_t) * g_iterations;

	worst_latencies_ns = (uint64_t*) valloc(latencies_size);
	assert(worst_latencies_ns);

	/* Ensure the allocation is pre-faulted */
	ret = memset_s(worst_latencies_ns, latencies_size, 0, latencies_size);
	if (ret) {
		errc(EX_OSERR, ret, "memset_s latencies");
	}

	worst_latencies_from_first_ns = (uint64_t*) valloc(latencies_size);
	assert(worst_latencies_from_first_ns);

	/* Ensure the allocation is pre-faulted */
	ret = memset_s(worst_latencies_from_first_ns, latencies_size, 0, latencies_size);
	if (ret) {
		errc(EX_OSERR, ret, "memset_s latencies_from_first");
	}

	size_t histogram_size = sizeof(histogram_t) * g_numcpus;
	g_cpu_histogram = (histogram_t *)valloc(histogram_size);
	assert(g_cpu_histogram);
	/* Ensure the allocation is pre-faulted */
	ret = memset_s(g_cpu_histogram, histogram_size, 0, histogram_size);
	if (ret) {
		errc(EX_OSERR, ret, "memset_s g_cpu_histogram");
	}

	size_t map_size = sizeof(uint64_t) * g_iterations;
	g_cpu_map = (_Atomic uint64_t *)valloc(map_size);
	assert(g_cpu_map);
	/* Ensure the allocation is pre-faulted */
	ret = memset_s(g_cpu_map, map_size, 0, map_size);
	if (ret) {
		errc(EX_OSERR, ret, "memset_s g_cpu_map");
	}
	kr = semaphore_create(mach_task_self(), &g_main_sem, SYNC_POLICY_FIFO, 0);
	mach_assert_zero(kr);

	/* Either one big semaphore or one per thread */
	if (g_waketype == WAKE_CHAIN ||
	    g_waketype == WAKE_BROADCAST_PERTHREAD ||
	    g_waketype == WAKE_HOP) {
		g_semarr = valloc(sizeof(semaphore_t) * g_numthreads);
		assert(g_semarr);

		for (uint32_t i = 0; i < g_numthreads; i++) {
			kr = semaphore_create(mach_task_self(), &g_semarr[i], SYNC_POLICY_FIFO, 0);
			mach_assert_zero(kr);
		}

		g_leadersem = g_semarr[0];
	} else {
		kr = semaphore_create(mach_task_self(), &g_broadcastsem, SYNC_POLICY_FIFO, 0);
		mach_assert_zero(kr);
		kr = semaphore_create(mach_task_self(), &g_leadersem, SYNC_POLICY_FIFO, 0);
		mach_assert_zero(kr);
	}

	if (g_waketype == WAKE_HOP) {
		kr = semaphore_create(mach_task_self(), &g_donesem, SYNC_POLICY_FIFO, 0);
		mach_assert_zero(kr);
	}

	kr = semaphore_create(mach_task_self(), &g_readysem, SYNC_POLICY_FIFO, 0);
	mach_assert_zero(kr);

	kr = semaphore_create(mach_task_self(), &g_rt_churn_sem, SYNC_POLICY_FIFO, 0);
	mach_assert_zero(kr);

	kr = semaphore_create(mach_task_self(), &g_rt_churn_start_sem, SYNC_POLICY_FIFO, 0);
	mach_assert_zero(kr);

	atomic_store_explicit(&g_done_threads, 0, memory_order_relaxed);
	/* Create the threads */
	for (uint32_t i = 0; i < g_numthreads; i++) {
		ret = pthread_create(&threads[i], NULL, worker_thread, (void*)(uintptr_t)i);
		if (ret) {
			errc(EX_OSERR, ret, "pthread_create %d", i);
		}
	}

	ret = setpriority(PRIO_DARWIN_ROLE, 0, PRIO_DARWIN_ROLE_UI_FOCAL);
	if (ret) {
		errc(EX_OSERR, ret, "setpriority");
	}

	g_starttime_abs = mach_absolute_time();

	if (g_churn_pri) {
		create_churn_threads();
	}
	if (g_rt_churn) {
		create_rt_churn_threads();
	}

	/* Let everyone get settled */
	kr = semaphore_wait(g_main_sem);
	mach_assert_zero(kr);

	/* Give the system a bit more time to settle */
	if (g_do_sleep) {
		usleep(g_iteration_sleeptime_us);
	}
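
	/*
	 * Per-iteration handshake, roughly: main resets g_done_threads, stamps
	 * g_starttime_abs, then does semaphore_wait_signal(g_main_sem,
	 * g_leadersem), which releases the leader and blocks until the leader
	 * has seen every other worker check in on g_readysem and signals
	 * g_main_sem back. Only then are the per-thread wake timestamps read.
	 */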
	cpu_time_t start_time;
	cpu_time_t finish_time;

	record_cpu_time(&start_time);

	for (uint32_t i = 0; i < g_iterations; i++) {
		uint32_t j;
		uint64_t worst_abs = 0, best_abs = UINT64_MAX;

		if (g_do_one_long_spin) {
			g_one_long_spin_id = (uint32_t)rand() % g_numthreads;
		}

		if (g_rt_churn) {
			start_rt_churn_threads();
		}

		debug_log("%d Main thread reset\n", i);

		atomic_store_explicit(&g_done_threads, 0, memory_order_seq_cst);

		g_starttime_abs = mach_absolute_time();

		/* Fire them off and wait for worker threads to finish */
		kr = semaphore_wait_signal(g_main_sem, g_leadersem);
		mach_assert_zero(kr);

		debug_log("%d Main thread return\n", i);

		assert(atomic_load_explicit(&g_done_threads, memory_order_relaxed) == g_numthreads);

		if (g_rt_churn) {
			wait_for_rt_churn_threads();
		}

		/*
		 * We report the worst latencies relative to start time
		 * and relative to the lead worker thread.
		 */
		for (j = 0; j < g_numthreads; j++) {
			uint64_t latency_abs;

			latency_abs = g_thread_endtimes_abs[j] - g_starttime_abs;
			worst_abs = worst_abs < latency_abs ? latency_abs : worst_abs;
		}

		worst_latencies_ns[i] = abs_to_nanos(worst_abs);

		worst_abs = 0;
		for (j = 1; j < g_numthreads; j++) {
			uint64_t latency_abs;

			latency_abs = g_thread_endtimes_abs[j] - g_thread_endtimes_abs[0];
			worst_abs = worst_abs < latency_abs ? latency_abs : worst_abs;
			best_abs = best_abs > latency_abs ? latency_abs : best_abs;
		}

		worst_latencies_from_first_ns[i] = abs_to_nanos(worst_abs);

		/*
		 * In the event of a bad run, cut a trace point.
		 */
		if (worst_latencies_from_first_ns[i] > g_traceworthy_latency_ns) {
			/* Ariadne's ad-hoc test signpost */
			kdebug_trace(ARIADNEDBG_CODE(0, 0), worst_latencies_from_first_ns[i], g_traceworthy_latency_ns, 0, 0);

			if (g_verbose) {
				printf("Worst on this round was %.2f us.\n", ((float)worst_latencies_from_first_ns[i]) / 1000.0);
			}
		}

		/* Give the system a bit more time to settle */
		if (g_do_sleep) {
			usleep(g_iteration_sleeptime_us);
		}
	}
	record_cpu_time(&finish_time);

	/* Rejoin threads */
	for (uint32_t i = 0; i < g_numthreads; i++) {
		ret = pthread_join(threads[i], NULL);
		if (ret) {
			errc(EX_OSERR, ret, "pthread_join %d", i);
		}
	}

	if (g_rt_churn) {
		join_rt_churn_threads();
	}

	if (g_churn_pri) {
		join_churn_threads();
	}
	uint32_t cpu_idle_time = (finish_time.idle - start_time.idle) * 10;
	uint32_t worker_threads_runtime = worker_threads_total_runtime.seconds * 1000 + worker_threads_total_runtime.microseconds / 1000;

	compute_stats(worst_latencies_ns, g_iterations, &avg, &max, &min, &stddev);
	printf("Results (from a stop):\n");
	printf("Max:\t\t%.2f us\n", ((float)max) / 1000.0);
	printf("Min:\t\t%.2f us\n", ((float)min) / 1000.0);
	printf("Avg:\t\t%.2f us\n", avg / 1000.0);
	printf("Stddev:\t\t%.2f us\n", stddev / 1000.0);

	compute_stats(worst_latencies_from_first_ns, g_iterations, &avg, &max, &min, &stddev);
	printf("Results (relative to first thread):\n");
	printf("Max:\t\t%.2f us\n", ((float)max) / 1000.0);
	printf("Min:\t\t%.2f us\n", ((float)min) / 1000.0);
	printf("Avg:\t\t%.2f us\n", avg / 1000.0);
	printf("Stddev:\t\t%.2f us\n", stddev / 1000.0);
	if (g_verbose) {
		for (uint32_t i = 0; i < g_iterations; i++) {
			printf("Iteration %d: %f us\n", i, worst_latencies_ns[i] / 1000.0);
		}
	}

	if (g_histogram) {
		for (uint32_t i = 0; i < g_numcpus; i++) {
			printf("%d\t%d\n", i, g_cpu_histogram[i].accum);
		}
	}
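
	/*
	 * g_cpu_map[i] is a bitmask of the CPUs the worker threads were
	 * observed on during iteration i. The PRIMARY/SECONDARY masks below
	 * assume the common SMT numbering where even CPU ids are primary
	 * cores and the odd id above each one is its hyperthread sibling, so
	 * (map & PRIMARY) & ((map & SECONDARY) >> 1) is nonzero only when
	 * both siblings of some core were used.
	 */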
	if (g_test_rt || g_test_rt_smt || g_test_rt_avoid0) {
#define PRIMARY   0x5555555555555555ULL
#define SECONDARY 0xaaaaaaaaaaaaaaaaULL

		int fail_count = 0;

		for (uint32_t i = 0; i < g_iterations; i++) {
			bool secondary = false;
			bool fail = false;
			uint64_t map = g_cpu_map[i];
			if (g_test_rt_smt) {
				/* Test for one or more threads running on secondary cores unexpectedly (WARNING) */
				secondary = (map & SECONDARY);
				/* Test for threads running on both primary and secondary cpus of the same core (FAIL) */
				fail = ((map & PRIMARY) & ((map & SECONDARY) >> 1));
			} else if (g_test_rt) {
				fail = (__builtin_popcountll(map) != g_numthreads) && (worst_latencies_ns[i] > g_traceworthy_latency_ns);
			} else if (g_test_rt_avoid0) {
				fail = ((map & 0x1) == 0x1);
			}
			if (secondary || fail) {
				printf("Iteration %d: 0x%llx%s%s\n", i, map,
				    secondary ? " SECONDARY" : "",
				    fail ? " FAIL" : "");
			}
			test_warn |= (secondary || fail);
			test_fail |= fail;
			fail_count += fail;
		}

		if (test_fail && (g_iterations >= 100) && (fail_count <= g_iterations / 100)) {
			printf("99%% or better success rate\n");
			test_fail = 0;
		}
	}
	if (g_test_rt_smt && (g_each_spin_duration_ns >= 200000) && !test_warn) {
		printf("cpu_idle_time=%dms worker_threads_runtime=%dms\n", cpu_idle_time, worker_threads_runtime);
		if (cpu_idle_time < worker_threads_runtime / 4) {
			printf("FAIL cpu_idle_time unexpectedly small\n");
			test_fail = 1;
		} else if (cpu_idle_time > worker_threads_runtime * 2) {
			printf("FAIL cpu_idle_time unexpectedly large\n");
			test_fail = 1;
		}
	}
	free(threads);
	free(g_thread_endtimes_abs);
	free(worst_latencies_ns);
	free(worst_latencies_from_first_ns);
	free(g_cpu_histogram);

	return test_fail;
}
/*
 * WARNING: This is SPI specifically intended for use by launchd to start UI
 * apps. We use it here for a test tool only to opt into QoS using the same
 * policies. Do not use this outside xnu or libxpc/launchd.
 */
static void
selfexec_with_apptype(int argc, char *argv[])
{
	int ret;
	posix_spawnattr_t attr;
	extern char **environ;
	char *new_argv[argc + 1 + 1 /* NULL */];
	int i;
	char prog[PATH_MAX];
	uint32_t prog_size = PATH_MAX;

	ret = _NSGetExecutablePath(prog, &prog_size);
	if (ret) {
		err(EX_OSERR, "_NSGetExecutablePath");
	}

	for (i = 0; i < argc; i++) {
		new_argv[i] = argv[i];
	}

	new_argv[i] = "--switched_apptype";
	new_argv[i + 1] = NULL;

	ret = posix_spawnattr_init(&attr);
	if (ret) {
		errc(EX_OSERR, ret, "posix_spawnattr_init");
	}

	ret = posix_spawnattr_setflags(&attr, POSIX_SPAWN_SETEXEC);
	if (ret) {
		errc(EX_OSERR, ret, "posix_spawnattr_setflags");
	}

	ret = posix_spawnattr_setprocesstype_np(&attr, POSIX_SPAWN_PROC_TYPE_APP_DEFAULT);
	if (ret) {
		errc(EX_OSERR, ret, "posix_spawnattr_setprocesstype_np");
	}

	ret = posix_spawn(NULL, prog, NULL, &attr, new_argv, environ);
	if (ret) {
		errc(EX_OSERR, ret, "posix_spawn");
	}
}
/*
 * Admittedly not very attractive.
 */
static void __attribute__((noreturn))
usage()
{
	errx(EX_USAGE, "Usage: %s <threads> <chain | hop | broadcast-single-sem | broadcast-per-thread> "
	    "<realtime | timeshare | timeshare_no_smt | fixed> <iterations>\n\t\t"
	    "[--trace <traceworthy latency in ns>] "
	    "[--verbose] [--spin-one] [--spin-all] [--spin-time <nanos>] [--affinity]\n\t\t"
	    "[--no-sleep] [--drop-priority] [--churn-pri <pri>] [--churn-count <n>]\n\t\t"
	    "[--rt-churn] [--rt-churn-count <n>] [--rt-ll] [--test-rt] [--test-rt-smt] [--test-rt-avoid0]",
	    getprogname());
}
static struct option* g_longopts;
static int option_index;

static uint32_t
read_dec_arg()
{
	/* char* optarg is a magic global */
	char *cp;
	uint32_t arg_val = (uint32_t)strtoull(optarg, &cp, 10);

	if (cp == optarg || *cp) {
		errx(EX_USAGE, "arg --%s requires a decimal number, found \"%s\"",
		    g_longopts[option_index].name, optarg);
	}

	return arg_val;
}
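
/*
 * Example invocations (illustrative only; the thread and iteration counts
 * are whatever you want to measure, and the binary name "zn" is just a
 * placeholder for however the tool was built):
 *
 *   ./zn 4 broadcast-single-sem realtime 1000 --spin-all
 *   ./zn 0 chain timeshare 100 --verbose
 *
 * A thread count of 0 lets parse_args()/main() pick a default based on the
 * CPU count.
 */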
static void
parse_args(int argc, char *argv[])
{
	enum {
		OPT_GETOPT = 0,
		OPT_SPIN_TIME,
		OPT_TRACE,
		OPT_PRIORITY,
		OPT_CHURN_PRI,
		OPT_CHURN_COUNT,
		OPT_RT_CHURN_COUNT,
	};

	static struct option longopts[] = {
		/* BEGIN IGNORE CODESTYLE */
		{ "spin-time",          required_argument,      NULL,                           OPT_SPIN_TIME },
		{ "trace",              required_argument,      NULL,                           OPT_TRACE },
		{ "priority",           required_argument,      NULL,                           OPT_PRIORITY },
		{ "churn-pri",          required_argument,      NULL,                           OPT_CHURN_PRI },
		{ "churn-count",        required_argument,      NULL,                           OPT_CHURN_COUNT },
		{ "rt-churn-count",     required_argument,      NULL,                           OPT_RT_CHURN_COUNT },
		{ "switched_apptype",   no_argument,            (int*)&g_seen_apptype,          TRUE },
		{ "spin-one",           no_argument,            (int*)&g_do_one_long_spin,      TRUE },
		{ "intel-only",         no_argument,            (int*)&g_run_on_intel_only,     TRUE },
		{ "spin-all",           no_argument,            (int*)&g_do_all_spin,           TRUE },
		{ "affinity",           no_argument,            (int*)&g_do_affinity,           TRUE },
		{ "no-sleep",           no_argument,            (int*)&g_do_sleep,              FALSE },
		{ "drop-priority",      no_argument,            (int*)&g_drop_priority,         TRUE },
		{ "test-rt",            no_argument,            (int*)&g_test_rt,               TRUE },
		{ "test-rt-smt",        no_argument,            (int*)&g_test_rt_smt,           TRUE },
		{ "test-rt-avoid0",     no_argument,            (int*)&g_test_rt_avoid0,        TRUE },
		{ "rt-churn",           no_argument,            (int*)&g_rt_churn,              TRUE },
		{ "rt-ll",              no_argument,            (int*)&g_rt_ll,                 TRUE },
		{ "histogram",          no_argument,            (int*)&g_histogram,             TRUE },
		{ "verbose",            no_argument,            (int*)&g_verbose,               TRUE },
		{ "help",               no_argument,            NULL,                           'h' },
		{ NULL,                 0,                      NULL,                           0 }
		/* END IGNORE CODESTYLE */
	};

	g_longopts = longopts;

	int ch = 0;
	while ((ch = getopt_long(argc, argv, "h", longopts, &option_index)) != -1) {
		switch (ch) {
		case OPT_GETOPT:
			/* getopt_long set a variable */
			break;
		case OPT_SPIN_TIME:
			g_do_each_spin = TRUE;
			g_each_spin_duration_ns = read_dec_arg();
			break;
		case OPT_TRACE:
			g_traceworthy_latency_ns = read_dec_arg();
			break;
		case OPT_PRIORITY:
			g_priority = read_dec_arg();
			break;
		case OPT_CHURN_PRI:
			g_churn_pri = read_dec_arg();
			break;
		case OPT_CHURN_COUNT:
			g_churn_count = read_dec_arg();
			break;
		case OPT_RT_CHURN_COUNT:
			g_rt_churn_count = read_dec_arg();
			break;
		case '?':
		case 'h':
		default:
			usage();
			/* NORETURN */
		}
	}

	/*
	 * getopt_long reorders all the options to the beginning of the argv array.
	 * Jump past them to the non-option arguments.
	 */

	argc -= optind;
	argv += optind;

	if (argc > 4) {
		warnx("Too many non-option arguments passed");
		usage();
	}

	if (argc != 4) {
		warnx("Missing required <threads> <waketype> <policy> <iterations> arguments");
		usage();
	}

	char *cp;

	/* How many threads? */
	g_numthreads = (uint32_t)strtoull(argv[0], &cp, 10);

	if (cp == argv[0] || *cp) {
		errx(EX_USAGE, "numthreads requires a decimal number, found \"%s\"", argv[0]);
	}

	/* What wakeup pattern? */
	g_waketype = parse_wakeup_pattern(argv[1]);

	/* Policy */
	g_policy = parse_thread_policy(argv[2]);

	/* Iterations */
	g_iterations = (uint32_t)strtoull(argv[3], &cp, 10);

	if (cp == argv[3] || *cp) {
		errx(EX_USAGE, "iterations requires a decimal number, found \"%s\"", argv[3]);
	}

	if (g_iterations < 1) {
		errx(EX_USAGE, "Must have at least one iteration");
	}

	if (g_numthreads == 1 && g_waketype == WAKE_CHAIN) {
		errx(EX_USAGE, "chain mode requires more than one thread");
	}

	if (g_numthreads == 1 && g_waketype == WAKE_HOP) {
		errx(EX_USAGE, "hop mode requires more than one thread");
	}
}