2 * Copyright (c) 2009 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
31 #include <sys/kdebug.h>
39 #include <sys/sysctl.h>
43 #include <spawn_private.h>
44 #include <sys/spawn_internal.h>
45 #include <mach-o/dyld.h>
47 #include <libkern/OSAtomic.h>
49 #include <mach/mach_time.h>
50 #include <mach/mach.h>
51 #include <mach/task.h>
52 #include <mach/semaphore.h>
54 #include <pthread/qos_private.h>
56 #include <sys/resource.h>
/*
 * How the leader thread wakes the other worker threads each iteration.
 */
typedef enum wake_type {
	WAKE_BROADCAST_ONESEM,    /* leader does semaphore_signal_all on one shared semaphore */
	WAKE_BROADCAST_PERTHREAD, /* leader signals each worker's own semaphore in turn */
	WAKE_CHAIN,               /* each thread signals the next thread's semaphore */
	WAKE_HOP,                 /* chain variant: wait on the done semaphore while signaling the next */
} wake_type_t;
/*
 * Scheduling policy the worker threads run under (applied in thread_setup).
 */
typedef enum my_policy_type {
	MY_POLICY_REALTIME,  /* Mach THREAD_TIME_CONSTRAINT_POLICY */
	MY_POLICY_TIMESHARE, /* default timeshare scheduling */
	MY_POLICY_FIXEDPRI,  /* fixed priority */
} my_policy_type_t;
/*
 * Fail loudly (and assert) on a nonzero return value.
 *   mach_assert_zero:    for Mach kern_return_t values (prints mach_error_string)
 *   mach_assert_zero_t:  same, tagged with the failing thread's id
 *   assert_zero_t:       for plain integer error codes, tagged with thread id
 */
#define mach_assert_zero(error) do { if ((error) != 0) { fprintf(stderr, "[FAIL] error %d (%s) ", (error), mach_error_string(error)); assert(error == 0); } } while (0)
#define mach_assert_zero_t(tid, error) do { if ((error) != 0) { fprintf(stderr, "[FAIL] Thread %d error %d (%s) ", (tid), (error), mach_error_string(error)); assert(error == 0); } } while (0)
#define assert_zero_t(tid, error) do { if ((error) != 0) { fprintf(stderr, "[FAIL] Thread %d error %d ", (tid), (error)); assert(error == 0); } } while (0)
#define CONSTRAINT_NANOS	(20000000ll)	/* 20 ms */
#define COMPUTATION_NANOS	(10000000ll)	/* 10 ms */
#define TRACEWORTHY_NANOS	(10000000ll)	/* 10 ms */

/*
 * debug_log was defined twice in a row (a printf variant immediately
 * followed by a no-op variant), which is an invalid macro redefinition.
 * Restore the DEBUG guard so the printf variant is only active in debug
 * builds and release builds compile the logging away.
 */
#if DEBUG
#define debug_log(args...) printf(args)
#else
#define debug_log(args...) do { } while(0)
#endif
/* Forward declarations */
static void* worker_thread(void *arg);                     /* per-worker-thread body */
static int thread_setup(uint32_t my_id);                   /* apply policy/affinity to calling thread */
static my_policy_type_t parse_thread_policy(const char *str); /* map policy name -> enum */
static void selfexec_with_apptype(int argc, char *argv[]); /* re-exec with app process type */
static void parse_args(int argc, char *argv[]);            /* command-line parsing */
/* Global variables (general) */
static uint32_t g_numcpus;                    /* hw.ncpu, read via sysctlbyname in main() */
static uint32_t g_numthreads;                 /* number of worker threads (from argv) */
static wake_type_t g_waketype;                /* wakeup pattern (from argv) */
static policy_t g_policy;                     /* scheduling policy for workers (from argv) */
static uint32_t g_iterations;                 /* number of measurement rounds (from argv) */
static struct mach_timebase_info g_mti;       /* timebase for abs <-> nanosecond conversion */
static semaphore_t g_main_sem;                /* main thread <-> leader thread rendezvous */
static uint64_t *g_thread_endtimes_abs;       /* per-thread wakeup timestamps for the current round */
static volatile uint32_t g_done_threads;      /* how many threads have checked in this round */
static boolean_t g_verbose = FALSE;           /* --verbose */
static boolean_t g_do_affinity = FALSE;       /* --affinity: alternate affinity tags */
static uint64_t g_starttime_abs;              /* mach_absolute_time() at round start */
static uint32_t g_iteration_sleeptime_us = 0; /* settle time between rounds */
static uint32_t g_priority = 0;               /* --priority for fixed-pri workers */
static uint32_t g_churn_pri = 0;              /* --churn-pri: priority of churn threads */
static uint32_t g_churn_count = 0;            /* --churn-count: number of churn threads */
static uint64_t g_churn_stopped_at = 0;       /* accumulated spin count if churn stopped early */
static boolean_t g_churn_stop = FALSE;        /* tells churn threads to exit */

static pthread_t *g_churn_threads = NULL;     /* array of churn thread handles */

/* Threshold for dropping a 'bad run' tracepoint */
static uint64_t g_traceworthy_latency_ns = TRACEWORTHY_NANOS;

/* Have we re-execed to set apptype? */
static boolean_t g_seen_apptype = FALSE;

/* usleep in between iterations */
static boolean_t g_do_sleep = TRUE;

/* Every thread spins until all threads have checked in */
static boolean_t g_do_all_spin = FALSE;

/* Every thread backgrounds temporarily before parking */
static boolean_t g_drop_priority = FALSE;

/* One randomly chosen thread holds up the train for a certain duration. */
static boolean_t g_do_one_long_spin = FALSE;
static uint32_t g_one_long_spin_id = 0;          /* which thread does the long spin this round */
static uint64_t g_one_long_spin_length_abs = 0;  /* long spin duration, mach abs units */
static uint64_t g_one_long_spin_length_ns = 0;   /* long spin duration, nanoseconds */

/* Each thread spins for a certain duration after waking up before blocking again. */
static boolean_t g_do_each_spin = FALSE;
static uint64_t g_each_spin_duration_abs = 0;    /* per-thread spin, mach abs units */
static uint64_t g_each_spin_duration_ns = 0;     /* per-thread spin, nanoseconds (--spin-time) */

/* Global variables (broadcast) */
static semaphore_t g_broadcastsem;  /* single semaphore for WAKE_BROADCAST_ONESEM */
static semaphore_t g_leadersem;     /* wakes the leader thread each round */
static semaphore_t g_readysem;      /* workers check in here when done */
static semaphore_t g_donesem;       /* used by WAKE_HOP hand-off */

/* Global variables (chain) */
static semaphore_t *g_semarr;       /* per-thread semaphores for chain/hop/per-thread modes */
141 abs_to_nanos(uint64_t abstime
)
143 return (uint64_t)(abstime
* (((double)g_mti
.numer
) / ((double)g_mti
.denom
)));
147 nanos_to_abs(uint64_t ns
)
149 return (uint64_t)(ns
* (((double)g_mti
.denom
) / ((double)g_mti
.numer
)));
/*
 * NOTE(review): fragment of an arch-specific CPU-relax helper. The
 * surrounding function header, the other architecture branches, and the
 * #else before the #error are not visible in this extract — confirm
 * against the full source before editing.
 */
#if defined(__x86_64__) || defined(__i386__)
	asm volatile("pause");
#error Unrecognized architecture
/*
 * Churn thread body: spins to generate scheduler load while the test runs.
 * NOTE(review): several lines are missing from this extract (return type,
 * the spin work itself, and the return path) — consult the full source.
 */
churn_thread(__unused void *arg)
	uint64_t spin_count = 0;

	/*
	 * As a safety measure to avoid wedging, we will bail on the spin if
	 * it's been more than 1s after the most recent run start
	 */
	while (g_churn_stop == FALSE &&
	       mach_absolute_time() < (g_starttime_abs + NSEC_PER_SEC)) {

	/* This is totally racy, but only here to detect if anyone stops early */
	g_churn_stopped_at += spin_count;
/*
 * Spawn g_churn_count churn threads (default: ncpus - 1) at priority
 * g_churn_pri under SCHED_RR, storing handles in g_churn_threads.
 * NOTE(review): the return type and declarations of `err` and `attr` are
 * missing from this extract, as is the closing brace of the create loop.
 */
create_churn_threads()
	if (g_churn_count == 0)
		g_churn_count = g_numcpus - 1;

	struct sched_param param = { .sched_priority = (int)g_churn_pri };

	/* Array for churn threads */
	g_churn_threads = (pthread_t *) valloc(sizeof(pthread_t) * g_churn_count);
	assert(g_churn_threads);

	if ((err = pthread_attr_init(&attr)))
		errc(EX_OSERR, err, "pthread_attr_init");

	if ((err = pthread_attr_setschedparam(&attr, &param)))
		errc(EX_OSERR, err, "pthread_attr_setschedparam");

	if ((err = pthread_attr_setschedpolicy(&attr, SCHED_RR)))
		errc(EX_OSERR, err, "pthread_attr_setschedpolicy");

	for (uint32_t i = 0 ; i < g_churn_count ; i++) {
		pthread_t new_thread;

		if ((err = pthread_create(&new_thread, &attr, churn_thread, NULL)))
			errc(EX_OSERR, err, "pthread_create");
		g_churn_threads[i] = new_thread;

	if ((err = pthread_attr_destroy(&attr)))
		errc(EX_OSERR, err, "pthread_attr_destroy");
/*
 * Reap the churn threads; warns if g_churn_stopped_at indicates any bailed
 * out before the run ended.
 * NOTE(review): the return type, the line setting g_churn_stop, and the
 * remaining printf arguments are missing from this extract.
 */
join_churn_threads(void)
	if (g_churn_stopped_at != 0)
		printf("Warning: Some of the churn threads may have stopped early: %lld\n",

	/* Rejoin churn threads */
	for (uint32_t i = 0; i < g_churn_count; i++) {
		errno_t err = pthread_join(g_churn_threads[i], NULL);
		if (err) errc(EX_OSERR, err, "pthread_join %d", i);
239 * Figure out what thread policy to use
241 static my_policy_type_t
242 parse_thread_policy(const char *str
)
244 if (strcmp(str
, "timeshare") == 0) {
245 return MY_POLICY_TIMESHARE
;
246 } else if (strcmp(str
, "realtime") == 0) {
247 return MY_POLICY_REALTIME
;
248 } else if (strcmp(str
, "fixed") == 0) {
249 return MY_POLICY_FIXEDPRI
;
251 errx(EX_USAGE
, "Invalid thread policy \"%s\"", str
);
256 * Figure out what wakeup pattern to use
259 parse_wakeup_pattern(const char *str
)
261 if (strcmp(str
, "chain") == 0) {
263 } else if (strcmp(str
, "hop") == 0) {
265 } else if (strcmp(str
, "broadcast-single-sem") == 0) {
266 return WAKE_BROADCAST_ONESEM
;
267 } else if (strcmp(str
, "broadcast-per-thread") == 0) {
268 return WAKE_BROADCAST_PERTHREAD
;
270 errx(EX_USAGE
, "Invalid wakeup pattern \"%s\"", str
);
/*
 * Apply the configured scheduling policy to the calling worker thread:
 * timeshare (no-op), realtime (Mach THREAD_TIME_CONSTRAINT_POLICY with
 * hard-coded constraint/computation), or fixed priority.  With
 * --do-affinity, alternate THREAD_AFFINITY_POLICY tags by thread id.
 * NOTE(review): this extract is missing several lines — the return type,
 * kr/ret declarations, pol.period initialization, the switch header,
 * break/return statements, and the surrounding #if for the pthread-based
 * priority path — consult the full source before editing.
 */
thread_setup(uint32_t my_id)
	thread_time_constraint_policy_data_t pol;

	int policy = SCHED_OTHER;
	if (g_policy == MY_POLICY_FIXEDPRI)

	struct sched_param param = {.sched_priority = (int)g_priority};
	if ((ret = pthread_setschedparam(pthread_self(), policy, &param)))
		errc(EX_OSERR, ret, "pthread_setschedparam: %d", my_id);

	case MY_POLICY_TIMESHARE:
	case MY_POLICY_REALTIME:
		/* Hard-coded realtime parameters (similar to what Digi uses) */
		pol.constraint = (uint32_t) nanos_to_abs(CONSTRAINT_NANOS);
		pol.computation = (uint32_t) nanos_to_abs(COMPUTATION_NANOS);
		pol.preemptible = 0; /* Ignored by OS */

		kr = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
		                       (thread_policy_t) &pol, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
		mach_assert_zero_t(my_id, kr);
	case MY_POLICY_FIXEDPRI:
		ret = pthread_set_fixedpriority_self();
		if (ret) errc(EX_OSERR, ret, "pthread_set_fixedpriority_self");
		errx(EX_USAGE, "invalid policy type %d", g_policy);

	thread_affinity_policy_data_t affinity;

	/* Alternate even/odd thread ids between two affinity tags */
	affinity.affinity_tag = my_id % 2;

	kr = thread_policy_set(mach_thread_self(), THREAD_AFFINITY_POLICY,
	                       (thread_policy_t)&affinity, THREAD_AFFINITY_POLICY_COUNT);
	mach_assert_zero_t(my_id, kr);
/*
 * Wait for a wakeup, potentially wake up another of the "0-N" threads,
 * and notify the main thread when done.
 *
 * Thread 0 acts as leader: each iteration it collects ready check-ins on
 * g_readysem, rendezvouses with the main thread via g_leadersem/g_main_sem,
 * timestamps its own wake in g_thread_endtimes_abs, then wakes the others
 * according to g_waketype.  Non-leader threads block on their semaphore,
 * timestamp their wakeup, optionally spin (--spin-one / --spin-time /
 * --spin-all), and check in on g_readysem.
 *
 * NOTE(review): many lines are missing from this extract — the kr
 * declaration, the thread_setup call, the leader/follower if/else
 * structure, switch headers, break statements, and the final return —
 * consult the full source before editing.
 */
worker_thread(void *arg)
	uint32_t my_id = (uint32_t)(uintptr_t)arg;

	/* Spin-work accumulators (volatile so the spin isn't optimized away) */
	volatile double x = 0.0;
	volatile double y = 0.0;

	/* Set policy and so forth */

	for (uint32_t i = 0; i < g_iterations; i++) {
		/*
		 * Leader thread either wakes everyone up or starts the chain going.
		 */

			/* Give the worker threads undisturbed time to finish before waiting on them */
				usleep(g_iteration_sleeptime_us);

			debug_log("%d Leader thread wait for ready\n", i);

			/*
			 * Wait for everyone else to declare ready
			 * Is there a better way to do this that won't interfere with the rest of the chain?
			 * TODO: Invent 'semaphore wait for N signals'
			 */
			for (uint32_t j = 0 ; j < g_numthreads - 1; j++) {
				kr = semaphore_wait(g_readysem);
				mach_assert_zero_t(my_id, kr);

			debug_log("%d Leader thread wait\n", i);

			/* Signal main thread and wait for start of iteration */

			kr = semaphore_wait_signal(g_leadersem, g_main_sem);
			mach_assert_zero_t(my_id, kr);

			g_thread_endtimes_abs[my_id] = mach_absolute_time();

			debug_log("%d Leader thread go\n", i);

			assert_zero_t(my_id, g_done_threads);

			switch (g_waketype) {
			case WAKE_BROADCAST_ONESEM:
				kr = semaphore_signal_all(g_broadcastsem);
				mach_assert_zero_t(my_id, kr);
			case WAKE_BROADCAST_PERTHREAD:
				for (uint32_t j = 1; j < g_numthreads; j++) {
					kr = semaphore_signal(g_semarr[j]);
					mach_assert_zero_t(my_id, kr);

				/* chain: kick the first follower */
				kr = semaphore_signal(g_semarr[my_id + 1]);
				mach_assert_zero_t(my_id, kr);

				/* hop: kick the first follower while blocking on the done semaphore */
				kr = semaphore_wait_signal(g_donesem, g_semarr[my_id + 1]);
				mach_assert_zero_t(my_id, kr);

			/*
			 * Everyone else waits to be woken up,
			 * records when she wakes up, and possibly
			 */
			case WAKE_BROADCAST_ONESEM:
				kr = semaphore_wait_signal(g_broadcastsem, g_readysem);
				mach_assert_zero_t(my_id, kr);

				g_thread_endtimes_abs[my_id] = mach_absolute_time();

			case WAKE_BROADCAST_PERTHREAD:
				kr = semaphore_wait_signal(g_semarr[my_id], g_readysem);
				mach_assert_zero_t(my_id, kr);

				g_thread_endtimes_abs[my_id] = mach_absolute_time();

				kr = semaphore_wait_signal(g_semarr[my_id], g_readysem);
				mach_assert_zero_t(my_id, kr);

				/* Signal the next thread *after* recording wake time */

				g_thread_endtimes_abs[my_id] = mach_absolute_time();

				if (my_id < (g_numthreads - 1)) {
					kr = semaphore_signal(g_semarr[my_id + 1]);
					mach_assert_zero_t(my_id, kr);

				kr = semaphore_wait_signal(g_semarr[my_id], g_readysem);
				mach_assert_zero_t(my_id, kr);

				/* Signal the next thread *after* recording wake time */

				g_thread_endtimes_abs[my_id] = mach_absolute_time();

				if (my_id < (g_numthreads - 1)) {
					kr = semaphore_wait_signal(g_donesem, g_semarr[my_id + 1]);
					mach_assert_zero_t(my_id, kr);

					kr = semaphore_signal_all(g_donesem);
					mach_assert_zero_t(my_id, kr);

		debug_log("Thread %p woke up for iteration %d.\n", pthread_self(), i);

		if (g_do_one_long_spin && g_one_long_spin_id == my_id) {
			/* One randomly chosen thread holds up the train for a while. */

			uint64_t endspin = g_starttime_abs + g_one_long_spin_length_abs;
			while (mach_absolute_time() < endspin) {

		if (g_do_each_spin) {
			/* Each thread spins for a certain duration after waking up before blocking again. */

			uint64_t endspin = mach_absolute_time() + g_each_spin_duration_abs;
			while (mach_absolute_time() < endspin) {

		/* Atomically count this thread as done for the round */
		int32_t new = OSAtomicIncrement32((volatile int32_t *)&g_done_threads);

		debug_log("Thread %p new value is %d, iteration %d\n", pthread_self(), new, i);

		if (g_drop_priority) {
			/* Drop priority to BG momentarily */
			errno_t ret = setpriority(PRIO_DARWIN_THREAD, 0, PRIO_DARWIN_BG);
			if (ret) errc(EX_OSERR, ret, "setpriority PRIO_DARWIN_BG");

		/* Everyone spins until the last thread checks in. */

		while (g_done_threads < g_numthreads) {

		if (g_drop_priority) {
			/* Restore normal priority */
			errno_t ret = setpriority(PRIO_DARWIN_THREAD, 0, 0);
			if (ret) errc(EX_OSERR, ret, "setpriority 0");

		debug_log("Thread %p done spinning, iteration %d\n", pthread_self(), i);

		/* Give the worker threads undisturbed time to finish before waiting on them */
			usleep(g_iteration_sleeptime_us);

		/* Wait for the worker threads to finish */
		for (uint32_t i = 0 ; i < g_numthreads - 1; i++) {
			kr = semaphore_wait(g_readysem);
			mach_assert_zero_t(my_id, kr);

		/* Tell everyone and the main thread that the last iteration is done */
		debug_log("%d Leader thread done\n", i);

		kr = semaphore_signal_all(g_main_sem);
		mach_assert_zero_t(my_id, kr);

		/* Hold up thread teardown so it doesn't affect the last iteration */
		kr = semaphore_wait_signal(g_main_sem, g_readysem);
		mach_assert_zero_t(my_id, kr);
/*
 * Given an array of uint64_t values, compute average, max, min, and
 * standard deviation, returned through the out-parameters.
 * NOTE(review): this extract is missing the return type, the declarations
 * of the local accumulators (_max/_sum/_dev and i), the summing loop body,
 * the sqrt/stddev finalization, and the stores through
 * averagep/maxp/minp/stddevp — consult the full source before editing.
 */
compute_stats(uint64_t *values, uint64_t count, float *averagep, uint64_t *maxp, uint64_t *minp, float *stddevp)
	uint64_t _min = UINT64_MAX;

	/* Single pass for min/max (sum accumulation not visible here) */
	for (i = 0; i < count; i++) {
		_max = values[i] > _max ? values[i] : _max;
		_min = values[i] < _min ? values[i] : _min;

	_avg = ((float)_sum) / ((float)count);

	/* Second pass: accumulate squared deviations for the stddev */
	for (i = 0; i < count; i++) {
		_dev += powf((((float)values[i]) - _avg), 2);
/*
 * Test driver: optionally re-exec to pick up an app process type, parse
 * arguments, allocate pre-faulted timing arrays, create semaphores and the
 * worker/churn threads, run g_iterations rounds measuring worst wakeup
 * latencies (both from round start and relative to the leader thread),
 * then join everything and print max/min/avg/stddev.
 * NOTE(review): this extract is missing many lines — the return type,
 * declarations of kr/ret/threads/avg/max/min/stddev/j, the g_seen_apptype
 * guard around selfexec, verbose/do-sleep conditionals, several closing
 * braces, and the final return — consult the full source before editing.
 */
main(int argc, char **argv)
	uint64_t *worst_latencies_ns;            /* worst latency per round, from round start */
	uint64_t *worst_latencies_from_first_ns; /* worst latency per round, relative to leader */

	/* Detect whether we've already re-execed with the app process type */
	for (int i = 0; i < argc; i++)
		if (strcmp(argv[i], "--switched_apptype") == 0)
			g_seen_apptype = TRUE;

		selfexec_with_apptype(argc, argv);

	parse_args(argc, argv);

	srand((unsigned int)time(NULL));

	mach_timebase_info(&g_mti);

	size_t ncpu_size = sizeof(g_numcpus);
	ret = sysctlbyname("hw.ncpu", &g_numcpus, &ncpu_size, NULL, 0);
	if (ret) err(EX_OSERR, "Failed sysctlbyname(hw.ncpu)");

	g_each_spin_duration_abs = nanos_to_abs(g_each_spin_duration_ns);

	/* Configure the long-spin thread to take up half of its computation */
	if (g_do_one_long_spin) {
		g_one_long_spin_length_ns = COMPUTATION_NANOS / 2;
		g_one_long_spin_length_abs = nanos_to_abs(g_one_long_spin_length_ns);

	/* Estimate the amount of time the cleanup phase needs to back off */
	g_iteration_sleeptime_us = g_numthreads * 20;

	uint32_t threads_per_core = (g_numthreads / g_numcpus) + 1;
		g_iteration_sleeptime_us += threads_per_core * (g_each_spin_duration_ns / NSEC_PER_USEC);
	if (g_do_one_long_spin)
		g_iteration_sleeptime_us += g_one_long_spin_length_ns / NSEC_PER_USEC;

	/* Arrays for threads and their wakeup times */
	threads = (pthread_t *) valloc(sizeof(pthread_t) * g_numthreads);

	size_t endtimes_size = sizeof(uint64_t) * g_numthreads;

	g_thread_endtimes_abs = (uint64_t*) valloc(endtimes_size);
	assert(g_thread_endtimes_abs);

	/* Ensure the allocation is pre-faulted */
	ret = memset_s(g_thread_endtimes_abs, endtimes_size, 0, endtimes_size);
	if (ret) errc(EX_OSERR, ret, "memset_s endtimes");

	size_t latencies_size = sizeof(uint64_t) * g_iterations;

	worst_latencies_ns = (uint64_t*) valloc(latencies_size);
	assert(worst_latencies_ns);

	/* Ensure the allocation is pre-faulted */
	ret = memset_s(worst_latencies_ns, latencies_size, 0, latencies_size);
	if (ret) errc(EX_OSERR, ret, "memset_s latencies");

	worst_latencies_from_first_ns = (uint64_t*) valloc(latencies_size);
	assert(worst_latencies_from_first_ns);

	/* Ensure the allocation is pre-faulted */
	ret = memset_s(worst_latencies_from_first_ns, latencies_size, 0, latencies_size);
	if (ret) errc(EX_OSERR, ret, "memset_s latencies_from_first");

	kr = semaphore_create(mach_task_self(), &g_main_sem, SYNC_POLICY_FIFO, 0);
	mach_assert_zero(kr);

	/* Either one big semaphore or one per thread */
	if (g_waketype == WAKE_CHAIN ||
	    g_waketype == WAKE_BROADCAST_PERTHREAD ||
	    g_waketype == WAKE_HOP) {

		g_semarr = valloc(sizeof(semaphore_t) * g_numthreads);

		for (uint32_t i = 0; i < g_numthreads; i++) {
			kr = semaphore_create(mach_task_self(), &g_semarr[i], SYNC_POLICY_FIFO, 0);
			mach_assert_zero(kr);

		/* The leader blocks on the first per-thread semaphore */
		g_leadersem = g_semarr[0];

		kr = semaphore_create(mach_task_self(), &g_broadcastsem, SYNC_POLICY_FIFO, 0);
		mach_assert_zero(kr);
		kr = semaphore_create(mach_task_self(), &g_leadersem, SYNC_POLICY_FIFO, 0);
		mach_assert_zero(kr);

	if (g_waketype == WAKE_HOP) {
		kr = semaphore_create(mach_task_self(), &g_donesem, SYNC_POLICY_FIFO, 0);
		mach_assert_zero(kr);

	kr = semaphore_create(mach_task_self(), &g_readysem, SYNC_POLICY_FIFO, 0);
	mach_assert_zero(kr);

	/* Create the threads */

	for (uint32_t i = 0; i < g_numthreads; i++) {
		ret = pthread_create(&threads[i], NULL, worker_thread, (void*)(uintptr_t)i);
		if (ret) errc(EX_OSERR, ret, "pthread_create %d", i);

	ret = setpriority(PRIO_DARWIN_ROLE, 0, PRIO_DARWIN_ROLE_UI_FOCAL);
	if (ret) errc(EX_OSERR, ret, "setpriority");

	g_starttime_abs = mach_absolute_time();

		create_churn_threads();

	/* Let everyone get settled */
	kr = semaphore_wait(g_main_sem);
	mach_assert_zero(kr);

	/* Give the system a bit more time to settle */
		usleep(g_iteration_sleeptime_us);

	for (uint32_t i = 0; i < g_iterations; i++) {

		uint64_t worst_abs = 0, best_abs = UINT64_MAX;

		if (g_do_one_long_spin)
			g_one_long_spin_id = (uint32_t)rand() % g_numthreads;

		debug_log("%d Main thread reset\n", i);

		g_starttime_abs = mach_absolute_time();

		/* Fire them off and wait for worker threads to finish */
		kr = semaphore_wait_signal(g_main_sem, g_leadersem);
		mach_assert_zero(kr);

		debug_log("%d Main thread return\n", i);

		/*
		 * We report the worst latencies relative to start time
		 * and relative to the lead worker thread.
		 */
		for (j = 0; j < g_numthreads; j++) {
			uint64_t latency_abs;

			latency_abs = g_thread_endtimes_abs[j] - g_starttime_abs;
			worst_abs = worst_abs < latency_abs ? latency_abs : worst_abs;

		worst_latencies_ns[i] = abs_to_nanos(worst_abs);

		for (j = 1; j < g_numthreads; j++) {
			uint64_t latency_abs;

			latency_abs = g_thread_endtimes_abs[j] - g_thread_endtimes_abs[0];
			worst_abs = worst_abs < latency_abs ? latency_abs : worst_abs;
			best_abs = best_abs > latency_abs ? latency_abs : best_abs;

		worst_latencies_from_first_ns[i] = abs_to_nanos(worst_abs);

		/*
		 * In the event of a bad run, cut a trace point.
		 */
		if (worst_latencies_from_first_ns[i] > g_traceworthy_latency_ns) {
			/* Ariadne's ad-hoc test signpost */
			kdebug_trace(ARIADNEDBG_CODE(0, 0), worst_latencies_from_first_ns[i], g_traceworthy_latency_ns, 0, 0);

				printf("Worst on this round was %.2f us.\n", ((float)worst_latencies_from_first_ns[i]) / 1000.0);

		/* Give the system a bit more time to settle */
			usleep(g_iteration_sleeptime_us);

	/* Rejoin worker threads */
	for (uint32_t i = 0; i < g_numthreads; i++) {
		ret = pthread_join(threads[i], NULL);
		if (ret) errc(EX_OSERR, ret, "pthread_join %d", i);

		join_churn_threads();

	compute_stats(worst_latencies_ns, g_iterations, &avg, &max, &min, &stddev);
	printf("Results (from a stop):\n");
	printf("Max:\t\t%.2f us\n", ((float)max) / 1000.0);
	printf("Min:\t\t%.2f us\n", ((float)min) / 1000.0);
	printf("Avg:\t\t%.2f us\n", avg / 1000.0);
	printf("Stddev:\t\t%.2f us\n", stddev / 1000.0);

	compute_stats(worst_latencies_from_first_ns, g_iterations, &avg, &max, &min, &stddev);
	printf("Results (relative to first thread):\n");
	printf("Max:\t\t%.2f us\n", ((float)max) / 1000.0);
	printf("Min:\t\t%.2f us\n", ((float)min) / 1000.0);
	printf("Avg:\t\t%.2f us\n", avg / 1000.0);
	printf("Stddev:\t\t%.2f us\n", stddev / 1000.0);

	for (uint32_t i = 0; i < g_iterations; i++) {
		printf("Iteration %d: %f us\n", i, worst_latencies_ns[i] / 1000.0);

	free(g_thread_endtimes_abs);
	free(worst_latencies_ns);
	free(worst_latencies_from_first_ns);
/*
 * WARNING: This is SPI specifically intended for use by launchd to start UI
 * apps. We use it here for a test tool only to opt into QoS using the same
 * policies. Do not use this outside xnu or libxpc/launchd.
 *
 * Re-exec ourselves via posix_spawn(POSIX_SPAWN_SETEXEC) with the
 * APP_DEFAULT process type, appending --switched_apptype so the re-execed
 * instance knows not to do it again.
 * NOTE(review): the return type and declarations of ret, i, and the
 * prog[] path buffer are missing from this extract.
 */
selfexec_with_apptype(int argc, char *argv[])
	posix_spawnattr_t attr;
	extern char **environ;
	/* room for original args + "--switched_apptype" + NULL terminator */
	char *new_argv[argc + 1 + 1 /* NULL */];

	uint32_t prog_size = PATH_MAX;

	ret = _NSGetExecutablePath(prog, &prog_size);
	if (ret) err(EX_OSERR, "_NSGetExecutablePath");

	for (i=0; i < argc; i++) {
		new_argv[i] = argv[i];

	new_argv[i] = "--switched_apptype";
	new_argv[i+1] = NULL;

	ret = posix_spawnattr_init(&attr);
	if (ret) errc(EX_OSERR, ret, "posix_spawnattr_init");

	/* SETEXEC: replace this process image rather than forking a child */
	ret = posix_spawnattr_setflags(&attr, POSIX_SPAWN_SETEXEC);
	if (ret) errc(EX_OSERR, ret, "posix_spawnattr_setflags");

	ret = posix_spawnattr_setprocesstype_np(&attr, POSIX_SPAWN_PROC_TYPE_APP_DEFAULT);
	if (ret) errc(EX_OSERR, ret, "posix_spawnattr_setprocesstype_np");

	ret = posix_spawn(NULL, prog, NULL, &attr, new_argv, environ);
	if (ret) errc(EX_OSERR, ret, "posix_spawn");
/*
 * Print usage and exit with EX_USAGE.  (Admittedly not very attractive.)
 * NOTE(review): the function name line and the final errx argument (the
 * program name substituted for %s) are missing from this extract.
 */
static void __attribute__((noreturn))
	errx(EX_USAGE, "Usage: %s <threads> <chain | hop | broadcast-single-sem | broadcast-per-thread> "
	    "<realtime | timeshare | fixed> <iterations>\n\t\t"
	    "[--trace <traceworthy latency in ns>] "
	    "[--verbose] [--spin-one] [--spin-all] [--spin-time <nanos>] [--affinity]\n\t\t"
	    "[--no-sleep] [--drop-priority] [--churn-pri <pri>] [--churn-count <n>]",
/* getopt state shared with read_dec_arg for error reporting */
static struct option *g_longopts;
static int option_index;

/*
 * Parse the current long option's argument (optarg) as a decimal uint32_t,
 * exiting with EX_USAGE on malformed input, naming the offending option
 * via g_longopts[option_index].
 * NOTE(review): the function header, the cp declaration, and the return
 * statement are missing from this extract.
 */
	/* char* optarg is a magic global */

	uint32_t arg_val = (uint32_t)strtoull(optarg, &cp, 10);

	/* reject empty input and trailing non-digit characters */
	if (cp == optarg || *cp)
		errx(EX_USAGE, "arg --%s requires a decimal number, found \"%s\"",
		    g_longopts[option_index].name, optarg);
873 parse_args(int argc
, char *argv
[])
884 static struct option longopts
[] = {
885 { "spin-time", required_argument
, NULL
, OPT_SPIN_TIME
},
886 { "trace", required_argument
, NULL
, OPT_TRACE
},
887 { "priority", required_argument
, NULL
, OPT_PRIORITY
},
888 { "churn-pri", required_argument
, NULL
, OPT_CHURN_PRI
},
889 { "churn-count", required_argument
, NULL
, OPT_CHURN_COUNT
},
890 { "switched_apptype", no_argument
, (int*)&g_seen_apptype
, TRUE
},
891 { "spin-one", no_argument
, (int*)&g_do_one_long_spin
, TRUE
},
892 { "spin-all", no_argument
, (int*)&g_do_all_spin
, TRUE
},
893 { "affinity", no_argument
, (int*)&g_do_affinity
, TRUE
},
894 { "no-sleep", no_argument
, (int*)&g_do_sleep
, FALSE
},
895 { "drop-priority", no_argument
, (int*)&g_drop_priority
, TRUE
},
896 { "verbose", no_argument
, (int*)&g_verbose
, TRUE
},
897 { "help", no_argument
, NULL
, 'h' },
901 g_longopts
= longopts
;
904 while ((ch
= getopt_long(argc
, argv
, "h", longopts
, &option_index
)) != -1) {
907 /* getopt_long set a variable */
910 g_do_each_spin
= TRUE
;
911 g_each_spin_duration_ns
= read_dec_arg();
914 g_traceworthy_latency_ns
= read_dec_arg();
917 g_priority
= read_dec_arg();
920 g_churn_pri
= read_dec_arg();
922 case OPT_CHURN_COUNT
:
923 g_churn_count
= read_dec_arg();
934 * getopt_long reorders all the options to the beginning of the argv array.
935 * Jump past them to the non-option arguments.
942 warnx("Too many non-option arguments passed");
947 warnx("Missing required <threads> <waketype> <policy> <iterations> arguments");
953 /* How many threads? */
954 g_numthreads
= (uint32_t)strtoull(argv
[0], &cp
, 10);
956 if (cp
== argv
[0] || *cp
)
957 errx(EX_USAGE
, "numthreads requires a decimal number, found \"%s\"", argv
[0]);
959 if (g_numthreads
< 1)
960 errx(EX_USAGE
, "Must use at least one thread");
962 /* What wakeup pattern? */
963 g_waketype
= parse_wakeup_pattern(argv
[1]);
966 g_policy
= parse_thread_policy(argv
[2]);
969 g_iterations
= (uint32_t)strtoull(argv
[3], &cp
, 10);
971 if (cp
== argv
[3] || *cp
)
972 errx(EX_USAGE
, "numthreads requires a decimal number, found \"%s\"", argv
[3]);
974 if (g_iterations
< 1)
975 errx(EX_USAGE
, "Must have at least one iteration");
977 if (g_numthreads
== 1 && g_waketype
== WAKE_CHAIN
)
978 errx(EX_USAGE
, "chain mode requires more than one thread");
980 if (g_numthreads
== 1 && g_waketype
== WAKE_HOP
)
981 errx(EX_USAGE
, "hop mode requires more than one thread");