/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <unistd.h>
#include <stdio.h>
#include <math.h>
#include <sys/kdebug.h>
#include <stdlib.h>
#include <pthread.h>
#include <errno.h>
#include <err.h>
#include <string.h>
#include <assert.h>
#include <sysexits.h>
#include <sys/sysctl.h>
#include <getopt.h>
#include <libproc.h>

#include <spawn.h>
#include <spawn_private.h>
#include <sys/spawn_internal.h>
#include <mach-o/dyld.h>

#include <mach/mach_time.h>
#include <mach/mach.h>
#include <mach/task.h>
#include <mach/semaphore.h>

#include <pthread/qos_private.h>

#include <sys/resource.h>

#include <stdatomic.h>

#include <os/tsd.h>
#include <os/lock.h>
#include <TargetConditionals.h>

typedef enum wake_type { WAKE_BROADCAST_ONESEM, WAKE_BROADCAST_PERTHREAD, WAKE_CHAIN, WAKE_HOP } wake_type_t;
typedef enum my_policy_type { MY_POLICY_REALTIME, MY_POLICY_TIMESHARE, MY_POLICY_TIMESHARE_NO_SMT, MY_POLICY_FIXEDPRI } my_policy_type_t;

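/*
 * Wakeup patterns exercised by worker_thread() below:
 *   WAKE_BROADCAST_ONESEM    - the leader wakes all waiters at once with
 *                              semaphore_signal_all() on one shared semaphore
 *   WAKE_BROADCAST_PERTHREAD - the leader signals each waiter's private semaphore
 *   WAKE_CHAIN               - each thread wakes the next thread in line
 *   WAKE_HOP                 - like chain, but each hop also hands off through g_donesem
 */
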
#define mach_assert_zero(error)        do { if ((error) != 0) { fprintf(stderr, "[FAIL] error %d (%s) ", (error), mach_error_string(error)); assert(error == 0); } } while (0)
#define mach_assert_zero_t(tid, error) do { if ((error) != 0) { fprintf(stderr, "[FAIL] Thread %d error %d (%s) ", (tid), (error), mach_error_string(error)); assert(error == 0); } } while (0)
#define assert_zero_t(tid, error)      do { if ((error) != 0) { fprintf(stderr, "[FAIL] Thread %d error %d ", (tid), (error)); assert(error == 0); } } while (0)

#define CONSTRAINT_NANOS        (20000000ll)    /* 20 ms */
#define COMPUTATION_NANOS       (10000000ll)    /* 10 ms */
#define LL_CONSTRAINT_NANOS     ( 2000000ll)    /*  2 ms */
#define LL_COMPUTATION_NANOS    ( 1000000ll)    /*  1 ms */
#define RT_CHURN_COMP_NANOS     ( 1000000ll)    /*  1 ms */
#define TRACEWORTHY_NANOS       (10000000ll)    /* 10 ms */
#define TRACEWORTHY_NANOS_TEST  ( 2000000ll)    /*  2 ms */

#if DEBUG
#define debug_log(args ...) printf(args)
#else
#define debug_log(args ...) do { } while(0)
#endif

/* Declarations */
static void*            worker_thread(void *arg);
static void             usage();
static int              thread_setup(uint32_t my_id);
static my_policy_type_t parse_thread_policy(const char *str);
static void             selfexec_with_apptype(int argc, char *argv[]);
static void             parse_args(int argc, char *argv[]);

static __attribute__((aligned(128))) _Atomic uint32_t  g_done_threads;
static __attribute__((aligned(128))) _Atomic boolean_t g_churn_stop = FALSE;
static __attribute__((aligned(128))) _Atomic uint64_t  g_churn_stopped_at = 0;

/* Global variables (general) */
static uint32_t    g_numcpus;
static uint32_t    g_nphysicalcpu;
static uint32_t    g_nlogicalcpu;
static uint32_t    g_numthreads;
static wake_type_t g_waketype;
static policy_t    g_policy;
static uint32_t    g_iterations;
static struct mach_timebase_info g_mti;
static semaphore_t g_main_sem;
static uint64_t   *g_thread_endtimes_abs;
static boolean_t   g_verbose = FALSE;
static boolean_t   g_do_affinity = FALSE;
static uint64_t    g_starttime_abs;
static uint32_t    g_iteration_sleeptime_us = 0;
static uint32_t    g_priority = 0;
static uint32_t    g_churn_pri = 0;
static uint32_t    g_churn_count = 0;
static uint32_t    g_rt_churn_count = 0;

static pthread_t  *g_churn_threads = NULL;
static pthread_t  *g_rt_churn_threads = NULL;

/* Should we skip the test when not running on Intel? */
static boolean_t   g_run_on_intel_only = FALSE;

/* Threshold for dropping a 'bad run' tracepoint */
static uint64_t    g_traceworthy_latency_ns = TRACEWORTHY_NANOS;

/* Have we re-execed to set apptype? */
static boolean_t   g_seen_apptype = FALSE;

/* usleep in between iterations */
static boolean_t   g_do_sleep = TRUE;

/* Every thread spins until all threads have checked in */
static boolean_t   g_do_all_spin = FALSE;

/* Every thread backgrounds temporarily before parking */
static boolean_t   g_drop_priority = FALSE;

/* Use low-latency (sub 4ms deadline) realtime threads */
static boolean_t   g_rt_ll = FALSE;

/* Test whether realtime threads are scheduled on separate CPUs */
static boolean_t   g_test_rt = FALSE;

static boolean_t   g_rt_churn = FALSE;

/* On SMT machines, test whether realtime threads are scheduled on the correct CPUs */
static boolean_t   g_test_rt_smt = FALSE;

/* Test whether realtime threads are successfully avoiding CPU 0 on Intel */
static boolean_t   g_test_rt_avoid0 = FALSE;

/* Print a histogram showing how many threads ran on each CPU */
static boolean_t   g_histogram = FALSE;

/* One randomly chosen thread holds up the train for a certain duration. */
static boolean_t   g_do_one_long_spin = FALSE;
static uint32_t    g_one_long_spin_id = 0;
static uint64_t    g_one_long_spin_length_abs = 0;
static uint64_t    g_one_long_spin_length_ns = 0;

/* Each thread spins for a certain duration after waking up before blocking again. */
static boolean_t   g_do_each_spin = FALSE;
static uint64_t    g_each_spin_duration_abs = 0;
static uint64_t    g_each_spin_duration_ns = 0;

/* Global variables (broadcast) */
static semaphore_t g_broadcastsem;
static semaphore_t g_leadersem;
static semaphore_t g_readysem;
static semaphore_t g_donesem;
static semaphore_t g_rt_churn_sem;
static semaphore_t g_rt_churn_start_sem;

/* Global variables (chain) */
static semaphore_t *g_semarr;

typedef struct {
    __attribute__((aligned(128))) uint32_t current;
    uint32_t accum;
} histogram_t;

static histogram_t      *g_cpu_histogram;
static _Atomic uint64_t *g_cpu_map;

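/*
 * g_cpu_histogram[cpu].accum counts how many wakeups landed on each CPU over the
 * whole run, while .current marks the CPUs used in the current iteration.
 * g_cpu_map holds one bitmap per iteration of which CPUs the threads woke up on,
 * which is why main() asserts g_numcpus <= 64.
 */
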
static uint64_t
abs_to_nanos(uint64_t abstime)
{
    return (uint64_t)(abstime * (((double)g_mti.numer) / ((double)g_mti.denom)));
}

static uint64_t
nanos_to_abs(uint64_t ns)
{
    return (uint64_t)(ns * (((double)g_mti.denom) / ((double)g_mti.numer)));
}

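/*
 * Conversions use the mach timebase (numer/denom): nanos = abs * numer / denom.
 * As an illustrative example, with a hypothetical timebase of numer = 125 and
 * denom = 3, an abstime delta of 24 ticks would convert to 24 * 125 / 3 = 1000 ns,
 * while a 1/1 timebase makes the two units identical.  The math is done in double
 * so large values don't overflow a 64-bit intermediate product.
 */
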
inline static void
yield(void)
{
#if defined(__arm__) || defined(__arm64__)
    asm volatile ("yield");
#elif defined(__x86_64__) || defined(__i386__)
    asm volatile ("pause");
#else
#error Unrecognized architecture
#endif
}

static void *
churn_thread(__unused void *arg)
{
    uint64_t spin_count = 0;

    /*
     * As a safety measure to avoid wedging, we will bail on the spin if
     * it's been more than 1s after the most recent run start
     */

    while (g_churn_stop == FALSE &&
        mach_absolute_time() < (g_starttime_abs + NSEC_PER_SEC)) {
        spin_count++;
        yield();
    }

    /* This is totally racy, but only here to detect if anyone stops early */
    atomic_fetch_add_explicit(&g_churn_stopped_at, spin_count, memory_order_relaxed);

    return NULL;
}

static void
create_churn_threads()
{
    if (g_churn_count == 0) {
        g_churn_count = g_test_rt_smt ? g_numcpus : g_numcpus - 1;
    }

    errno_t err;

    struct sched_param param = { .sched_priority = (int)g_churn_pri };
    pthread_attr_t attr;

    /* Array for churn threads */
    g_churn_threads = (pthread_t*) valloc(sizeof(pthread_t) * g_churn_count);
    assert(g_churn_threads);

    if ((err = pthread_attr_init(&attr))) {
        errc(EX_OSERR, err, "pthread_attr_init");
    }

    if ((err = pthread_attr_setschedparam(&attr, &param))) {
        errc(EX_OSERR, err, "pthread_attr_setschedparam");
    }

    if ((err = pthread_attr_setschedpolicy(&attr, SCHED_RR))) {
        errc(EX_OSERR, err, "pthread_attr_setschedpolicy");
    }

    for (uint32_t i = 0; i < g_churn_count; i++) {
        pthread_t new_thread;

        if ((err = pthread_create(&new_thread, &attr, churn_thread, NULL))) {
            errc(EX_OSERR, err, "pthread_create");
        }
        g_churn_threads[i] = new_thread;
    }

    if ((err = pthread_attr_destroy(&attr))) {
        errc(EX_OSERR, err, "pthread_attr_destroy");
    }
}

static void
join_churn_threads(void)
{
    if (atomic_load_explicit(&g_churn_stopped_at, memory_order_seq_cst) != 0) {
        printf("Warning: Some of the churn threads may have stopped early: %lld\n",
            g_churn_stopped_at);
    }

    atomic_store_explicit(&g_churn_stop, TRUE, memory_order_seq_cst);

    /* Rejoin churn threads */
    for (uint32_t i = 0; i < g_churn_count; i++) {
        errno_t err = pthread_join(g_churn_threads[i], NULL);
        if (err) {
            errc(EX_OSERR, err, "pthread_join %d", i);
        }
    }
}

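/*
 * The churn threads above exist only to generate competing SCHED_RR load at the
 * priority requested with --churn-pri while the measured threads run; for
 * example (illustrative values, not a required configuration) a run might add
 * "--churn-pri 4 --churn-count 8".  They are torn down in join_churn_threads()
 * after the measured iterations complete.
 */
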
/*
 * Set policy
 */
static int
rt_churn_thread_setup(void)
{
    kern_return_t kr;
    thread_time_constraint_policy_data_t pol;

    /* Hard-coded realtime parameters (similar to what Digi uses) */
    pol.period = 100000;
    pol.constraint = (uint32_t) nanos_to_abs(CONSTRAINT_NANOS * 2);
    pol.computation = (uint32_t) nanos_to_abs(RT_CHURN_COMP_NANOS * 2);
    pol.preemptible = 0; /* Ignored by OS */

    kr = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
        (thread_policy_t) &pol, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
    mach_assert_zero_t(0, kr);

    return 0;
}

static void *
rt_churn_thread(__unused void *arg)
{
    rt_churn_thread_setup();

    for (uint32_t i = 0; i < g_iterations; i++) {
        kern_return_t kr = semaphore_wait_signal(g_rt_churn_start_sem, g_rt_churn_sem);
        mach_assert_zero_t(0, kr);

        volatile double x = 0.0;
        volatile double y = 0.0;

        uint64_t endspin = mach_absolute_time() + nanos_to_abs(RT_CHURN_COMP_NANOS);
        while (mach_absolute_time() < endspin) {
            y = y + 1.5 + x;
            x = sqrt(y);
        }
    }

    kern_return_t kr = semaphore_signal(g_rt_churn_sem);
    mach_assert_zero_t(0, kr);

    return NULL;
}

static void
wait_for_rt_churn_threads(void)
{
    for (uint32_t i = 0; i < g_rt_churn_count; i++) {
        kern_return_t kr = semaphore_wait(g_rt_churn_sem);
        mach_assert_zero_t(0, kr);
    }
}

static void
start_rt_churn_threads(void)
{
    for (uint32_t i = 0; i < g_rt_churn_count; i++) {
        kern_return_t kr = semaphore_signal(g_rt_churn_start_sem);
        mach_assert_zero_t(0, kr);
    }
}

static void
create_rt_churn_threads(void)
{
    if (g_rt_churn_count == 0) {
        /* Leave 1 CPU to ensure that the main thread can make progress */
        g_rt_churn_count = g_numcpus - 1;
    }

    errno_t err;

    struct sched_param param = { .sched_priority = (int)g_churn_pri };
    pthread_attr_t attr;

    /* Array for churn threads */
    g_rt_churn_threads = (pthread_t*) valloc(sizeof(pthread_t) * g_rt_churn_count);
    assert(g_rt_churn_threads);

    if ((err = pthread_attr_init(&attr))) {
        errc(EX_OSERR, err, "pthread_attr_init");
    }

    if ((err = pthread_attr_setschedparam(&attr, &param))) {
        errc(EX_OSERR, err, "pthread_attr_setschedparam");
    }

    if ((err = pthread_attr_setschedpolicy(&attr, SCHED_RR))) {
        errc(EX_OSERR, err, "pthread_attr_setschedpolicy");
    }

    for (uint32_t i = 0; i < g_rt_churn_count; i++) {
        pthread_t new_thread;

        if ((err = pthread_create(&new_thread, &attr, rt_churn_thread, NULL))) {
            errc(EX_OSERR, err, "pthread_create");
        }
        g_rt_churn_threads[i] = new_thread;
    }

    if ((err = pthread_attr_destroy(&attr))) {
        errc(EX_OSERR, err, "pthread_attr_destroy");
    }

    /* Wait until all threads have checked in */
    wait_for_rt_churn_threads();
}

static void
join_rt_churn_threads(void)
{
    /* Rejoin rt churn threads */
    for (uint32_t i = 0; i < g_rt_churn_count; i++) {
        errno_t err = pthread_join(g_rt_churn_threads[i], NULL);
        if (err) {
            errc(EX_OSERR, err, "pthread_join %d", i);
        }
    }
}

/*
 * Figure out what thread policy to use
 */
static my_policy_type_t
parse_thread_policy(const char *str)
{
    if (strcmp(str, "timeshare") == 0) {
        return MY_POLICY_TIMESHARE;
    } else if (strcmp(str, "timeshare_no_smt") == 0) {
        return MY_POLICY_TIMESHARE_NO_SMT;
    } else if (strcmp(str, "realtime") == 0) {
        return MY_POLICY_REALTIME;
    } else if (strcmp(str, "fixed") == 0) {
        return MY_POLICY_FIXEDPRI;
    } else {
        errx(EX_USAGE, "Invalid thread policy \"%s\"", str);
    }
}

/*
 * Figure out what wakeup pattern to use
 */
static wake_type_t
parse_wakeup_pattern(const char *str)
{
    if (strcmp(str, "chain") == 0) {
        return WAKE_CHAIN;
    } else if (strcmp(str, "hop") == 0) {
        return WAKE_HOP;
    } else if (strcmp(str, "broadcast-single-sem") == 0) {
        return WAKE_BROADCAST_ONESEM;
    } else if (strcmp(str, "broadcast-per-thread") == 0) {
        return WAKE_BROADCAST_PERTHREAD;
    } else {
        errx(EX_USAGE, "Invalid wakeup pattern \"%s\"", str);
    }
}

/*
 * Set policy
 */
static int
thread_setup(uint32_t my_id)
{
    kern_return_t kr;
    errno_t ret;
    thread_time_constraint_policy_data_t pol;

    if (g_priority) {
        int policy = SCHED_OTHER;
        if (g_policy == MY_POLICY_FIXEDPRI) {
            policy = SCHED_RR;
        }

        struct sched_param param = {.sched_priority = (int)g_priority};
        if ((ret = pthread_setschedparam(pthread_self(), policy, &param))) {
            errc(EX_OSERR, ret, "pthread_setschedparam: %d", my_id);
        }
    }

    switch (g_policy) {
    case MY_POLICY_TIMESHARE:
        break;
    case MY_POLICY_TIMESHARE_NO_SMT:
        proc_setthread_no_smt();
        break;
    case MY_POLICY_REALTIME:
        /* Hard-coded realtime parameters (similar to what Digi uses) */
        pol.period = 100000;
        if (g_rt_ll) {
            pol.constraint = (uint32_t) nanos_to_abs(LL_CONSTRAINT_NANOS);
            pol.computation = (uint32_t) nanos_to_abs(LL_COMPUTATION_NANOS);
        } else {
            pol.constraint = (uint32_t) nanos_to_abs(CONSTRAINT_NANOS);
            pol.computation = (uint32_t) nanos_to_abs(COMPUTATION_NANOS);
        }
        pol.preemptible = 0; /* Ignored by OS */

        kr = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
            (thread_policy_t) &pol, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
        mach_assert_zero_t(my_id, kr);
        break;
    case MY_POLICY_FIXEDPRI:
        ret = pthread_set_fixedpriority_self();
        if (ret) {
            errc(EX_OSERR, ret, "pthread_set_fixedpriority_self");
        }
        break;
    default:
        errx(EX_USAGE, "invalid policy type %d", g_policy);
    }

    if (g_do_affinity) {
        thread_affinity_policy_data_t affinity;

        affinity.affinity_tag = my_id % 2;

        kr = thread_policy_set(mach_thread_self(), THREAD_AFFINITY_POLICY,
            (thread_policy_t)&affinity, THREAD_AFFINITY_POLICY_COUNT);
        mach_assert_zero_t(my_id, kr);
    }

    return 0;
}

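/*
 * For MY_POLICY_REALTIME, the THREAD_TIME_CONSTRAINT_POLICY parameters set above
 * roughly mean: "computation" is the CPU time the thread needs per period and
 * "constraint" is the window in which that computation must fit, both passed in
 * mach absolute time units (hence the nanos_to_abs() conversions); --rt-ll
 * substitutes the tighter LL_* values.
 */
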
time_value_t
get_thread_runtime(void)
{
    thread_basic_info_data_t info;
    mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;
    thread_info(pthread_mach_thread_np(pthread_self()), THREAD_BASIC_INFO, (thread_info_t)&info, &info_count);

    time_value_add(&info.user_time, &info.system_time);

    return info.user_time;
}

time_value_t worker_threads_total_runtime = {};

/*
 * Wait for a wakeup, potentially wake up another of the "0-N" threads,
 * and notify the main thread when done.
 */
static void*
worker_thread(void *arg)
{
    static os_unfair_lock runtime_lock = OS_UNFAIR_LOCK_INIT;

    uint32_t my_id = (uint32_t)(uintptr_t)arg;
    kern_return_t kr;

    volatile double x = 0.0;
    volatile double y = 0.0;

    /* Set policy and so forth */
    thread_setup(my_id);

    for (uint32_t i = 0; i < g_iterations; i++) {
        if (my_id == 0) {
            /*
             * Leader thread either wakes everyone up or starts the chain going.
             */

            /* Give the worker threads undisturbed time to finish before waiting on them */
            if (g_do_sleep) {
                usleep(g_iteration_sleeptime_us);
            }

            debug_log("%d Leader thread wait for ready\n", i);

            /*
             * Wait for everyone else to declare ready
             * Is there a better way to do this that won't interfere with the rest of the chain?
             * TODO: Invent 'semaphore wait for N signals'
             */

            for (uint32_t j = 0; j < g_numthreads - 1; j++) {
                kr = semaphore_wait(g_readysem);
                mach_assert_zero_t(my_id, kr);
            }

            debug_log("%d Leader thread wait\n", i);

            if (i > 0) {
                for (int cpuid = 0; cpuid < g_numcpus; cpuid++) {
                    if (g_cpu_histogram[cpuid].current == 1) {
                        atomic_fetch_or_explicit(&g_cpu_map[i - 1], (1UL << cpuid), memory_order_relaxed);
                        g_cpu_histogram[cpuid].current = 0;
                    }
                }
            }

            /* Signal main thread and wait for start of iteration */

            kr = semaphore_wait_signal(g_leadersem, g_main_sem);
            mach_assert_zero_t(my_id, kr);

            g_thread_endtimes_abs[my_id] = mach_absolute_time();

            debug_log("%d Leader thread go\n", i);

            assert_zero_t(my_id, atomic_load_explicit(&g_done_threads, memory_order_relaxed));

            switch (g_waketype) {
            case WAKE_BROADCAST_ONESEM:
                kr = semaphore_signal_all(g_broadcastsem);
                mach_assert_zero_t(my_id, kr);
                break;
            case WAKE_BROADCAST_PERTHREAD:
                for (uint32_t j = 1; j < g_numthreads; j++) {
                    kr = semaphore_signal(g_semarr[j]);
                    mach_assert_zero_t(my_id, kr);
                }
                break;
            case WAKE_CHAIN:
                kr = semaphore_signal(g_semarr[my_id + 1]);
                mach_assert_zero_t(my_id, kr);
                break;
            case WAKE_HOP:
                kr = semaphore_wait_signal(g_donesem, g_semarr[my_id + 1]);
                mach_assert_zero_t(my_id, kr);
                break;
            }
        } else {
            /*
             * Everyone else waits to be woken up,
             * records when she wakes up, and possibly
             * wakes up a friend.
             */
            switch (g_waketype) {
            case WAKE_BROADCAST_ONESEM:
                kr = semaphore_wait_signal(g_broadcastsem, g_readysem);
                mach_assert_zero_t(my_id, kr);

                g_thread_endtimes_abs[my_id] = mach_absolute_time();
                break;

            case WAKE_BROADCAST_PERTHREAD:
                kr = semaphore_wait_signal(g_semarr[my_id], g_readysem);
                mach_assert_zero_t(my_id, kr);

                g_thread_endtimes_abs[my_id] = mach_absolute_time();
                break;

            case WAKE_CHAIN:
                kr = semaphore_wait_signal(g_semarr[my_id], g_readysem);
                mach_assert_zero_t(my_id, kr);

                /* Signal the next thread *after* recording wake time */

                g_thread_endtimes_abs[my_id] = mach_absolute_time();

                if (my_id < (g_numthreads - 1)) {
                    kr = semaphore_signal(g_semarr[my_id + 1]);
                    mach_assert_zero_t(my_id, kr);
                }

                break;

            case WAKE_HOP:
                kr = semaphore_wait_signal(g_semarr[my_id], g_readysem);
                mach_assert_zero_t(my_id, kr);

                /* Signal the next thread *after* recording wake time */

                g_thread_endtimes_abs[my_id] = mach_absolute_time();

                if (my_id < (g_numthreads - 1)) {
                    kr = semaphore_wait_signal(g_donesem, g_semarr[my_id + 1]);
                    mach_assert_zero_t(my_id, kr);
                } else {
                    kr = semaphore_signal_all(g_donesem);
                    mach_assert_zero_t(my_id, kr);
                }

                break;
            }
        }

        unsigned int cpuid = _os_cpu_number();
        assert(cpuid < g_numcpus);
        debug_log("Thread %p woke up on CPU %d for iteration %d.\n", pthread_self(), cpuid, i);
        g_cpu_histogram[cpuid].current = 1;
        g_cpu_histogram[cpuid].accum++;

        if (g_do_one_long_spin && g_one_long_spin_id == my_id) {
            /* One randomly chosen thread holds up the train for a while. */

            uint64_t endspin = g_starttime_abs + g_one_long_spin_length_abs;
            while (mach_absolute_time() < endspin) {
                y = y + 1.5 + x;
                x = sqrt(y);
            }
        }

        if (g_do_each_spin) {
            /* Each thread spins for a certain duration after waking up before blocking again. */

            uint64_t endspin = mach_absolute_time() + g_each_spin_duration_abs;
            while (mach_absolute_time() < endspin) {
                y = y + 1.5 + x;
                x = sqrt(y);
            }
        }

        uint32_t done_threads;
        done_threads = atomic_fetch_add_explicit(&g_done_threads, 1, memory_order_relaxed) + 1;

        debug_log("Thread %p new value is %d, iteration %d\n", pthread_self(), done_threads, i);

        if (g_drop_priority) {
            /* Drop priority to BG momentarily */
            errno_t ret = setpriority(PRIO_DARWIN_THREAD, 0, PRIO_DARWIN_BG);
            if (ret) {
                errc(EX_OSERR, ret, "setpriority PRIO_DARWIN_BG");
            }
        }

        if (g_do_all_spin) {
            /* Everyone spins until the last thread checks in. */

            while (atomic_load_explicit(&g_done_threads, memory_order_relaxed) < g_numthreads) {
                y = y + 1.5 + x;
                x = sqrt(y);
            }
        }

        if (g_drop_priority) {
            /* Restore normal priority */
            errno_t ret = setpriority(PRIO_DARWIN_THREAD, 0, 0);
            if (ret) {
                errc(EX_OSERR, ret, "setpriority 0");
            }
        }

        debug_log("Thread %p done spinning, iteration %d\n", pthread_self(), i);
    }

    if (my_id == 0) {
        /* Give the worker threads undisturbed time to finish before waiting on them */
        if (g_do_sleep) {
            usleep(g_iteration_sleeptime_us);
        }

        /* Wait for the worker threads to finish */
        for (uint32_t i = 0; i < g_numthreads - 1; i++) {
            kr = semaphore_wait(g_readysem);
            mach_assert_zero_t(my_id, kr);
        }

        /* Tell everyone and the main thread that the last iteration is done */
        debug_log("%d Leader thread done\n", g_iterations - 1);

        for (int cpuid = 0; cpuid < g_numcpus; cpuid++) {
            if (g_cpu_histogram[cpuid].current == 1) {
                atomic_fetch_or_explicit(&g_cpu_map[g_iterations - 1], (1UL << cpuid), memory_order_relaxed);
                g_cpu_histogram[cpuid].current = 0;
            }
        }

        kr = semaphore_signal_all(g_main_sem);
        mach_assert_zero_t(my_id, kr);
    } else {
        /* Hold up thread teardown so it doesn't affect the last iteration */
        kr = semaphore_wait_signal(g_main_sem, g_readysem);
        mach_assert_zero_t(my_id, kr);
    }

    time_value_t runtime = get_thread_runtime();
    os_unfair_lock_lock(&runtime_lock);
    time_value_add(&worker_threads_total_runtime, &runtime);
    os_unfair_lock_unlock(&runtime_lock);

    return 0;
}

/*
 * Given an array of uint64_t values, compute average, max, min, and standard deviation
 */
static void
compute_stats(uint64_t *values, uint64_t count, float *averagep, uint64_t *maxp, uint64_t *minp, float *stddevp)
{
    uint32_t i;
    uint64_t _sum = 0;
    uint64_t _max = 0;
    uint64_t _min = UINT64_MAX;
    float _avg = 0;
    float _dev = 0;

    for (i = 0; i < count; i++) {
        _sum += values[i];
        _max = values[i] > _max ? values[i] : _max;
        _min = values[i] < _min ? values[i] : _min;
    }

    _avg = ((float)_sum) / ((float)count);

    _dev = 0;
    for (i = 0; i < count; i++) {
        _dev += powf((((float)values[i]) - _avg), 2);
    }

    _dev /= count;
    _dev = sqrtf(_dev);

    *averagep = _avg;
    *maxp = _max;
    *minp = _min;
    *stddevp = _dev;
}

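/*
 * Note that compute_stats() returns the population standard deviation,
 * stddev = sqrtf(sum((x[i] - avg)^2) / count), i.e. it divides by count rather
 * than (count - 1), and it accumulates in single-precision float.
 */
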
typedef struct {
    natural_t sys;
    natural_t user;
    natural_t idle;
} cpu_time_t;

void
record_cpu_time(cpu_time_t *cpu_time)
{
    host_cpu_load_info_data_t load;
    mach_msg_type_number_t count = HOST_CPU_LOAD_INFO_COUNT;
    kern_return_t kr = host_statistics(mach_host_self(), HOST_CPU_LOAD_INFO, (int *)&load, &count);
    mach_assert_zero_t(0, kr);

    natural_t total_system_time = load.cpu_ticks[CPU_STATE_SYSTEM];
    natural_t total_user_time = load.cpu_ticks[CPU_STATE_USER] + load.cpu_ticks[CPU_STATE_NICE];
    natural_t total_idle_time = load.cpu_ticks[CPU_STATE_IDLE];

    cpu_time->sys = total_system_time;
    cpu_time->user = total_user_time;
    cpu_time->idle = total_idle_time;
}

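/*
 * HOST_CPU_LOAD_INFO reports cumulative scheduler ticks per CPU state.  The
 * delta taken in main() is multiplied by 10 to convert to milliseconds, which
 * assumes the usual 100 Hz (10 ms) host tick; if the tick rate differed, that
 * scaling factor would need to change.
 */
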
int
main(int argc, char **argv)
{
    errno_t ret;
    kern_return_t kr;

    pthread_t *threads;
    uint64_t *worst_latencies_ns;
    uint64_t *worst_latencies_from_first_ns;
    uint64_t max, min;
    float avg, stddev;

    bool test_fail = false;
    bool test_warn = false;

    for (int i = 0; i < argc; i++) {
        if (strcmp(argv[i], "--switched_apptype") == 0) {
            g_seen_apptype = TRUE;
        }
    }

    if (!g_seen_apptype) {
        selfexec_with_apptype(argc, argv);
    }

    parse_args(argc, argv);

    srand((unsigned int)time(NULL));

    mach_timebase_info(&g_mti);

#if TARGET_OS_OSX
    /* Skip the test if running on an ARM platform */
    if (g_run_on_intel_only) {
        int is_arm = 0;
        size_t is_arm_size = sizeof(is_arm);
        ret = sysctlbyname("hw.optional.arm64", &is_arm, &is_arm_size, NULL, 0);
        if (ret == 0 && is_arm) {
            printf("Unsupported platform. Skipping test.\n");
            exit(0);
        }
    }
#endif /* TARGET_OS_OSX */

    size_t ncpu_size = sizeof(g_numcpus);
    ret = sysctlbyname("hw.ncpu", &g_numcpus, &ncpu_size, NULL, 0);
    if (ret) {
        err(EX_OSERR, "Failed sysctlbyname(hw.ncpu)");
    }
    assert(g_numcpus <= 64); /* g_cpu_map needs to be extended for > 64 cpus */

    size_t physicalcpu_size = sizeof(g_nphysicalcpu);
    ret = sysctlbyname("hw.physicalcpu", &g_nphysicalcpu, &physicalcpu_size, NULL, 0);
    if (ret) {
        err(EX_OSERR, "Failed sysctlbyname(hw.physicalcpu)");
    }

    size_t logicalcpu_size = sizeof(g_nlogicalcpu);
    ret = sysctlbyname("hw.logicalcpu", &g_nlogicalcpu, &logicalcpu_size, NULL, 0);
    if (ret) {
        err(EX_OSERR, "Failed sysctlbyname(hw.logicalcpu)");
    }

    if (g_test_rt) {
        if (g_numthreads == 0) {
            g_numthreads = g_numcpus;
        }
        g_policy = MY_POLICY_REALTIME;
        g_do_all_spin = TRUE;
        g_histogram = true;
        /* Don't change g_traceworthy_latency_ns if it's explicitly been set to something other than the default */
        if (g_traceworthy_latency_ns == TRACEWORTHY_NANOS) {
            g_traceworthy_latency_ns = TRACEWORTHY_NANOS_TEST;
        }
    } else if (g_test_rt_smt) {
        if (g_nlogicalcpu != 2 * g_nphysicalcpu) {
            /* Not SMT */
            printf("Attempt to run --test-rt-smt on a non-SMT device\n");
            exit(0);
        }

        if (g_numthreads == 0) {
            g_numthreads = g_nphysicalcpu;
        }
        g_policy = MY_POLICY_REALTIME;
        g_do_all_spin = TRUE;
        g_histogram = true;
    } else if (g_test_rt_avoid0) {
#if defined(__x86_64__) || defined(__i386__)
        if (g_numthreads == 0) {
            g_numthreads = g_nphysicalcpu - 1;
        }
        if (g_numthreads == 0) {
            printf("Attempt to run --test-rt-avoid0 on a uniprocessor\n");
            exit(0);
        }
        g_policy = MY_POLICY_REALTIME;
        g_do_all_spin = TRUE;
        g_histogram = true;
#else
        printf("Attempt to run --test-rt-avoid0 on a non-Intel device\n");
        exit(0);
#endif
    } else if (g_numthreads == 0) {
        g_numthreads = g_numcpus;
    }

    if (g_do_each_spin) {
        g_each_spin_duration_abs = nanos_to_abs(g_each_spin_duration_ns);
    }

    /* Configure the long-spin thread to take up half of its computation */
    if (g_do_one_long_spin) {
        g_one_long_spin_length_ns = COMPUTATION_NANOS / 2;
        g_one_long_spin_length_abs = nanos_to_abs(g_one_long_spin_length_ns);
    }

    /* Estimate the amount of time the cleanup phase needs to back off */
    g_iteration_sleeptime_us = g_numthreads * 20;

    uint32_t threads_per_core = (g_numthreads / g_numcpus) + 1;
    if (g_do_each_spin) {
        g_iteration_sleeptime_us += threads_per_core * (g_each_spin_duration_ns / NSEC_PER_USEC);
    }
    if (g_do_one_long_spin) {
        g_iteration_sleeptime_us += g_one_long_spin_length_ns / NSEC_PER_USEC;
    }

    /* Arrays for threads and their wakeup times */
    threads = (pthread_t*) valloc(sizeof(pthread_t) * g_numthreads);
    assert(threads);

    size_t endtimes_size = sizeof(uint64_t) * g_numthreads;

    g_thread_endtimes_abs = (uint64_t*) valloc(endtimes_size);
    assert(g_thread_endtimes_abs);

    /* Ensure the allocation is pre-faulted */
    ret = memset_s(g_thread_endtimes_abs, endtimes_size, 0, endtimes_size);
    if (ret) {
        errc(EX_OSERR, ret, "memset_s endtimes");
    }

    size_t latencies_size = sizeof(uint64_t) * g_iterations;

    worst_latencies_ns = (uint64_t*) valloc(latencies_size);
    assert(worst_latencies_ns);

    /* Ensure the allocation is pre-faulted */
    ret = memset_s(worst_latencies_ns, latencies_size, 0, latencies_size);
    if (ret) {
        errc(EX_OSERR, ret, "memset_s latencies");
    }

    worst_latencies_from_first_ns = (uint64_t*) valloc(latencies_size);
    assert(worst_latencies_from_first_ns);

    /* Ensure the allocation is pre-faulted */
    ret = memset_s(worst_latencies_from_first_ns, latencies_size, 0, latencies_size);
    if (ret) {
        errc(EX_OSERR, ret, "memset_s latencies_from_first");
    }

    size_t histogram_size = sizeof(histogram_t) * g_numcpus;
    g_cpu_histogram = (histogram_t *)valloc(histogram_size);
    assert(g_cpu_histogram);
    /* Ensure the allocation is pre-faulted */
    ret = memset_s(g_cpu_histogram, histogram_size, 0, histogram_size);
    if (ret) {
        errc(EX_OSERR, ret, "memset_s g_cpu_histogram");
    }

    size_t map_size = sizeof(uint64_t) * g_iterations;
    g_cpu_map = (_Atomic uint64_t *)valloc(map_size);
    assert(g_cpu_map);
    /* Ensure the allocation is pre-faulted */
    ret = memset_s(g_cpu_map, map_size, 0, map_size);
    if (ret) {
        errc(EX_OSERR, ret, "memset_s g_cpu_map");
    }

    kr = semaphore_create(mach_task_self(), &g_main_sem, SYNC_POLICY_FIFO, 0);
    mach_assert_zero(kr);

    /* Either one big semaphore or one per thread */
    if (g_waketype == WAKE_CHAIN ||
        g_waketype == WAKE_BROADCAST_PERTHREAD ||
        g_waketype == WAKE_HOP) {
        g_semarr = valloc(sizeof(semaphore_t) * g_numthreads);
        assert(g_semarr);

        for (uint32_t i = 0; i < g_numthreads; i++) {
            kr = semaphore_create(mach_task_self(), &g_semarr[i], SYNC_POLICY_FIFO, 0);
            mach_assert_zero(kr);
        }

        g_leadersem = g_semarr[0];
    } else {
        kr = semaphore_create(mach_task_self(), &g_broadcastsem, SYNC_POLICY_FIFO, 0);
        mach_assert_zero(kr);
        kr = semaphore_create(mach_task_self(), &g_leadersem, SYNC_POLICY_FIFO, 0);
        mach_assert_zero(kr);
    }

    if (g_waketype == WAKE_HOP) {
        kr = semaphore_create(mach_task_self(), &g_donesem, SYNC_POLICY_FIFO, 0);
        mach_assert_zero(kr);
    }

    kr = semaphore_create(mach_task_self(), &g_readysem, SYNC_POLICY_FIFO, 0);
    mach_assert_zero(kr);

    kr = semaphore_create(mach_task_self(), &g_rt_churn_sem, SYNC_POLICY_FIFO, 0);
    mach_assert_zero(kr);

    kr = semaphore_create(mach_task_self(), &g_rt_churn_start_sem, SYNC_POLICY_FIFO, 0);
    mach_assert_zero(kr);

    atomic_store_explicit(&g_done_threads, 0, memory_order_relaxed);

    /* Create the threads */
    for (uint32_t i = 0; i < g_numthreads; i++) {
        ret = pthread_create(&threads[i], NULL, worker_thread, (void*)(uintptr_t)i);
        if (ret) {
            errc(EX_OSERR, ret, "pthread_create %d", i);
        }
    }

    ret = setpriority(PRIO_DARWIN_ROLE, 0, PRIO_DARWIN_ROLE_UI_FOCAL);
    if (ret) {
        errc(EX_OSERR, ret, "setpriority");
    }

    thread_setup(0);

    g_starttime_abs = mach_absolute_time();

    if (g_churn_pri) {
        create_churn_threads();
    }
    if (g_rt_churn) {
        create_rt_churn_threads();
    }

    /* Let everyone get settled */
    kr = semaphore_wait(g_main_sem);
    mach_assert_zero(kr);

    /* Give the system a bit more time to settle */
    if (g_do_sleep) {
        usleep(g_iteration_sleeptime_us);
    }

    cpu_time_t start_time;
    cpu_time_t finish_time;

    record_cpu_time(&start_time);

    /* Go! */
    for (uint32_t i = 0; i < g_iterations; i++) {
        uint32_t j;
        uint64_t worst_abs = 0, best_abs = UINT64_MAX;

        if (g_do_one_long_spin) {
            g_one_long_spin_id = (uint32_t)rand() % g_numthreads;
        }

        if (g_rt_churn) {
            start_rt_churn_threads();
            usleep(100);
        }

        debug_log("%d Main thread reset\n", i);

        atomic_store_explicit(&g_done_threads, 0, memory_order_seq_cst);

        g_starttime_abs = mach_absolute_time();

        /* Fire them off and wait for worker threads to finish */
        kr = semaphore_wait_signal(g_main_sem, g_leadersem);
        mach_assert_zero(kr);

        debug_log("%d Main thread return\n", i);

        assert(atomic_load_explicit(&g_done_threads, memory_order_relaxed) == g_numthreads);

        if (g_rt_churn) {
            wait_for_rt_churn_threads();
        }

        /*
         * We report the worst latencies relative to start time
         * and relative to the lead worker thread.
         */
        for (j = 0; j < g_numthreads; j++) {
            uint64_t latency_abs;

            latency_abs = g_thread_endtimes_abs[j] - g_starttime_abs;
            worst_abs = worst_abs < latency_abs ? latency_abs : worst_abs;
        }

        worst_latencies_ns[i] = abs_to_nanos(worst_abs);

        worst_abs = 0;
        for (j = 1; j < g_numthreads; j++) {
            uint64_t latency_abs;

            latency_abs = g_thread_endtimes_abs[j] - g_thread_endtimes_abs[0];
            worst_abs = worst_abs < latency_abs ? latency_abs : worst_abs;
            best_abs = best_abs > latency_abs ? latency_abs : best_abs;
        }

        worst_latencies_from_first_ns[i] = abs_to_nanos(worst_abs);

        /*
         * In the event of a bad run, cut a trace point.
         */
        if (worst_latencies_from_first_ns[i] > g_traceworthy_latency_ns) {
            /* Ariadne's ad-hoc test signpost */
            kdebug_trace(ARIADNEDBG_CODE(0, 0), worst_latencies_from_first_ns[i], g_traceworthy_latency_ns, 0, 0);

            if (g_verbose) {
                printf("Worst on this round was %.2f us.\n", ((float)worst_latencies_from_first_ns[i]) / 1000.0);
            }
        }

        /* Give the system a bit more time to settle */
        if (g_do_sleep) {
            usleep(g_iteration_sleeptime_us);
        }
    }

    record_cpu_time(&finish_time);

    /* Rejoin threads */
    for (uint32_t i = 0; i < g_numthreads; i++) {
        ret = pthread_join(threads[i], NULL);
        if (ret) {
            errc(EX_OSERR, ret, "pthread_join %d", i);
        }
    }

    if (g_rt_churn) {
        join_rt_churn_threads();
    }

    if (g_churn_pri) {
        join_churn_threads();
    }

    uint32_t cpu_idle_time = (finish_time.idle - start_time.idle) * 10;
    uint32_t worker_threads_runtime = worker_threads_total_runtime.seconds * 1000 + worker_threads_total_runtime.microseconds / 1000;

    compute_stats(worst_latencies_ns, g_iterations, &avg, &max, &min, &stddev);
    printf("Results (from a stop):\n");
    printf("Max:\t\t%.2f us\n", ((float)max) / 1000.0);
    printf("Min:\t\t%.2f us\n", ((float)min) / 1000.0);
    printf("Avg:\t\t%.2f us\n", avg / 1000.0);
    printf("Stddev:\t\t%.2f us\n", stddev / 1000.0);

    putchar('\n');

    compute_stats(worst_latencies_from_first_ns, g_iterations, &avg, &max, &min, &stddev);
    printf("Results (relative to first thread):\n");
    printf("Max:\t\t%.2f us\n", ((float)max) / 1000.0);
    printf("Min:\t\t%.2f us\n", ((float)min) / 1000.0);
    printf("Avg:\t\t%.2f us\n", avg / 1000.0);
    printf("Stddev:\t\t%.2f us\n", stddev / 1000.0);

#if 0
    for (uint32_t i = 0; i < g_iterations; i++) {
        printf("Iteration %d: %f us\n", i, worst_latencies_ns[i] / 1000.0);
    }
#endif

    if (g_histogram) {
        putchar('\n');

        for (uint32_t i = 0; i < g_numcpus; i++) {
            printf("%d\t%d\n", i, g_cpu_histogram[i].accum);
        }
    }

    if (g_test_rt || g_test_rt_smt || g_test_rt_avoid0) {
#define PRIMARY   0x5555555555555555ULL
#define SECONDARY 0xaaaaaaaaaaaaaaaaULL

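        /*
         * The PRIMARY/SECONDARY masks assume logical CPUs are numbered so that
         * the primary of an SMT pair has an even cpuid and its secondary is the
         * next (odd) cpuid: even bits of the map are primaries, odd bits are
         * secondaries.
         */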
        int fail_count = 0;

        for (uint32_t i = 0; i < g_iterations; i++) {
            bool secondary = false;
            bool fail = false;
            uint64_t map = g_cpu_map[i];
            if (g_test_rt_smt) {
                /* Test for one or more threads running on secondary cores unexpectedly (WARNING) */
                secondary = (map & SECONDARY);
                /* Test for threads running on both primary and secondary cpus of the same core (FAIL) */
                fail = ((map & PRIMARY) & ((map & SECONDARY) >> 1));
            } else if (g_test_rt) {
                fail = (__builtin_popcountll(map) != g_numthreads) && (worst_latencies_ns[i] > g_traceworthy_latency_ns);
            } else if (g_test_rt_avoid0) {
                fail = ((map & 0x1) == 0x1);
            }
            if (secondary || fail) {
                printf("Iteration %d: 0x%llx%s%s\n", i, map,
                    secondary ? " SECONDARY" : "",
                    fail ? " FAIL" : "");
            }
            test_warn |= (secondary || fail);
            test_fail |= fail;
            fail_count += fail;
        }

        if (test_fail && (g_iterations >= 100) && (fail_count <= g_iterations / 100)) {
            printf("99%% or better success rate\n");
            test_fail = 0;
        }
    }

    if (g_test_rt_smt && (g_each_spin_duration_ns >= 200000) && !test_warn) {
        printf("cpu_idle_time=%dms worker_threads_runtime=%dms\n", cpu_idle_time, worker_threads_runtime);
        if (cpu_idle_time < worker_threads_runtime / 4) {
            printf("FAIL cpu_idle_time unexpectedly small\n");
            test_fail = 1;
        } else if (cpu_idle_time > worker_threads_runtime * 2) {
            printf("FAIL cpu_idle_time unexpectedly large\n");
            test_fail = 1;
        }
    }

    free(threads);
    free(g_thread_endtimes_abs);
    free(worst_latencies_ns);
    free(worst_latencies_from_first_ns);
    free(g_cpu_histogram);
    free(g_cpu_map);

    return test_fail;
}

/*
 * WARNING: This is SPI specifically intended for use by launchd to start UI
 * apps. We use it here for a test tool only to opt into QoS using the same
 * policies. Do not use this outside xnu or libxpc/launchd.
 */
static void
selfexec_with_apptype(int argc, char *argv[])
{
    int ret;
    posix_spawnattr_t attr;
    extern char **environ;
    char *new_argv[argc + 1 + 1 /* NULL */];
    int i;
    char prog[PATH_MAX];
    uint32_t prog_size = PATH_MAX;

    ret = _NSGetExecutablePath(prog, &prog_size);
    if (ret) {
        err(EX_OSERR, "_NSGetExecutablePath");
    }

    for (i = 0; i < argc; i++) {
        new_argv[i] = argv[i];
    }

    new_argv[i] = "--switched_apptype";
    new_argv[i + 1] = NULL;

    ret = posix_spawnattr_init(&attr);
    if (ret) {
        errc(EX_OSERR, ret, "posix_spawnattr_init");
    }

    ret = posix_spawnattr_setflags(&attr, POSIX_SPAWN_SETEXEC);
    if (ret) {
        errc(EX_OSERR, ret, "posix_spawnattr_setflags");
    }

    ret = posix_spawnattr_setprocesstype_np(&attr, POSIX_SPAWN_PROC_TYPE_APP_DEFAULT);
    if (ret) {
        errc(EX_OSERR, ret, "posix_spawnattr_setprocesstype_np");
    }

    ret = posix_spawn(NULL, prog, NULL, &attr, new_argv, environ);
    if (ret) {
        errc(EX_OSERR, ret, "posix_spawn");
    }
}

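/*
 * POSIX_SPAWN_SETEXEC makes posix_spawn() replace the current process image
 * (exec-style) instead of creating a child, so on success the call above does
 * not return; the re-exec'd copy sees --switched_apptype on its command line
 * and skips this path in main().
 */
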
/*
 * Admittedly not very attractive.
 */
static void __attribute__((noreturn))
usage()
{
    errx(EX_USAGE, "Usage: %s <threads> <chain | hop | broadcast-single-sem | broadcast-per-thread> "
        "<realtime | timeshare | timeshare_no_smt | fixed> <iterations>\n\t\t"
        "[--trace <traceworthy latency in ns>] "
        "[--verbose] [--spin-one] [--spin-all] [--spin-time <nanos>] [--affinity]\n\t\t"
        "[--no-sleep] [--drop-priority] [--churn-pri <pri>] [--churn-count <n>]\n\t\t"
        "[--rt-churn] [--rt-churn-count <n>] [--rt-ll] [--test-rt] [--test-rt-smt] [--test-rt-avoid0]",
        getprogname());
}

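/*
 * Example invocation (illustrative values only; "zn" is an assumed binary name,
 * substitute the actual build product):
 *
 *   ./zn 4 broadcast-single-sem realtime 1000 --spin-all --verbose
 *
 * i.e. 4 threads, one-semaphore broadcast wakeups, the realtime policy, and
 * 1000 iterations, with every thread spinning until all threads have checked in.
 */
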
static struct option* g_longopts;
static int option_index;

static uint32_t
read_dec_arg()
{
    char *cp;
    /* char* optarg is a magic global */

    uint32_t arg_val = (uint32_t)strtoull(optarg, &cp, 10);

    if (cp == optarg || *cp) {
        errx(EX_USAGE, "arg --%s requires a decimal number, found \"%s\"",
            g_longopts[option_index].name, optarg);
    }

    return arg_val;
}

static void
parse_args(int argc, char *argv[])
{
    enum {
        OPT_GETOPT = 0,
        OPT_SPIN_TIME,
        OPT_TRACE,
        OPT_PRIORITY,
        OPT_CHURN_PRI,
        OPT_CHURN_COUNT,
        OPT_RT_CHURN_COUNT,
    };

    static struct option longopts[] = {
        /* BEGIN IGNORE CODESTYLE */
        { "spin-time",         required_argument, NULL,                       OPT_SPIN_TIME },
        { "trace",             required_argument, NULL,                       OPT_TRACE },
        { "priority",          required_argument, NULL,                       OPT_PRIORITY },
        { "churn-pri",         required_argument, NULL,                       OPT_CHURN_PRI },
        { "churn-count",       required_argument, NULL,                       OPT_CHURN_COUNT },
        { "rt-churn-count",    required_argument, NULL,                       OPT_RT_CHURN_COUNT },
        { "switched_apptype",  no_argument,       (int*)&g_seen_apptype,      TRUE },
        { "spin-one",          no_argument,       (int*)&g_do_one_long_spin,  TRUE },
        { "intel-only",        no_argument,       (int*)&g_run_on_intel_only, TRUE },
        { "spin-all",          no_argument,       (int*)&g_do_all_spin,       TRUE },
        { "affinity",          no_argument,       (int*)&g_do_affinity,       TRUE },
        { "no-sleep",          no_argument,       (int*)&g_do_sleep,          FALSE },
        { "drop-priority",     no_argument,       (int*)&g_drop_priority,     TRUE },
        { "test-rt",           no_argument,       (int*)&g_test_rt,           TRUE },
        { "test-rt-smt",       no_argument,       (int*)&g_test_rt_smt,       TRUE },
        { "test-rt-avoid0",    no_argument,       (int*)&g_test_rt_avoid0,    TRUE },
        { "rt-churn",          no_argument,       (int*)&g_rt_churn,          TRUE },
        { "rt-ll",             no_argument,       (int*)&g_rt_ll,             TRUE },
        { "histogram",         no_argument,       (int*)&g_histogram,         TRUE },
        { "verbose",           no_argument,       (int*)&g_verbose,           TRUE },
        { "help",              no_argument,       NULL,                       'h' },
        { NULL,                0,                 NULL,                       0 }
        /* END IGNORE CODESTYLE */
    };

    g_longopts = longopts;
    int ch = 0;

    while ((ch = getopt_long(argc, argv, "h", longopts, &option_index)) != -1) {
        switch (ch) {
        case OPT_GETOPT:
            /* getopt_long set a variable */
            break;
        case OPT_SPIN_TIME:
            g_do_each_spin = TRUE;
            g_each_spin_duration_ns = read_dec_arg();
            break;
        case OPT_TRACE:
            g_traceworthy_latency_ns = read_dec_arg();
            break;
        case OPT_PRIORITY:
            g_priority = read_dec_arg();
            break;
        case OPT_CHURN_PRI:
            g_churn_pri = read_dec_arg();
            break;
        case OPT_CHURN_COUNT:
            g_churn_count = read_dec_arg();
            break;
        case OPT_RT_CHURN_COUNT:
            g_rt_churn_count = read_dec_arg();
            break;
        case '?':
        case 'h':
        default:
            usage();
            /* NORETURN */
        }
    }

    /*
     * getopt_long reorders all the options to the beginning of the argv array.
     * Jump past them to the non-option arguments.
     */

    argc -= optind;
    argv += optind;

    if (argc > 4) {
        warnx("Too many non-option arguments passed");
        usage();
    }

    if (argc != 4) {
        warnx("Missing required <threads> <waketype> <policy> <iterations> arguments");
        usage();
    }

    char *cp;

    /* How many threads? */
    g_numthreads = (uint32_t)strtoull(argv[0], &cp, 10);

    if (cp == argv[0] || *cp) {
        errx(EX_USAGE, "numthreads requires a decimal number, found \"%s\"", argv[0]);
    }

    /* What wakeup pattern? */
    g_waketype = parse_wakeup_pattern(argv[1]);

    /* Policy */
    g_policy = parse_thread_policy(argv[2]);

    /* Iterations */
    g_iterations = (uint32_t)strtoull(argv[3], &cp, 10);

    if (cp == argv[3] || *cp) {
        errx(EX_USAGE, "iterations requires a decimal number, found \"%s\"", argv[3]);
    }

    if (g_iterations < 1) {
        errx(EX_USAGE, "Must have at least one iteration");
    }

    if (g_numthreads == 1 && g_waketype == WAKE_CHAIN) {
        errx(EX_USAGE, "chain mode requires more than one thread");
    }

    if (g_numthreads == 1 && g_waketype == WAKE_HOP) {
        errx(EX_USAGE, "hop mode requires more than one thread");
    }
}