+/*
+ * Set the realtime time-constraint policy on the calling churn thread.
+ */
+static int
+rt_churn_thread_setup(void)
+{
+	kern_return_t kr;
+	thread_time_constraint_policy_data_t pol;
+
+	/* Hard-coded realtime parameters (similar to what Digi uses) */
+	pol.period      = 100000;
+	pol.constraint  = (uint32_t) nanos_to_abs(CONSTRAINT_NANOS * 2);
+	pol.computation = (uint32_t) nanos_to_abs(RT_CHURN_COMP_NANOS * 2);
+	pol.preemptible = 0;    /* Ignored by OS */
+
+	kr = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
+	    (thread_policy_t) &pol, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
+	mach_assert_zero_t(0, kr);
+
+	return 0;
+}
+
+/*
+ * Churn thread body: each iteration checks in on g_rt_churn_sem, waits for the
+ * start signal, then spins on floating-point work for RT_CHURN_COMP_NANOS.
+ */
+static void *
+rt_churn_thread(__unused void *arg)
+{
+	rt_churn_thread_setup();
+
+	for (uint32_t i = 0; i < g_iterations; i++) {
+		kern_return_t kr = semaphore_wait_signal(g_rt_churn_start_sem, g_rt_churn_sem);
+		mach_assert_zero_t(0, kr);
+
+		volatile double x = 0.0;
+		volatile double y = 0.0;
+
+		uint64_t endspin = mach_absolute_time() + nanos_to_abs(RT_CHURN_COMP_NANOS);
+		while (mach_absolute_time() < endspin) {
+			y = y + 1.5 + x;
+			x = sqrt(y);
+		}
+	}
+
+	/* Final check-in so the main thread knows this thread has finished */
+	kern_return_t kr = semaphore_signal(g_rt_churn_sem);
+	mach_assert_zero_t(0, kr);
+
+	return NULL;
+}
+
+/* Wait for every churn thread to check in on g_rt_churn_sem */
+static void
+wait_for_rt_churn_threads(void)
+{
+	for (uint32_t i = 0; i < g_rt_churn_count; i++) {
+		kern_return_t kr = semaphore_wait(g_rt_churn_sem);
+		mach_assert_zero_t(0, kr);
+	}
+}
+
+/* Release every churn thread for another spin interval */
+static void
+start_rt_churn_threads(void)
+{
+	for (uint32_t i = 0; i < g_rt_churn_count; i++) {
+		kern_return_t kr = semaphore_signal(g_rt_churn_start_sem);
+		mach_assert_zero_t(0, kr);
+	}
+}
+
+static void
+create_rt_churn_threads(void)
+{
+	if (g_rt_churn_count == 0) {
+		/* Leave 1 CPU to ensure that the main thread can make progress */
+		g_rt_churn_count = g_numcpus - 1;
+	}
+
+	errno_t err;
+
+	struct sched_param param = { .sched_priority = (int)g_churn_pri };
+	pthread_attr_t attr;
+
+	/* Array for churn threads */
+	g_rt_churn_threads = (pthread_t*) valloc(sizeof(pthread_t) * g_rt_churn_count);
+	assert(g_rt_churn_threads);
+
+	if ((err = pthread_attr_init(&attr))) {
+		errc(EX_OSERR, err, "pthread_attr_init");
+	}
+
+	if ((err = pthread_attr_setschedparam(&attr, &param))) {
+		errc(EX_OSERR, err, "pthread_attr_setschedparam");
+	}
+
+	if ((err = pthread_attr_setschedpolicy(&attr, SCHED_RR))) {
+		errc(EX_OSERR, err, "pthread_attr_setschedpolicy");
+	}
+
+	for (uint32_t i = 0; i < g_rt_churn_count; i++) {
+		pthread_t new_thread;
+
+		if ((err = pthread_create(&new_thread, &attr, rt_churn_thread, NULL))) {
+			errc(EX_OSERR, err, "pthread_create");
+		}
+		g_rt_churn_threads[i] = new_thread;
+	}
+
+	if ((err = pthread_attr_destroy(&attr))) {
+		errc(EX_OSERR, err, "pthread_attr_destroy");
+	}
+
+	/* Wait until all threads have checked in */
+	wait_for_rt_churn_threads();
+}
+
+static void
+join_rt_churn_threads(void)
+{
+	/* Rejoin rt churn threads */
+	for (uint32_t i = 0; i < g_rt_churn_count; i++) {
+		errno_t err = pthread_join(g_rt_churn_threads[i], NULL);
+		if (err) {
+			errc(EX_OSERR, err, "pthread_join %d", i);
+		}
+	}
+}
+
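For reference, a minimal sketch (not part of the patch above) of how a test driver might sequence these helpers around each measured iteration. The run_one_iteration() call is a hypothetical placeholder for the workload being timed, and the sketch assumes the driver's g_iterations matches the loop count inside rt_churn_thread():

	/* Spawn churn threads; returns once every thread has checked in. */
	create_rt_churn_threads();

	for (uint32_t i = 0; i < g_iterations; i++) {
		start_rt_churn_threads();       /* release every churn thread */
		run_one_iteration();            /* hypothetical: the work being measured */
		wait_for_rt_churn_threads();    /* churn threads check back in after spinning */
	}

	/* After the final check-in the churn threads fall out of their loops. */
	join_rt_churn_threads();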