+#if defined(CONFIG_SCHED_TIMESHARE_CORE)
+extern uint32_t sched_pri_shift;
+extern uint32_t sched_background_pri_shift;
+extern uint32_t sched_combined_fgbg_pri_shift;
+extern uint32_t sched_fixed_shift;
+extern int8_t sched_load_shifts[NRQS];
+extern uint32_t sched_decay_usage_age_factor;
+extern uint32_t sched_use_combined_fgbg_decay;
+void sched_timeshare_consider_maintenance(uint64_t);
+#endif /* CONFIG_SCHED_TIMESHARE_CORE */
+
+extern int32_t sched_poll_yield_shift;
+extern uint64_t sched_safe_duration;
+
+extern uint32_t sched_run_count, sched_share_count, sched_background_count;
+extern uint32_t sched_load_average, sched_mach_factor;
+
+extern uint32_t avenrun[3], mach_factor[3];
+
+extern uint64_t max_unsafe_computation;
+extern uint64_t max_poll_computation;
+
+/* TH_RUN & !TH_IDLE controls whether a thread has a run count */
+/*
+ * Atomically increment the global run count. Expands to the bare
+ * hw_atomic_add() call, so callers may use its result; 'th' is
+ * intentionally unused here (see the TH_RUN/TH_IDLE note above).
+ * The former trailing '\' continued the macro onto the blank line
+ * below, so future code on that line would silently join the macro;
+ * dropped.
+ */
+#define sched_run_incr(th) \
+ hw_atomic_add(&sched_run_count, 1)
+
+/*
+ * Atomically decrement the global run count. Expands to the bare
+ * hw_atomic_sub() call, so callers may use its result; 'th' is
+ * intentionally unused here. The former trailing '\' continued the
+ * macro onto the blank line below, risking silent absorption of any
+ * code later placed there; dropped.
+ */
+#define sched_run_decr(th) \
+ hw_atomic_sub(&sched_run_count, 1)
+
+#if MACH_ASSERT
+extern void sched_share_incr(thread_t thread);
+extern void sched_share_decr(thread_t thread);
+extern void sched_background_incr(thread_t thread);
+extern void sched_background_decr(thread_t thread);
+
+extern void assert_thread_sched_count(thread_t thread);
+
+#else /* MACH_ASSERT */
+/* sched_mode == TH_MODE_TIMESHARE controls whether a thread has a timeshare count when it has a run count */
+/*
+ * Non-MACH_ASSERT build: atomically bump the global timeshare count.
+ * Statement-style macro (MACRO_BEGIN/MACRO_END); result discarded via
+ * (void). 'th' is unused in this branch — the MACH_ASSERT variant
+ * above takes a thread_t, presumably for per-thread count checking
+ * (see assert_thread_sched_count); confirm against its definition.
+ */
+#define sched_share_incr(th) \
+MACRO_BEGIN \
+ (void)hw_atomic_add(&sched_share_count, 1); \
+MACRO_END