-/* Select timer coalescing window based on per-task quality-of-service hints */
-static boolean_t tcoal_qos_adjust(thread_t t, int32_t *tshift, uint64_t *tmax, boolean_t *pratelimited) {
- uint32_t latency_qos;
- boolean_t adjusted = FALSE;
- task_t ctask = t->task;
-
- if (ctask) {
- latency_qos = proc_get_effective_task_policy(ctask, TASK_POLICY_LATENCY_QOS);
-
- assert(latency_qos <= NUM_LATENCY_QOS_TIERS);
-
- if (latency_qos) {
- *tshift = tcoal_prio_params.latency_qos_scale[latency_qos - 1];
- *tmax = tcoal_prio_params.latency_qos_ns_max[latency_qos - 1];
- *pratelimited = tcoal_prio_params.latency_tier_rate_limited[latency_qos - 1];
- adjusted = TRUE;
- }
- }
- return adjusted;
-}
-
/* Adjust timer deadlines based on priority of the thread and the
 * urgency value provided at timeout establishment. With this mechanism,
 * timers are no longer necessarily sorted in order of soft deadline
 * on a given timer queue, i.e. they may be differentially skewed.
 * In the current scheme, this could lead to fewer pending timers
 * processed than is technically possible when the HW deadline arrives.
 *
 * Outputs: *tshift (power-of-two scale applied to the time-to-deadline),
 * *tmax (cap on the coalescing window), *pratelimited (whether the
 * selected tier is rate-limited). Some branches leave outputs unchanged
 * (e.g. the no-coalescing nc_tcl case), so callers should pre-initialize.
 */
static void
timer_compute_leeway(thread_t cthread, int32_t urgency, int32_t *tshift, uint64_t *tmax, boolean_t *pratelimited) {
	int16_t tpri = cthread->sched_pri;

	/* User-originated timers: pick a tier from thread priority and QoS. */
	if ((urgency & TIMER_CALL_USER_MASK) != 0) {
		if (tpri >= BASEPRI_RTQUEUES ||
		    urgency == TIMER_CALL_USER_CRITICAL) {
			/* Real-time thread or critical user timer: tightest window. */
			*tshift = tcoal_prio_params.timer_coalesce_rt_shift;
			*tmax = tcoal_prio_params.timer_coalesce_rt_ns_max;
			TCOAL_PRIO_STAT(rt_tcl);
		} else if ((urgency == TIMER_CALL_USER_BACKGROUND) ||
		    proc_get_effective_thread_policy(cthread, TASK_POLICY_DARWIN_BG)) {
			/* Determine if timer should be subjected to a lower QoS */
			if (tcoal_qos_adjust(cthread, tshift, tmax, pratelimited)) {
				if (*tmax > tcoal_prio_params.timer_coalesce_bg_ns_max) {
					/* Task QoS window is wider than the BG cap:
					 * keep the QoS parameters (and its
					 * rate-limit flag) as-is. */
					return;
				} else {
					/* QoS window is no wider than BG; fall
					 * through to the BG parameters and clear
					 * any QoS rate limiting. */
					*pratelimited = FALSE;
				}
			}
			*tshift = tcoal_prio_params.timer_coalesce_bg_shift;
			*tmax = tcoal_prio_params.timer_coalesce_bg_ns_max;
			TCOAL_PRIO_STAT(bg_tcl);
		} else if (tpri >= MINPRI_KERNEL) {
			/* Kernel-priority thread. */
			*tshift = tcoal_prio_params.timer_coalesce_kt_shift;
			*tmax = tcoal_prio_params.timer_coalesce_kt_ns_max;
			TCOAL_PRIO_STAT(kt_tcl);
		} else if (cthread->sched_mode == TH_MODE_FIXED) {
			/* Fixed-priority (non-timeshare) thread. */
			*tshift = tcoal_prio_params.timer_coalesce_fp_shift;
			*tmax = tcoal_prio_params.timer_coalesce_fp_ns_max;
			TCOAL_PRIO_STAT(fp_tcl);
		} else if (tcoal_qos_adjust(cthread, tshift, tmax, pratelimited)) {
			/* Task-level latency QoS supplied the parameters. */
			TCOAL_PRIO_STAT(qos_tcl);
		} else if (cthread->sched_mode == TH_MODE_TIMESHARE) {
			*tshift = tcoal_prio_params.timer_coalesce_ts_shift;
			*tmax = tcoal_prio_params.timer_coalesce_ts_ns_max;
			TCOAL_PRIO_STAT(ts_tcl);
		} else {
			/* No tier matched: outputs left untouched (no coalescing
			 * adjustment beyond the caller's defaults). */
			TCOAL_PRIO_STAT(nc_tcl);
		}
	} else if (urgency == TIMER_CALL_SYS_BACKGROUND) {
		/* Kernel-originated background timer: use the BG window. */
		*tshift = tcoal_prio_params.timer_coalesce_bg_shift;
		*tmax = tcoal_prio_params.timer_coalesce_bg_ns_max;
		TCOAL_PRIO_STAT(bg_tcl);
	} else {
		/* Default for kernel-originated timers. */
		*tshift = tcoal_prio_params.timer_coalesce_kt_shift;
		*tmax = tcoal_prio_params.timer_coalesce_kt_ns_max;
		TCOAL_PRIO_STAT(kt_tcl);
	}
}
-
-int timer_user_idle_level;
-
-uint64_t
-timer_call_slop(uint64_t deadline, uint64_t now, uint32_t flags, thread_t cthread, boolean_t *pratelimited)
-{
- int32_t tcs_shift = 0;
- uint64_t tcs_ns_max = 0;
- uint64_t adjval;
- uint32_t urgency = (flags & TIMER_CALL_URGENCY_MASK);
-
- if (mach_timer_coalescing_enabled &&
- (deadline > now) && (urgency != TIMER_CALL_SYS_CRITICAL)) {
- timer_compute_leeway(cthread, urgency, &tcs_shift, &tcs_ns_max, pratelimited);
-
- if (tcs_shift >= 0)
- adjval = MIN((deadline - now) >> tcs_shift, tcs_ns_max);
- else
- adjval = MIN((deadline - now) << (-tcs_shift), tcs_ns_max);
- /* Apply adjustments derived from "user idle level" heuristic */
- adjval += (adjval * timer_user_idle_level) >> 7;
- return adjval;
- } else {
- return 0;
- }
-}
-