+
+
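+/*
+ * Shape of the parameter table consulted below, inferred from its uses in
+ * this change (a sketch only; the authoritative definition lives elsewhere
+ * in the tree):
+ *
+ *	struct {
+ *		int32_t   timer_coalesce_{rt,bg,kt,fp,ts}_shift;
+ *		uint64_t  timer_coalesce_{rt,bg,kt,fp,ts}_abstime_max;
+ *		int32_t   latency_qos_scale[NUM_LATENCY_QOS_TIERS];
+ *		uint64_t  latency_qos_abstime_max[NUM_LATENCY_QOS_TIERS];
+ *		boolean_t latency_tier_rate_limited[NUM_LATENCY_QOS_TIERS];
+ *	} tcoal_prio_params;
+ */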
+/* Select timer coalescing window based on per-task quality-of-service hints */
+static boolean_t
+tcoal_qos_adjust(thread_t t, int32_t *tshift, uint64_t *tmax_abstime, boolean_t *pratelimited)
+{
+	uint32_t latency_qos;
+	boolean_t adjusted = FALSE;
+	task_t ctask = t->task;
+
+	if (ctask) {
+		latency_qos = proc_get_effective_thread_policy(t, TASK_POLICY_LATENCY_QOS);
+
+		assert(latency_qos <= NUM_LATENCY_QOS_TIERS);
+
+		if (latency_qos) {
+			*tshift = tcoal_prio_params.latency_qos_scale[latency_qos - 1];
+			*tmax_abstime = tcoal_prio_params.latency_qos_abstime_max[latency_qos - 1];
+			*pratelimited = tcoal_prio_params.latency_tier_rate_limited[latency_qos - 1];
+			adjusted = TRUE;
+		}
+	}
+	return adjusted;
+}
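+/*
+ * Example (illustrative): a thread whose effective latency QoS is tier 3
+ * indexes latency_qos_scale[2] and its sibling arrays; a tier of 0 means
+ * no tier is set, so the table is never consulted and the caller's
+ * defaults stand.
+ */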
+
+
+/* Adjust timer deadlines based on priority of the thread and the
+ * urgency value provided at timeout establishment. With this mechanism,
+ * timers are no longer necessarily sorted in order of soft deadline
+ * on a given timer queue, i.e. they may be differentially skewed.
+ * In the current scheme, this could lead to fewer pending timers
+ * processed than is technically possible when the HW deadline arrives.
+ */
+static void
+timer_compute_leeway(thread_t cthread, int32_t urgency, int32_t *tshift, uint64_t *tmax_abstime, boolean_t *pratelimited)
+{
+	int16_t tpri = cthread->sched_pri;
+	if ((urgency & TIMER_CALL_USER_MASK) != 0) {
+		if (tpri >= BASEPRI_RTQUEUES ||
+		    urgency == TIMER_CALL_USER_CRITICAL) {
+			*tshift = tcoal_prio_params.timer_coalesce_rt_shift;
+			*tmax_abstime = tcoal_prio_params.timer_coalesce_rt_abstime_max;
+			TCOAL_PRIO_STAT(rt_tcl);
+		} else if (proc_get_effective_thread_policy(cthread, TASK_POLICY_DARWIN_BG) ||
+		    (urgency == TIMER_CALL_USER_BACKGROUND)) {
+			/* Determine if timer should be subjected to a lower QoS */
+			if (tcoal_qos_adjust(cthread, tshift, tmax_abstime, pratelimited)) {
+				if (*tmax_abstime > tcoal_prio_params.timer_coalesce_bg_abstime_max) {
+					return;
+				} else {
+					*pratelimited = FALSE;
+				}
+			}
+			*tshift = tcoal_prio_params.timer_coalesce_bg_shift;
+			*tmax_abstime = tcoal_prio_params.timer_coalesce_bg_abstime_max;
+			TCOAL_PRIO_STAT(bg_tcl);
+		} else if (tpri >= MINPRI_KERNEL) {
+			*tshift = tcoal_prio_params.timer_coalesce_kt_shift;
+			*tmax_abstime = tcoal_prio_params.timer_coalesce_kt_abstime_max;
+			TCOAL_PRIO_STAT(kt_tcl);
+		} else if (cthread->sched_mode == TH_MODE_FIXED) {
+			*tshift = tcoal_prio_params.timer_coalesce_fp_shift;
+			*tmax_abstime = tcoal_prio_params.timer_coalesce_fp_abstime_max;
+			TCOAL_PRIO_STAT(fp_tcl);
+		} else if (tcoal_qos_adjust(cthread, tshift, tmax_abstime, pratelimited)) {
+			TCOAL_PRIO_STAT(qos_tcl);
+		} else if (cthread->sched_mode == TH_MODE_TIMESHARE) {
+			*tshift = tcoal_prio_params.timer_coalesce_ts_shift;
+			*tmax_abstime = tcoal_prio_params.timer_coalesce_ts_abstime_max;
+			TCOAL_PRIO_STAT(ts_tcl);
+		} else {
+			TCOAL_PRIO_STAT(nc_tcl);
+		}
+	} else if (urgency == TIMER_CALL_SYS_BACKGROUND) {
+		*tshift = tcoal_prio_params.timer_coalesce_bg_shift;
+		*tmax_abstime = tcoal_prio_params.timer_coalesce_bg_abstime_max;
+		TCOAL_PRIO_STAT(bg_tcl);
+	} else {
+		*tshift = tcoal_prio_params.timer_coalesce_kt_shift;
+		*tmax_abstime = tcoal_prio_params.timer_coalesce_kt_abstime_max;
+		TCOAL_PRIO_STAT(kt_tcl);
+	}
+}
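+/*
+ * Dispatch notes (one reading of the branches above): for user timers,
+ * realtime priority or TIMER_CALL_USER_CRITICAL is checked first, so a
+ * realtime thread marked DARWIN_BG still gets the narrow rt window;
+ * background status comes next, then kernel priority, fixed-priority
+ * mode, a latency QoS tier, and finally timeshare mode. Kernel-origin
+ * timers distinguish only TIMER_CALL_SYS_BACKGROUND from the default.
+ */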
+
+
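+/*
+ * User idle level, 0..128: timer_call_slop() widens each coalescing
+ * window by level/128, so 0 adds nothing and 128 doubles the window.
+ */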
+int timer_user_idle_level;
+
+uint64_t
+timer_call_slop(uint64_t deadline, uint64_t now, uint32_t flags, thread_t cthread, boolean_t *pratelimited)
+{
+	int32_t tcs_shift = 0;
+	uint64_t tcs_max_abstime = 0;
+	uint64_t adjval;
+	uint32_t urgency = (flags & TIMER_CALL_URGENCY_MASK);
+
+	if (mach_timer_coalescing_enabled &&
+	    (deadline > now) && (urgency != TIMER_CALL_SYS_CRITICAL)) {
+		timer_compute_leeway(cthread, urgency, &tcs_shift, &tcs_max_abstime, pratelimited);
+
+		if (tcs_shift >= 0)
+			adjval = MIN((deadline - now) >> tcs_shift, tcs_max_abstime);
+		else
+			adjval = MIN((deadline - now) << (-tcs_shift), tcs_max_abstime);
+		/* Apply adjustments derived from "user idle level" heuristic */
+		adjval += (adjval * timer_user_idle_level) >> 7;
+		return adjval;
+	} else {
+		return 0;
+	}
+}
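+/*
+ * Worked example (illustrative values): with tcs_shift == 3 and a timer
+ * due in 80ms, the raw window is 80ms >> 3 == 10ms, capped at
+ * tcs_max_abstime; at timer_user_idle_level == 64 it grows by
+ * (10ms * 64) >> 7, i.e. 50%, to 15ms. A negative shift widens rather
+ * than narrows: tcs_shift == -2 scales the pending interval up by 4x.
+ */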
+
+int
+timer_get_user_idle_level(void)
+{
+	return timer_user_idle_level;
+}
+
+kern_return_t
+timer_set_user_idle_level(int ilevel)
+{
+	boolean_t do_reeval = FALSE;
+
+	if ((ilevel < 0) || (ilevel > 128))
+		return KERN_INVALID_ARGUMENT;
+
+	if (ilevel < timer_user_idle_level) {
+		/* Lowering the level narrows windows; re-evaluate timers
+		 * that were armed with the previous, larger slop.
+		 */
+		do_reeval = TRUE;
+	}
+
+	timer_user_idle_level = ilevel;
+
+	if (do_reeval)
+		ml_timer_evaluate();
+
+	return KERN_SUCCESS;
+}
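+/*
+ * Worked example (illustrative): at ilevel == 128 every coalescing window
+ * doubles, since adjval += (adjval * 128) >> 7 adds exactly adjval; at
+ * ilevel == 64 windows grow by 50%. Only a decrease triggers
+ * ml_timer_evaluate(), because already-armed timers can only be too lax,
+ * never too tight, after the change.
+ */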