+ if (!simple_lock_try(&call->lock, LCK_GRP_NULL)) {
+ /* case (2c) lock order inversion, dequeue only */
+ timer_call_entry_dequeue_async(call);
+ continue;
+ }
+ if (deadline > threshold) {
+ /* move from master to longterm */
+ timer_call_entry_dequeue(call);
+ timer_call_entry_enqueue_tail(call, timer_longterm_queue);
+ if (deadline < tlp->threshold.deadline) {
+ tlp->threshold.deadline = deadline;
+ tlp->threshold.call = call;
+ }
+ }
+ simple_unlock(&call->lock);
+ }
+ timer_queue_unlock(timer_master_queue);
+}
+
+/*
+ * timer_sysctl_set_threshold:
+ *
+ * Install a new longterm-timer threshold, given in milliseconds
+ * (0 disables the longterm mechanism), then rebalance timers between
+ * the longterm queue and the master queue to honor the new threshold.
+ * Dispatched onto the master CPU by timer_sysctl_set(); runs with
+ * interrupts blocked at splclock() and the longterm queue locked.
+ */
+static void
+timer_sysctl_set_threshold(uint64_t value)
+{
+	timer_longterm_t *tlp = &timer_longterm;
+	spl_t s = splclock();
+	boolean_t threshold_increase;
+
+	timer_queue_lock_spin(timer_longterm_queue);
+
+	timer_call_cancel(&tlp->threshold.timer);
+
+	/*
+	 * Set the new threshold and note whether it's increasing.
+	 */
+	if (value == 0) {
+		/*
+		 * Disabling longterm treatment: treat as an "increase" so the
+		 * scan below escalates everything off the longterm queue.
+		 */
+		tlp->threshold.interval = TIMER_LONGTERM_NONE;
+		threshold_increase = TRUE;
+		/* NOTE(review): redundant -- the threshold timer was already
+		 * cancelled unconditionally above; harmless but removable. */
+		timer_call_cancel(&tlp->threshold.timer);
+	} else {
+		uint64_t old_interval = tlp->threshold.interval;
+		/* value is milliseconds; convert to absolute-time units. */
+		tlp->threshold.interval = value * NSEC_PER_MSEC;
+		nanoseconds_to_absolutetime(tlp->threshold.interval,
+		    &tlp->threshold.interval);
+		/* Fire the threshold timer 10% of an interval early. */
+		tlp->threshold.margin = tlp->threshold.interval / 10;
+		if (old_interval == TIMER_LONGTERM_NONE) {
+			threshold_increase = FALSE;
+		} else {
+			threshold_increase = (tlp->threshold.interval > old_interval);
+		}
+	}
+
+	if (threshold_increase /* or removal */) {
+		/* Escalate timers from the longterm queue */
+		timer_longterm_scan(tlp, mach_absolute_time());
+	} else { /* decrease or addition */
+		/*
+		 * We scan the local/master queue for timers now longterm.
+		 * To be strictly correct, we should scan all processor queues
+		 * but timer migration results in most timers gravitating to the
+		 * master processor in any case.
+		 */
+		timer_master_scan(tlp, mach_absolute_time());
+	}
+
+	/* Set new timer accordingly */
+	tlp->threshold.deadline_set = tlp->threshold.deadline;
+	if (tlp->threshold.deadline != TIMER_LONGTERM_NONE) {
+		/* Fire early by the margin plus previously observed latency. */
+		tlp->threshold.deadline_set -= tlp->threshold.margin;
+		tlp->threshold.deadline_set -= tlp->threshold.latency;
+		timer_call_enter(
+			&tlp->threshold.timer,
+			tlp->threshold.deadline_set,
+			TIMER_CALL_LOCAL | TIMER_CALL_SYS_CRITICAL);
+	}
+
+	/* Reset stats */
+	tlp->enqueues = 0;
+	tlp->dequeues = 0;
+	tlp->escalates = 0;
+	tlp->scan_pauses = 0;
+	tlp->threshold.scans = 0;
+	tlp->threshold.preempts = 0;
+	tlp->threshold.latency = 0;
+	tlp->threshold.latency_min = EndOfAllTime;
+	tlp->threshold.latency_max = 0;
+
+	timer_queue_unlock(timer_longterm_queue);
+	splx(s);
+}
+
+/*
+ * timer_sysctl_set:
+ *
+ * Sysctl entry point for the longterm-timer tunables. Returns
+ * KERN_SUCCESS on a recognized oid, KERN_INVALID_ARGUMENT otherwise.
+ */
+int
+timer_sysctl_set(int oid, uint64_t value)
+{
+	if (oid == THRESHOLD) {
+		/* Threshold changes are applied on the master CPU. */
+		timer_call_cpu(
+			master_cpu,
+			(void (*)(void *))timer_sysctl_set_threshold,
+			(void *) value);
+		return KERN_SUCCESS;
+	}
+	if (oid == SCAN_LIMIT) {
+		timer_longterm.scan_limit = value;
+		return KERN_SUCCESS;
+	}
+	if (oid == SCAN_INTERVAL) {
+		timer_longterm.scan_interval = value;
+		return KERN_SUCCESS;
+	}
+	return KERN_INVALID_ARGUMENT;
+}
+
+
+/* Select timer coalescing window based on per-task quality-of-service hints */
+static boolean_t
+tcoal_qos_adjust(thread_t t, int32_t *tshift, uint64_t *tmax_abstime, boolean_t *pratelimited)
+{
+	task_t task = t->task;
+	uint32_t qos_tier;
+
+	/* Threads with no task, or in the default tier, are not adjusted. */
+	if (!task) {
+		return FALSE;
+	}
+
+	qos_tier = proc_get_effective_thread_policy(t, TASK_POLICY_LATENCY_QOS);
+	assert(qos_tier <= NUM_LATENCY_QOS_TIERS);
+
+	if (qos_tier == 0) {
+		return FALSE;
+	}
+
+	/* Tier values are 1-based; the parameter tables are 0-based. */
+	*tshift = tcoal_prio_params.latency_qos_scale[qos_tier - 1];
+	*tmax_abstime = tcoal_prio_params.latency_qos_abstime_max[qos_tier - 1];
+	*pratelimited = tcoal_prio_params.latency_tier_rate_limited[qos_tier - 1];
+	return TRUE;
+}
+
+
+/* Adjust timer deadlines based on priority of the thread and the
+ * urgency value provided at timeout establishment. With this mechanism,
+ * timers are no longer necessarily sorted in order of soft deadline
+ * on a given timer queue, i.e. they may be differentially skewed.
+ * In the current scheme, this could lead to fewer pending timers
+ * processed than is technically possible when the HW deadline arrives.
+ *
+ * Outputs (via pointers, consumed by timer_call_slop()):
+ *   tshift       -- shift applied to the remaining time-to-deadline
+ *                   (negative value means left shift, i.e. amplify)
+ *   tmax_abstime -- cap on the resulting slop, in absolute-time units
+ *   pratelimited -- whether the chosen latency tier is rate-limited
+ *
+ * NOTE: the branches below form an ordered priority cascade; their
+ * relative order is significant.
+ */
+static void
+timer_compute_leeway(thread_t cthread, int32_t urgency, int32_t *tshift, uint64_t *tmax_abstime, boolean_t *pratelimited)
+{
+	int16_t tpri = cthread->sched_pri;
+	if ((urgency & TIMER_CALL_USER_MASK) != 0) {
+		/* User-originated timer: classify by thread priority/QoS. */
+		if (tpri >= BASEPRI_RTQUEUES ||
+		    urgency == TIMER_CALL_USER_CRITICAL) {
+			/* Realtime or critical: tightest coalescing window. */
+			*tshift = tcoal_prio_params.timer_coalesce_rt_shift;
+			*tmax_abstime = tcoal_prio_params.timer_coalesce_rt_abstime_max;
+			TCOAL_PRIO_STAT(rt_tcl);
+		} else if (proc_get_effective_thread_policy(cthread, TASK_POLICY_DARWIN_BG) ||
+		    (urgency == TIMER_CALL_USER_BACKGROUND)) {
+			/* Determine if timer should be subjected to a lower QoS */
+			if (tcoal_qos_adjust(cthread, tshift, tmax_abstime, pratelimited)) {
+				if (*tmax_abstime > tcoal_prio_params.timer_coalesce_bg_abstime_max) {
+					/* QoS window is looser than the BG window: keep it. */
+					return;
+				} else {
+					/* Fall through to BG defaults below, unratelimited. */
+					*pratelimited = FALSE;
+				}
+			}
+			*tshift = tcoal_prio_params.timer_coalesce_bg_shift;
+			*tmax_abstime = tcoal_prio_params.timer_coalesce_bg_abstime_max;
+			TCOAL_PRIO_STAT(bg_tcl);
+		} else if (tpri >= MINPRI_KERNEL) {
+			/* Kernel-priority thread. */
+			*tshift = tcoal_prio_params.timer_coalesce_kt_shift;
+			*tmax_abstime = tcoal_prio_params.timer_coalesce_kt_abstime_max;
+			TCOAL_PRIO_STAT(kt_tcl);
+		} else if (cthread->sched_mode == TH_MODE_FIXED) {
+			/* Fixed-priority thread. */
+			*tshift = tcoal_prio_params.timer_coalesce_fp_shift;
+			*tmax_abstime = tcoal_prio_params.timer_coalesce_fp_abstime_max;
+			TCOAL_PRIO_STAT(fp_tcl);
+		} else if (tcoal_qos_adjust(cthread, tshift, tmax_abstime, pratelimited)) {
+			/* Task-level latency QoS supplied the window. */
+			TCOAL_PRIO_STAT(qos_tcl);
+		} else if (cthread->sched_mode == TH_MODE_TIMESHARE) {
+			/* Ordinary timeshare thread. */
+			*tshift = tcoal_prio_params.timer_coalesce_ts_shift;
+			*tmax_abstime = tcoal_prio_params.timer_coalesce_ts_abstime_max;
+			TCOAL_PRIO_STAT(ts_tcl);
+		} else {
+			/* No classification matched; outputs left untouched. */
+			TCOAL_PRIO_STAT(nc_tcl);
+		}
+	} else if (urgency == TIMER_CALL_SYS_BACKGROUND) {
+		/* Kernel-originated background timer: use BG window. */
+		*tshift = tcoal_prio_params.timer_coalesce_bg_shift;
+		*tmax_abstime = tcoal_prio_params.timer_coalesce_bg_abstime_max;
+		TCOAL_PRIO_STAT(bg_tcl);
+	} else {
+		/* Other kernel-originated timers: kernel-thread window. */
+		*tshift = tcoal_prio_params.timer_coalesce_kt_shift;
+		*tmax_abstime = tcoal_prio_params.timer_coalesce_kt_abstime_max;
+		TCOAL_PRIO_STAT(kt_tcl);
+	}
+}
+
+
+/* "User idle level" heuristic used to scale timer slop in timer_call_slop()
+ * via (slop * level) >> 7; assumed range 0..128 given that scaling -- set
+ * elsewhere (setter not visible in this chunk), read via
+ * timer_get_user_idle_level(). */
+int timer_user_idle_level;
+
+/*
+ * timer_call_slop:
+ *
+ * Compute the coalescing slop (allowable deadline skew, in absolute-time
+ * units) for a timer with the given deadline and urgency flags, on behalf
+ * of thread 'cthread'. *pratelimited is set by the leeway computation when
+ * the selected latency tier is rate-limited. Returns 0 when coalescing is
+ * disabled, the deadline has already passed, or the timer is SYS_CRITICAL.
+ */
+uint64_t
+timer_call_slop(uint64_t deadline, uint64_t now, uint32_t flags, thread_t cthread, boolean_t *pratelimited)
+{
+	int32_t shift = 0;
+	uint64_t max_abstime = 0;
+	uint64_t slop;
+	uint32_t urgency = (flags & TIMER_CALL_URGENCY_MASK);
+
+	if (!mach_timer_coalescing_enabled ||
+	    deadline <= now ||
+	    urgency == TIMER_CALL_SYS_CRITICAL) {
+		return 0;
+	}
+
+	timer_compute_leeway(cthread, urgency, &shift, &max_abstime, pratelimited);
+
+	/* Scale the remaining time-to-deadline; negative shift amplifies. */
+	if (shift >= 0) {
+		slop = MIN((deadline - now) >> shift, max_abstime);
+	} else {
+		slop = MIN((deadline - now) << (-shift), max_abstime);
+	}
+
+	/* Apply adjustments derived from "user idle level" heuristic */
+	slop += (slop * timer_user_idle_level) >> 7;
+	return slop;
+}
+
+/* Accessor for the current user idle level heuristic value. */
+int
+timer_get_user_idle_level(void)
+{
+	return timer_user_idle_level;
+}