+ timer_longterm_update_locked(tlp);
+
+ if (tlp->threshold.deadline != TIMER_LONGTERM_NONE) {
+ timer_call_enter(
+ &tlp->threshold.timer,
+ tlp->threshold.deadline_set,
+ TIMER_CALL_LOCAL | TIMER_CALL_SYS_CRITICAL);
+ }
+
+ timer_queue_unlock(timer_longterm_queue);
+ splx(s);
+}
+
+void
+timer_longterm_init(void)
+{
+ uint32_t longterm;
+ timer_longterm_t *tlp = &timer_longterm;
+
+ DBG("timer_longterm_init() tlp: %p, queue: %p\n", tlp, &tlp->queue);
+
+ /*
+ * Set the longterm timer threshold. Defaults to TIMER_LONGTERM_THRESHOLD,
+ * or to TIMER_LONGTERM_NONE (disabled) in server performance mode;
+ * overridden by the "longterm" boot-arg.
+ */
+ tlp->threshold.interval = serverperfmode ? TIMER_LONGTERM_NONE
+ : TIMER_LONGTERM_THRESHOLD;
+ if (PE_parse_boot_argn("longterm", &longterm, sizeof(longterm))) {
+ tlp->threshold.interval = (longterm == 0) ?
+ TIMER_LONGTERM_NONE :
+ longterm * NSEC_PER_MSEC;
+ }
+ if (tlp->threshold.interval != TIMER_LONGTERM_NONE) {
+ printf("Longterm timer threshold: %llu ms\n",
+ tlp->threshold.interval / NSEC_PER_MSEC);
+ kprintf("Longterm timer threshold: %llu ms\n",
+ tlp->threshold.interval / NSEC_PER_MSEC);
+ nanoseconds_to_absolutetime(tlp->threshold.interval,
+ &tlp->threshold.interval);
+ tlp->threshold.margin = tlp->threshold.interval / 10;
+ tlp->threshold.latency_min = EndOfAllTime;
+ tlp->threshold.latency_max = 0;
+ }
+
+ tlp->threshold.preempted = TIMER_LONGTERM_NONE;
+ tlp->threshold.deadline = TIMER_LONGTERM_NONE;
+
+ lck_attr_setdefault(&timer_longterm_lck_attr);
+ lck_grp_attr_setdefault(&timer_longterm_lck_grp_attr);
+ lck_grp_init(&timer_longterm_lck_grp,
+ "timer_longterm", &timer_longterm_lck_grp_attr);
+ mpqueue_init(&tlp->queue,
+ &timer_longterm_lck_grp, &timer_longterm_lck_attr);
+
+ timer_call_setup(&tlp->threshold.timer,
+ timer_longterm_callout, (timer_call_param_t) tlp);
+
+ timer_longterm_queue = &tlp->queue;
+}
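+
+/*
+ * Illustrative note (example values are assumptions, not part of this change):
+ * the "longterm" boot-arg is interpreted in milliseconds, e.g.
+ *
+ *	longterm=5000  -> timers more than ~5 s out are parked on the longterm queue
+ *	longterm=0     -> TIMER_LONGTERM_NONE, longterm queueing disabled
+ */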
+
+enum {
+ THRESHOLD, QCOUNT,
+ ENQUEUES, DEQUEUES, ESCALATES, SCANS, PREEMPTS,
+ LATENCY, LATENCY_MIN, LATENCY_MAX, SCAN_LIMIT, SCAN_INTERVAL, PAUSES
+};
+
+uint64_t
+timer_sysctl_get(int oid)
+{
+ timer_longterm_t *tlp = &timer_longterm;
+
+ switch (oid) {
+ case THRESHOLD:
+ return (tlp->threshold.interval == TIMER_LONGTERM_NONE) ?
+ 0 : tlp->threshold.interval / NSEC_PER_MSEC;
+ case QCOUNT:
+ return tlp->queue.count;
+ case ENQUEUES:
+ return tlp->enqueues;
+ case DEQUEUES:
+ return tlp->dequeues;
+ case ESCALATES:
+ return tlp->escalates;
+ case SCANS:
+ return tlp->threshold.scans;
+ case PREEMPTS:
+ return tlp->threshold.preempts;
+ case LATENCY:
+ return tlp->threshold.latency;
+ case LATENCY_MIN:
+ return tlp->threshold.latency_min;
+ case LATENCY_MAX:
+ return tlp->threshold.latency_max;
+ case SCAN_LIMIT:
+ return tlp->scan_limit;
+ case SCAN_INTERVAL:
+ return tlp->scan_interval;
+ case PAUSES:
+ return tlp->scan_pauses;
+ default:
+ return 0;
+ }
+}
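+
+/*
+ * Sketch of a caller, illustrative only (the sysctl plumbing that ends up
+ * calling this lives elsewhere and is assumed, not shown here):
+ *
+ *	uint64_t qlen      = timer_sysctl_get(QCOUNT);    // timers on the longterm queue
+ *	uint64_t threshold = timer_sysctl_get(THRESHOLD); // current threshold, 0 when disabled
+ */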
+
+/*
+ * timer_master_scan() is the inverse of timer_longterm_scan():
+ * it un-escalates timers from the master queue back to the longterm queue.
+ */
+static void
+timer_master_scan(timer_longterm_t *tlp,
+ uint64_t now)
+{
+ queue_entry_t qe;
+ timer_call_t call;
+ uint64_t threshold;
+ uint64_t deadline;
+ mpqueue_head_t *timer_master_queue;
+
+ if (tlp->threshold.interval != TIMER_LONGTERM_NONE) {
+ threshold = now + tlp->threshold.interval;
+ } else {
+ threshold = TIMER_LONGTERM_NONE;
+ }
+
+ timer_master_queue = timer_queue_cpu(master_cpu);
+ timer_queue_lock_spin(timer_master_queue);
+
+ qe = queue_first(&timer_master_queue->head);
+ while (!queue_end(&timer_master_queue->head, qe)) {
+ call = TIMER_CALL(qe);
+ deadline = TCE(call)->deadline;
+ qe = queue_next(qe);
+ if ((call->flags & TIMER_CALL_LOCAL) != 0) {
+ continue;
+ }
+ if (!simple_lock_try(&call->lock, LCK_GRP_NULL)) {
+ /* case (2c) lock order inversion, dequeue only */
+ timer_call_entry_dequeue_async(call);
+ continue;
+ }
+ if (deadline > threshold) {
+ /* move from master to longterm */
+ timer_call_entry_dequeue(call);
+ timer_call_entry_enqueue_tail(call, timer_longterm_queue);
+ if (deadline < tlp->threshold.deadline) {
+ tlp->threshold.deadline = deadline;
+ tlp->threshold.call = call;
+ }
+ }
+ simple_unlock(&call->lock);
+ }
+ timer_queue_unlock(timer_master_queue);
+}
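+
+/*
+ * Illustrative walk-through with assumed numbers: given a 1 s
+ * threshold.interval and now = T, the scan above moves any non-LOCAL
+ * master-queue timer whose deadline lies beyond T + 1 s over to the longterm
+ * queue, and remembers the earliest such deadline in tlp->threshold.deadline.
+ */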
+
+static void
+timer_sysctl_set_threshold(uint64_t value)
+{
+ timer_longterm_t *tlp = &timer_longterm;
+ spl_t s = splclock();
+ boolean_t threshold_increase;
+
+ timer_queue_lock_spin(timer_longterm_queue);
+
+ timer_call_cancel(&tlp->threshold.timer);
+
+ /*
+ * Set the new threshold and note whether it's increasing.
+ */
+ if (value == 0) {
+ tlp->threshold.interval = TIMER_LONGTERM_NONE;
+ threshold_increase = TRUE;
+ timer_call_cancel(&tlp->threshold.timer);
+ } else {
+ uint64_t old_interval = tlp->threshold.interval;
+ tlp->threshold.interval = value * NSEC_PER_MSEC;
+ nanoseconds_to_absolutetime(tlp->threshold.interval,
+ &tlp->threshold.interval);
+ tlp->threshold.margin = tlp->threshold.interval / 10;
+ if (old_interval == TIMER_LONGTERM_NONE) {
+ threshold_increase = FALSE;
+ } else {
+ threshold_increase = (tlp->threshold.interval > old_interval);
+ }
+ }
+
+ if (threshold_increase /* or removal */) {
+ /* Escalate timers from the longterm queue */
+ timer_longterm_scan(tlp, mach_absolute_time());
+ } else { /* decrease or addition */
+ /*
+ * We scan the local/master queue for timers that are now longterm.
+ * To be strictly correct, we should scan all processor queues,
+ * but timer migration results in most timers gravitating to the
+ * master processor in any case.
+ */
+ timer_master_scan(tlp, mach_absolute_time());
+ }
+
+ /* Set new timer accordingly */
+ tlp->threshold.deadline_set = tlp->threshold.deadline;
+ if (tlp->threshold.deadline != TIMER_LONGTERM_NONE) {
+ tlp->threshold.deadline_set -= tlp->threshold.margin;
+ tlp->threshold.deadline_set -= tlp->threshold.latency;
+ timer_call_enter(
+ &tlp->threshold.timer,
+ tlp->threshold.deadline_set,
+ TIMER_CALL_LOCAL | TIMER_CALL_SYS_CRITICAL);
+ }
+
+ /* Reset stats */
+ tlp->enqueues = 0;
+ tlp->dequeues = 0;
+ tlp->escalates = 0;
+ tlp->scan_pauses = 0;
+ tlp->threshold.scans = 0;
+ tlp->threshold.preempts = 0;
+ tlp->threshold.latency = 0;
+ tlp->threshold.latency_min = EndOfAllTime;
+ tlp->threshold.latency_max = 0;
+
+ timer_queue_unlock(timer_longterm_queue);
+ splx(s);
+}
+
+int
+timer_sysctl_set(int oid, uint64_t value)
+{
+ switch (oid) {
+ case THRESHOLD:
+ timer_call_cpu(
+ master_cpu,
+ (void (*)(void *))timer_sysctl_set_threshold,
+ (void *) value);
+ return KERN_SUCCESS;
+ case SCAN_LIMIT:
+ timer_longterm.scan_limit = value;
+ return KERN_SUCCESS;
+ case SCAN_INTERVAL:
+ timer_longterm.scan_interval = value;
+ return KERN_SUCCESS;
+ default:
+ return KERN_INVALID_ARGUMENT;
+ }
+}
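+
+/*
+ * Sketch of a caller, illustrative only:
+ *
+ *	timer_sysctl_set(THRESHOLD, 1000);   // 1000 ms longterm threshold
+ *	timer_sysctl_set(THRESHOLD, 0);      // disable longterm queueing
+ *	timer_sysctl_set(SCAN_LIMIT, limit); // stored directly, no cross-call
+ *
+ * THRESHOLD is applied via timer_call_cpu() so that
+ * timer_sysctl_set_threshold() executes on the master CPU.
+ */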
+
+
+/* Select timer coalescing window based on per-task quality-of-service hints */
+static boolean_t
+tcoal_qos_adjust(thread_t t, int32_t *tshift, uint64_t *tmax_abstime, boolean_t *pratelimited)
+{
+ uint32_t latency_qos;
+ boolean_t adjusted = FALSE;
+ task_t ctask = t->task;
+
+ if (ctask) {
+ latency_qos = proc_get_effective_thread_policy(t, TASK_POLICY_LATENCY_QOS);
+
+ assert(latency_qos <= NUM_LATENCY_QOS_TIERS);
+
+ if (latency_qos) {
+ *tshift = tcoal_prio_params.latency_qos_scale[latency_qos - 1];
+ *tmax_abstime = tcoal_prio_params.latency_qos_abstime_max[latency_qos - 1];
+ *pratelimited = tcoal_prio_params.latency_tier_rate_limited[latency_qos - 1];
+ adjusted = TRUE;
+ }
+ }
+ return adjusted;
+}
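+
+/*
+ * Descriptive note: latency_qos is a 1-based tier, so tier N indexes slot N-1
+ * of the latency_qos_scale / latency_qos_abstime_max /
+ * latency_tier_rate_limited arrays; tier 0 means no QoS-specific adjustment
+ * and leaves the caller's defaults untouched.
+ */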
+
+
+ /* Adjust timer deadlines based on the priority of the thread and the
+ * urgency value provided at timeout establishment. With this mechanism,
+ * timers are no longer necessarily sorted in order of soft deadline
+ * on a given timer queue, i.e. they may be differentially skewed.
+ * In the current scheme, this could lead to fewer pending timers
+ * processed than is technically possible when the HW deadline arrives.
+ */
+static void
+timer_compute_leeway(thread_t cthread, int32_t urgency, int32_t *tshift, uint64_t *tmax_abstime, boolean_t *pratelimited)
+{
+ int16_t tpri = cthread->sched_pri;
+ if ((urgency & TIMER_CALL_USER_MASK) != 0) {
+ if (tpri >= BASEPRI_RTQUEUES ||
+ urgency == TIMER_CALL_USER_CRITICAL) {
+ *tshift = tcoal_prio_params.timer_coalesce_rt_shift;
+ *tmax_abstime = tcoal_prio_params.timer_coalesce_rt_abstime_max;
+ TCOAL_PRIO_STAT(rt_tcl);
+ } else if (proc_get_effective_thread_policy(cthread, TASK_POLICY_DARWIN_BG) ||
+ (urgency == TIMER_CALL_USER_BACKGROUND)) {
+ /* Determine if timer should be subjected to a lower QoS */
+ if (tcoal_qos_adjust(cthread, tshift, tmax_abstime, pratelimited)) {
+ if (*tmax_abstime > tcoal_prio_params.timer_coalesce_bg_abstime_max) {
+ return;
+ } else {
+ *pratelimited = FALSE;
+ }
+ }
+ *tshift = tcoal_prio_params.timer_coalesce_bg_shift;
+ *tmax_abstime = tcoal_prio_params.timer_coalesce_bg_abstime_max;
+ TCOAL_PRIO_STAT(bg_tcl);
+ } else if (tpri >= MINPRI_KERNEL) {
+ *tshift = tcoal_prio_params.timer_coalesce_kt_shift;
+ *tmax_abstime = tcoal_prio_params.timer_coalesce_kt_abstime_max;
+ TCOAL_PRIO_STAT(kt_tcl);
+ } else if (cthread->sched_mode == TH_MODE_FIXED) {
+ *tshift = tcoal_prio_params.timer_coalesce_fp_shift;
+ *tmax_abstime = tcoal_prio_params.timer_coalesce_fp_abstime_max;
+ TCOAL_PRIO_STAT(fp_tcl);
+ } else if (tcoal_qos_adjust(cthread, tshift, tmax_abstime, pratelimited)) {
+ TCOAL_PRIO_STAT(qos_tcl);
+ } else if (cthread->sched_mode == TH_MODE_TIMESHARE) {
+ *tshift = tcoal_prio_params.timer_coalesce_ts_shift;
+ *tmax_abstime = tcoal_prio_params.timer_coalesce_ts_abstime_max;
+ TCOAL_PRIO_STAT(ts_tcl);
+ } else {
+ TCOAL_PRIO_STAT(nc_tcl);
+ }
+ } else if (urgency == TIMER_CALL_SYS_BACKGROUND) {
+ *tshift = tcoal_prio_params.timer_coalesce_bg_shift;
+ *tmax_abstime = tcoal_prio_params.timer_coalesce_bg_abstime_max;
+ TCOAL_PRIO_STAT(bg_tcl);
+ } else {
+ *tshift = tcoal_prio_params.timer_coalesce_kt_shift;
+ *tmax_abstime = tcoal_prio_params.timer_coalesce_kt_abstime_max;
+ TCOAL_PRIO_STAT(kt_tcl);
+ }
+}
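+
+/*
+ * Descriptive note on the ordering above: for user timers, real-time or
+ * USER_CRITICAL wins first, then DARWIN_BG/USER_BACKGROUND, then
+ * kernel-priority threads, fixed-priority threads, QoS-tiered threads, and
+ * finally timeshare threads; anything else is left uncoalesced (nc_tcl).
+ */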
+
+
+int timer_user_idle_level;
+
+uint64_t
+timer_call_slop(uint64_t deadline, uint64_t now, uint32_t flags, thread_t cthread, boolean_t *pratelimited)
+{
+ int32_t tcs_shift = 0;
+ uint64_t tcs_max_abstime = 0;
+ uint64_t adjval;
+ uint32_t urgency = (flags & TIMER_CALL_URGENCY_MASK);
+
+ if (mach_timer_coalescing_enabled &&
+ (deadline > now) && (urgency != TIMER_CALL_SYS_CRITICAL)) {
+ timer_compute_leeway(cthread, urgency, &tcs_shift, &tcs_max_abstime, pratelimited);
+
+ if (tcs_shift >= 0) {
+ adjval = MIN((deadline - now) >> tcs_shift, tcs_max_abstime);
+ } else {
+ adjval = MIN((deadline - now) << (-tcs_shift), tcs_max_abstime);
+ }
+ /* Apply adjustments derived from "user idle level" heuristic */
+ adjval += (adjval * timer_user_idle_level) >> 7;
+ return adjval;
+ } else {
+ return 0;
+ }
+}
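+
+/*
+ * Worked example with assumed inputs: if the deadline is 100 ms out and
+ * timer_compute_leeway() yields tcs_shift = 10, then
+ *
+ *	adjval = MIN((deadline - now) >> 10, tcs_max_abstime)
+ *
+ * i.e. roughly 1/1024 of the time remaining (~98 us here, assuming 1 ns
+ * abstime ticks). The idle-level term then scales that by up to 2x: at
+ * timer_user_idle_level == 128, (adjval * 128) >> 7 adds another adjval.
+ */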
+
+int
+timer_get_user_idle_level(void)
+{
+ return timer_user_idle_level;
+}
+
+kern_return_t
+timer_set_user_idle_level(int ilevel)
+{
+ boolean_t do_reeval = FALSE;
+
+ if ((ilevel < 0) || (ilevel > 128)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (ilevel < timer_user_idle_level) {
+ do_reeval = TRUE;
+ }
+
+ timer_user_idle_level = ilevel;
+
+ if (do_reeval) {
+ ml_timer_evaluate();
+ }