+
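+/*
+ * Trace the timer queue of a given cpu: timer_queue_trace() is
+ * dispatched to run on that cpu, without waiting, against its
+ * per-cpu queue.
+ */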
+void
+timer_queue_trace_cpu(int ncpu)
+{
+ timer_call_nosync_cpu(
+ ncpu,
+ (void(*)())timer_queue_trace,
+ (void*) timer_queue_cpu(ncpu));
+}
+
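+/*
+ * Emit kdebug trace records for each timer call on the given queue.
+ * No-op unless kdebug tracing is enabled; the queue is walked at
+ * splclock with its lock held.
+ */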
+void
+timer_queue_trace(
+ mpqueue_head_t *queue)
+{
+ timer_call_t call;
+ spl_t s;
+
+ if (!kdebug_enable)
+ return;
+
+ s = splclock();
+ timer_queue_lock_spin(queue);
+
+ TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
+ DECR_TIMER_QUEUE | DBG_FUNC_START,
+ queue->count, mach_absolute_time(), 0, 0, 0);
+
+ if (!queue_empty(&queue->head)) {
+ call = TIMER_CALL(queue_first(&queue->head));
+ do {
+ TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
+ DECR_TIMER_QUEUE | DBG_FUNC_NONE,
+ call->soft_deadline,
+ CE(call)->deadline,
+ CE(call)->entry_time,
+ CE(call)->func,
+ 0);
+ call = TIMER_CALL(queue_next(qe(call)));
+ } while (!queue_end(&queue->head, qe(call)));
+ }
+
+ TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
+ DECR_TIMER_QUEUE | DBG_FUNC_END,
+ queue->count, mach_absolute_time(), 0, 0, 0);
+
+ timer_queue_unlock(queue);
+ splx(s);
+}
+
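+/*
+ * Account for a timer call being removed from the longterm queue.
+ * Called with the longterm queue locked; clears the cached threshold
+ * call if it is the one being dequeued.
+ */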
+void
+timer_longterm_dequeued_locked(timer_call_t call)
+{
+ timer_longterm_t *tlp = &timer_longterm;
+
+ tlp->dequeues++;
+ if (call == tlp->threshold.call)
+ tlp->threshold.call = NULL;
+}
+
+/*
+ * Place a timer call in the longterm list
+ * and adjust the next timer callout deadline if the new timer is first.
+ */
+mpqueue_head_t *
+timer_longterm_enqueue_unlocked(timer_call_t call,
+ uint64_t now,
+ uint64_t deadline,
+ mpqueue_head_t **old_queue)
+{
+ timer_longterm_t *tlp = &timer_longterm;
+ boolean_t update_required = FALSE;
+ uint64_t longterm_threshold;
+
+ longterm_threshold = now + tlp->threshold.interval;
+
+ /*
+ * Return NULL without doing anything if:
+ * - this timer is local, or
+ * - the longterm mechanism is disabled, or
+ * - this deadline is too short.
+ */
+ if (__probable((call->flags & TIMER_CALL_LOCAL) != 0 ||
+ (tlp->threshold.interval == TIMER_LONGTERM_NONE) ||
+ (deadline <= longterm_threshold)))
+ return NULL;
+
+ /*
+ * Remove timer from its current queue, if any.
+ */
+ *old_queue = timer_call_dequeue_unlocked(call);
+
+ /*
+ * Lock the longterm queue, queue timer and determine
+ * whether an update is necessary.
+ */
+ assert(!ml_get_interrupts_enabled());
+ simple_lock(&call->lock);
+ timer_queue_lock_spin(timer_longterm_queue);
+ timer_call_entry_enqueue_tail(call, timer_longterm_queue);
+ CE(call)->deadline = deadline;
+
+ tlp->enqueues++;
+
+ /*
+ * We'll need to update the currently set threshold timer
+ * if the new deadline is sooner and no sooner update is in flight.
+ */
+ if (deadline < tlp->threshold.deadline &&
+ deadline < tlp->threshold.preempted) {
+ tlp->threshold.preempted = deadline;
+ tlp->threshold.call = call;
+ update_required = TRUE;
+ }
+ timer_queue_unlock(timer_longterm_queue);
+ simple_unlock(&call->lock);
+
+ if (update_required) {
+ timer_call_nosync_cpu(
+ master_cpu,
+ (void (*)(void *)) timer_longterm_update,
+ (void *)tlp);
+ }
+
+ return timer_longterm_queue;
+}
+
+/*
+ * Scan for timers below the longterm threshold.
+ * Move these to the local timer queue (of the boot processor on which the
+ * calling thread is running).
+ * Both the local (boot) queue and the longterm queue are locked.
+ * The scan is similar to the timer migrate sequence but is performed by
+ * successively examining each timer on the longterm queue:
+ * - if within the short-term threshold
+ * - enter on the local queue (unless being deleted),
+ * - otherwise:
+ * - if sooner, deadline becomes the next threshold deadline.
+ */
+void
+timer_longterm_scan(timer_longterm_t *tlp,
+ uint64_t now)
+{
+ queue_entry_t qe;
+ timer_call_t call;
+ uint64_t threshold;
+ uint64_t deadline;
+ mpqueue_head_t *timer_master_queue;
+
+ assert(!ml_get_interrupts_enabled());
+ assert(cpu_number() == master_cpu);
+
+ if (tlp->threshold.interval != TIMER_LONGTERM_NONE)
+ threshold = now + tlp->threshold.interval;
+ else
+ threshold = TIMER_LONGTERM_NONE;
+
+ tlp->threshold.deadline = TIMER_LONGTERM_NONE;
+ tlp->threshold.call = NULL;
+
+ if (queue_empty(&timer_longterm_queue->head))
+ return;
+
+ timer_master_queue = timer_queue_cpu(master_cpu);
+ timer_queue_lock_spin(timer_master_queue);
+
+ qe = queue_first(&timer_longterm_queue->head);
+ while (!queue_end(&timer_longterm_queue->head, qe)) {
+ call = TIMER_CALL(qe);
+ deadline = call->soft_deadline;
+ qe = queue_next(qe);
+ if (!simple_lock_try(&call->lock)) {
+ /* case (2c) lock order inversion, dequeue only */
+#ifdef TIMER_ASSERT
+ TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
+ DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
+ call,
+ CE(call)->queue,
+ call->lock.interlock.lock_data,
+ 0x2c, 0);
+#endif
+ timer_call_entry_dequeue_async(call);
+ continue;
+ }
+ if (deadline < threshold) {
+ /*
+ * This timer needs moving (escalating)
+ * to the local (boot) processor's queue.
+ */
+#ifdef TIMER_ASSERT
+ if (deadline < now)
+ TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
+ DECR_TIMER_OVERDUE | DBG_FUNC_NONE,
+ call,
+ deadline,
+ now,
+ threshold,
+ 0);
+#endif
+ TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
+ DECR_TIMER_ESCALATE | DBG_FUNC_NONE,
+ call,
+ CE(call)->deadline,
+ CE(call)->entry_time,
+ CE(call)->func,
+ 0);
+ tlp->escalates++;
+ timer_call_entry_dequeue(call);
+ timer_call_entry_enqueue_deadline(
+ call, timer_master_queue, CE(call)->deadline);
+ /*
+ * A side-effect of the following call is to update
+ * the actual hardware deadline if required.
+ */
+ (void) timer_queue_assign(deadline);
+ } else {
+ if (deadline < tlp->threshold.deadline) {
+ tlp->threshold.deadline = deadline;
+ tlp->threshold.call = call;
+ }
+ }
+ simple_unlock(&call->lock);
+ }
+
+ timer_queue_unlock(timer_master_queue);
+}
+
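+/*
+ * Callout for the longterm threshold timer: rescan and re-arm.
+ */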
+void
+timer_longterm_callout(timer_call_param_t p0, __unused timer_call_param_t p1)
+{
+ timer_longterm_t *tlp = (timer_longterm_t *) p0;
+
+ timer_longterm_update(tlp);
+}
+
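+/*
+ * Recompute the longterm threshold state with the longterm queue locked:
+ * either adopt a preempting (sooner) deadline posted by an enqueue, or
+ * perform a full scan, folding the observed wakeup latency into a
+ * moving average.  threshold.deadline_set is left holding the next
+ * deadline to arm, pulled in by the margin and average latency.
+ */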
+void
+timer_longterm_update_locked(timer_longterm_t *tlp)
+{
+ uint64_t latency;
+
+ TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
+ DECR_TIMER_UPDATE | DBG_FUNC_START,
+ &tlp->queue,
+ tlp->threshold.deadline,
+ tlp->threshold.preempted,
+ tlp->queue.count, 0);
+
+ tlp->scan_time = mach_absolute_time();
+ if (tlp->threshold.preempted != TIMER_LONGTERM_NONE) {
+ tlp->threshold.preempts++;
+ tlp->threshold.deadline = tlp->threshold.preempted;
+ tlp->threshold.preempted = TIMER_LONGTERM_NONE;
+ /*
+ * Note: in the unlikely event that a preempted timer has
+ * itself been cancelled, we'll simply re-scan later at the
+ * time of the preempted/cancelled timer.
+ */
+ } else {
+ tlp->threshold.scans++;
+
+ /*
+ * Maintain a moving average of our wakeup latency.
+ * Clamp latency to 0 and ignore values above the threshold interval.
+ */
+ if (tlp->scan_time > tlp->threshold.deadline_set)
+ latency = tlp->scan_time - tlp->threshold.deadline_set;
+ else
+ latency = 0;
+ if (latency < tlp->threshold.interval) {
+ tlp->threshold.latency_min =
+ MIN(tlp->threshold.latency_min, latency);
+ tlp->threshold.latency_max =
+ MAX(tlp->threshold.latency_max, latency);
+ tlp->threshold.latency =
+ (tlp->threshold.latency*99 + latency) / 100;
+ }
+
+ timer_longterm_scan(tlp, tlp->scan_time);
+ }
+
+ tlp->threshold.deadline_set = tlp->threshold.deadline;
+ /* Arm the threshold timer early, by the margin plus average latency */
+ if (tlp->threshold.deadline != TIMER_LONGTERM_NONE) {
+ tlp->threshold.deadline_set -= tlp->threshold.margin;
+ tlp->threshold.deadline_set -= tlp->threshold.latency;
+ }
+
+ TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
+ DECR_TIMER_UPDATE | DBG_FUNC_END,
+ &tlp->queue,
+ tlp->threshold.deadline,
+ tlp->threshold.scans,
+ tlp->queue.count, 0);
+}
+
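+/*
+ * Locking wrapper for timer_longterm_update_locked(); must be called on
+ * the boot (master) cpu.  Re-arms the threshold timer if a deadline
+ * remains outstanding.
+ */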
+void
+timer_longterm_update(timer_longterm_t *tlp)
+{
+ spl_t s = splclock();
+
+ timer_queue_lock_spin(timer_longterm_queue);
+
+ if (cpu_number() != master_cpu)
+ panic("timer_longterm_update_master() on non-boot cpu");
+
+ timer_longterm_update_locked(tlp);
+
+ if (tlp->threshold.deadline != TIMER_LONGTERM_NONE)
+ timer_call_enter(
+ &tlp->threshold.timer,
+ tlp->threshold.deadline_set,
+ TIMER_CALL_LOCAL | TIMER_CALL_SYS_CRITICAL);
+
+ timer_queue_unlock(timer_longterm_queue);
+ splx(s);
+}
+
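+/*
+ * One-time initialization of the longterm timer mechanism: threshold
+ * interval (boot-arg overridable), locks, the longterm queue and the
+ * threshold timer callout.
+ */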
+void
+timer_longterm_init(void)
+{
+ uint32_t longterm;
+ timer_longterm_t *tlp = &timer_longterm;
+
+ DBG("timer_longterm_init() tlp: %p, queue: %p\n", tlp, &tlp->queue);
+
+ /*
+ * Set the longterm timer threshold.
+ * Defaults to TIMER_LONGTERM_THRESHOLD; can be overridden
+ * (in milliseconds) by the "longterm" boot-arg.
+ */
+ tlp->threshold.interval = TIMER_LONGTERM_THRESHOLD;
+ if (PE_parse_boot_argn("longterm", &longterm, sizeof (longterm))) {
+ tlp->threshold.interval = (longterm == 0) ?
+ TIMER_LONGTERM_NONE :
+ longterm * NSEC_PER_MSEC;
+ }
+ if (tlp->threshold.interval != TIMER_LONGTERM_NONE) {
+ printf("Longterm timer threshold: %llu ms\n",
+ tlp->threshold.interval / NSEC_PER_MSEC);
+ kprintf("Longterm timer threshold: %llu ms\n",
+ tlp->threshold.interval / NSEC_PER_MSEC);
+ nanoseconds_to_absolutetime(tlp->threshold.interval,
+ &tlp->threshold.interval);
+ tlp->threshold.margin = tlp->threshold.interval / 10;
+ tlp->threshold.latency_min = EndOfAllTime;
+ tlp->threshold.latency_max = 0;
+ }
+
+ tlp->threshold.preempted = TIMER_LONGTERM_NONE;
+ tlp->threshold.deadline = TIMER_LONGTERM_NONE;
+
+ lck_attr_setdefault(&timer_longterm_lck_attr);
+ lck_grp_attr_setdefault(&timer_longterm_lck_grp_attr);
+ lck_grp_init(&timer_longterm_lck_grp,
+ "timer_longterm", &timer_longterm_lck_grp_attr);
+ mpqueue_init(&tlp->queue,
+ &timer_longterm_lck_grp, &timer_longterm_lck_attr);
+
+ timer_call_setup(&tlp->threshold.timer,
+ timer_longterm_callout, (timer_call_param_t) tlp);
+
+ timer_longterm_queue = &tlp->queue;
+}
+
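+/*
+ * Sysctl support: statistic/parameter identifiers and their accessors.
+ */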
+enum {
+ THRESHOLD, QCOUNT,
+ ENQUEUES, DEQUEUES, ESCALATES, SCANS, PREEMPTS,
+ LATENCY, LATENCY_MIN, LATENCY_MAX
+};
+uint64_t
+timer_sysctl_get(int oid)
+{
+ timer_longterm_t *tlp = &timer_longterm;
+
+ switch (oid) {
+ case THRESHOLD:
+ return (tlp->threshold.interval == TIMER_LONGTERM_NONE) ?
+ 0 : tlp->threshold.interval / NSEC_PER_MSEC;
+ case QCOUNT:
+ return tlp->queue.count;
+ case ENQUEUES:
+ return tlp->enqueues;
+ case DEQUEUES:
+ return tlp->dequeues;
+ case ESCALATES:
+ return tlp->escalates;
+ case SCANS:
+ return tlp->threshold.scans;
+ case PREEMPTS:
+ return tlp->threshold.preempts;
+ case LATENCY:
+ return tlp->threshold.latency;
+ case LATENCY_MIN:
+ return tlp->threshold.latency_min;
+ case LATENCY_MAX:
+ return tlp->threshold.latency_max;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * timer_master_scan() is the inverse of timer_longterm_scan()
+ * in that it un-escalates timers back to the longterm queue.
+ */
+static void
+timer_master_scan(timer_longterm_t *tlp,
+ uint64_t now)
+{
+ queue_entry_t qe;
+ timer_call_t call;
+ uint64_t threshold;
+ uint64_t deadline;
+ mpqueue_head_t *timer_master_queue;
+
+ if (tlp->threshold.interval != TIMER_LONGTERM_NONE)
+ threshold = now + tlp->threshold.interval;
+ else
+ threshold = TIMER_LONGTERM_NONE;
+
+ timer_master_queue = timer_queue_cpu(master_cpu);
+ timer_queue_lock_spin(timer_master_queue);
+
+ qe = queue_first(&timer_master_queue->head);
+ while (!queue_end(&timer_master_queue->head, qe)) {
+ call = TIMER_CALL(qe);
+ deadline = CE(call)->deadline;
+ qe = queue_next(qe);
+ if ((call->flags & TIMER_CALL_LOCAL) != 0)
+ continue;
+ if (!simple_lock_try(&call->lock)) {
+ /* case (2c) lock order inversion, dequeue only */
+ timer_call_entry_dequeue_async(call);
+ continue;
+ }
+ if (deadline > threshold) {
+ /* move from master to longterm */
+ timer_call_entry_dequeue(call);
+ timer_call_entry_enqueue_tail(call, timer_longterm_queue);
+ if (deadline < tlp->threshold.deadline) {
+ tlp->threshold.deadline = deadline;
+ tlp->threshold.call = call;
+ }
+ }
+ simple_unlock(&call->lock);
+ }
+ timer_queue_unlock(timer_master_queue);
+}
+
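+/*
+ * Apply a new longterm threshold (in milliseconds; 0 disables the
+ * mechanism): rescan the affected queue, re-arm the threshold timer
+ * and reset the statistics.  Runs on the master cpu via
+ * timer_call_cpu() from timer_sysctl_set().
+ */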
+static void
+timer_sysctl_set_threshold(uint64_t value)
+{
+ timer_longterm_t *tlp = &timer_longterm;
+ spl_t s = splclock();
+ boolean_t threshold_increase;
+
+ timer_queue_lock_spin(timer_longterm_queue);
+
+ timer_call_cancel(&tlp->threshold.timer);
+
+ /*
+ * Set the new threshold and note whether it's increasing.
+ */
+ if (value == 0) {
+ tlp->threshold.interval = TIMER_LONGTERM_NONE;
+ threshold_increase = TRUE;
+ timer_call_cancel(&tlp->threshold.timer);
+ } else {
+ uint64_t old_interval = tlp->threshold.interval;
+ tlp->threshold.interval = value * NSEC_PER_MSEC;
+ nanoseconds_to_absolutetime(tlp->threshold.interval,
+ &tlp->threshold.interval);
+ tlp->threshold.margin = tlp->threshold.interval / 10;
+ if (old_interval == TIMER_LONGTERM_NONE)
+ threshold_increase = FALSE;
+ else
+ threshold_increase = (tlp->threshold.interval > old_interval);
+ }
+
+ if (threshold_increase /* or removal */) {
+ /* Escalate timers from the longterm queue */
+ timer_longterm_scan(tlp, mach_absolute_time());
+ } else /* decrease or addition */ {
+ /*
+ * We scan the local/master queue for timers now longterm.
+ * To be strictly correct, we should scan all processor queues
+ * but timer migration results in most timers gravitating to the
+ * master processor in any case.
+ */
+ timer_master_scan(tlp, mach_absolute_time());
+ }
+
+ /* Set new timer accordingly */
+ tlp->threshold.deadline_set = tlp->threshold.deadline;
+ if (tlp->threshold.deadline != TIMER_LONGTERM_NONE) {
+ tlp->threshold.deadline_set -= tlp->threshold.margin;
+ tlp->threshold.deadline_set -= tlp->threshold.latency;
+ timer_call_enter(
+ &tlp->threshold.timer,
+ tlp->threshold.deadline_set,
+ TIMER_CALL_LOCAL | TIMER_CALL_SYS_CRITICAL);
+ }
+
+ /* Reset stats */
+ tlp->enqueues = 0;
+ tlp->dequeues = 0;
+ tlp->escalates = 0;
+ tlp->threshold.scans = 0;
+ tlp->threshold.preempts = 0;
+ tlp->threshold.latency = 0;
+ tlp->threshold.latency_min = EndOfAllTime;
+ tlp->threshold.latency_max = 0;
+
+ timer_queue_unlock(timer_longterm_queue);
+ splx(s);
+}
+
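+/*
+ * Sysctl setter: only the THRESHOLD oid is settable; the update is
+ * applied on the master cpu.
+ */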
+int
+timer_sysctl_set(int oid, uint64_t value)
+{
+ switch (oid) {
+ case THRESHOLD:
+ timer_call_cpu(
+ master_cpu,
+ (void (*)(void *)) timer_sysctl_set_threshold,
+ (void *) value);
+ return KERN_SUCCESS;
+ default:
+ return KERN_INVALID_ARGUMENT;
+ }
+}