+
+#if DEBUG || DEVELOPMENT
+/*
+ * kernel_spin: spin uninterruptibly on this cpu for approximately
+ * spin_ns nanoseconds, with a fake interrupt-handler entry recorded in
+ * the per-cpu data so that mp_interrupt_watchdog() can be exercised.
+ * Interrupts are disabled for the duration and restored on exit.
+ */
+void
+kernel_spin(uint64_t spin_ns)
+{
+	boolean_t	istate;
+	uint64_t	spin_abs;
+	uint64_t	deadline;
+	cpu_data_t	*cdp;
+
+	kprintf("kernel_spin(%llu) spinning uninterruptibly\n", spin_ns);
+	istate = ml_set_interrupts_enabled(FALSE);
+	cdp = current_cpu_datap();
+	nanoseconds_to_absolutetime(spin_ns, &spin_abs);
+
+	/* Fake interrupt handler entry for testing mp_interrupt_watchdog() */
+	cdp->cpu_int_event_time = mach_absolute_time();
+	cdp->cpu_int_state = (void *) USER_STATE(current_thread());
+
+	/*
+	 * The deadline must be computed in mach absolute-time units:
+	 * add the converted spin_abs, not the raw nanosecond count
+	 * (spin_ns), to the current absolute time.
+	 */
+	deadline = mach_absolute_time() + spin_abs;
+	while (mach_absolute_time() < deadline)
+		cpu_pause();
+
+	/* Dismiss the fake interrupt before re-enabling interrupts */
+	cdp->cpu_int_event_time = 0;
+	cdp->cpu_int_state = NULL;
+
+	ml_set_interrupts_enabled(istate);
+	kprintf("kernel_spin() continuing\n");
+}
+
+/*
+ * Called from the scheduler's maintenance thread,
+ * scan running processors for long-running ISRs and:
+ * - panic if longer than LockTimeOut, or
+ * - log if more than a quantum.
+ *
+ * The per-cpu fields (cpu_int_event_time, cpu_int_state) are written by
+ * the remote cpu's interrupt entry/exit path and sampled here without
+ * synchronization, hence the defensive NULL and clock-skew checks below.
+ */
+void
+mp_interrupt_watchdog(void)
+{
+ cpu_t cpu;
+ boolean_t intrs_enabled = FALSE;
+ uint16_t cpu_int_num;
+ uint64_t cpu_int_event_time;
+ uint64_t cpu_rip;
+ uint64_t cpu_int_duration;
+ uint64_t now;
+ x86_saved_state_t *cpu_int_state;
+
+ /* Watchdog may be disabled (see mp_interrupt_watchdog_enabled); bail cheaply. */
+ if (__improbable(!mp_interrupt_watchdog_enabled))
+ return;
+
+ /* Disable interrupts so 'now' stays a consistent reference for the whole scan. */
+ intrs_enabled = ml_set_interrupts_enabled(FALSE);
+ now = mach_absolute_time();
+ /*
+ * While timeouts are not suspended,
+ * check all other processors for long outstanding interrupt handling.
+ */
+ for (cpu = 0;
+ cpu < (cpu_t) real_ncpus && !machine_timeout_suspended();
+ cpu++) {
+ /* Skip ourselves and cpus that are offline. */
+ if ((cpu == (cpu_t) cpu_number()) ||
+ (!cpu_is_running(cpu)))
+ continue;
+ /* Zero event time means no interrupt is currently being handled there. */
+ cpu_int_event_time = cpu_datap(cpu)->cpu_int_event_time;
+ if (cpu_int_event_time == 0)
+ continue;
+ if (__improbable(now < cpu_int_event_time))
+ continue; /* skip due to inter-processor skew */
+ cpu_int_state = cpu_datap(cpu)->cpu_int_state;
+ if (__improbable(cpu_int_state == NULL))
+ /* The interrupt may have been dismissed */
+ continue;
+
+ /* Here with a cpu handling an interrupt */
+
+ cpu_int_duration = now - cpu_int_event_time;
+ if (__improbable(cpu_int_duration > LockTimeOut)) {
+ cpu_int_num = saved_state64(cpu_int_state)->isf.trapno;
+ cpu_rip = saved_state64(cpu_int_state)->isf.rip;
+ vector_timed_out = cpu_int_num;
+ /* NMI the stuck cpu so its state is captured, then panic here. */
+ NMIPI_panic(cpu_to_cpumask(cpu), INTERRUPT_WATCHDOG);
+ panic("Interrupt watchdog, "
+ "cpu: %d interrupt: 0x%x time: %llu..%llu state: %p RIP: 0x%llx",
+ cpu, cpu_int_num, cpu_int_event_time, now, cpu_int_state, cpu_rip);
+ /* NOT REACHED */
+ } else if (__improbable(cpu_int_duration > (uint64_t) std_quantum)) {
+ mp_interrupt_watchdog_events++;
+ cpu_int_num = saved_state64(cpu_int_state)->isf.trapno;
+ cpu_rip = saved_state64(cpu_int_state)->isf.rip;
+ /* Restore interrupt state before the (potentially slow) printf;
+ * report at most one over-quantum event per scan. */
+ ml_set_interrupts_enabled(intrs_enabled);
+ printf("Interrupt watchdog, "
+ "cpu: %d interrupt: 0x%x time: %llu..%llu RIP: 0x%llx\n",
+ cpu, cpu_int_num, cpu_int_event_time, now, cpu_rip);
+ return;
+ }
+ }
+
+ ml_set_interrupts_enabled(intrs_enabled);
+}
+#endif