+ /*
+ * If every logical CPU in this package is idle, this interrupt
+ * is waking the whole package; count it against the vector.
+ */
+ if (cpu_data_ptr[cnum]->lcpu.package->num_idle == topoParms.nLThreadsPerPackage) {
+ cpu_data_ptr[cnum]->cpu_hwIntpexits[interrupt_num]++;
+ }
+
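+ /*
+ * Classify the vector for the entry tracepoint below:
+ * IPI, local APIC timer, or any other source.
+ */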
+ if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_INTERPROCESSOR_INTERRUPT)) {
+ itype = DBG_INTR_TYPE_IPI;
+ } else if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_TIMER_INTERRUPT)) {
+ itype = DBG_INTR_TYPE_TIMER;
+ } else {
+ itype = DBG_INTR_TYPE_OTHER;
+ }
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
+ interrupt_num,
+ (user_mode ? rip : VM_KERNEL_UNSLIDE(rip)),
+ user_mode, itype, 0);
+
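+ /* Account this interrupt in the scheduler statistics. */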
+ SCHED_STATS_INC(interrupt_count);
+
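+ /*
+ * If a telemetry sample is pending, mark the interrupted thread
+ * so a record is taken for it.
+ */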
+#if CONFIG_TELEMETRY
+ if (telemetry_needs_record) {
+ telemetry_mark_curthread(user_mode, FALSE);
+ }
+#endif
+
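+ /*
+ * Snapshot the preemption level; handlers must leave it
+ * unchanged (verified after dispatch below).
+ */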
+ ipl = get_preemption_level();
+
+ /*
+ * Handle local APIC interrupts directly; otherwise the
+ * platform expert handles device interrupts.
+ */
+ handled = lapic_interrupt(interrupt_num, state);
+
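+ /*
+ * Not claimed by the local APIC code: hand the vector to the
+ * platform expert, tagging it with the handling CPU number
+ * where the platform supports that (see the CMCI exception below).
+ */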
+ if (!handled) {
+ if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_CMCI_INTERRUPT)) {
+ /*
+ * CMCI can be signalled on any logical processor, and the kexts
+ * that implement handling CMCI use IOKit to register handlers for
+ * the CMCI vector, so if we see a CMCI, do not encode a CPU
+ * number in bits 8:31 (since the vector is the same regardless of
+ * the handling CPU).
+ */
+ PE_incoming_interrupt(interrupt_num);
+ } else if (cnum <= lapic_max_interrupt_cpunum) {
+ PE_incoming_interrupt((cnum << 8) | interrupt_num);
+ }
+ }
+
+ if (__improbable(get_preemption_level() != ipl)) {
+ panic("Preemption level altered by interrupt vector 0x%x: initial 0x%x, final: 0x%x\n", interrupt_num, ipl, get_preemption_level());
+ }
+
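+ /*
+ * On a nested interrupt stack just count the event; the latency
+ * measurement and timer-coalescing work below apply only to
+ * first-level interrupts.
+ */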
+ if (__improbable(cdp->cpu_nested_istack)) {
+ cdp->cpu_nested_istack_events++;
+ } else {
+ uint64_t ctime = mach_absolute_time();
+ uint64_t int_latency = ctime - cdp->cpu_int_event_time;
+ uint64_t esdeadline, ehdeadline;
+ /* Attempt to process deferred timers in the context of
+ * this interrupt, unless interrupt time has already exceeded
+ * TCOAL_ILAT_THRESHOLD.
+ */
+#define TCOAL_ILAT_THRESHOLD (30000ULL)
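+ /* (The threshold is compared against a mach_absolute_time delta.) */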
+
+ if ((int_latency < TCOAL_ILAT_THRESHOLD) &&
+ interrupt_timer_coalescing_enabled) {
+ esdeadline = cdp->rtclock_timer.queue.earliest_soft_deadline;
+ ehdeadline = cdp->rtclock_timer.deadline;
+ if ((ctime >= esdeadline) && (ctime < ehdeadline)) {
+ interrupt_coalesced_timers++;
+ TCOAL_DEBUG(0x88880000 | DBG_FUNC_START, ctime, esdeadline, ehdeadline, interrupt_coalesced_timers, 0);
+ rtclock_intr(state);
+ TCOAL_DEBUG(0x88880000 | DBG_FUNC_END, ctime, esdeadline, interrupt_coalesced_timers, 0, 0);
+ } else {
+ TCOAL_DEBUG(0x77770000, ctime, cdp->rtclock_timer.queue.earliest_soft_deadline, cdp->rtclock_timer.deadline, interrupt_coalesced_timers, 0);
+ }
+ }
+
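+ /*
+ * With latency assertions enabled, panic if this interrupt
+ * exceeded the configured latency cap (skipped while machine
+ * timeouts are suspended).
+ */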
+ if (__improbable(ilat_assert && (int_latency > interrupt_latency_cap) && !machine_timeout_suspended())) {
+ panic("Interrupt vector 0x%x exceeded interrupt latency threshold, 0x%llx absolute time delta, prior signals: 0x%x, current signals: 0x%x", interrupt_num, int_latency, cdp->cpu_prior_signals, cdp->cpu_signals);
+ }
+
+ if (__improbable(int_latency > cdp->cpu_max_observed_int_latency)) {
+ cdp->cpu_max_observed_int_latency = int_latency;
+ cdp->cpu_max_observed_int_latency_vector = interrupt_num;
+ }
+ }
+
+ /*
+ * Having serviced the interrupt first, look at the interrupted stack depth.
+ */
+ if (!user_mode) {
+ uint64_t depth = cdp->cpu_kernel_stack
+ + sizeof(struct thread_kernel_state)
+ + sizeof(struct i386_exception_link *)
+ - rsp;
+ if (__improbable(depth > kernel_stack_depth_max)) {
+ kernel_stack_depth_max = (vm_offset_t)depth;
+ KERNEL_DEBUG_CONSTANT(
+ MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
+ (long) depth, (long) VM_KERNEL_UNSLIDE(rip), 0, 0, 0);
+ }
+ }
+
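+ /* Harvest entropy on the master CPU only. */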
+ if (cnum == master_cpu) {
+ entropy_collect();
+ }
+
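+ /* kperf's interrupt hook, used by profiling tools. */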
+#if KPERF
+ kperf_interrupt();
+#endif /* KPERF */
+
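+ /* Matches the DBG_FUNC_START tracepoint emitted at entry. */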
+ KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END,
+ interrupt_num);