+ cpu_data_t *my_cpu = current_cpu_datap();
+ __unused uint32_t cnum = my_cpu->cpu_number;
+ uint64_t ctime, rtime, itime;
+#if CST_DEMOTION_DEBUG
+ processor_t cproc = my_cpu->cpu_processor;
+ uint64_t cwakeups = my_cpu->cpu_wakeups_issued_total;
+#endif /* CST_DEMOTION_DEBUG */
+ uint64_t esdeadline, ehdeadline;
+ boolean_t do_process_pending_timers = FALSE;
+
+ ctime = mach_absolute_time();
+ esdeadline = my_cpu->rtclock_timer.queue.earliest_soft_deadline;
+ ehdeadline = my_cpu->rtclock_timer.deadline;
+ /*
+ * Determine whether pending timers exist: a soft deadline has
+ * passed, the hard deadline has not, and the hard deadline falls
+ * within the idle-entry processing threshold.
+ */
+ if ((ctime >= esdeadline) && (ctime < ehdeadline) &&
+ ((ehdeadline - ctime) < idle_entry_timer_processing_hdeadline_threshold)) {
+ idle_pending_timers_processed++;
+ do_process_pending_timers = TRUE;
+ goto machine_idle_exit;
+ } else {
+ TCOAL_DEBUG(0xCCCC0000, ctime, esdeadline, ehdeadline, idle_pending_timers_processed, 0);
+ }
+
+ my_cpu->lcpu.state = LCPU_IDLE;
+ DBGLOG(cpu_handle, cnum, MP_IDLE);
+ MARK_CPU_IDLE(cnum);
+
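+ /*
+ * Charge the interval since the last idle exit as run time and
+ * bucket it into the per-CPU run-time histogram.
+ */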
+ rtime = ctime - my_cpu->cpu_ixtime;
+
+ my_cpu->cpu_rtime_total += rtime;
+ machine_classify_interval(rtime, &my_cpu->cpu_rtimes[0], &cpu_rtime_bins[0], CPU_RTIME_BINS);
+#if CST_DEMOTION_DEBUG
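+ /* Snapshot the core C3/C6/C7 residency MSRs on idle entry. */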
+ uint32_t cl = 0, ch = 0;
+ uint64_t c3res, c6res, c7res;
+ rdmsr_carefully(MSR_IA32_CORE_C3_RESIDENCY, &cl, &ch);
+ c3res = ((uint64_t)ch << 32) | cl;
+ rdmsr_carefully(MSR_IA32_CORE_C6_RESIDENCY, &cl, &ch);
+ c6res = ((uint64_t)ch << 32) | cl;
+ rdmsr_carefully(MSR_IA32_CORE_C7_RESIDENCY, &cl, &ch);
+ c7res = ((uint64_t)ch << 32) | cl;
+#endif
+
+ if (pmInitDone) {
+ /*
+ * Handle the case where ml_set_maxbusdelay() or ml_set_maxintdelay()
+ * was called before the CPU PM kext registered. We apply the values
+ * here because idle is where the decisions based on them are made,
+ * so this is the first point at which they will be used.
+ */
+ if (earlyMaxBusDelay != DELAY_UNSET) {
+ ml_set_maxbusdelay((uint32_t)(earlyMaxBusDelay & 0xFFFFFFFF));
+ }
+ if (earlyMaxIntDelay != DELAY_UNSET) {
+ ml_set_maxintdelay(earlyMaxIntDelay);
+ }
+ }
+
+ if (pmInitDone
+ && pmDispatch != NULL
+ && pmDispatch->MachineIdle != NULL) {
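+ /*
+ * Hand off to the registered CPU PM driver's idle handler; the
+ * INT64_MAX argument effectively places no bound on the idle
+ * period.
+ */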
+ (*pmDispatch->MachineIdle)(0x7FFFFFFFFFFFFFFFULL);
+ } else {
+ /*
+ * If there is no power management, re-enable interrupts and halt.
+ * This keeps the CPU from spinning through the scheduler and
+ * allows at least some minimal power savings (but it may cause
+ * problems in some MP configurations w.r.t. the APIC stopping
+ * during a GV3 transition).
+ */
+ pal_hlt();
+ /* Once woken, re-disable interrupts. */
+ pal_cli();
+ }
+
+ /*
+ * Mark the CPU as running again.
+ */
+ MARK_CPU_ACTIVE(cnum);
+ DBGLOG(cpu_handle, cnum, MP_UNIDLE);
+ my_cpu->lcpu.state = LCPU_RUN;
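+ /*
+ * Record the idle-exit timestamp and charge the elapsed interval
+ * as idle time, bucketed into the per-CPU idle-time histogram.
+ */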
+ uint64_t ixtime = my_cpu->cpu_ixtime = mach_absolute_time();
+ itime = ixtime - ctime;
+ my_cpu->cpu_idle_exits++;
+ my_cpu->cpu_itime_total += itime;
+ machine_classify_interval(itime, &my_cpu->cpu_itimes[0], &cpu_itime_bins[0], CPU_ITIME_BINS);
+#if CST_DEMOTION_DEBUG
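+ /* Re-read the residency MSRs, converting the entry snapshots to deltas. */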
+ cl = ch = 0;
+ rdmsr_carefully(MSR_IA32_CORE_C3_RESIDENCY, &cl, &ch);
+ c3res = (((uint64_t)ch << 32) | cl) - c3res;
+ rdmsr_carefully(MSR_IA32_CORE_C6_RESIDENCY, &cl, &ch);
+ c6res = (((uint64_t)ch << 32) | cl) - c6res;
+ rdmsr_carefully(MSR_IA32_CORE_C7_RESIDENCY, &cl, &ch);
+ c7res = (((uint64_t)ch << 32) | cl) - c7res;
+
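+ /*
+ * ndelta is the portion of the idle interval not accounted for by
+ * C3/C6/C7 residency (deltas are TSC ticks, converted here to
+ * nanoseconds). Flag idle periods over 1ms in which more than
+ * 250us went unaccounted.
+ */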
+ uint64_t ndelta = itime - tmrCvt(c3res + c6res + c7res, tscFCvtt2n);
+ KERNEL_DEBUG_CONSTANT(0xcead0000, ndelta, itime, c7res, c6res, c3res);
+ if ((itime > 1000000) && (ndelta > 250000)) {
+ KERNEL_DEBUG_CONSTANT(0xceae0000, ndelta, itime, c7res, c6res, c3res);
+ }
+#endif
+
+machine_idle_exit:
+ /* Re-enable interrupts. */
+ pal_sti();
+
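+ /*
+ * If idle entry was aborted because timers were due, service them
+ * now that interrupts are re-enabled.
+ */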
+ if (do_process_pending_timers) {
+ TCOAL_DEBUG(0xBBBB0000 | DBG_FUNC_START, ctime, esdeadline, ehdeadline, idle_pending_timers_processed, 0);
+
+ /* Adjust to reflect that this isn't truly a package idle exit */
+ __sync_fetch_and_sub(&my_cpu->lcpu.package->num_idle, 1);
+ lapic_timer_swi(); /* Trigger software timer interrupt */
+ __sync_fetch_and_add(&my_cpu->lcpu.package->num_idle, 1);
+
+ TCOAL_DEBUG(0xBBBB0000 | DBG_FUNC_END, ctime, esdeadline, idle_pending_timers_processed, 0, 0);
+ }
+#if CST_DEMOTION_DEBUG
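+ /*
+ * If this idle period issued no wakeups and every logical CPU in
+ * the package is now idle, log a fully-idle-package event.
+ */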
+ uint64_t nwakeups = my_cpu->cpu_wakeups_issued_total;
+
+ if ((nwakeups == cwakeups) && (topoParms.nLThreadsPerPackage == my_cpu->lcpu.package->num_idle)) {
+ KERNEL_DEBUG_CONSTANT(0xceaa0000, cwakeups, 0, 0, 0, 0);
+ }
+#endif