+ pal_rtc_nanotime_t *cpu_nanotime; /* Nanotime info */
+ thread_t csw_old_thread;
+ thread_t csw_new_thread;
+#if defined(__x86_64__)
+ uint32_t cpu_pmap_pcid_enabled; /* Nonzero if PCID is in use on this CPU */
+ pcid_t cpu_active_pcid; /* PCID currently programmed via CR3 */
+ pcid_t cpu_last_pcid;
+ volatile pcid_ref_t *cpu_pmap_pcid_coherentp; /* TLB coherency state, active pmap */
+ volatile pcid_ref_t *cpu_pmap_pcid_coherentp_kernel; /* TLB coherency state, kernel pmap */
+#define PMAP_PCID_MAX_PCID (0x1000)
+ pcid_t cpu_pcid_free_hint; /* Search hint for PCID allocation */
+ pcid_ref_t cpu_pcid_refcounts[PMAP_PCID_MAX_PCID]; /* Per-PCID reference counts */
+ pmap_t cpu_pcid_last_pmap_dispatched[PMAP_PCID_MAX_PCID]; /* Last pmap dispatched with each PCID */
+#ifdef PCID_STATS
+ uint64_t cpu_pmap_pcid_flushes; /* Activations requiring a TLB flush */
+ uint64_t cpu_pmap_pcid_preserves; /* Activations preserving the TLB */
+#endif
+#endif /* x86_64 */
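For context, here is a minimal sketch of how a per-CPU allocator might use the PCID fields above: scan the refcount array from the free hint, claim the first unused PCID, and record the owning pmap so a later re-dispatch of the same pmap can avoid a TLB flush. The helper name and exact logic are assumptions for illustration, not code from this diff; only the field names come from the patch.

/* Hypothetical allocator sketch; not part of this diff. */
static pcid_t
pmap_pcid_allocate_sketch(cpu_data_t *cdp, pmap_t pmap)
{
	pcid_t i;

	/* Resume scanning at the free hint rather than from PCID 0. */
	for (i = cdp->cpu_pcid_free_hint; i < PMAP_PCID_MAX_PCID; i++) {
		if (cdp->cpu_pcid_refcounts[i] == 0) {
			cdp->cpu_pcid_refcounts[i] = 1;
			/* Remember the owner so re-activating the same pmap
			 * with the same PCID can skip a TLB flush. */
			cdp->cpu_pcid_last_pmap_dispatched[i] = pmap;
			cdp->cpu_pcid_free_hint = i + 1;
			return i;
		}
	}
	return 0; /* Exhausted: fall back to PCID 0 (no-PCID behavior). */
}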
+ uint64_t cpu_max_observed_int_latency;
+ int cpu_max_observed_int_latency_vector;
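The two latency fields above suggest a simple high-water-mark update: keep only the worst latency seen on this CPU and the interrupt vector responsible. A sketch, with the helper name and call site assumed rather than taken from the patch:

/* Hypothetical sketch; not part of this diff. */
static void
record_interrupt_latency(cpu_data_t *cdp, uint64_t latency, int vector)
{
	/* Track only the maximum observed latency and its vector. */
	if (latency > cdp->cpu_max_observed_int_latency) {
		cdp->cpu_max_observed_int_latency = latency;
		cdp->cpu_max_observed_int_latency_vector = vector;
	}
}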
+ uint64_t debugger_entry_time;
+ volatile boolean_t cpu_NMI_acknowledged;
+ /* A separate nested interrupt stack flag, to account
+ * for non-nested interrupts arriving while on the interrupt stack.
+ * This currently only occurs when AICPM enables interrupts on the
+ * interrupt stack during processor offlining.
+ */
+ uint32_t cpu_nested_istack;
+ uint32_t cpu_nested_istack_events;
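One plausible reading of this pair, consistent with the comment above: cpu_nested_istack marks that the CPU is already running on the interrupt stack, and cpu_nested_istack_events tallies interrupts that arrive while it is set. The entry/exit helpers below are illustrative assumptions, not part of the patch:

/* Hypothetical sketch; not part of this diff. */
static void
istack_enter_sketch(cpu_data_t *cdp)
{
	if (cdp->cpu_nested_istack) {
		/* Already on the interrupt stack: record the occurrence. */
		cdp->cpu_nested_istack_events++;
	}
	cdp->cpu_nested_istack++;
}

static void
istack_exit_sketch(cpu_data_t *cdp)
{
	cdp->cpu_nested_istack--;
}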
+ x86_saved_state64_t *cpu_fatal_trap_state;
+ x86_saved_state64_t *cpu_post_fatal_trap_state;