+#define HWINTCNT_SIZE 256
+ uint32_t cpu_hwIntCnt[HWINTCNT_SIZE]; /* Interrupt counts */
+ uint64_t cpu_hwIntpexits[HWINTCNT_SIZE];
+ uint64_t cpu_dr7; /* debug control register */
+ uint64_t cpu_int_event_time; /* intr entry/exit time */
+ pal_rtc_nanotime_t *cpu_nanotime; /* Nanotime info */
+#if KPC
+ /* double-buffered performance counter data */
+ uint64_t *cpu_kpc_buf[2];
+ /* PMC shadow and reload value buffers */
+ uint64_t *cpu_kpc_shadow;
+ uint64_t *cpu_kpc_reload;
+#endif
+#if MONOTONIC
+ struct mt_cpu cpu_monotonic;
+#endif /* MONOTONIC */
+ uint32_t cpu_pmap_pcid_enabled;
+ pcid_t cpu_active_pcid;
+ pcid_t cpu_last_pcid;
+ pcid_t cpu_kernel_pcid;
+ volatile pcid_ref_t *cpu_pmap_pcid_coherentp;
+ volatile pcid_ref_t *cpu_pmap_pcid_coherentp_kernel;
+ pcid_cdata_t *cpu_pcid_data;
+#ifdef PCID_STATS
+ uint64_t cpu_pmap_pcid_flushes;
+ uint64_t cpu_pmap_pcid_preserves;
+#endif
+ uint64_t cpu_aperf;
+ uint64_t cpu_mperf;
+ uint64_t cpu_c3res;
+ uint64_t cpu_c6res;
+ uint64_t cpu_c7res;
+ uint64_t cpu_itime_total;
+ uint64_t cpu_rtime_total;
+ uint64_t cpu_ixtime;
+ uint64_t cpu_idle_exits;
+ uint64_t cpu_rtimes[CPU_RTIME_BINS];
+ uint64_t cpu_itimes[CPU_ITIME_BINS];
+#if !MONOTONIC
+ uint64_t cpu_cur_insns;
+ uint64_t cpu_cur_ucc;
+ uint64_t cpu_cur_urc;
+#endif /* !MONOTONIC */
+ uint64_t cpu_gpmcs[4];
+ uint64_t cpu_max_observed_int_latency;
+ int cpu_max_observed_int_latency_vector;
+ volatile boolean_t cpu_NMI_acknowledged;
+ uint64_t debugger_entry_time;
+ uint64_t debugger_ipi_time;
+ /* A separate nested interrupt stack flag, to account
+ * for non-nested interrupts arriving while on the interrupt stack.
+ * This currently occurs only when AICPM enables interrupts on the
+ * interrupt stack during processor offlining.
+ */
+ uint32_t cpu_nested_istack;
+ uint32_t cpu_nested_istack_events;
+ x86_saved_state64_t *cpu_fatal_trap_state;
+ x86_saved_state64_t *cpu_post_fatal_trap_state;
+#if CONFIG_VMX
+ vmx_cpu_t cpu_vmx; /* wonderful world of virtualization */
+#endif
+#if CONFIG_MCA
+ struct mca_state *cpu_mca_state; /* State at MC fault */
+#endif
+ int cpu_type;
+ int cpu_subtype;
+ int cpu_threadtype;
+ boolean_t cpu_iflag;
+ boolean_t cpu_boot_complete;
+ int cpu_hibernate;
+#define MAX_PREEMPTION_RECORDS (8)
+#if DEVELOPMENT || DEBUG
+ int cpu_plri;
+ plrecord_t plrecords[MAX_PREEMPTION_RECORDS];
+#endif
+ void *cpu_console_buf;
+ struct x86_lcpu lcpu;
+ int cpu_phys_number; /* Physical CPU */
+ cpu_id_t cpu_id; /* Platform Expert */
+#if DEBUG
+ uint64_t cpu_entry_cr3;
+ uint64_t cpu_exit_cr3;
+ uint64_t cpu_pcid_last_cr3;
+#endif
+ boolean_t cpu_rendezvous_in_progress;
+} cpu_data_t;
+
+extern cpu_data_t *cpu_data_ptr[];
+
+/* Macro to generate inline bodies to retrieve per-cpu data fields. */
+#if defined(__clang__)
+#define GS_RELATIVE volatile __attribute__((address_space(256)))
+#ifndef offsetof
+#define offsetof(TYPE,MEMBER) __builtin_offsetof(TYPE,MEMBER)
+#endif
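For context on the GS_RELATIVE definition above: clang's address_space(256) attribute maps loads through such pointers onto the %gs segment, so a per-cpu field can be read straight out of the current CPU's cpu_data_t without first materializing a pointer to it. The sketch below is illustrative only and not part of this diff; the macro name CPU_DATA_GET, the get_cpu_number() example, and the cpu_number member are assumptions used to show how one of the generated inline bodies could look.

/* Hypothetical sketch (not part of this patch): a generated accessor body
 * dereferences the member at its struct offset relative to %gs, which the
 * kernel points at the current CPU's cpu_data_t.
 */
#define CPU_DATA_GET(member, type)                       \
	cpu_data_t GS_RELATIVE *cpu_data =               \
	        (cpu_data_t GS_RELATIVE *)0;             \
	type ret;                                         \
	ret = cpu_data->member;                           \
	return ret;

/* Example generated inline body, assuming an int cpu_number field exists
 * earlier in the struct. */
static inline int
get_cpu_number(void)
{
	CPU_DATA_GET(cpu_number, int)
}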