+typedef uint16_t pcid_t;
+typedef uint8_t pcid_ref_t;
+
+#define CPU_RTIME_BINS (12)
+#define CPU_ITIME_BINS (CPU_RTIME_BINS)
+
+/*
+ * Per-cpu data.
+ *
+ * Each processor has a per-cpu data area which is dereferenced through the
+ * current_cpu_datap() macro. For speed, the %gs segment is based here, and
+ * using this, inlines provide single-instruction access to frequently used
+ * members - such as get_cpu_number()/cpu_number(), and get_active_thread()/
+ * current_thread().
+ *
+ * Cpu data owned by another processor can be accessed using the
+ * cpu_datap(cpu_number) macro which uses the cpu_data_ptr[] array of per-cpu
+ * pointers.
+ */
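+/*
+ * Illustrative sketch, not a definitive implementation: the accessors named
+ * above (current_cpu_datap(), get_cpu_number(), get_active_thread()) are
+ * typically built from a single %gs-relative load of a cpu_data member,
+ * along the lines of the following (the CPU_DATA_GET helper name is assumed
+ * here for illustration; offsetof() requires <stddef.h>):
+ *
+ *	#define CPU_DATA_GET(member, type)			\
+ *		type ret;					\
+ *		__asm__ volatile ("mov %%gs:%P1,%0"		\
+ *			: "=r" (ret)				\
+ *			: "i" (offsetof(cpu_data_t, member)));	\
+ *		return ret;
+ *
+ *	static inline cpu_data_t *
+ *	current_cpu_datap(void)
+ *	{
+ *		CPU_DATA_GET(cpu_this, cpu_data_t *)
+ *	}
+ *
+ *	static inline int
+ *	get_cpu_number(void)
+ *	{
+ *		CPU_DATA_GET(cpu_number, int)
+ *	}
+ *
+ * Each accessor compiles to one mov from the %gs-based segment, which is why
+ * the segment base is pointed at this per-cpu structure.
+ */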
+typedef struct cpu_data
+{
+ struct pal_cpu_data cpu_pal_data; /* PAL-specific data */
+#define cpu_pd cpu_pal_data /* convenience alias */
+ struct cpu_data *cpu_this; /* pointer to myself */
+ thread_t cpu_active_thread; /* currently running thread */
+ thread_t cpu_nthread;
+ volatile int cpu_preemption_level; /* preemption-disable nesting count */
+ int cpu_number; /* Logical CPU */
+ void *cpu_int_state; /* interrupt state */
+ vm_offset_t cpu_active_stack; /* kernel stack base */
+ vm_offset_t cpu_kernel_stack; /* kernel stack top */
+ vm_offset_t cpu_int_stack_top; /* interrupt stack top */
+ int cpu_interrupt_level; /* interrupt nesting level */
+ int cpu_phys_number; /* Physical CPU */
+ cpu_id_t cpu_id; /* Platform Expert */
+ volatile int cpu_signals; /* IPI events */
+ volatile int cpu_prior_signals; /* Last set of events,
+ * debugging
+ */
+ ast_t cpu_pending_ast; /* pending ASTs */
+ volatile int cpu_running; /* processor is up and running */
+ boolean_t cpu_fixed_pmcs_enabled;
+ rtclock_timer_t rtclock_timer;
+ volatile addr64_t cpu_active_cr3 __attribute((aligned(64)));
+ union {
+ volatile uint32_t cpu_tlb_invalid;
+ struct {
+ volatile uint16_t cpu_tlb_invalid_local;
+ volatile uint16_t cpu_tlb_invalid_global;
+ };
+ };
+ volatile task_map_t cpu_task_map;
+ volatile addr64_t cpu_task_cr3;
+ addr64_t cpu_kernel_cr3;
+ cpu_uber_t cpu_uber;
+ void *cpu_chud;
+ void *cpu_console_buf;
+ struct x86_lcpu lcpu;
+ struct processor *cpu_processor;
+#if NCOPY_WINDOWS > 0
+ struct cpu_pmap *cpu_pmap;
+#endif
+ struct cpu_desc_table *cpu_desc_tablep;
+ struct fake_descriptor *cpu_ldtp;
+ cpu_desc_index_t cpu_desc_index;
+ int cpu_ldt;
+#if NCOPY_WINDOWS > 0
+ vm_offset_t cpu_copywindow_base;
+ uint64_t *cpu_copywindow_pdp;
+
+ vm_offset_t cpu_physwindow_base;
+ uint64_t *cpu_physwindow_ptep;
+#endif
+
+#define HWINTCNT_SIZE 256
+ uint32_t cpu_hwIntCnt[HWINTCNT_SIZE]; /* Interrupt counts */
+ uint64_t cpu_hwIntpexits[HWINTCNT_SIZE];
+ uint64_t cpu_hwIntcexits[HWINTCNT_SIZE];
+ uint64_t cpu_dr7; /* debug control register */
+ uint64_t cpu_int_event_time; /* intr entry/exit time */
+ pal_rtc_nanotime_t *cpu_nanotime; /* Nanotime info */
+#if CONFIG_COUNTERS
+ thread_t csw_old_thread;
+ thread_t csw_new_thread;
+#endif /* CONFIG_COUNTERS */
+#if KPC
+ /* double-buffered performance counter data */
+ uint64_t *cpu_kpc_buf[2];
+ /* PMC shadow and reload value buffers */
+ uint64_t *cpu_kpc_shadow;
+ uint64_t *cpu_kpc_reload;
+#endif
+ uint32_t cpu_pmap_pcid_enabled; /* nonzero if PCID is in use on this CPU */
+ pcid_t cpu_active_pcid;
+ pcid_t cpu_last_pcid;
+ volatile pcid_ref_t *cpu_pmap_pcid_coherentp;
+ volatile pcid_ref_t *cpu_pmap_pcid_coherentp_kernel;
+#define PMAP_PCID_MAX_PCID (0x1000)
+ pcid_t cpu_pcid_free_hint;
+ pcid_ref_t cpu_pcid_refcounts[PMAP_PCID_MAX_PCID];
+ pmap_t cpu_pcid_last_pmap_dispatched[PMAP_PCID_MAX_PCID];
+#ifdef PCID_STATS
+ uint64_t cpu_pmap_pcid_flushes;
+ uint64_t cpu_pmap_pcid_preserves;
+#endif
+ uint64_t cpu_aperf; /* APERF MSR snapshot */
+ uint64_t cpu_mperf; /* MPERF MSR snapshot */
+ uint64_t cpu_c3res; /* C3 residency */
+ uint64_t cpu_c6res; /* C6 residency */
+ uint64_t cpu_c7res; /* C7 residency */
+ uint64_t cpu_itime_total;
+ uint64_t cpu_rtime_total;
+ uint64_t cpu_ixtime;
+ uint64_t cpu_idle_exits;
+ uint64_t cpu_rtimes[CPU_RTIME_BINS];
+ uint64_t cpu_itimes[CPU_ITIME_BINS];
+ uint64_t cpu_cur_insns;
+ uint64_t cpu_cur_ucc;
+ uint64_t cpu_cur_urc;
+ uint64_t cpu_max_observed_int_latency;
+ int cpu_max_observed_int_latency_vector;
+ volatile boolean_t cpu_NMI_acknowledged;
+ uint64_t debugger_entry_time;
+ uint64_t debugger_ipi_time;
+ /* A separate nested interrupt stack flag, to account
+ * for non-nested interrupts arriving while on the interrupt stack.
+ * Currently this only occurs when AICPM enables interrupts on the
+ * interrupt stack during processor offlining.
+ */
+ uint32_t cpu_nested_istack;
+ uint32_t cpu_nested_istack_events;
+ x86_saved_state64_t *cpu_fatal_trap_state;
+ x86_saved_state64_t *cpu_post_fatal_trap_state;
+#if CONFIG_VMX
+ vmx_cpu_t cpu_vmx; /* wonderful world of virtualization */
+#endif
+#if CONFIG_MCA
+ struct mca_state *cpu_mca_state; /* State at MC fault */