} rtclock_timer_t;
typedef struct {
- struct x86_64_tss *cdi_ktss;
- x86_64_desc_register_t cdi_gdt;
- x86_64_desc_register_t cdi_idt;
- struct fake_descriptor *cdi_ldt;
- vm_offset_t cdi_sstk;
+ /* The 'u' suffixed fields store the double-mapped descriptor addresses;
+  * the 'b' suffixed fields hold the kernel-mapped (base) equivalents */
+ struct x86_64_tss *cdi_ktssu;
+ struct x86_64_tss *cdi_ktssb;
+ x86_64_desc_register_t cdi_gdtu;
+ x86_64_desc_register_t cdi_gdtb;
+ x86_64_desc_register_t cdi_idtu;
+ x86_64_desc_register_t cdi_idtb;
+ struct fake_descriptor *cdi_ldtu;
+ struct fake_descriptor *cdi_ldtb;
+ vm_offset_t cdi_sstku;
+ vm_offset_t cdi_sstkb;
} cpu_desc_index_t;
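For illustration, a minimal sketch of how the paired descriptor images might be consumed when reloading a table; cpu_load_idt() is a hypothetical helper (not part of this change) and assumes x86_64_desc_register_t is the packed limit/base pair that lidt expects.

static inline void
cpu_load_idt(cpu_desc_index_t *cdi, boolean_t on_double_map)
{
	/* Select the IDT image reachable from the mapping we are running on */
	x86_64_desc_register_t *idt = on_double_map ? &cdi->cdi_idtu : &cdi->cdi_idtb;
	__asm__ volatile ("lidt %0" : : "m" (*idt));
}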
typedef enum {
#define CPU_RTIME_BINS (12)
#define CPU_ITIME_BINS (CPU_RTIME_BINS)
-#define MAXPLFRAMES (32)
+#define MAXPLFRAMES (16)
typedef struct {
boolean_t pltype;
int plevel;
* cpu_datap(cpu_number) macro which uses the cpu_data_ptr[] array of per-cpu
* pointers.
*/
+typedef struct {
+ pcid_t cpu_pcid_free_hint;
+#define PMAP_PCID_MAX_PCID (0x800)
+ pcid_ref_t cpu_pcid_refcounts[PMAP_PCID_MAX_PCID];
+ pmap_t cpu_pcid_last_pmap_dispatched[PMAP_PCID_MAX_PCID];
+} pcid_cdata_t;
+
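A sketch of the allocation pattern these fields suggest: scan the per-CPU refcount array from the free hint for an unused PCID, remembering which pmap last used each slot. The real policy lives in the pmap PCID code; pcid_alloc_sketch() is a made-up name for illustration only.

static inline pcid_t
pcid_alloc_sketch(pcid_cdata_t *pd, pmap_t pmap)
{
	for (unsigned i = 0; i < PMAP_PCID_MAX_PCID; i++) {
		pcid_t c = (pcid_t)((pd->cpu_pcid_free_hint + i) % PMAP_PCID_MAX_PCID);
		if (c != 0 && pd->cpu_pcid_refcounts[c] == 0) {	/* PCID 0 stays reserved */
			pd->cpu_pcid_refcounts[c] = 1;
			pd->cpu_pcid_last_pmap_dispatched[c] = pmap;
			pd->cpu_pcid_free_hint = c;
			return c;
		}
	}
	return 0;	/* exhausted: caller would fall back to a flush-based path */
}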
typedef struct cpu_data
{
struct pal_cpu_data cpu_pal_data; /* PAL-specific data */
vm_offset_t cpu_kernel_stack; /* kernel stack top */
vm_offset_t cpu_int_stack_top;
int cpu_interrupt_level;
- int cpu_phys_number; /* Physical CPU */
- cpu_id_t cpu_id; /* Platform Expert */
volatile int cpu_signals; /* IPI events */
volatile int cpu_prior_signals; /* Last set of events,
							 * debugging */
volatile task_map_t cpu_task_map;
volatile addr64_t cpu_task_cr3;
addr64_t cpu_kernel_cr3;
+ volatile addr64_t cpu_ucr3;
boolean_t cpu_pagezero_mapped;
cpu_uber_t cpu_uber;
- void *cpu_chud;
- void *cpu_console_buf;
- struct x86_lcpu lcpu;
+/* Double-mapped per-CPU exception stack address */
+ uintptr_t cd_estack;
+ int cpu_xstate;
+/* Address of shadowed, partially mirrored CPU data structures located
+ * in the double mapped PML4
+ */
+ void *cd_shadow;
struct processor *cpu_processor;
#if NCOPY_WINDOWS > 0
struct cpu_pmap *cpu_pmap;
#endif
+ struct real_descriptor *cpu_ldtp;
struct cpu_desc_table *cpu_desc_tablep;
- struct fake_descriptor *cpu_ldtp;
cpu_desc_index_t cpu_desc_index;
int cpu_ldt;
#if NCOPY_WINDOWS > 0
#define HWINTCNT_SIZE 256
uint32_t cpu_hwIntCnt[HWINTCNT_SIZE]; /* Interrupt counts */
uint64_t cpu_hwIntpexits[HWINTCNT_SIZE];
- uint64_t cpu_hwIntcexits[HWINTCNT_SIZE];
uint64_t cpu_dr7; /* debug control register */
uint64_t cpu_int_event_time; /* intr entry/exit time */
pal_rtc_nanotime_t *cpu_nanotime; /* Nanotime info */
pcid_t cpu_kernel_pcid;
volatile pcid_ref_t *cpu_pmap_pcid_coherentp;
volatile pcid_ref_t *cpu_pmap_pcid_coherentp_kernel;
-#define PMAP_PCID_MAX_PCID (0x1000)
- pcid_t cpu_pcid_free_hint;
- pcid_ref_t cpu_pcid_refcounts[PMAP_PCID_MAX_PCID];
- pmap_t cpu_pcid_last_pmap_dispatched[PMAP_PCID_MAX_PCID];
+ pcid_cdata_t *cpu_pcid_data;
#ifdef PCID_STATS
uint64_t cpu_pmap_pcid_flushes;
uint64_t cpu_pmap_pcid_preserves;
#if CONFIG_MCA
struct mca_state *cpu_mca_state; /* State at MC fault */
#endif
- struct prngContext *cpu_prng; /* PRNG's context */
int cpu_type;
int cpu_subtype;
int cpu_threadtype;
boolean_t cpu_iflag;
boolean_t cpu_boot_complete;
int cpu_hibernate;
-#define MAX_PREEMPTION_RECORDS (128)
+#define MAX_PREEMPTION_RECORDS (8)
#if DEVELOPMENT || DEBUG
int cpu_plri;
plrecord_t plrecords[MAX_PREEMPTION_RECORDS];
#endif
+ void *cpu_console_buf;
+ struct x86_lcpu lcpu;
+ int cpu_phys_number; /* Physical CPU */
+ cpu_id_t cpu_id; /* Platform Expert */
+#if DEBUG
+ uint64_t cpu_entry_cr3;
+ uint64_t cpu_exit_cr3;
+ uint64_t cpu_pcid_last_cr3;
+#endif
+ boolean_t cpu_rendezvous_in_progress;
} cpu_data_t;
extern cpu_data_t *cpu_data_ptr[];
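A sketch of how the new cpu_ucr3 field relates to cpu_kernel_cr3 under the double-map scheme: kernel entry runs on the kernel CR3, and the exit path reloads the user-visible root. The actual trampoline is assembly; return_to_user_cr3_sketch() is hypothetical and assumes set_cr3_raw() from proc_reg.h.

static inline void
return_to_user_cr3_sketch(cpu_data_t *cdp)
{
	/* A zero user CR3 would mean no separate user address-space root is in use */
	if (cdp->cpu_ucr3 != 0) {
		set_cr3_raw((uintptr_t)cdp->cpu_ucr3);	/* drops kernel-only mappings */
	}
}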
 * inline versions of these routines. Everyone outside must call
 * the real thing.
 */
+
+
+/*
+ * The "volatile" flavor of current_thread() is intended for use by
+ * scheduler code which may need to update the thread pointer in the
+ * course of a context switch. Any call to current_thread() made
+ * prior to the thread pointer update should be safe to optimize away
+ * as it should be consistent with that thread's state to the extent
+ * the compiler can reason about it. Likewise, the context switch
+ * path will eventually result in an arbitrary branch to the new
+ * thread's pc, about which the compiler won't be able to reason.
+ * Thus any compile-time optimization of current_thread() calls made
+ * within the new thread should be safely encapsulated in its
+ * register/stack state. The volatile form therefore exists to cover
+ * the window between the thread pointer update and the branch to
+ * the new pc.
+ */
static inline thread_t
+get_active_thread_volatile(void)
+{
+ CPU_DATA_GET(cpu_active_thread,thread_t)
+}
+
+static inline __pure2 thread_t
get_active_thread(void)
{
CPU_DATA_GET(cpu_active_thread,thread_t)
}
+
#define current_thread_fast() get_active_thread()
+#define current_thread_volatile() get_active_thread_volatile()
#define current_thread() current_thread_fast()
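A sketch of the window the comment above describes: because current_thread() is __pure2, the compiler may reuse an earlier result across a store to cpu_active_thread, so the volatile flavor is needed to force a fresh per-CPU read. set_active_thread_sketch() is hypothetical (the real update happens in the context-switch assembly) and assumes current_cpu_datap() as declared elsewhere in this header.

static inline void
set_active_thread_sketch(thread_t new_thread)
{
	/* Update the per-CPU thread pointer, then re-read it via the
	 * volatile flavor so the compiler cannot serve a cached value. */
	current_cpu_datap()->cpu_active_thread = new_thread;
	assert(current_thread_volatile() == new_thread);
}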
#define cpu_mode_is64bit() TRUE
#define MACHINE_PREEMPTION_MACROS (1)
#endif
-
static inline cpu_data_t *
cpu_datap(int cpu) {
return cpu_data_ptr[cpu];
}
+static inline int
+cpu_is_running(int cpu) {
+ return ((cpu_datap(cpu) != NULL) && (cpu_datap(cpu)->cpu_running));
+}
+
+#ifdef MACH_KERNEL_PRIVATE
+static inline cpu_data_t *
+cpu_shadowp(int cpu) {
+ return cpu_data_ptr[cpu]->cd_shadow;
+}
+
+#endif
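A hypothetical accessor showing the intent of cpu_shadowp(): read a field through the partially mirrored copy that is visible from the double-mapped PML4. This assumes cd_estack is among the mirrored fields; the name is made up for illustration.

static inline uintptr_t
cpu_shadow_estack_sketch(int cpu)
{
	/* Fetch the double-mapped exception stack address via the shadow copy */
	return cpu_shadowp(cpu)->cd_estack;
}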
extern cpu_data_t *cpu_data_alloc(boolean_t is_boot_cpu);
extern void cpu_data_realloc(void);