X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/c910b4d9d2451126ae3917b931cd4390c11e1d52..7ddcb079202367355dddccdfa4318e57d50318be:/osfmk/i386/cpu_data.h?ds=sidebyside

diff --git a/osfmk/i386/cpu_data.h b/osfmk/i386/cpu_data.h
index e41f6b8cd..22de8b2b0 100644
--- a/osfmk/i386/cpu_data.h
+++ b/osfmk/i386/cpu_data.h
@@ -35,19 +35,23 @@
 #include 
 
-#if defined(__GNUC__)
-
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
 #include 
-#include 
+#include 
+#include 
 #include 
 #include 
+#if CONFIG_VMX
 #include 
+#endif
+
+#include 
 
 /*
  * Data structures referenced (anonymously) from per-cpu data:
@@ -56,27 +60,30 @@
 struct cpu_cons_buffer;
 struct cpu_desc_table;
 struct mca_state;
-
 /*
  * Data structures embedded in per-cpu data:
  */
 
 typedef struct rtclock_timer {
-	queue_head_t		queue;
+	mpqueue_head_t		queue;
 	uint64_t		deadline;
-	boolean_t		is_set;
+	uint64_t		when_set;
 	boolean_t		has_expired;
 } rtclock_timer_t;
 
+#if defined(__i386__)
+
 typedef struct {
 	struct i386_tss		*cdi_ktss;
 #if MACH_KDB
 	struct i386_tss		*cdi_dbtss;
 #endif	/* MACH_KDB */
-	struct fake_descriptor	*cdi_gdt;
-	struct fake_descriptor	*cdi_idt;
-	struct fake_descriptor	*cdi_ldt;
-	vm_offset_t		cdi_sstk;
+	struct __attribute__((packed)) {
+		uint16_t size;
+		struct fake_descriptor *ptr;
+	} cdi_gdt, cdi_idt;
+	struct fake_descriptor	*cdi_ldt;
+	vm_offset_t		cdi_sstk;
 } cpu_desc_index_t;
 
 typedef enum {
@@ -85,6 +92,31 @@ typedef enum {
 	TASK_MAP_64BIT_SHARED	/* 64-bit, kernel-shared addr space */
 } task_map_t;
 
+#elif defined(__x86_64__)
+
+
+typedef struct {
+	struct x86_64_tss	*cdi_ktss;
+#if MACH_KDB
+	struct x86_64_tss	*cdi_dbtss;
+#endif	/* MACH_KDB */
+	struct __attribute__((packed)) {
+		uint16_t size;
+		void *ptr;
+	} cdi_gdt, cdi_idt;
+	struct fake_descriptor	*cdi_ldt;
+	vm_offset_t		cdi_sstk;
+} cpu_desc_index_t;
+
+typedef enum {
+	TASK_MAP_32BIT,		/* 32-bit user, compatibility mode */
+	TASK_MAP_64BIT,		/* 64-bit user thread, shared space */
+} task_map_t;
+
+#else
+#error Unsupported architecture
+#endif
+
 /*
  * This structure is used on entry into the (uber-)kernel on syscall from
  * a 64-bit user. It contains the address of the machine state save area
@@ -94,9 +126,11 @@ typedef enum {
 typedef struct {
 	addr64_t	cu_isf;		/* thread->pcb->iss.isf */
 	uint64_t	cu_tmp;		/* temporary scratch */
-	addr64_t	cu_user_gs_base;
+	addr64_t	cu_user_gs_base;
 } cpu_uber_t;
 
+typedef	uint16_t	pcid_t;
+typedef	uint8_t		pcid_ref_t;
 /*
  * Per-cpu data.
 *
@@ -112,19 +146,23 @@
  */
 typedef struct cpu_data
 {
+	struct pal_cpu_data	cpu_pal_data;		/* PAL-specific data */
+#define				cpu_pd cpu_pal_data	/* convenience alias */
 	struct cpu_data		*cpu_this;		/* pointer to myself */
 	thread_t		cpu_active_thread;
+	int			cpu_preemption_level;
+	int			cpu_number;		/* Logical CPU */
 	void			*cpu_int_state;		/* interrupt state */
 	vm_offset_t		cpu_active_stack;	/* kernel stack base */
 	vm_offset_t		cpu_kernel_stack;	/* kernel stack top */
 	vm_offset_t		cpu_int_stack_top;
-	int			cpu_preemption_level;
-	int			cpu_simple_lock_count;
 	int			cpu_interrupt_level;
-	int			cpu_number;		/* Logical CPU */
 	int			cpu_phys_number;	/* Physical CPU */
 	cpu_id_t		cpu_id;			/* Platform Expert */
 	int			cpu_signals;		/* IPI events */
+	int			cpu_prior_signals;	/* Last set of events,
+							 * debugging
+							 */
 	int			cpu_mcount_off;		/* mcount recursion */
 	ast_t			cpu_pending_ast;
 	int			cpu_type;
@@ -133,16 +171,25 @@ typedef struct cpu_data
 	int			cpu_running;
 	rtclock_timer_t		rtclock_timer;
 	boolean_t		cpu_is64bit;
-	task_map_t		cpu_task_map;
-	addr64_t		cpu_task_cr3;
-	addr64_t		cpu_active_cr3;
+	volatile addr64_t	cpu_active_cr3 __attribute((aligned(64)));
+	union {
+		volatile uint32_t cpu_tlb_invalid;
+		struct {
+			volatile uint16_t cpu_tlb_invalid_local;
+			volatile uint16_t cpu_tlb_invalid_global;
+		};
+	};
+	volatile task_map_t	cpu_task_map;
+	volatile addr64_t	cpu_task_cr3;
 	addr64_t		cpu_kernel_cr3;
 	cpu_uber_t		cpu_uber;
 	void			*cpu_chud;
 	void			*cpu_console_buf;
 	struct x86_lcpu		lcpu;
 	struct processor	*cpu_processor;
+#if NCOPY_WINDOWS > 0
 	struct cpu_pmap		*cpu_pmap;
+#endif
 	struct cpu_desc_table	*cpu_desc_tablep;
 	struct fake_descriptor	*cpu_ldtp;
 	cpu_desc_index_t	cpu_desc_index;
@@ -159,19 +206,25 @@ typedef struct cpu_data
 	boolean_t		cpu_iflag;
 	boolean_t		cpu_boot_complete;
 	int			cpu_hibernate;
-
+#if NCOPY_WINDOWS > 0
 	vm_offset_t		cpu_copywindow_base;
 	uint64_t		*cpu_copywindow_pdp;
 
 	vm_offset_t		cpu_physwindow_base;
 	uint64_t		*cpu_physwindow_ptep;
+#endif
 	void 			*cpu_hi_iss;
-	boolean_t		cpu_tlb_invalid;
-	uint32_t		cpu_hwIntCnt[256];	/* Interrupt counts */
+
+#define HWINTCNT_SIZE 256
+	uint32_t		cpu_hwIntCnt[HWINTCNT_SIZE];	/* Interrupt counts */
 	uint64_t		cpu_dr7; /* debug control register */
 	uint64_t		cpu_int_event_time;	/* intr entry/exit time */
+#if CONFIG_VMX
 	vmx_cpu_t		cpu_vmx;		/* wonderful world of virtualization */
+#endif
+#if CONFIG_MCA
 	struct mca_state	*cpu_mca_state;		/* State at MC fault */
+#endif
 	uint64_t		cpu_uber_arg_store;	/* Double mapped address
							 * of current thread's
							 * uu_arg array.
@@ -181,8 +234,37 @@ typedef struct cpu_data
 							 * arg store
 							 * validity flag.
 							 */
-	rtc_nanotime_t		*cpu_nanotime;		/* Nanotime info */
-
+	pal_rtc_nanotime_t	*cpu_nanotime;		/* Nanotime info */
+	thread_t		csw_old_thread;
+	thread_t		csw_new_thread;
+#if	defined(__x86_64__)
+	uint32_t		cpu_pmap_pcid_enabled;
+	pcid_t			cpu_active_pcid;
+	pcid_t			cpu_last_pcid;
+	volatile pcid_ref_t	*cpu_pmap_pcid_coherentp;
+	volatile pcid_ref_t	*cpu_pmap_pcid_coherentp_kernel;
+#define	PMAP_PCID_MAX_PCID	(0x1000)
+	pcid_t			cpu_pcid_free_hint;
+	pcid_ref_t		cpu_pcid_refcounts[PMAP_PCID_MAX_PCID];
+	pmap_t			cpu_pcid_last_pmap_dispatched[PMAP_PCID_MAX_PCID];
+#ifdef	PCID_STATS
+	uint64_t		cpu_pmap_pcid_flushes;
+	uint64_t		cpu_pmap_pcid_preserves;
+#endif
+#endif /* x86_64 */
+	uint64_t		cpu_max_observed_int_latency;
+	int			cpu_max_observed_int_latency_vector;
+	uint64_t		debugger_entry_time;
+	volatile boolean_t	cpu_NMI_acknowledged;
+	/* A separate nested interrupt stack flag, to account
+	 * for non-nested interrupts arriving while on the interrupt stack
+	 * Currently only occurs when AICPM enables interrupts on the
+	 * interrupt stack during processor offlining.
+	 */
+	uint32_t		cpu_nested_istack;
+	uint32_t		cpu_nested_istack_events;
+	x86_saved_state64_t	*cpu_fatal_trap_state;
+	x86_saved_state64_t	*cpu_post_fatal_trap_state;
 } cpu_data_t;
 
 extern cpu_data_t	*cpu_data_ptr[];
@@ -194,11 +276,29 @@ extern cpu_data_t	cpu_data_master;
 #endif	/* offsetof */
 #define CPU_DATA_GET(member,type)					\
 	type ret;							\
-	__asm__ volatile ("movl %%gs:%P1,%0"				\
+	__asm__ volatile ("mov %%gs:%P1,%0"				\
 		: "=r" (ret)						\
 		: "i" (offsetof(cpu_data_t,member)));			\
 	return ret;
 
+#define CPU_DATA_GET_INDEX(member,index,type)				\
+	type ret;							\
+	__asm__ volatile ("mov %%gs:(%1),%0"				\
+		: "=r" (ret)						\
+		: "r" (offsetof(cpu_data_t,member[index])));		\
+	return ret;
+
+#define CPU_DATA_SET(member,value)					\
+	__asm__ volatile ("mov %0,%%gs:%P1"				\
+		:							\
+		: "r" (value), "i" (offsetof(cpu_data_t,member)));
+#define CPU_DATA_XCHG(member,value,type)				\
+	type ret;							\
+	__asm__ volatile ("xchg %0,%%gs:%P1"				\
+		: "=r" (ret)						\
+		: "i" (offsetof(cpu_data_t,member)), "0" (value));	\
+	return ret;
+
 /*
  * Everyone within the osfmk part of the kernel can use the fast
  * inline versions of these routines.  Everyone outside, must call
@@ -217,7 +317,11 @@ get_is64bit(void)
 {
 	CPU_DATA_GET(cpu_is64bit, boolean_t)
 }
+#if CONFIG_YONAH
 #define cpu_mode_is64bit()	get_is64bit()
+#else
+#define cpu_mode_is64bit()	TRUE
+#endif
 
 static inline int
 get_preemption_level(void)
@@ -225,11 +329,6 @@ get_preemption_level(void)
 	CPU_DATA_GET(cpu_preemption_level,int)
 }
 static inline int
-get_simple_lock_count(void)
-{
-	CPU_DATA_GET(cpu_simple_lock_count,int)
-}
-static inline int
 get_interrupt_level(void)
 {
 	CPU_DATA_GET(cpu_interrupt_level,int)
@@ -245,6 +344,7 @@ get_cpu_phys_number(void)
 	CPU_DATA_GET(cpu_phys_number,int)
 }
 
+
 static inline void
 disable_preemption(void)
 {
@@ -305,14 +405,9 @@ current_cpu_datap(void)
 static inline cpu_data_t *
 cpu_datap(int cpu)
 {
-	assert(cpu_data_ptr[cpu]);
 	return cpu_data_ptr[cpu];
 }
 
 extern cpu_data_t *cpu_data_alloc(boolean_t is_boot_cpu);
 
-#else	/* !defined(__GNUC__) */
-
-#endif	/* defined(__GNUC__) */
-
 #endif	/* I386_CPU_DATA */
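
Notes on the changes above, with small stand-alone C sketches. None of
this code is from XNU; each sketch only illustrates a construct that the
diff introduces.

[1] cdi_gdt/cdi_idt change from bare descriptor pointers to a packed
{16-bit size, pointer} pair. That is the exact memory-operand layout the
LGDT/LIDT instructions consume: a 2-byte limit immediately followed by
the base address, with no padding between them. A host-buildable check
(the name pseudo_desc is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    /* Natural alignment inserts 6 bytes of padding on LP64 targets. */
    struct padded { uint16_t size; void *ptr; };

    /* The packed layout is the 10 contiguous bytes (2-byte limit plus
     * 8-byte base) that LGDT/LIDT expect; same shape as cdi_gdt/cdi_idt. */
    struct __attribute__((packed)) pseudo_desc { uint16_t size; void *ptr; };

    int main(void)
    {
        printf("padded: %zu bytes, packed: %zu bytes\n",
               sizeof(struct padded), sizeof(struct pseudo_desc));
        /* expected on x86_64: padded: 16 bytes, packed: 10 bytes */
        return 0;
    }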
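
[2] cpu_active_cr3 gains __attribute((aligned(64))) so the hot CR3/TLB
fields start on their own cache line, and the old boolean
cpu_tlb_invalid becomes an anonymous union: one 32-bit word overlaying
separate 16-bit local and global flags. The apparent point of the
overlay is that a single 32-bit access can test or clear both flags at
once, while the 16-bit halves can still be set individually. A sketch of
the aliasing only (the real shootdown path also involves atomics and
interrupt control, which this ignores):

    #include <stdint.h>
    #include <stdio.h>

    /* Same overlay as in cpu_data above; anonymous struct members are
     * a GNU/C11 feature, as used in the kernel header. */
    typedef union {
        volatile uint32_t cpu_tlb_invalid;
        struct {
            volatile uint16_t cpu_tlb_invalid_local;
            volatile uint16_t cpu_tlb_invalid_global;
        };
    } tlb_invalid_t;

    int main(void)
    {
        tlb_invalid_t t = { .cpu_tlb_invalid = 0 };

        t.cpu_tlb_invalid_local  = 1;   /* request a non-global flush */
        t.cpu_tlb_invalid_global = 1;   /* request a global flush too */

        if (t.cpu_tlb_invalid != 0) {   /* one 32-bit test sees both */
            printf("pending: 0x%08x\n", t.cpu_tlb_invalid);
            t.cpu_tlb_invalid = 0;      /* one 32-bit store clears both */
        }
        return 0;
    }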
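
[3] In CPU_DATA_GET, "movl" becomes "mov" so the assembler derives the
operand size from the destination register; the 32-bit-only form would
reject 64-bit destinations on x86_64. CPU_DATA_SET and CPU_DATA_XCHG
complete the family, and each GET/XCHG macro expands to an entire
function body ending in "return ret;", which is why the inline getters
built on them contain no return of their own. A user-space model of the
pattern, with an explicit base pointer standing in for %gs (the fake_cpu
variable and the macro's base parameter are inventions of this sketch):

    #include <stddef.h>
    #include <stdio.h>

    typedef struct cpu_data {
        struct cpu_data *cpu_this;
        int              cpu_preemption_level;
        int              cpu_number;            /* Logical CPU */
    } cpu_data_t;

    /* Model of CPU_DATA_GET: in the kernel the base address is implicit
     * in %gs; here it is an explicit pointer. Like the real macro, this
     * expands to a complete function body. */
    #define CPU_DATA_GET(base, member, type)                            \
        type ret;                                                       \
        ret = *(type *)((char *)(base) + offsetof(cpu_data_t, member)); \
        return ret;

    static cpu_data_t fake_cpu = { .cpu_number = 3 };

    static inline int
    get_cpu_number(void)
    {
        CPU_DATA_GET(&fake_cpu, cpu_number, int)
    }

    int main(void)
    {
        printf("cpu_number = %d\n", get_cpu_number());
        return 0;
    }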
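
[4] The x86_64-only block adds per-CPU PCID bookkeeping: a reference
count per PCID value, the pmap most recently dispatched on each PCID,
and a free-slot hint. How these fields are actually managed lives in the
pmap code, not in this header; the allocator below is only a hedged
guess at how a refcount array plus hint could be walked, to make the
layout concrete (pcid_alloc and the reserved-PCID-0 assumption are this
sketch's, not XNU's):

    #include <stdint.h>
    #include <stdio.h>

    #define PMAP_PCID_MAX_PCID  (0x1000)

    typedef uint16_t pcid_t;
    typedef uint8_t  pcid_ref_t;

    static pcid_ref_t cpu_pcid_refcounts[PMAP_PCID_MAX_PCID];
    static pcid_t     cpu_pcid_free_hint = 1;  /* assume PCID 0 is reserved */

    /* Scan from the hint for an unreferenced PCID; returns 0 when none
     * is free, in which case a real implementation would recycle one
     * (and flush its stale TLB entries). */
    static pcid_t
    pcid_alloc(void)
    {
        for (uint32_t i = 0; i < PMAP_PCID_MAX_PCID; i++) {
            pcid_t c = (pcid_t)((cpu_pcid_free_hint + i) % PMAP_PCID_MAX_PCID);
            if (c != 0 && cpu_pcid_refcounts[c] == 0) {
                cpu_pcid_refcounts[c] = 1;
                cpu_pcid_free_hint = c;
                return c;
            }
        }
        return 0;
    }

    int main(void)
    {
        printf("pcid: %u\n", pcid_alloc());   /* 1 */
        printf("pcid: %u\n", pcid_alloc());   /* 2 */
        return 0;
    }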