X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/8f6c56a50524aa785f7e596d52dddfb331e18961..bd504ef0e0b883cdd7917b73b3574eb9ce669905:/osfmk/i386/proc_reg.h

diff --git a/osfmk/i386/proc_reg.h b/osfmk/i386/proc_reg.h
index 38c09a1b7..019d0aebe 100644
--- a/osfmk/i386/proc_reg.h
+++ b/osfmk/i386/proc_reg.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -145,59 +145,184 @@
 /*
  * CR4
  */
-#define	CR4_FXS		0x00000200	/* SSE/SSE2 OS supports FXSave */
-#define	CR4_XMM		0x00000400	/* SSE/SSE2 instructions supported in OS */
-#define	CR4_PGE		0x00000080	/* p6: Page Global Enable */
-#define	CR4_MCE		0x00000040	/* p5: Machine Check Exceptions */
-#define	CR4_PAE		0x00000020	/* p5: Physical Address Extensions */
-#define	CR4_PSE		0x00000010	/* p5: Page Size Extensions */
-#define	CR4_DE		0x00000008	/* p5: Debugging Extensions */
-#define	CR4_TSD		0x00000004	/* p5: Time Stamp Disable */
-#define	CR4_PVI		0x00000002	/* p5: Protected-mode Virtual Interrupts */
-#define	CR4_VME		0x00000001	/* p5: Virtual-8086 Mode Extensions */
+#define	CR4_SMEP	0x00100000	/* Supervisor-Mode Execute Protect */
+#define	CR4_OSXSAVE	0x00040000	/* OS supports XSAVE */
+#define	CR4_PCIDE	0x00020000	/* PCID Enable */
+#define	CR4_RDWRFSGS	0x00010000	/* RDWRFSGS Enable */
+#define	CR4_SMXE	0x00004000	/* Enable SMX operation */
+#define	CR4_VMXE	0x00002000	/* Enable VMX operation */
+#define	CR4_OSXMM	0x00000400	/* SSE/SSE2 exception support in OS */
+#define	CR4_OSFXS	0x00000200	/* SSE/SSE2 OS supports FXSave */
+#define	CR4_PCE		0x00000100	/* Performance-Monitor Count Enable */
+#define	CR4_PGE		0x00000080	/* Page Global Enable */
+#define	CR4_MCE		0x00000040	/* Machine Check Exceptions */
+#define	CR4_PAE		0x00000020	/* Physical Address Extensions */
+#define	CR4_PSE		0x00000010	/* Page Size Extensions */
+#define	CR4_DE		0x00000008	/* Debugging Extensions */
+#define	CR4_TSD		0x00000004	/* Time Stamp Disable */
+#define	CR4_PVI		0x00000002	/* Protected-mode Virtual Interrupts */
+#define	CR4_VME		0x00000001	/* Virtual-8086 Mode Extensions */
+
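/*
 * Editorial note: a minimal sketch (not part of the patch) of how these
 * CR4 masks are typically consumed, via the get_cr4()/set_cr4() inlines
 * added later in this header. The helper name is hypothetical.
 */
static inline void example_cr4_enable_sse(void)
{
	uintptr_t cr4 = get_cr4();
	/* advertise OS support for FXSAVE/FXRSTOR and SSE exception handling */
	cr4 |= (CR4_OSFXS | CR4_OSXMM);
	set_cr4(cr4);
}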
+/*
+ * XCR0 - XFEATURE_ENABLED_MASK (a.k.a. XFEM) register
+ */
+#define	XCR0_YMM	0x0000000000000004ULL	/* YMM state available */
+#define	XFEM_YMM	XCR0_YMM
+#define	XCR0_SSE	0x0000000000000002ULL	/* SSE supported by XSAVE/XRESTORE */
+#define	XCR0_X87	0x0000000000000001ULL	/* x87, FPU/MMX (always set) */
+#define	XFEM_SSE	XCR0_SSE
+#define	XFEM_X87	XCR0_X87
+#define	XCR0		(0)
+
+#define	PMAP_PCID_PRESERVE	(1ULL << 63)
+#define	PMAP_PCID_MASK		(0xFFF)
+
+#define	RDRAND_RAX	.byte 0x48, 0x0f, 0xc7, 0xf0
 
 #ifndef	ASSEMBLER
 
 #include <sys/cdefs.h>
+#include <stdint.h>
+
 __BEGIN_DECLS
 
-#define	set_ts() \
-	set_cr0(get_cr0() | CR0_TS)
+#define	set_ts()	set_cr0(get_cr0() | CR0_TS)
+
+static inline uint16_t get_es(void)
+{
+	uint16_t es;
+	__asm__ volatile("mov %%es, %0" : "=r" (es));
+	return es;
+}
+
+static inline void set_es(uint16_t es)
+{
+	__asm__ volatile("mov %0, %%es" : : "r" (es));
+}
+
+static inline uint16_t get_ds(void)
+{
+	uint16_t ds;
+	__asm__ volatile("mov %%ds, %0" : "=r" (ds));
+	return ds;
+}
+
+static inline void set_ds(uint16_t ds)
+{
+	__asm__ volatile("mov %0, %%ds" : : "r" (ds));
+}
+
+static inline uint16_t get_fs(void)
+{
+	uint16_t fs;
+	__asm__ volatile("mov %%fs, %0" : "=r" (fs));
+	return fs;
+}
+
+static inline void set_fs(uint16_t fs)
+{
+	__asm__ volatile("mov %0, %%fs" : : "r" (fs));
+}
+
+static inline uint16_t get_gs(void)
+{
+	uint16_t gs;
+	__asm__ volatile("mov %%gs, %0" : "=r" (gs));
+	return gs;
+}
 
-static inline unsigned int get_cr0(void)
+static inline void set_gs(uint16_t gs)
 {
-	register unsigned int cr0;
+	__asm__ volatile("mov %0, %%gs" : : "r" (gs));
+}
+
+static inline uint16_t get_ss(void)
+{
+	uint16_t ss;
+	__asm__ volatile("mov %%ss, %0" : "=r" (ss));
+	return ss;
+}
+
+static inline void set_ss(uint16_t ss)
+{
+	__asm__ volatile("mov %0, %%ss" : : "r" (ss));
+}
+
+static inline uintptr_t get_cr0(void)
+{
+	uintptr_t cr0;
 	__asm__ volatile("mov %%cr0, %0" : "=r" (cr0));
 	return(cr0);
 }
 
-static inline void set_cr0(unsigned int value)
+static inline void set_cr0(uintptr_t value)
 {
 	__asm__ volatile("mov %0, %%cr0" : : "r" (value));
 }
 
-static inline unsigned int get_cr2(void)
+static inline uintptr_t get_cr2(void)
 {
-	register unsigned int cr2;
+	uintptr_t cr2;
 	__asm__ volatile("mov %%cr2, %0" : "=r" (cr2));
 	return(cr2);
 }
 
-static inline unsigned int get_cr3(void)
+static inline uintptr_t get_cr3_raw(void)
 {
-	register unsigned int cr3;
+	register uintptr_t cr3;
 	__asm__ volatile("mov %%cr3, %0" : "=r" (cr3));
 	return(cr3);
 }
 
-static inline void set_cr3(unsigned int value)
+static inline void set_cr3_raw(uintptr_t value)
 {
 	__asm__ volatile("mov %0, %%cr3" : : "r" (value));
 }
 
-/* Implemented in locore: */
-extern uint32_t get_cr4(void);
-extern void set_cr4(uint32_t);
+#if defined(__i386__)
+static inline uintptr_t get_cr3(void)
+{
+	register uintptr_t cr3;
+	__asm__ volatile("mov %%cr3, %0" : "=r" (cr3));
+	return(cr3);
+}
+
+static inline void set_cr3(uintptr_t value)
+{
+	__asm__ volatile("mov %0, %%cr3" : : "r" (value));
+}
+#else
+static inline uintptr_t get_cr3_base(void)
+{
+	register uintptr_t cr3;
+	__asm__ volatile("mov %%cr3, %0" : "=r" (cr3));
+	return(cr3 & ~(0xFFFULL));
+}
+
+static inline void set_cr3_composed(uintptr_t base, uint16_t pcid, uint32_t preserve)
+{
+	__asm__ volatile("mov %0, %%cr3" : : "r" (base | pcid | (((uint64_t)preserve) << 63)));
+}
+
+#endif
+static inline uintptr_t get_cr4(void)
+{
+	uintptr_t cr4;
+	__asm__ volatile("mov %%cr4, %0" : "=r" (cr4));
+	return(cr4);
+}
+
+static inline void set_cr4(uintptr_t value)
+{
+	__asm__ volatile("mov %0, %%cr4" : : "r" (value));
+}
+
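/*
 * Editorial note: a sketch (not from the patch) of the intended use of
 * set_cr3_composed() on x86_64 with CR4_PCIDE enabled. Bits 63 and 11:0
 * of CR3 carry the "preserve" flag and the PCID; the helper and its
 * pml4_phys argument are hypothetical.
 */
static inline void example_switch_address_space(uintptr_t pml4_phys, uint16_t pcid)
{
	/* mask off the low 12 bits so the PCID can be OR-ed in */
	uintptr_t base = pml4_phys & ~((uintptr_t)PMAP_PCID_MASK);
	/* preserve=1: keep TLB entries tagged with this PCID */
	set_cr3_composed(base, pcid, 1);
}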
+static inline uintptr_t x86_get_flags(void)
+{
+	uintptr_t erflags;
+	__asm__ volatile("pushf; pop %0" : "=r" (erflags));
+	return erflags;
+}
 
 static inline void clear_ts(void)
 {
@@ -216,30 +341,72 @@ static inline void set_tr(unsigned int seg)
 	__asm__ volatile("ltr %0" : : "rm" ((unsigned short)(seg)));
 }
 
-static inline unsigned short get_ldt(void)
+static inline unsigned short sldt(void)
 {
 	unsigned short seg;
 	__asm__ volatile("sldt %0" : "=rm" (seg));
 	return(seg);
 }
 
-static inline void set_ldt(unsigned int seg)
+static inline void lldt(unsigned int seg)
 {
 	__asm__ volatile("lldt %0" : : "rm" ((unsigned short)(seg)));
 }
 
+static inline void lgdt(uintptr_t *desc)
+{
+	__asm__ volatile("lgdt %0" : : "m" (*desc));
+}
+
+static inline void lidt(uintptr_t *desc)
+{
+	__asm__ volatile("lidt %0" : : "m" (*desc));
+}
+
+static inline void swapgs(void)
+{
+	__asm__ volatile("swapgs");
+}
+
+#ifdef MACH_KERNEL_PRIVATE
+
+#ifdef __i386__
+
+#include <i386/cpu_data.h>
+
+extern void cpuid64(uint32_t);
+extern void flush_tlb64(void);
+extern uint64_t get64_cr3(void);
+extern void set64_cr3(uint64_t);
 static inline void flush_tlb(void)
 {
-	unsigned long cr3_temp;
-	__asm__ volatile("movl %%cr3, %0; movl %0, %%cr3" : "=r" (cr3_temp) :: "memory");
+	if (cpu_mode_is64bit()) {
+		flush_tlb64();
+	} else {
+		set_cr3(get_cr3());
+	}
+}
+static inline void flush_tlb_raw(void)
+{
+	flush_tlb();
+}
+
+#elif defined(__x86_64__)
+static inline void flush_tlb_raw(void)
+{
+	set_cr3_raw(get_cr3_raw());
 }
+#endif
+extern int rdmsr64_carefully(uint32_t msr, uint64_t *val);
+extern int wrmsr64_carefully(uint32_t msr, uint64_t val);
+#endif	/* MACH_KERNEL_PRIVATE */
 
 static inline void wbinvd(void)
 {
 	__asm__ volatile("wbinvd");
 }
 
-static inline void invlpg(unsigned long addr)
+static inline void invlpg(uintptr_t addr)
 {
 	__asm__ volatile("invlpg (%0)" :: "r" (addr) : "memory");
 }
@@ -257,13 +424,15 @@ static inline void invlpg(unsigned long addr)
 	__asm__ volatile("wrmsr" : : "c" (msr), "a" (lo), "d" (hi))
 
 #define rdtsc(lo,hi) \
-	__asm__ volatile("rdtsc" : "=a" (lo), "=d" (hi))
+	__asm__ volatile("lfence; rdtsc; lfence" : "=a" (lo), "=d" (hi))
 
 #define write_tsc(lo,hi) wrmsr(0x10, lo, hi)
 
 #define rdpmc(counter,lo,hi) \
 	__asm__ volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter))
 
+#ifdef __i386__
+
 static inline uint64_t rdmsr64(uint32_t msr)
 {
 	uint64_t ret;
@@ -279,75 +448,204 @@ static inline void wrmsr64(uint32_t msr, uint64_t val)
 static inline uint64_t rdtsc64(void)
 {
 	uint64_t ret;
-	__asm__ volatile("rdtsc" : "=A" (ret));
+	__asm__ volatile("lfence; rdtsc; lfence" : "=A" (ret));
+	return ret;
+}
+
+static inline uint64_t rdtscp64(uint32_t *aux)
+{
+	uint64_t ret;
+	__asm__ volatile("rdtscp; mov %%ecx, %1"
+			 : "=A" (ret), "=m" (*aux)
+			 :
+			 : "ecx");
 	return ret;
 }
 
+#elif defined(__x86_64__)
+
+static inline uint64_t rdmsr64(uint32_t msr)
+{
+	uint32_t lo=0, hi=0;
+	rdmsr(msr, lo, hi);
+	return (((uint64_t)hi) << 32) | ((uint64_t)lo);
+}
+
+static inline void wrmsr64(uint32_t msr, uint64_t val)
+{
+	wrmsr(msr, (val & 0xFFFFFFFFUL), ((val >> 32) & 0xFFFFFFFFUL));
+}
+
+static inline uint64_t rdtsc64(void)
+{
+	uint64_t lo, hi;
+	rdtsc(lo, hi);
+	return ((hi) << 32) | (lo);
+}
+
+static inline uint64_t rdtscp64(uint32_t *aux)
+{
+	uint64_t lo, hi;
+	__asm__ volatile("rdtscp; mov %%ecx, %1"
+			 : "=a" (lo), "=d" (hi), "=m" (*aux)
+			 :
+			 : "ecx");
+	return ((hi) << 32) | (lo);
+}
+
+#else
+#error Unsupported architecture
+#endif
+
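/*
 * Editorial note: an illustrative sketch (not part of the patch). Because
 * rdtsc()/rdtsc64() above now bracket RDTSC with LFENCE, the counter read
 * is ordered with respect to surrounding instructions, so a simple cycle
 * measurement can be written as follows; the helper is hypothetical.
 */
static inline uint64_t example_measure_cycles(void (*fn)(void))
{
	uint64_t start = rdtsc64();
	fn();				/* hypothetical workload */
	return rdtsc64() - start;
}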
 /*
  * rdmsr_carefully() returns 0 when the MSR has been read successfully,
  * or non-zero (1) if the MSR does not exist.
  * The implementation is in locore.s.
  */
 extern int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi);
-
 __END_DECLS
 
 #endif	/* ASSEMBLER */
 
-#define	MSR_IA32_P5_MC_ADDR		0
-#define	MSR_IA32_P5_MC_TYPE		1
-#define	MSR_IA32_PLATFORM_ID		0x17
-#define	MSR_IA32_EBL_CR_POWERON		0x2a
-
-#define	MSR_IA32_APIC_BASE		0x1b
-#define	MSR_IA32_APIC_BASE_BSP		(1<<8)
-#define	MSR_IA32_APIC_BASE_ENABLE	(1<<11)
-#define	MSR_IA32_APIC_BASE_BASE		(0xfffff<<12)
-
-#define	MSR_IA32_UCODE_WRITE		0x79
-#define	MSR_IA32_UCODE_REV		0x8b
-
-#define	MSR_IA32_PERFCTR0		0xc1
-#define	MSR_IA32_PERFCTR1		0xc2
-
-#define	MSR_IA32_BBL_CR_CTL		0x119
-
-#define	MSR_IA32_MCG_CAP		0x179
-#define	MSR_IA32_MCG_STATUS		0x17a
-#define	MSR_IA32_MCG_CTL		0x17b
-
-#define	MSR_IA32_EVNTSEL0		0x186
-#define	MSR_IA32_EVNTSEL1		0x187
-
-#define	MSR_IA32_MISC_ENABLE		0x1a0
-
-#define	MSR_IA32_DEBUGCTLMSR		0x1d9
-#define	MSR_IA32_LASTBRANCHFROMIP	0x1db
-#define	MSR_IA32_LASTBRANCHTOIP		0x1dc
-#define	MSR_IA32_LASTINTFROMIP		0x1dd
-#define	MSR_IA32_LASTINTTOIP		0x1de
-
-#define	MSR_IA32_CR_PAT			0x277
-
-#define	MSR_IA32_MC0_CTL		0x400
-#define	MSR_IA32_MC0_STATUS		0x401
-#define	MSR_IA32_MC0_ADDR		0x402
-#define	MSR_IA32_MC0_MISC		0x403
-
-#define	MSR_IA32_MTRRCAP		0xfe
-#define	MSR_IA32_MTRR_DEF_TYPE		0x2ff
-#define	MSR_IA32_MTRR_PHYSBASE(n)	(0x200 + 2*(n))
-#define	MSR_IA32_MTRR_PHYSMASK(n)	(0x200 + 2*(n) + 1)
-#define	MSR_IA32_MTRR_FIX64K_00000	0x250
-#define	MSR_IA32_MTRR_FIX16K_80000	0x258
-#define	MSR_IA32_MTRR_FIX16K_A0000	0x259
-#define	MSR_IA32_MTRR_FIX4K_C0000	0x268
-#define	MSR_IA32_MTRR_FIX4K_C8000	0x269
-#define	MSR_IA32_MTRR_FIX4K_D0000	0x26a
-#define	MSR_IA32_MTRR_FIX4K_D8000	0x26b
-#define	MSR_IA32_MTRR_FIX4K_E0000	0x26c
-#define	MSR_IA32_MTRR_FIX4K_E8000	0x26d
-#define	MSR_IA32_MTRR_FIX4K_F0000	0x26e
-#define	MSR_IA32_MTRR_FIX4K_F8000	0x26f
+#define	MSR_IA32_P5_MC_ADDR		0
+#define	MSR_IA32_P5_MC_TYPE		1
+#define	MSR_IA32_PLATFORM_ID		0x17
+#define	MSR_IA32_EBL_CR_POWERON		0x2a
+
+#define	MSR_IA32_APIC_BASE		0x1b
+#define	MSR_IA32_APIC_BASE_BSP		(1<<8)
+#define	MSR_IA32_APIC_BASE_EXTENDED	(1<<10)
+#define	MSR_IA32_APIC_BASE_ENABLE	(1<<11)
+#define	MSR_IA32_APIC_BASE_BASE		(0xfffff<<12)
+
+#define	MSR_CORE_THREAD_COUNT		0x35
+
+#define	MSR_IA32_FEATURE_CONTROL	0x3a
+#define	MSR_IA32_FEATCTL_LOCK		(1<<0)
+#define	MSR_IA32_FEATCTL_VMXON_SMX	(1<<1)
+#define	MSR_IA32_FEATCTL_VMXON		(1<<2)
+#define	MSR_IA32_FEATCTL_CSTATE_SMI	(1<<16)
+
+#define	MSR_IA32_UPDT_TRIG		0x79
+#define	MSR_IA32_BIOS_SIGN_ID		0x8b
+#define	MSR_IA32_UCODE_WRITE		MSR_IA32_UPDT_TRIG
+#define	MSR_IA32_UCODE_REV		MSR_IA32_BIOS_SIGN_ID
+
+#define	MSR_IA32_PERFCTR0		0xc1
+#define	MSR_IA32_PERFCTR1		0xc2
+
+#define	MSR_PLATFORM_INFO		0xce
+
+#define	MSR_IA32_MPERF			0xE7
+#define	MSR_IA32_APERF			0xE8
+
+#define	MSR_IA32_BBL_CR_CTL		0x119
+
+#define	MSR_IA32_SYSENTER_CS		0x174
+#define	MSR_IA32_SYSENTER_ESP		0x175
+#define	MSR_IA32_SYSENTER_EIP		0x176
+
+#define	MSR_IA32_MCG_CAP		0x179
+#define	MSR_IA32_MCG_STATUS		0x17a
+#define	MSR_IA32_MCG_CTL		0x17b
+
+#define	MSR_IA32_EVNTSEL0		0x186
+#define	MSR_IA32_EVNTSEL1		0x187
+
+#define	MSR_FLEX_RATIO			0x194
+#define	MSR_IA32_PERF_STS		0x198
+#define	MSR_IA32_PERF_CTL		0x199
+#define	MSR_IA32_CLOCK_MODULATION	0x19a
+
+#define	MSR_IA32_MISC_ENABLE		0x1a0
+
+#define	MSR_IA32_PACKAGE_THERM_STATUS		0x1b1
+#define	MSR_IA32_PACKAGE_THERM_INTERRUPT	0x1b2
+
+#define	MSR_IA32_DEBUGCTLMSR		0x1d9
+#define	MSR_IA32_LASTBRANCHFROMIP	0x1db
+#define	MSR_IA32_LASTBRANCHTOIP		0x1dc
+#define	MSR_IA32_LASTINTFROMIP		0x1dd
+#define	MSR_IA32_LASTINTTOIP		0x1de
+
+#define	MSR_IA32_CR_PAT			0x277
+
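/*
 * Editorial note: a sketch (not in the patch) showing how the MSR numbers
 * above pair with the rdmsr64() accessor, e.g. to test whether the local
 * APIC is globally enabled. The helper is hypothetical.
 */
#ifndef ASSEMBLER
static inline int example_apic_globally_enabled(void)
{
	uint64_t apic_base = rdmsr64(MSR_IA32_APIC_BASE);
	return (apic_base & MSR_IA32_APIC_BASE_ENABLE) != 0;
}
#endif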
+#define	MSR_IA32_MTRRCAP		0xfe
+#define	MSR_IA32_MTRR_DEF_TYPE		0x2ff
+#define	MSR_IA32_MTRR_PHYSBASE(n)	(0x200 + 2*(n))
+#define	MSR_IA32_MTRR_PHYSMASK(n)	(0x200 + 2*(n) + 1)
+#define	MSR_IA32_MTRR_FIX64K_00000	0x250
+#define	MSR_IA32_MTRR_FIX16K_80000	0x258
+#define	MSR_IA32_MTRR_FIX16K_A0000	0x259
+#define	MSR_IA32_MTRR_FIX4K_C0000	0x268
+#define	MSR_IA32_MTRR_FIX4K_C8000	0x269
+#define	MSR_IA32_MTRR_FIX4K_D0000	0x26a
+#define	MSR_IA32_MTRR_FIX4K_D8000	0x26b
+#define	MSR_IA32_MTRR_FIX4K_E0000	0x26c
+#define	MSR_IA32_MTRR_FIX4K_E8000	0x26d
+#define	MSR_IA32_MTRR_FIX4K_F0000	0x26e
+#define	MSR_IA32_MTRR_FIX4K_F8000	0x26f
+
+#define	MSR_IA32_PKG_C3_RESIDENCY	0x3F8
+#define	MSR_IA32_PKG_C6_RESIDENCY	0x3F9
+#define	MSR_IA32_PKG_C7_RESIDENCY	0x3FA
+
+#define	MSR_IA32_CORE_C3_RESIDENCY	0x3FC
+#define	MSR_IA32_CORE_C6_RESIDENCY	0x3FD
+#define	MSR_IA32_CORE_C7_RESIDENCY	0x3FE
+
+#define	MSR_IA32_MC0_CTL		0x400
+#define	MSR_IA32_MC0_STATUS		0x401
+#define	MSR_IA32_MC0_ADDR		0x402
+#define	MSR_IA32_MC0_MISC		0x403
+
+#define	MSR_IA32_VMX_BASE		0x480
+#define	MSR_IA32_VMX_BASIC		MSR_IA32_VMX_BASE
+#define	MSR_IA32_VMXPINBASED_CTLS	MSR_IA32_VMX_BASE+1
+#define	MSR_IA32_PROCBASED_CTLS		MSR_IA32_VMX_BASE+2
+#define	MSR_IA32_VMX_EXIT_CTLS		MSR_IA32_VMX_BASE+3
+#define	MSR_IA32_VMX_ENTRY_CTLS		MSR_IA32_VMX_BASE+4
+#define	MSR_IA32_VMX_MISC		MSR_IA32_VMX_BASE+5
+#define	MSR_IA32_VMX_CR0_FIXED0		MSR_IA32_VMX_BASE+6
+#define	MSR_IA32_VMX_CR0_FIXED1		MSR_IA32_VMX_BASE+7
+#define	MSR_IA32_VMX_CR4_FIXED0		MSR_IA32_VMX_BASE+8
+#define	MSR_IA32_VMX_CR4_FIXED1		MSR_IA32_VMX_BASE+9
+
+#define	MSR_IA32_DS_AREA		0x600
+
+#define	MSR_IA32_PKG_POWER_SKU_UNIT	0x606
+#define	MSR_IA32_PKG_C2_RESIDENCY	0x60D
+#define	MSR_IA32_PKG_ENERGY_STATUS	0x611
+
+#define	MSR_IA32_DDR_ENERGY_STATUS	0x619
+#define	MSR_IA32_LLC_FLUSHED_RESIDENCY_TIMER	0x61D
+#define	MSR_IA32_RING_PERF_STATUS	0x621
+
+#define	MSR_IA32_PKG_C8_RESIDENCY	0x630
+#define	MSR_IA32_PKG_C9_RESIDENCY	0x631
+#define	MSR_IA32_PKG_C10_RESIDENCY	0x632
+
+#define	MSR_IA32_PP0_ENERGY_STATUS	0x639
+#define	MSR_IA32_PP1_ENERGY_STATUS	0x641
+#define	MSR_IA32_IA_PERF_LIMIT_REASONS	0x690
+#define	MSR_IA32_GT_PERF_LIMIT_REASONS	0x6B0
+
+#define	MSR_IA32_TSC_DEADLINE		0x6e0
+
+#define	MSR_IA32_EFER			0xC0000080
+#define	MSR_IA32_EFER_SCE		0x00000001
+#define	MSR_IA32_EFER_LME		0x00000100
+#define	MSR_IA32_EFER_LMA		0x00000400
+#define	MSR_IA32_EFER_NXE		0x00000800
+
+#define	MSR_IA32_STAR			0xC0000081
+#define	MSR_IA32_LSTAR			0xC0000082
+#define	MSR_IA32_CSTAR			0xC0000083
+#define	MSR_IA32_FMASK			0xC0000084
+
+#define	MSR_IA32_FS_BASE		0xC0000100
+#define	MSR_IA32_GS_BASE		0xC0000101
+#define	MSR_IA32_KERNEL_GS_BASE		0xC0000102
+#define	MSR_IA32_TSC_AUX		0xC0000103
 
 #endif	/* _I386_PROC_REG_H_ */
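/*
 * Editorial note: a final sketch (not part of the patch) exercising the
 * IA32_EFER definitions above, e.g. to confirm that no-execute paging
 * support is active. The helper is hypothetical.
 */
#ifndef ASSEMBLER
static inline int example_nx_enabled(void)
{
	return (rdmsr64(MSR_IA32_EFER) & MSR_IA32_EFER_NXE) != 0;
}
#endif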