X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/43866e378188c25dd1e2208016ab3cbeb086ae6c..8ad349bb6ed4a0be06e34c92be0d98b92e078db4:/osfmk/i386/proc_reg.h

diff --git a/osfmk/i386/proc_reg.h b/osfmk/i386/proc_reg.h
index 74d9d50eb..158c856db 100644
--- a/osfmk/i386/proc_reg.h
+++ b/osfmk/i386/proc_reg.h
@@ -1,26 +1,31 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
  *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the
+ * License may not be used to create, or enable the creation or
+ * redistribution of, unlawful or unlicensed copies of an Apple operating
+ * system, or to circumvent, violate, or enable the circumvention or
+ * violation of, any terms of an Apple operating system software license
+ * agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
  * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
  * limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
+ *
+ * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
@@ -142,7 +147,11 @@
 /*
  * CR4
  */
+#define CR4_FXS		0x00000200	/* SSE/SSE2 OS supports FXSave */
+#define CR4_XMM		0x00000400	/* SSE/SSE2 instructions supported in OS */
+#define CR4_PGE		0x00000080	/* p6:   Page Global Enable */
 #define	CR4_MCE		0x00000040	/* p5:   Machine Check Exceptions */
+#define CR4_PAE		0x00000020	/* p5:   Physical Address Extensions */
 #define	CR4_PSE		0x00000010	/* p5:   Page Size Extensions */
 #define	CR4_DE		0x00000008	/* p5:   Debugging Extensions */
 #define	CR4_TSD		0x00000004	/* p5:   Time Stamp Disable */
@@ -150,107 +159,197 @@
 #define	CR4_VME		0x00000001	/* p5:   Virtual-8086 Mode Extensions */
 
 #ifndef	ASSEMBLER
-extern unsigned int	get_cr0(void);
-extern void		set_cr0(
-				unsigned int	value);
-extern unsigned int	get_cr2(void);
-extern unsigned int	get_cr3(void);
-extern void		set_cr3(
-				unsigned int	value);
-extern unsigned int	get_cr4(void);
-extern void		set_cr4(
-				unsigned int	value);
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
 
 #define	set_ts() \
 	set_cr0(get_cr0() | CR0_TS)
 
-extern void		clear_ts(void);
-
-extern unsigned short	get_tr(void);
-extern void		set_tr(
-				unsigned int	seg);
-extern unsigned short	get_ldt(void);
-extern void		set_ldt(
-				unsigned int	seg);
-#ifdef	__GNUC__
-extern __inline__ unsigned int get_cr0(void)
+static inline unsigned int get_cr0(void)
 {
	register unsigned int cr0;
	__asm__ volatile("mov %%cr0, %0" : "=r" (cr0));
	return(cr0);
 }
 
-extern __inline__ void set_cr0(unsigned int value)
+static inline void set_cr0(unsigned int value)
 {
	__asm__ volatile("mov %0, %%cr0" : : "r" (value));
 }
 
-extern __inline__ unsigned int get_cr2(void)
+static inline unsigned int get_cr2(void)
 {
	register unsigned int cr2;
	__asm__ volatile("mov %%cr2, %0" : "=r" (cr2));
	return(cr2);
 }
 
-#if	NCPUS > 1 && AT386
-/*
- * get_cr3 and set_cr3 are more complicated for the MPs.  cr3 is where
- * the cpu number gets stored.  The MP versions live in locore.s
- */
-#else	/* NCPUS > 1 && AT386 */
-extern __inline__ unsigned int get_cr3(void)
+static inline unsigned int get_cr3(void)
 {
	register unsigned int cr3;
	__asm__ volatile("mov %%cr3, %0" : "=r" (cr3));
	return(cr3);
 }
 
-extern __inline__ void set_cr3(unsigned int value)
+static inline void set_cr3(unsigned int value)
 {
	__asm__ volatile("mov %0, %%cr3" : : "r" (value));
 }
-#endif	/* NCPUS > 1 && AT386 */
 
-extern __inline__ void clear_ts(void)
+/* Implemented in locore: */
+extern uint32_t	get_cr4(void);
+extern void	set_cr4(uint32_t);
+
+static inline void clear_ts(void)
 {
	__asm__ volatile("clts");
 }
 
-extern __inline__ unsigned short get_tr(void)
+static inline unsigned short get_tr(void)
 {
	unsigned short seg;
	__asm__ volatile("str %0" : "=rm" (seg));
	return(seg);
 }
 
-extern __inline__ void set_tr(unsigned int seg)
+static inline void set_tr(unsigned int seg)
 {
	__asm__ volatile("ltr %0" : : "rm" ((unsigned short)(seg)));
 }
 
-extern __inline__ unsigned short get_ldt(void)
+static inline unsigned short get_ldt(void)
 {
	unsigned short seg;
	__asm__ volatile("sldt %0" : "=rm" (seg));
	return(seg);
 }
 
-extern __inline__ void set_ldt(unsigned int seg)
+static inline void set_ldt(unsigned int seg)
 {
	__asm__ volatile("lldt %0" : : "rm" ((unsigned short)(seg)));
 }
 
-extern __inline__ void flush_tlb(void)
+static inline void flush_tlb(void)
 {
	unsigned long	cr3_temp;
	__asm__ volatile("movl %%cr3, %0; movl %0, %%cr3" : "=r" (cr3_temp) :: "memory");
 }
 
-extern __inline__ void invlpg(unsigned long addr)
+static inline void wbinvd(void)
+{
+	__asm__ volatile("wbinvd");
+}
+
+static inline void invlpg(unsigned long addr)
 {
	__asm__  volatile("invlpg (%0)" :: "r" (addr) : "memory");
 }
-#endif	/* __GNUC__ */
+
+/*
+ * Access to machine-specific registers (available on 586 and better only)
+ * Note: the rd* operations modify the parameters directly (without using
+ * pointer indirection), this allows gcc to optimize better
+ */
+
+#define rdmsr(msr,lo,hi) \
+	__asm__ volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr))
+
+#define wrmsr(msr,lo,hi) \
+	__asm__ volatile("wrmsr" : : "c" (msr), "a" (lo), "d" (hi))
+
+#define rdtsc(lo,hi) \
+	__asm__ volatile("rdtsc" : "=a" (lo), "=d" (hi))
+
+#define write_tsc(lo,hi) wrmsr(0x10, lo, hi)
+
+#define rdpmc(counter,lo,hi) \
+	__asm__ volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter))
+
+static inline uint64_t rdmsr64(uint32_t msr)
+{
+	uint64_t ret;
+	__asm__ volatile("rdmsr" : "=A" (ret) : "c" (msr));
+	return ret;
+}
+
+static inline void wrmsr64(uint32_t msr, uint64_t val)
+{
+	__asm__ volatile("wrmsr" : : "c" (msr), "A" (val));
+}
+
+static inline uint64_t rdtsc64(void)
+{
+	uint64_t ret;
+	__asm__ volatile("rdtsc" : "=A" (ret));
+	return ret;
+}
+
+/*
+ * rdmsr_carefully() returns 0 when the MSR has been read successfully,
+ * or non-zero (1) if the MSR does not exist.
+ * The implementation is in locore.s.
+ */
+extern int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi);
+
+__END_DECLS
+
 #endif	/* ASSEMBLER */
 
+#define MSR_IA32_P5_MC_ADDR		0
+#define MSR_IA32_P5_MC_TYPE		1
+#define MSR_IA32_PLATFORM_ID		0x17
+#define MSR_IA32_EBL_CR_POWERON		0x2a
+
+#define MSR_IA32_APIC_BASE		0x1b
+#define MSR_IA32_APIC_BASE_BSP		(1<<8)
+#define MSR_IA32_APIC_BASE_ENABLE	(1<<11)
+#define MSR_IA32_APIC_BASE_BASE		(0xfffff<<12)
+
+#define MSR_IA32_UCODE_WRITE		0x79
+#define MSR_IA32_UCODE_REV		0x8b
+
+#define MSR_IA32_PERFCTR0		0xc1
+#define MSR_IA32_PERFCTR1		0xc2
+
+#define MSR_IA32_BBL_CR_CTL		0x119
+
+#define MSR_IA32_MCG_CAP		0x179
+#define MSR_IA32_MCG_STATUS		0x17a
+#define MSR_IA32_MCG_CTL		0x17b
+
+#define MSR_IA32_EVNTSEL0		0x186
+#define MSR_IA32_EVNTSEL1		0x187
+
+#define MSR_IA32_MISC_ENABLE		0x1a0
+
+#define MSR_IA32_DEBUGCTLMSR		0x1d9
+#define MSR_IA32_LASTBRANCHFROMIP	0x1db
+#define MSR_IA32_LASTBRANCHTOIP		0x1dc
+#define MSR_IA32_LASTINTFROMIP		0x1dd
+#define MSR_IA32_LASTINTTOIP		0x1de
+
+#define MSR_IA32_CR_PAT			0x277
+
+#define MSR_IA32_MC0_CTL		0x400
+#define MSR_IA32_MC0_STATUS		0x401
+#define MSR_IA32_MC0_ADDR		0x402
+#define MSR_IA32_MC0_MISC		0x403
+
+#define MSR_IA32_MTRRCAP		0xfe
+#define MSR_IA32_MTRR_DEF_TYPE		0x2ff
+#define MSR_IA32_MTRR_PHYSBASE(n)	(0x200 + 2*(n))
+#define MSR_IA32_MTRR_PHYSMASK(n)	(0x200 + 2*(n) + 1)
+#define MSR_IA32_MTRR_FIX64K_00000	0x250
+#define MSR_IA32_MTRR_FIX16K_80000	0x258
+#define MSR_IA32_MTRR_FIX16K_A0000	0x259
+#define MSR_IA32_MTRR_FIX4K_C0000	0x268
+#define MSR_IA32_MTRR_FIX4K_C8000	0x269
+#define MSR_IA32_MTRR_FIX4K_D0000	0x26a
+#define MSR_IA32_MTRR_FIX4K_D8000	0x26b
+#define MSR_IA32_MTRR_FIX4K_E0000	0x26c
+#define MSR_IA32_MTRR_FIX4K_E8000	0x26d
+#define MSR_IA32_MTRR_FIX4K_F0000	0x26e
+#define MSR_IA32_MTRR_FIX4K_F8000	0x26f
+
 #endif	/* _I386_PROC_REG_H_ */
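
Usage sketch (illustrative only, not part of the patch; kernel/osfmk context assumed): the new CR4 bit definitions pair with the locore-implemented get_cr4()/set_cr4() accessors for a read-modify-write sequence, while set_ts() raises CR0.TS so the next FP/SSE instruction traps. The function name below is hypothetical, and the CPUID feature checks that should gate CR4_FXS/CR4_XMM are elided.

#include <i386/proc_reg.h>

/* Hypothetical helper: turn on the SSE-related CR4 bits defined above. */
static void
enable_fx_state(void)
{
	/*
	 * Read-modify-write CR4 through the accessors implemented in locore.
	 * CR4_FXS and CR4_XMM should only be set on CPUs whose CPUID feature
	 * flags advertise the corresponding support (check elided here).
	 */
	set_cr4(get_cr4() | CR4_FXS | CR4_XMM);

	/*
	 * Set CR0.TS: the next FP/SSE instruction will fault, letting the
	 * kernel lazily restore the right floating-point context.
	 */
	set_ts();
}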
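
A second sketch for the MSR accessors: rdmsr()/rdtsc()/rdpmc() are macros that assign straight into the lvalues passed as lo and hi (no pointer indirection, as the comment in the patch notes), while rdmsr64()/rdtsc64() return the full 64-bit value. The function below is hypothetical and only illustrates the calling convention.

#include <i386/proc_reg.h>

static void
sample_msr_reads(void)
{
	uint32_t lo, hi;
	uint64_t apic, tsc;

	/* The macro expands to inline asm that writes lo and hi directly. */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);

	if (lo & MSR_IA32_APIC_BASE_ENABLE) {
		/* Mask out the local APIC base address held in the low word. */
		uint32_t apic_base = lo & MSR_IA32_APIC_BASE_BASE;
		(void) apic_base;
	}

	/* 64-bit variants return the value instead of using out-parameters. */
	apic = rdmsr64(MSR_IA32_APIC_BASE);
	tsc  = rdtsc64();

	(void) hi; (void) apic; (void) tsc;
}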
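
Finally, a sketch combining rdmsr_carefully() with the new MTRR macros: the number of variable-range pairs comes from the VCNT field (bits 7:0) of MSR_IA32_MTRRCAP, and rdmsr_carefully() lets the walk stop cleanly if an MSR turns out not to exist. The function name and the local bit masks are illustrative assumptions, not part of the header.

#include <i386/proc_reg.h>

static void
walk_variable_mtrrs(void)
{
	uint32_t vcnt = (uint32_t)(rdmsr64(MSR_IA32_MTRRCAP) & 0xff);
	uint32_t i, base_lo, base_hi, mask_lo, mask_hi;

	for (i = 0; i < vcnt; i++) {
		/* Non-zero return means the MSR does not exist; stop instead of faulting. */
		if (rdmsr_carefully(MSR_IA32_MTRR_PHYSBASE(i), &base_lo, &base_hi) != 0)
			break;
		if (rdmsr_carefully(MSR_IA32_MTRR_PHYSMASK(i), &mask_lo, &mask_hi) != 0)
			break;

		if (mask_lo & (1 << 11)) {		/* valid bit for this range */
			uint32_t type = base_lo & 0xff;	/* memory type of the range */
			(void) type;
		}
	}
}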