/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/* CMU_ENDHIST */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */

/*
 * Processor registers for i386 and i486.
 */
#ifndef _I386_PROC_REG_H_
#define _I386_PROC_REG_H_

/*
 * Model Specific Registers
 */
#define MSR_P5_TSC              0x10    /* Time Stamp Register */
#define MSR_P5_CESR             0x11    /* Control and Event Select Register */
#define MSR_P5_CTR0             0x12    /* Counter #0 */
#define MSR_P5_CTR1             0x13    /* Counter #1 */

#define MSR_P5_CESR_PC          0x0200  /* Pin Control */
#define MSR_P5_CESR_CC          0x01C0  /* Counter Control mask */
#define MSR_P5_CESR_ES          0x003F  /* Event Control mask */

#define MSR_P5_CESR_SHIFT       16      /* Shift to get Counter 1 */
#define MSR_P5_CESR_MASK        (MSR_P5_CESR_PC|\
                                 MSR_P5_CESR_CC|\
                                 MSR_P5_CESR_ES) /* Mask Counter */

#define MSR_P5_CESR_CC_CLOCK    0x0100  /* Clock Counting (otherwise Event) */
#define MSR_P5_CESR_CC_DISABLE  0x0000  /* Disable counter */
#define MSR_P5_CESR_CC_CPL012   0x0040  /* Count if the CPL == 0, 1, 2 */
#define MSR_P5_CESR_CC_CPL3     0x0080  /* Count if the CPL == 3 */
#define MSR_P5_CESR_CC_CPL      0x00C0  /* Count regardless of the CPL */

#define MSR_P5_CESR_ES_DATA_READ        0x00    /* Data Read */
#define MSR_P5_CESR_ES_DATA_WRITE       0x01    /* Data Write */
#define MSR_P5_CESR_ES_DATA_RW          0x28    /* Data Read or Write */
#define MSR_P5_CESR_ES_DATA_TLB_MISS    0x02    /* Data TLB Miss */
#define MSR_P5_CESR_ES_DATA_READ_MISS   0x03    /* Data Read Miss */
#define MSR_P5_CESR_ES_DATA_WRITE_MISS  0x04    /* Data Write Miss */
#define MSR_P5_CESR_ES_DATA_RW_MISS     0x29    /* Data Read or Write Miss */
#define MSR_P5_CESR_ES_HIT_EM           0x05    /* Write (hit) to M|E state */
#define MSR_P5_CESR_ES_DATA_CACHE_WB    0x06    /* Cache lines written back */
#define MSR_P5_CESR_ES_EXTERNAL_SNOOP   0x07    /* External Snoop */
#define MSR_P5_CESR_ES_CACHE_SNOOP_HIT  0x08    /* Data cache snoop hits */
#define MSR_P5_CESR_ES_MEM_ACCESS_PIPE  0x09    /* Mem. access in both pipes */
#define MSR_P5_CESR_ES_BANK_CONFLICTS   0x0A    /* Bank conflicts */
#define MSR_P5_CESR_ES_MISALIGNED       0x0B    /* Misaligned Memory or I/O */
#define MSR_P5_CESR_ES_CODE_READ        0x0C    /* Code Read */
#define MSR_P5_CESR_ES_CODE_TLB_MISS    0x0D    /* Code TLB miss */
#define MSR_P5_CESR_ES_CODE_CACHE_MISS  0x0E    /* Code Cache miss */
#define MSR_P5_CESR_ES_SEGMENT_LOADED   0x0F    /* Any segment reg. loaded */
#define MSR_P5_CESR_ES_BRANCHE          0x12    /* Branches */
#define MSR_P5_CESR_ES_BTB_HIT          0x13    /* BTB Hits */
#define MSR_P5_CESR_ES_BRANCHE_BTB      0x14    /* Taken branch or BTB Hit */
#define MSR_P5_CESR_ES_PIPELINE_FLUSH   0x15    /* Pipeline Flushes */
#define MSR_P5_CESR_ES_INSTRUCTION      0x16    /* Instruction executed */
#define MSR_P5_CESR_ES_INSTRUCTION_V    0x17    /* Inst. executed (v-pipe) */
#define MSR_P5_CESR_ES_BUS_CYCLE        0x18    /* Clocks while bus cycle */
#define MSR_P5_CESR_ES_FULL_WRITE_BUF   0x19    /* Clocks while full wrt buf. */
#define MSR_P5_CESR_ES_DATA_MEM_READ    0x1A    /* Pipeline waiting for read */
#define MSR_P5_CESR_ES_WRITE_EM         0x1B    /* Stall on write E|M state */
#define MSR_P5_CESR_ES_LOCKED_CYCLE     0x1C    /* Locked bus cycles */
#define MSR_P5_CESR_ES_IO_CYCLE         0x1D    /* I/O Read or Write cycles */
#define MSR_P5_CESR_ES_NON_CACHEABLE    0x1E    /* Non-cacheable Mem. read */
#define MSR_P5_CESR_ES_AGI              0x1F    /* Stall because of AGI */
#define MSR_P5_CESR_ES_FLOP             0x22    /* Floating Point operations */
#define MSR_P5_CESR_ES_BREAK_DR0        0x23    /* Breakpoint matches on DR0 */
#define MSR_P5_CESR_ES_BREAK_DR1        0x24    /* Breakpoint matches on DR1 */
#define MSR_P5_CESR_ES_BREAK_DR2        0x25    /* Breakpoint matches on DR2 */
#define MSR_P5_CESR_ES_BREAK_DR3        0x26    /* Breakpoint matches on DR3 */
#define MSR_P5_CESR_ES_HARDWARE_IT      0x27    /* Hardware interrupts */

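/*
 * Illustrative sketch (not part of the original header; the macro name is
 * hypothetical).  CESR holds the configuration for both counters: counter 0
 * in the low half and counter 1 in the high half, which is what
 * MSR_P5_CESR_SHIFT expresses.  The value below would make counter 0 count
 * data reads at any CPL and counter 1 count data writes at CPL 0-2; it
 * could then be written to MSR_P5_CESR with the wrmsr() macro defined
 * later in this file.
 */
#define EXAMPLE_P5_CESR_VALUE \
        ((MSR_P5_CESR_ES_DATA_READ | MSR_P5_CESR_CC_CPL) | \
         ((MSR_P5_CESR_ES_DATA_WRITE | MSR_P5_CESR_CC_CPL012) << MSR_P5_CESR_SHIFT))
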
/*
 * CR0
 */
#define CR0_PG  0x80000000      /* Enable paging */
#define CR0_CD  0x40000000      /* i486: Cache disable */
#define CR0_NW  0x20000000      /* i486: No write-through */
#define CR0_AM  0x00040000      /* i486: Alignment check mask */
#define CR0_WP  0x00010000      /* i486: Write-protect kernel access */
#define CR0_NE  0x00000020      /* i486: Handle numeric exceptions */
#define CR0_ET  0x00000010      /* Extension type is 80387 */
                                /* (not official) */
#define CR0_TS  0x00000008      /* Task switch */
#define CR0_EM  0x00000004      /* Emulate coprocessor */
#define CR0_MP  0x00000002      /* Monitor coprocessor */
#define CR0_PE  0x00000001      /* Enable protected mode */

/*
 * CR4
 */
#define CR4_FXS 0x00000200      /* SSE/SSE2 OS supports FXSave */
#define CR4_XMM 0x00000400      /* SSE/SSE2 instructions supported in OS */
#define CR4_MCE 0x00000040      /* p5: Machine Check Exceptions */
#define CR4_PSE 0x00000010      /* p5: Page Size Extensions */
#define CR4_DE  0x00000008      /* p5: Debugging Extensions */
#define CR4_TSD 0x00000004      /* p5: Time Stamp Disable */
#define CR4_PVI 0x00000002      /* p5: Protected-mode Virtual Interrupts */
#define CR4_VME 0x00000001      /* p5: Virtual-8086 Mode Extensions */

#ifndef ASSEMBLER
extern unsigned int     get_cr0(void);
extern void             set_cr0(
                                unsigned int    value);
extern unsigned int     get_cr2(void);
extern unsigned int     get_cr3(void);
extern void             set_cr3(
                                unsigned int    value);
extern unsigned int     get_cr4(void);
extern void             set_cr4(
                                unsigned int    value);

#define set_ts() \
        set_cr0(get_cr0() | CR0_TS)
extern void             clear_ts(void);

extern unsigned short   get_tr(void);
extern void             set_tr(
                                unsigned int    seg);

extern unsigned short   get_ldt(void);
extern void             set_ldt(
                                unsigned int    seg);
#ifdef __GNUC__
extern __inline__ unsigned int get_cr0(void)
{
        register unsigned int cr0;
        __asm__ volatile("mov %%cr0, %0" : "=r" (cr0));
        return(cr0);
}

extern __inline__ void set_cr0(unsigned int value)
{
        __asm__ volatile("mov %0, %%cr0" : : "r" (value));
}

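/*
 * Illustrative sketch (not part of the original header; the function name
 * is hypothetical).  The CR0/CR4 bit masks defined above are meant to be
 * OR'ed into, or masked out of, the values returned by these accessors.
 * For example, an OS that saves SSE state with fxsave might set the FXS
 * and XMM bits and clear coprocessor emulation roughly like this:
 */
static __inline__ void example_enable_fxsr(void)
{
        set_cr4(get_cr4() | CR4_FXS | CR4_XMM);           /* advertise fxsave/SSE support */
        set_cr0((get_cr0() | CR0_MP | CR0_NE) & ~CR0_EM); /* native FPU, no emulation */
}
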
extern __inline__ unsigned int get_cr2(void)
{
        register unsigned int cr2;
        __asm__ volatile("mov %%cr2, %0" : "=r" (cr2));
        return(cr2);
}

#if NCPUS > 1 && AT386
/*
 * get_cr3 and set_cr3 are more complicated for the MPs. cr3 is where
 * the cpu number gets stored. The MP versions live in locore.s
 */
#else   /* NCPUS > 1 && AT386 */
extern __inline__ unsigned int get_cr3(void)
{
        register unsigned int cr3;
        __asm__ volatile("mov %%cr3, %0" : "=r" (cr3));
        return(cr3);
}

extern __inline__ void set_cr3(unsigned int value)
{
        __asm__ volatile("mov %0, %%cr3" : : "r" (value));
}
#endif  /* NCPUS > 1 && AT386 */

extern __inline__ void clear_ts(void)
{
        __asm__ volatile("clts");
}

extern __inline__ unsigned short get_tr(void)
{
        unsigned short seg;
        __asm__ volatile("str %0" : "=rm" (seg));
        return(seg);
}

extern __inline__ void set_tr(unsigned int seg)
{
        __asm__ volatile("ltr %0" : : "rm" ((unsigned short)(seg)));
}

extern __inline__ unsigned short get_ldt(void)
{
        unsigned short seg;
        __asm__ volatile("sldt %0" : "=rm" (seg));
        return(seg);
}

extern __inline__ void set_ldt(unsigned int seg)
{
        __asm__ volatile("lldt %0" : : "rm" ((unsigned short)(seg)));
}

extern __inline__ void flush_tlb(void)
{
        unsigned long cr3_temp;
        __asm__ volatile("movl %%cr3, %0; movl %0, %%cr3" : "=r" (cr3_temp) :: "memory");
}

extern __inline__ void invlpg(unsigned long addr)
{
        __asm__ volatile("invlpg (%0)" :: "r" (addr) : "memory");
}

/*
 * Access to machine-specific registers (available on 586 and better only).
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection), which allows gcc to optimize better.
 */

#define rdmsr(msr,lo,hi) \
        __asm__ volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr))

#define wrmsr(msr,lo,hi) \
        __asm__ volatile("wrmsr" : : "c" (msr), "a" (lo), "d" (hi))

#define rdtsc(lo,hi) \
        __asm__ volatile("rdtsc" : "=a" (lo), "=d" (hi))

#define write_tsc(lo,hi) wrmsr(0x10, lo, hi)

#define rdpmc(counter,lo,hi) \
        __asm__ volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter))

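/*
 * Illustrative sketch (not part of the original header; the function name
 * is hypothetical).  Because the macros above write their lo/hi arguments
 * directly as asm outputs, callers pass plain lvalues rather than pointers,
 * e.g. to read the time stamp counter through its P5 MSR number:
 */
static __inline__ uint64_t example_read_tsc_msr(void)
{
        uint32_t lo, hi;

        rdmsr(MSR_P5_TSC, lo, hi);              /* lo/hi are written directly */
        return (((uint64_t)hi << 32) | lo);
}
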
extern __inline__ uint64_t rdmsr64(uint32_t msr)
{
        uint64_t ret;
        __asm__ volatile("rdmsr" : "=A" (ret) : "c" (msr));
        return ret;
}

extern __inline__ void wrmsr64(uint32_t msr, uint64_t val)
{
        __asm__ volatile("wrmsr" : : "c" (msr), "A" (val));
}

extern __inline__ uint64_t rdtsc64(void)
{
        uint64_t ret;
        __asm__ volatile("rdtsc" : "=A" (ret));
        return ret;
}
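
/*
 * Illustrative sketch (not part of the original header; the function name
 * is hypothetical): rdtsc64() gives a cheap 64-bit cycle timestamp, so an
 * elapsed-cycles measurement is just a subtraction of two reads.
 */
static __inline__ uint64_t example_tsc_elapsed(uint64_t start)
{
        return (rdtsc64() - start);
}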
#endif  /* __GNUC__ */
#endif  /* ASSEMBLER */

#define MSR_IA32_P5_MC_ADDR             0
#define MSR_IA32_P5_MC_TYPE             1
#define MSR_IA32_PLATFORM_ID            0x17
#define MSR_IA32_EBL_CR_POWERON         0x2a

#define MSR_IA32_APIC_BASE              0x1b
#define MSR_IA32_APIC_BASE_BSP          (1<<8)
#define MSR_IA32_APIC_BASE_ENABLE       (1<<11)
#define MSR_IA32_APIC_BASE_BASE         (0xfffff<<12)

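/*
 * Illustrative sketch (not part of the original header; the function name
 * is hypothetical): the APIC base MSR combines a page-aligned physical
 * base address with a couple of flag bits, so it is read with rdmsr64()
 * and pulled apart with the masks above.
 */
#if !defined(ASSEMBLER) && defined(__GNUC__)
static __inline__ unsigned int example_apic_base_phys(void)
{
        uint64_t apic_base = rdmsr64(MSR_IA32_APIC_BASE);

        if (!(apic_base & MSR_IA32_APIC_BASE_ENABLE))
                return (0);                     /* local APIC is disabled */
        return ((unsigned int)apic_base & MSR_IA32_APIC_BASE_BASE);
}
#endif  /* !ASSEMBLER && __GNUC__ */
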
#define MSR_IA32_UCODE_WRITE            0x79
#define MSR_IA32_UCODE_REV              0x8b

#define MSR_IA32_PERFCTR0               0xc1
#define MSR_IA32_PERFCTR1               0xc2

#define MSR_IA32_BBL_CR_CTL             0x119

#define MSR_IA32_MCG_CAP                0x179
#define MSR_IA32_MCG_STATUS             0x17a
#define MSR_IA32_MCG_CTL                0x17b

#define MSR_IA32_EVNTSEL0               0x186
#define MSR_IA32_EVNTSEL1               0x187

#define MSR_IA32_DEBUGCTLMSR            0x1d9
#define MSR_IA32_LASTBRANCHFROMIP       0x1db
#define MSR_IA32_LASTBRANCHTOIP         0x1dc
#define MSR_IA32_LASTINTFROMIP          0x1dd
#define MSR_IA32_LASTINTTOIP            0x1de

#define MSR_IA32_MC0_CTL                0x400
#define MSR_IA32_MC0_STATUS             0x401
#define MSR_IA32_MC0_ADDR               0x402
#define MSR_IA32_MC0_MISC               0x403

#endif  /* _I386_PROC_REG_H_ */