/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#ifndef _PPC_PROC_REG_H_
#define _PPC_PROC_REG_H_

#include <mach/boolean.h>

/* Define some useful masks that convert from bit numbers */

#if __PPC__
#ifdef __BIG_ENDIAN__
#ifndef ENDIAN_MASK
#define ENDIAN_MASK(val,size) (1 << ((size-1) - val))
#endif
#else
#error code not ported to little endian targets yet
#endif /* __BIG_ENDIAN__ */
#endif /* __PPC__ */

#define MASK32(PART) ENDIAN_MASK(PART ## _BIT, 32)
#define MASK16(PART) ENDIAN_MASK(PART ## _BIT, 16)
#define MASK8(PART) ENDIAN_MASK(PART ## _BIT, 8)

#undef MASK
#define MASK(PART) MASK32(PART)

#define BITS_PER_WORD 32
#define BITS_PER_WORD_POW2 5
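
/* Illustrative sketch (not part of the original header): ENDIAN_MASK()
 * converts a big-endian bit number (bit 0 = most-significant bit of the
 * word) into a conventional mask, so MASK(MSR_EE) with MSR_EE_BIT == 16
 * expands to 1 << (31 - 16) == 0x00008000. The constants below are
 * hypothetical and only show the expansion.
 */
#if 0	/* example only, not compiled */
static const unsigned int example_msb_mask = ENDIAN_MASK(0, 32);	/* 0x80000000 */
static const unsigned int example_ee_mask  = ENDIAN_MASK(16, 32);	/* 0x00008000 */
#endif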

/* Defines for decoding the MSR bits */

#define MSR_SF_BIT 0
#define MSR_HV_BIT 3
#define MSR_RES1_BIT 1
#define MSR_RES2_BIT 2
#define MSR_RES3_BIT 3
#define MSR_RES4_BIT 4
#define MSR_RES5_BIT 5
#define MSR_VEC_BIT 6
#define MSR_RES7_BIT 7
#define MSR_RES8_BIT 8
#define MSR_RES9_BIT 9
#define MSR_RES10_BIT 10
#define MSR_RES11_BIT 11
#define MSR_KEY_BIT 12 /* Key bit on 603e (not on 603) */
#define MSR_POW_BIT 13
#define MSR_TGPR_BIT 14 /* Temporary GPR mappings on 603/603e */
#define MSR_ILE_BIT 15
#define MSR_EE_BIT 16
#define MSR_PR_BIT 17
#define MSR_FP_BIT 18
#define MSR_ME_BIT 19
#define MSR_FE0_BIT 20
#define MSR_SE_BIT 21
#define MSR_BE_BIT 22
#define MSR_FE1_BIT 23
#define MSR_RES24_BIT 24 /* AL bit in power architectures */
#define MSR_IP_BIT 25
#define MSR_IR_BIT 26
#define MSR_DR_BIT 27
#define MSR_RES28_BIT 28
#define MSR_PM_BIT 29
#define MSR_RI_BIT 30
#define MSR_LE_BIT 31

/* MSR for kernel mode, interrupts disabled, running in virtual mode */
#define MSR_SUPERVISOR_INT_OFF (MASK(MSR_ME) | MASK(MSR_IR) | MASK(MSR_DR))

/* MSR for above but with interrupts enabled */
#define MSR_SUPERVISOR_INT_ON (MSR_SUPERVISOR_INT_OFF | MASK(MSR_EE))

/* MSR for physical mode code */
#define MSR_VM_OFF (MASK(MSR_ME))

/* MSR for physical instruction, virtual data */
#define MSR_PHYS_INST_VIRT_DATA (MASK(MSR_ME) | MASK(MSR_IR))

/* MSR mask for user-exported bits - identify bits that must be set/reset */

/* SET - external exceptions, machine check, vm on, user-level privs */
#define MSR_EXPORT_MASK_SET (MASK(MSR_EE)| MASK(MSR_ME)| \
			MASK(MSR_IR)|MASK(MSR_DR)|MASK(MSR_PR))

/* only the following bits may be changed by a task */
#define MSR_IMPORT_BITS (MASK(MSR_FE0)|MASK(MSR_SE)|MASK(MSR_BE)| \
			MASK(MSR_FE1)| MASK(MSR_PM) | MASK(MSR_LE))

#define MSR_PREPARE_FOR_IMPORT(origmsr, newmsr) \
	((origmsr & ~MSR_IMPORT_BITS) | (newmsr & MSR_IMPORT_BITS))

#define MSR_VEC_ON (MASK(MSR_VEC))

#define USER_MODE(msr) (msr & MASK(MSR_PR) ? TRUE : FALSE)
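
/* Illustrative sketch (not part of the original header): combining the MSR
 * helpers above. A handler might sanitize a user-supplied MSR image so that
 * only the MSR_IMPORT_BITS can change, then test the privilege bit. The
 * function and variable names here are hypothetical.
 */
#if 0	/* example only, not compiled */
static unsigned int example_sanitize_msr(unsigned int curmsr, unsigned int usermsr)
{
	unsigned int newmsr = MSR_PREPARE_FOR_IMPORT(curmsr, usermsr);

	if (USER_MODE(newmsr)) {
		/* MSR_PR is set: the resulting context runs unprivileged */
	}
	return newmsr;
}
#endif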

/* seg reg values must be simple expressions so that assembler can cope */
#define SEG_REG_INVALID 0x0000
#define KERNEL_SEG_REG0_VALUE 0x20000000 /* T=0,Ks=0,Ku=1 PPC_SID_KERNEL=0*/

/* For SEG_REG_PROT we have T=0, Ks=0, Ku=1 */
#define SEG_REG_PROT 0x20000000 /* seg regs should have these bits set */

/* SR_COPYIN is used for copyin/copyout+remapping and must be
 * saved and restored in the thread context.
 */
/* SR_UNUSED_BY_KERN is unused by the kernel, and thus contains
 * the space ID of the currently interrupted user task immediately
 * after an exception and before interrupts are reenabled. It's used
 * purely for an assert.
 */

/* SR_KERNEL used for asserts... */

#define SR_COPYIN sr14
#define SR_UNUSED_BY_KERN sr13
#define SR_KERNEL sr0

#define SR_UNUSED_BY_KERN_NUM 13
#define SR_COPYIN_NAME sr14
#define SR_COPYIN_NUM 14
#define BAT_INVALID 0


/* DSISR bits on data access exceptions */

#define DSISR_IO_BIT 0 /* NOT USED on 601 */
#define DSISR_HASH_BIT 1
#define DSISR_NOEX_BIT 3
#define DSISR_PROT_BIT 4
#define DSISR_IO_SPC_BIT 5
#define DSISR_WRITE_BIT 6
#define DSISR_WATCH_BIT 9
#define DSISR_EIO_BIT 11

#define dsiMiss 0x40000000
#define dsiMissb 1
#define dsiNoEx 0x10000000
#define dsiProt 0x08000000
#define dsiInvMode 0x04000000
#define dsiStore 0x02000000
#define dsiAC 0x00400000
#define dsiSeg 0x00200000
#define dsiValid 0x5E600000
#define dsiLinkage 0x00010000 /* Linkage mapping type - software flag */
#define dsiLinkageb 15 /* Linkage mapping type - software flag */
#define dsiSoftware 0x0000FFFF

/* SRR1 bits on data/instruction translation exceptions */

#define SRR1_TRANS_HASH_BIT 1
#define SRR1_TRANS_IO_BIT 3
#define SRR1_TRANS_PROT_BIT 4
#define SRR1_TRANS_NO_PTE_BIT 10

/* SRR1 bits on program exceptions */

#define SRR1_PRG_FE_BIT 11
#define SRR1_PRG_ILL_INS_BIT 12
#define SRR1_PRG_PRV_INS_BIT 13
#define SRR1_PRG_TRAP_BIT 14

/*
 * Virtual to physical mapping macros/structures.
 * IMPORTANT NOTE: there is one mapping per HW page, not per MACH page.
 */

#define PTE1_WIMG_GUARD_BIT 28 /* Needed for assembler */
#define PTE1_REFERENCED_BIT 23 /* ditto */
#define PTE1_CHANGED_BIT 24
#define PTE0_HASH_ID_BIT 25

#define PTE_WIMG_CB_CACHED_COHERENT 0 /* cached, writeback, coherent (default) */
#define PTE_WIMG_CB_CACHED_COHERENT_GUARDED 1 /* cached, writeback, coherent, guarded */
#define PTE_WIMG_UNCACHED_COHERENT 2 /* uncached, coherent */
#define PTE_WIMG_UNCACHED_COHERENT_GUARDED 3 /* uncached, coherent, guarded */

#define PTE_WIMG_DEFAULT PTE_WIMG_CB_CACHED_COHERENT
#define PTE_WIMG_IO PTE_WIMG_UNCACHED_COHERENT_GUARDED



#ifndef ASSEMBLER
#ifdef __GNUC__

/* Structures and types for machine registers */


/*
 * C-helper inline functions for accessing machine registers follow.
 */


/*
 * Various memory/IO synchronisation instructions
 */

/* Use eieio as a memory barrier to order stores.
 * Useful for device control and PTE maintenance.
 */

#define eieio() \
	__asm__ volatile("eieio")

/* Use sync to ensure previous stores have completed.
   This is required when manipulating locks and/or
   maintaining PTEs or other shared structures on SMP
   machines.
*/

#define sync() \
	__asm__ volatile("sync")

/* Use isync to synchronize context; that is, to ensure that
   no instructions after the isync are prefetched before the
   isync completes.
*/

#define isync() \
	__asm__ volatile("isync")
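
/* Illustrative sketch (not part of the original header): a typical ordering
 * pattern using the barriers above when poking a memory-mapped device
 * register. The pointer and function names are hypothetical.
 */
#if 0	/* example only, not compiled */
static void example_device_kick(volatile unsigned int *dev_ctrl_reg)
{
	*dev_ctrl_reg = 1;	/* start the operation */
	eieio();		/* order this store ahead of later device accesses */
	sync();			/* wait until the store has been performed */
	isync();		/* discard any prefetched instructions */
}
#endif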


/*
 * Access to various system registers
 */

extern unsigned int mflr(void);

extern __inline__ unsigned int mflr(void)
{
	unsigned int result;
	__asm__ volatile("mflr %0" : "=r" (result));
	return result;
}

extern unsigned int mfpvr(void);

extern __inline__ unsigned int mfpvr(void)
{
	unsigned int result;
	__asm__ ("mfpvr %0" : "=r" (result));
	return result;
}

/* mtmsr may need syncs etc. around it, so no simple
 * inline macro is provided
 */

extern unsigned int mfmsr(void);

extern __inline__ unsigned int mfmsr(void)
{
	unsigned int result;
	__asm__ volatile("mfmsr %0" : "=r" (result));
	return result;
}


extern unsigned int mfdar(void);

extern __inline__ unsigned int mfdar(void)
{
	unsigned int result;
	__asm__ volatile("mfdar %0" : "=r" (result));
	return result;
}

extern void mtdec(unsigned int val);

extern __inline__ void mtdec(unsigned int val)
{
	__asm__ volatile("mtdec %0" : : "r" (val));
	return;
}

extern void mttb(unsigned int val);

extern __inline__ void mttb(unsigned int val)
{
	__asm__ volatile("mtspr tbl, %0" : : "r" (val));
	return;
}

extern unsigned int mftb(void);

extern __inline__ unsigned int mftb(void)
{
	unsigned int result;
	__asm__ volatile("mftb %0" : "=r" (result));
	return result;
}

extern void mttbu(unsigned int val);

extern __inline__ void mttbu(unsigned int val)
{
	__asm__ volatile("mtspr tbu, %0" : : "r" (val));
	return;
}

extern unsigned int mftbu(void);

extern __inline__ unsigned int mftbu(void)
{
	unsigned int result;
	__asm__ volatile("mftbu %0" : "=r" (result));
	return result;
}
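
/* Illustrative sketch (not part of the original header): the usual way to
 * read the 64-bit timebase with mftbu/mftb, re-reading the upper half to
 * guard against a carry between the two reads. The function name is
 * hypothetical.
 */
#if 0	/* example only, not compiled */
static unsigned long long example_read_timebase(void)
{
	unsigned int hi, lo, hi2;

	do {
		hi  = mftbu();		/* upper 32 bits */
		lo  = mftb();		/* lower 32 bits */
		hi2 = mftbu();		/* re-read upper half */
	} while (hi != hi2);		/* retry if the low word wrapped */

	return ((unsigned long long)hi << 32) | lo;
}
#endif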

extern unsigned int mfl2cr(void);

extern __inline__ unsigned int mfl2cr(void)
{
	unsigned int result;
	__asm__ volatile("mfspr %0, l2cr" : "=r" (result));
	return result;
}

extern unsigned int cntlzw(unsigned int num);

extern __inline__ unsigned int cntlzw(unsigned int num)
{
	unsigned int result;
	__asm__ volatile("cntlzw %0, %1" : "=r" (result) : "r" (num));
	return result;
}


/* functions for doing byte reversed loads and stores */

extern unsigned int lwbrx(unsigned int addr);

extern __inline__ unsigned int lwbrx(unsigned int addr)
{
	unsigned int result;
	__asm__ volatile("lwbrx %0, 0, %1" : "=r" (result) : "r" (addr));
	return result;
}

extern void stwbrx(unsigned int data, unsigned int addr);

extern __inline__ void stwbrx(unsigned int data, unsigned int addr)
{
	__asm__ volatile("stwbrx %0, 0, %1" : : "r" (data), "r" (addr));
}
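
/* Illustrative sketch (not part of the original header): using the
 * byte-reversed accessors above to read and update a little-endian 32-bit
 * device register from big-endian code. The names are hypothetical.
 */
#if 0	/* example only, not compiled */
static unsigned int example_le_reg_update(unsigned int reg_addr, unsigned int set_bits)
{
	unsigned int value = lwbrx(reg_addr);	/* byte-swapped load */

	stwbrx(value | set_bits, reg_addr);	/* byte-swapped store */
	eieio();				/* order the device store */
	return value;
}
#endif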

/* Performance Monitor Register access routines */
extern unsigned long mfmmcr0(void);
extern void mtmmcr0(unsigned long);
extern unsigned long mfmmcr1(void);
extern void mtmmcr1(unsigned long);
extern unsigned long mfmmcr2(void);
extern void mtmmcr2(unsigned long);
extern unsigned long mfpmc1(void);
extern void mtpmc1(unsigned long);
extern unsigned long mfpmc2(void);
extern void mtpmc2(unsigned long);
extern unsigned long mfpmc3(void);
extern void mtpmc3(unsigned long);
extern unsigned long mfpmc4(void);
extern void mtpmc4(unsigned long);
extern unsigned long mfsia(void);
extern unsigned long mfsda(void);

/* macros since the argument n is a hard-coded constant */

#define mtsprg(n, reg) __asm__ volatile("mtsprg " # n ", %0" : : "r" (reg))
#define mfsprg(reg, n) __asm__ volatile("mfsprg %0, " # n : "=r" (reg))

#define mtspr(spr, val) __asm__ volatile("mtspr " # spr ", %0" : : "r" (val))
#define mfspr(reg, spr) __asm__ volatile("mfspr %0, " # spr : "=r" (reg))
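
/* Illustrative sketch (not part of the original header): the SPR macros
 * above stringize their register argument, so it must be a literal SPR name
 * or number the assembler understands. The function and variable names are
 * hypothetical.
 */
#if 0	/* example only, not compiled */
static void example_sprg_roundtrip(void)
{
	unsigned int scratch = 0x12345678;
	unsigned int readback;

	mtsprg(0, scratch);	/* emits: mtsprg 0, <reg holding scratch> */
	mfsprg(readback, 0);	/* emits: mfsprg <reg for readback>, 0 */
}
#endif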

#endif /* __GNUC__ */
#endif /* !ASSEMBLER */

#endif /* _PPC_PROC_REG_H_ */