/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#ifndef _PPC_PROC_REG_H_
#define _PPC_PROC_REG_H_

#include <mach/boolean.h>

/* Define some useful masks that convert from bit numbers */

#if _BIG_ENDIAN
#define ENDIAN_MASK(val,size) (1 << ((size-1) - val))
#else
#error code not ported to little endian targets yet
#endif /* _BIG_ENDIAN */

#define MASK32(PART)	ENDIAN_MASK(PART ## _BIT, 32)
#define MASK16(PART)	ENDIAN_MASK(PART ## _BIT, 16)
#define MASK8(PART)	ENDIAN_MASK(PART ## _BIT, 8)

#define MASK(PART)	MASK32(PART)

#define BITS_PER_WORD		32
#define BITS_PER_WORD_POW2	5

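/* Example (added for clarity, not part of the original header): PowerPC
 * documentation numbers bits from the most-significant end, so bit 0 is
 * the MSB of a word.  With MSR_EE_BIT == 16, MASK(MSR_EE) expands to
 * (1 << ((32-1) - 16)) == 0x00008000.
 */
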
/* Defines for decoding the MSR bits */

#define MSR_SF_BIT	0
#define MSR_RES1_BIT	1
#define MSR_RES2_BIT	2
#define MSR_RES3_BIT	3
#define MSR_RES4_BIT	4
#define MSR_RES5_BIT	5
#define MSR_VEC_BIT	6
#define MSR_RES7_BIT	7
#define MSR_RES8_BIT	8
#define MSR_RES9_BIT	9
#define MSR_RES10_BIT	10
#define MSR_RES11_BIT	11
#define MSR_KEY_BIT	12	/* Key bit on 603e (not on 603) */
#define MSR_POW_BIT	13
#define MSR_TGPR_BIT	14	/* Temporary GPR mappings on 603/603e */
#define MSR_ILE_BIT	15
#define MSR_EE_BIT	16
#define MSR_PR_BIT	17
#define MSR_FP_BIT	18
#define MSR_ME_BIT	19
#define MSR_FE0_BIT	20
#define MSR_SE_BIT	21
#define MSR_BE_BIT	22
#define MSR_FE1_BIT	23
#define MSR_RES24_BIT	24	/* AL bit in POWER architectures */
#define MSR_IP_BIT	25
#define MSR_IR_BIT	26
#define MSR_DR_BIT	27
#define MSR_RES28_BIT	28
#define MSR_PM_BIT	29
#define MSR_RI_BIT	30
#define MSR_LE_BIT	31

/* MSR for kernel mode, interrupts disabled, running in virtual mode */
#define MSR_SUPERVISOR_INT_OFF	(MASK(MSR_ME) | MASK(MSR_IR) | MASK(MSR_DR))

/* MSR for above but with interrupts enabled */
#define MSR_SUPERVISOR_INT_ON	(MSR_SUPERVISOR_INT_OFF | MASK(MSR_EE))

/* MSR for physical mode code */
#define MSR_VM_OFF	(MASK(MSR_ME))

/* MSR for physical instruction, virtual data */
#define MSR_PHYS_INST_VIRT_DATA	(MASK(MSR_ME) | MASK(MSR_DR))

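/* Worked expansion (added for clarity, not part of the original header):
 * with ME == bit 19, IR == bit 26 and DR == bit 27,
 * MSR_SUPERVISOR_INT_OFF == 0x1000 | 0x20 | 0x10 == 0x00001030, and
 * adding EE (bit 16, 0x8000) gives MSR_SUPERVISOR_INT_ON == 0x00009030.
 */
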
/* MSR mask for user-exported bits - identify bits that must be set/reset */

/* SET - external exceptions, machine check, vm on, user-level privs */
#define MSR_EXPORT_MASK_SET	(MASK(MSR_EE) | MASK(MSR_ME) | \
				 MASK(MSR_IR) | MASK(MSR_DR) | MASK(MSR_PR))

/* only the following bits may be changed by a task */
#define MSR_IMPORT_BITS		(MASK(MSR_FE0) | MASK(MSR_SE) | MASK(MSR_BE) | \
				 MASK(MSR_FE1) | MASK(MSR_PM) | MASK(MSR_LE))

#define MSR_PREPARE_FOR_IMPORT(origmsr, newmsr) \
	((origmsr & ~MSR_IMPORT_BITS) | (newmsr & MSR_IMPORT_BITS))

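/* Usage sketch (illustrative, not part of the original header; user_msr
 * is a hypothetical task-supplied MSR image): only MSR_IMPORT_BITS are
 * taken from the new value, everything else from the current MSR:
 *
 *	unsigned int next_msr = MSR_PREPARE_FOR_IMPORT(mfmsr(), user_msr);
 */
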
#define MSR_VEC_ON	(MASK(MSR_VEC))

#define USER_MODE(msr)	(msr & MASK(MSR_PR) ? TRUE : FALSE)

/* seg reg values must be simple expressions so that the assembler can cope */
#define SEG_REG_INVALID		0x0000
#define KERNEL_SEG_REG0_VALUE	0x20000000	/* T=0,Ks=0,Ku=1 PPC_SID_KERNEL=0 */

/* For SEG_REG_PROT we have T=0, Ks=0, Ku=1 */
#define SEG_REG_PROT	0x20000000	/* seg regs should have these bits set */

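/* Background note (added for clarity, not part of the original header):
 * a segment register holds T, Ks and Ku/Kp in its high-order bits with
 * the VSID below them, so 0x20000000 sets only the user-key bit and
 * leaves the VSID field free to be ORed in.
 */
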
/* SR_COPYIN is used for copyin/copyout+remapping and must be
 * saved and restored in the thread context.
 */

/* SR_UNUSED_BY_KERN is unused by the kernel, and thus contains
 * the space ID of the currently interrupted user task immediately
 * after an exception and before interrupts are reenabled. It's used
 * purely for an assert.
 */

/* SR_KERNEL used for asserts... */

#define SR_COPYIN		sr14
#define SR_UNUSED_BY_KERN	sr13
#define SR_KERNEL		sr0

#define SR_UNUSED_BY_KERN_NUM	13
#define SR_COPYIN_NAME		sr14
#define SR_COPYIN_NUM		14
#define BAT_INVALID		0

/* DSISR bits on data access exceptions */

#define DSISR_IO_BIT		0	/* NOT USED on 601 */
#define DSISR_HASH_BIT		1
#define DSISR_NOEX_BIT		3
#define DSISR_PROT_BIT		4
#define DSISR_IO_SPC_BIT	5
#define DSISR_WRITE_BIT		6
#define DSISR_WATCH_BIT		9
#define DSISR_EIO_BIT		11

#define dsiMiss		0x40000000
#define dsiNoEx		0x10000000
#define dsiProt		0x08000000
#define dsiInvMode	0x04000000
#define dsiStore	0x02000000
#define dsiAC		0x00400000
#define dsiSeg		0x00200000
#define dsiValid	0x5E600000
#define dsiSpcNest	0x00010000	/* Special nest - software flag */
#define dsiSpcNestb	15		/* Special nest - software flag */
#define dsiSoftware	0x0000FFFF

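/* Example (illustrative, not part of the original header; dsisr_image
 * is a hypothetical saved copy of the fault status word):
 *
 *	int was_store = (dsisr_image & dsiStore) != 0;
 *	int was_prot  = (dsisr_image & dsiProt)  != 0;
 */
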
/* SRR1 bits on data/instruction translation exceptions */

#define SRR1_TRANS_HASH_BIT	1
#define SRR1_TRANS_IO_BIT	3
#define SRR1_TRANS_PROT_BIT	4
#define SRR1_TRANS_NO_PTE_BIT	10

/* SRR1 bits on program exceptions */

#define SRR1_PRG_FE_BIT		11
#define SRR1_PRG_ILL_INS_BIT	12
#define SRR1_PRG_PRV_INS_BIT	13
#define SRR1_PRG_TRAP_BIT	14

/*
 * Virtual to physical mapping macros/structures.
 * IMPORTANT NOTE: there is one mapping per HW page, not per MACH page.
 */

#define PTE1_WIMG_GUARD_BIT	28	/* Needed for assembler */
#define PTE1_REFERENCED_BIT	23	/* ditto */
#define PTE1_CHANGED_BIT	24
#define PTE0_HASH_ID_BIT	25

#define PTE_WIMG_CB_CACHED_COHERENT		0	/* cached, writeback, coherent (default) */
#define PTE_WIMG_CB_CACHED_COHERENT_GUARDED	1	/* cached, writeback, coherent, guarded */
#define PTE_WIMG_UNCACHED_COHERENT		2	/* uncached, coherent */
#define PTE_WIMG_UNCACHED_COHERENT_GUARDED	3	/* uncached, coherent, guarded */

#define PTE_WIMG_DEFAULT	PTE_WIMG_CB_CACHED_COHERENT
#define PTE_WIMG_IO		PTE_WIMG_UNCACHED_COHERENT_GUARDED

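/* Background note (added for clarity, not part of the original header):
 * the WIMG attributes are W (write-through), I (caching inhibited),
 * M (memory coherence required) and G (guarded, no speculative access),
 * which is why PTE_WIMG_IO selects the uncached, coherent, guarded
 * combination.
 */
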
#ifndef ASSEMBLER
#ifdef __GNUC__

/* Structures and types for machine registers */

/*
 * C-helper inline functions for accessing machine registers follow.
 */

/*
 * Various memory/IO synchronisation instructions
 */

/* Use eieio as a memory barrier to order stores.
 * Useful for device control and PTE maintenance.
 */
#define eieio() \
	__asm__ volatile("eieio")

/* Use sync to ensure previous stores have completed.
 * This is required when manipulating locks and/or
 * maintaining PTEs or other shared structures on SMP
 * machines.
 */
#define sync() \
	__asm__ volatile("sync")

/* Use isync to synchronize context; that is, to ensure
 * no prefetching of instructions happens before the
 * instruction.
 */
#define isync() \
	__asm__ volatile("isync")

/*
 * Access to various system registers
 */

extern unsigned int mflr(void);

extern __inline__ unsigned int mflr(void)
{
	unsigned int result;
	__asm__ volatile("mflr %0" : "=r" (result));
	return result;
}

extern unsigned int mfpvr(void);

extern __inline__ unsigned int mfpvr(void)
{
	unsigned int result;
	__asm__ ("mfpvr %0" : "=r" (result));
	return result;
}

/* mtmsr might need syncs etc around it, don't provide simple
 * inline macro
 */

extern unsigned int mfmsr(void);

extern __inline__ unsigned int mfmsr(void)
{
	unsigned int result;
	__asm__ volatile("mfmsr %0" : "=r" (result));
	return result;
}

extern unsigned int mfdar(void);

extern __inline__ unsigned int mfdar(void)
{
	unsigned int result;
	__asm__ volatile("mfdar %0" : "=r" (result));
	return result;
}

extern void mtdec(unsigned int val);

extern __inline__ void mtdec(unsigned int val)
{
	__asm__ volatile("mtdec %0" : : "r" (val));
}

extern void mttb(unsigned int val);

extern __inline__ void mttb(unsigned int val)
{
	__asm__ volatile("mtspr tbl, %0" : : "r" (val));
}

extern unsigned int mftb(void);

extern __inline__ unsigned int mftb(void)
{
	unsigned int result;
	__asm__ volatile("mftb %0" : "=r" (result));
	return result;
}

extern void mttbu(unsigned int val);

extern __inline__ void mttbu(unsigned int val)
{
	__asm__ volatile("mtspr tbu, %0" : : "r" (val));
}

extern unsigned int mftbu(void);

extern __inline__ unsigned int mftbu(void)
{
	unsigned int result;
	__asm__ volatile("mftbu %0" : "=r" (result));
	return result;
}

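/* Usage sketch (illustrative, not part of the original header): reading
 * the 64-bit timebase with the 32-bit mftbu/mftb accessors, retrying if
 * the upper half ticked over between the two reads:
 *
 *	unsigned int hi, lo, hi2;
 *	do {
 *		hi  = mftbu();
 *		lo  = mftb();
 *		hi2 = mftbu();
 *	} while (hi != hi2);
 */
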
extern unsigned int mfl2cr(void);

extern __inline__ unsigned int mfl2cr(void)
{
	unsigned int result;
	__asm__ volatile("mfspr %0, l2cr" : "=r" (result));
	return result;
}

extern unsigned int cntlzw(unsigned int num);

extern __inline__ unsigned int cntlzw(unsigned int num)
{
	unsigned int result;
	__asm__ volatile("cntlzw %0, %1" : "=r" (result) : "r" (num));
	return result;
}

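/* Example (added for clarity, not part of the original header): cntlzw
 * counts leading zero bits, so cntlzw(0x00010000) == 15, cntlzw(0) == 32,
 * and the highest set bit of a nonzero word x is 31 - cntlzw(x).
 */
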
/* functions for doing byte reversed loads and stores */

extern unsigned int lwbrx(unsigned int addr);

extern __inline__ unsigned int lwbrx(unsigned int addr)
{
	unsigned int result;
	__asm__ volatile("lwbrx %0, 0, %1" : "=r" (result) : "r" (addr));
	return result;
}

extern void stwbrx(unsigned int data, unsigned int addr);

extern __inline__ void stwbrx(unsigned int data, unsigned int addr)
{
	__asm__ volatile("stwbrx %0, 0, %1" : : "r" (data), "r" (addr));
}

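/* Usage sketch (illustrative, not part of the original header;
 * pci_reg_addr is a hypothetical device-register address): lwbrx and
 * stwbrx byte-swap on the fly, which suits little-endian device
 * registers on this big-endian CPU:
 *
 *	unsigned int v = lwbrx(pci_reg_addr);
 *	stwbrx(v | 0x1, pci_reg_addr);
 */
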
/* Performance Monitor Register access routines */
extern unsigned long	mfmmcr0(void);
extern void		mtmmcr0(unsigned long);
extern unsigned long	mfmmcr1(void);
extern void		mtmmcr1(unsigned long);
extern unsigned long	mfmmcr2(void);
extern void		mtmmcr2(unsigned long);
extern unsigned long	mfpmc1(void);
extern void		mtpmc1(unsigned long);
extern unsigned long	mfpmc2(void);
extern void		mtpmc2(unsigned long);
extern unsigned long	mfpmc3(void);
extern void		mtpmc3(unsigned long);
extern unsigned long	mfpmc4(void);
extern void		mtpmc4(unsigned long);
extern unsigned long	mfsia(void);
extern unsigned long	mfsda(void);

/* macros since the argument n is a hard-coded constant */

#define mtsprg(n, reg)	__asm__ volatile("mtsprg " # n ", %0" : : "r" (reg))
#define mfsprg(reg, n)	__asm__ volatile("mfsprg %0, " # n : "=r" (reg))

#define mtspr(spr, val)	__asm__ volatile("mtspr " # spr ", %0" : : "r" (val))
#define mfspr(reg, spr)	__asm__ volatile("mfspr %0, " # spr : "=r" (reg))

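/* Usage sketch (illustrative, not part of the original header): the
 * stringizing (# n, # spr) pastes the argument straight into the
 * instruction, so it must be a literal the assembler accepts:
 *
 *	unsigned int save, pvr;
 *	mfsprg(save, 0);	// read SPRG0
 *	mfspr(pvr, 287);	// SPR 287 is the PVR
 */
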
#endif /* __GNUC__ */
#endif /* !ASSEMBLER */

#endif /* _PPC_PROC_REG_H_ */