/* osfmk/ppc/proc_reg.h — from the apple/xnu source tree (xnu-792.2.4) */
1/*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * @OSF_COPYRIGHT@
24 */
25
26#ifndef _PPC_PROC_REG_H_
27#define _PPC_PROC_REG_H_
28
29#include <mach/boolean.h>
30
31/* Define some useful masks that convert from bit numbers */
32
#if __PPC__
#ifdef __BIG_ENDIAN__
#ifndef ENDIAN_MASK
/* Convert a big-endian bit number (bit 0 = most-significant bit) into a
 * single-bit mask within a field of the given width in bits.
 * Arguments are fully parenthesized so the macro stays correct when a
 * caller passes an expression rather than a bare constant.
 * NOTE(review): for val==0, size==32 this evaluates 1 << 31, which
 * overflows a signed int; kept signed because this header is also fed
 * to the assembler, which does not accept a 'U' suffix.
 */
#define ENDIAN_MASK(val,size) (1 << (((size) - 1) - (val)))
#endif
#else
#error code not ported to little endian targets yet
#endif /* __BIG_ENDIAN__ */
#endif /* __PPC__ */
42
/* Build a mask for a named bit: MASKn(FOO) expands to the mask for
 * FOO_BIT within an n-bit field.  PART is token-pasted with "_BIT",
 * so callers must pass a bare identifier, never an expression.
 */
#define MASK32(PART) ENDIAN_MASK(PART ## _BIT, 32)
#define MASK16(PART) ENDIAN_MASK(PART ## _BIT, 16)
#define MASK8(PART) ENDIAN_MASK(PART ## _BIT, 8)

/* MASK defaults to the 32-bit form; #undef first in case another
 * header (e.g. param.h) already defined a MASK macro. */
#undef MASK
#define MASK(PART) MASK32(PART)

#define BITS_PER_WORD 32
#define BITS_PER_WORD_POW2 5 /* log2(BITS_PER_WORD), for shift-based divides */
52
/* Defines for decoding the MSR bits */
/* Bit numbers use the big-endian convention: bit 0 is the
 * most-significant bit of the 32-bit word (see ENDIAN_MASK above).
 */

#define MSR_SF_BIT 0
#define MSR_HV_BIT 3 /* NOTE(review): shares bit 3 with MSR_RES3_BIT; presumably the 64-bit hypervisor bit — confirm against 970 docs */
#define MSR_RES1_BIT 1
#define MSR_RES2_BIT 2
#define MSR_RES3_BIT 3
#define MSR_RES4_BIT 4
#define MSR_RES5_BIT 5
#define MSR_VEC_BIT 6 /* AltiVec available */
#define MSR_RES7_BIT 7
#define MSR_RES8_BIT 8
#define MSR_RES9_BIT 9
#define MSR_RES10_BIT 10
#define MSR_RES11_BIT 11
#define MSR_KEY_BIT 12 /* Key bit on 603e (not on 603) */
#define MSR_POW_BIT 13
#define MSR_TGPR_BIT 14 /* Temporary GPR mappings on 603/603e */
#define MSR_ILE_BIT 15
#define MSR_EE_BIT 16 /* External interrupt enable */
#define MSR_PR_BIT 17 /* Problem (user) state */
#define MSR_FP_BIT 18 /* Floating point available */
#define MSR_ME_BIT 19 /* Machine check enable */
#define MSR_FE0_BIT 20
#define MSR_SE_BIT 21 /* Single-step trace enable */
#define MSR_BE_BIT 22 /* Branch trace enable */
#define MSR_FE1_BIT 23
#define MSR_RES24_BIT 24 /* AL bit in power architectures */
#define MSR_IP_BIT 25
#define MSR_IR_BIT 26 /* Instruction address translation */
#define MSR_DR_BIT 27 /* Data address translation */
#define MSR_RES28_BIT 28
#define MSR_PM_BIT 29 /* Performance monitor marked mode */
#define MSR_RI_BIT 30
#define MSR_LE_BIT 31 /* Little-endian mode */
88
/* MSR for kernel mode, interrupts disabled, running in virtual mode */
#define MSR_SUPERVISOR_INT_OFF (MASK(MSR_ME) | MASK(MSR_IR) | MASK(MSR_DR))

/* MSR for above but with interrupts enabled */
#define MSR_SUPERVISOR_INT_ON (MSR_SUPERVISOR_INT_OFF | MASK(MSR_EE))

/* MSR for physical mode code */
#define MSR_VM_OFF (MASK(MSR_ME))

/* MSR for physical instruction, virtual data */
#define MSR_PHYS_INST_VIRT_DATA (MASK(MSR_ME) | MASK(MSR_IR))

/* MSR mask for user-exported bits - identify bits that must be set/reset */

/* SET - external exceptions, machine check, vm on, user-level privs */
#define MSR_EXPORT_MASK_SET (MASK(MSR_EE)| MASK(MSR_ME)| \
			MASK(MSR_IR)|MASK(MSR_DR)|MASK(MSR_PR))

/* only the following bits may be changed by a task */
#define MSR_IMPORT_BITS (MASK(MSR_FE0)|MASK(MSR_SE)|MASK(MSR_BE)| \
			MASK(MSR_FE1)| MASK(MSR_PM) | MASK(MSR_LE))

/* Merge the task-controllable bits of newmsr into origmsr.
 * Arguments are parenthesized so the macro is safe when callers pass
 * arbitrary expressions.
 */
#define MSR_PREPARE_FOR_IMPORT(origmsr, newmsr) \
	(((origmsr) & ~MSR_IMPORT_BITS) | ((newmsr) & MSR_IMPORT_BITS))

#define MSR_VEC_ON (MASK(MSR_VEC))

/* TRUE when the MSR value indicates problem (user) state */
#define USER_MODE(msr) (((msr) & MASK(MSR_PR)) ? TRUE : FALSE)
117
/* seg reg values must be simple expressions so that assembler can cope */
#define SEG_REG_INVALID 0x0000
#define KERNEL_SEG_REG0_VALUE 0x20000000 /* T=0,Ks=0,Ku=1 PPC_SID_KERNEL=0*/

/* For SEG_REG_PROT we have T=0, Ks=0, Ku=1 */
#define SEG_REG_PROT 0x20000000 /* seg regs should have these bits set */

/* SR_COPYIN is used for copyin/copyout+remapping and must be
 * saved and restored in the thread context.
 */
/* SR_UNUSED_BY_KERN is unused by the kernel, and thus contains
 * the space ID of the currently interrupted user task immediately
 * after an exception and before interrupts are reenabled. It's used
 * purely for an assert.
 */

/* SR_KERNEL used for asserts... */

/* Segment register names usable directly in assembler operands. */
#define SR_COPYIN sr14
#define SR_UNUSED_BY_KERN sr13
#define SR_KERNEL sr0

/* The *_NUM values must stay in sync with the sr names above. */
#define SR_UNUSED_BY_KERN_NUM 13
#define SR_COPYIN_NAME sr14
#define SR_COPYIN_NUM 14
#define BAT_INVALID 0
144
145
/* DSISR bits on data access exceptions */

#define DSISR_IO_BIT 0 /* NOT USED on 601 */
#define DSISR_HASH_BIT 1 /* hash-table miss (no matching PTE) */
#define DSISR_NOEX_BIT 3
#define DSISR_PROT_BIT 4 /* protection violation */
#define DSISR_IO_SPC_BIT 5
#define DSISR_WRITE_BIT 6 /* access was a store */
#define DSISR_WATCH_BIT 9 /* DABR (data breakpoint) match */
#define DSISR_EIO_BIT 11

/* Mask-form equivalents of the DSISR bits, plus software-defined flags
 * packed into the low half of the word. */
#define dsiMiss 0x40000000
#define dsiMissb 1
#define dsiNoEx 0x10000000
#define dsiProt 0x08000000
#define dsiInvMode 0x04000000
#define dsiStore 0x02000000
#define dsiAC 0x00400000
#define dsiSeg 0x00200000
#define dsiValid 0x5E600000 /* OR of all the hardware dsi* flags above */
#define dsiLinkage 0x00010000 /* Linkage mapping type - software flag */
#define dsiLinkageb 15 /* Linkage mapping type - software flag */
#define dsiSoftware 0x0000FFFF /* low 16 bits reserved for software flags */
169
/* SRR1 bits on data/instruction translation exceptions */

#define SRR1_TRANS_HASH_BIT 1
#define SRR1_TRANS_IO_BIT 3
#define SRR1_TRANS_PROT_BIT 4
#define SRR1_TRANS_NO_PTE_BIT 10

/* SRR1 bits on program exceptions */

#define SRR1_PRG_FE_BIT 11 /* floating-point enabled exception */
#define SRR1_PRG_ILL_INS_BIT 12 /* illegal instruction */
#define SRR1_PRG_PRV_INS_BIT 13 /* privileged instruction in user state */
#define SRR1_PRG_TRAP_BIT 14 /* trap instruction */
183
/*
 * Virtual to physical mapping macros/structures.
 * IMPORTANT NOTE: there is one mapping per HW page, not per MACH page.
 */

#define PTE1_WIMG_GUARD_BIT 28 /* Needed for assembler */
#define PTE1_REFERENCED_BIT 23 /* ditto */
#define PTE1_CHANGED_BIT 24
#define PTE0_HASH_ID_BIT 25

/* WIMG attribute encodings (Write-through/cache-Inhibit/Memory
 * coherence/Guarded) as 2-bit selector values. */
#define PTE_WIMG_CB_CACHED_COHERENT 0 /* cached, writeback, coherent (default) */
#define PTE_WIMG_CB_CACHED_COHERENT_GUARDED 1 /* cached, writeback, coherent, guarded */
#define PTE_WIMG_UNCACHED_COHERENT 2 /* uncached, coherent */
#define PTE_WIMG_UNCACHED_COHERENT_GUARDED 3 /* uncached, coherent, guarded */

#define PTE_WIMG_DEFAULT PTE_WIMG_CB_CACHED_COHERENT
#define PTE_WIMG_IO PTE_WIMG_UNCACHED_COHERENT_GUARDED
201
202
203
204#ifndef ASSEMBLER
205#ifdef __GNUC__
206
207/* Structures and types for machine registers */
208
209
210/*
211 * C-helper inline functions for accessing machine registers follow.
212 */
213
214
215/*
216 * Various memory/IO synchronisation instructions
217 */
218
219 /* Use eieio as a memory barrier to order stores.
220 * Useful for device control and PTE maintenance.
221 */
222
223#define eieio() \
224 __asm__ volatile("eieio")
225
226 /* Use sync to ensure previous stores have completed.
227 This is required when manipulating locks and/or
228 maintaining PTEs or other shared structures on SMP
229 machines.
230 */
231
232#define sync() \
233 __asm__ volatile("sync")
234
235 /* Use isync to sychronize context; that is, the ensure
236 no prefetching of instructions happen before the
237 instruction.
238 */
239
240#define isync() \
241 __asm__ volatile("isync")
242
243
244/*
245 * Access to various system registers
246 */
247
/* Return the current value of the Link Register. */
extern unsigned int mflr(void);

extern __inline__ unsigned int mflr(void)
{
	unsigned int result;
	__asm__ volatile("mflr %0" : "=r" (result));
	return result;
}
256
/* Return the Processor Version Register.  Not volatile: the PVR never
 * changes, so the compiler may freely CSE repeated reads. */
extern unsigned int mfpvr(void);

extern __inline__ unsigned int mfpvr(void)
{
	unsigned int result;
	__asm__ ("mfpvr %0" : "=r" (result));
	return result;
}
265
/* mtmsr might need syncs etc around it, don't provide simple
 * inline macro
 */

/* Return the current Machine State Register. */
extern unsigned int mfmsr(void);

extern __inline__ unsigned int mfmsr(void)
{
	unsigned int result;
	__asm__ volatile("mfmsr %0" : "=r" (result));
	return result;
}
278
279
/* Return the Data Address Register (faulting address after a data
 * access exception). */
extern unsigned int mfdar(void);

extern __inline__ unsigned int mfdar(void)
{
	unsigned int result;
	__asm__ volatile("mfdar %0" : "=r" (result));
	return result;
}
288
/* Load the Decrementer register (counts down and raises the
 * decrementer interrupt when it passes zero). */
extern void mtdec(unsigned int val);

extern __inline__ void mtdec(unsigned int val)
{
	__asm__ volatile("mtdec %0" : : "r" (val));
	return;
}
296
/* Write the lower word of the Time Base (SPR tbl). */
extern void mttb(unsigned int val);

extern __inline__ void mttb(unsigned int val)
{
	__asm__ volatile("mtspr tbl, %0" : : "r" (val));
	return;
}
304
/* Read the lower word of the Time Base.  Callers needing a coherent
 * 64-bit value must use the usual tbu/tb/tbu re-read loop. */
extern unsigned int mftb(void);

extern __inline__ unsigned int mftb(void)
{
	unsigned int result;
	__asm__ volatile("mftb %0" : "=r" (result));
	return result;
}
313
/* Write the upper word of the Time Base (SPR tbu). */
extern void mttbu(unsigned int val);

extern __inline__ void mttbu(unsigned int val)
{
	__asm__ volatile("mtspr tbu, %0" : : "r" (val));
	return;
}
321
/* Read the upper word of the Time Base. */
extern unsigned int mftbu(void);

extern __inline__ unsigned int mftbu(void)
{
	unsigned int result;
	__asm__ volatile("mftbu %0" : "=r" (result));
	return result;
}
330
/* Read the L2 Cache Control Register (L2CR, 75x/74xx parts). */
extern unsigned int mfl2cr(void);

extern __inline__ unsigned int mfl2cr(void)
{
	unsigned int result;
	__asm__ volatile("mfspr %0, l2cr" : "=r" (result));
	return result;
}
339
/* Count leading zeros of a 32-bit word.  Returns 0..32; unlike
 * __builtin_clz, cntlzw of zero is well-defined and yields 32. */
extern unsigned int cntlzw(unsigned int num);

extern __inline__ unsigned int cntlzw(unsigned int num)
{
	unsigned int result;
	__asm__ volatile("cntlzw %0, %1" : "=r" (result) : "r" (num));
	return result;
}
348
349
350/* functions for doing byte reversed loads and stores */
351
/* Byte-reversed 32-bit load from addr (addr is a 32-bit address
 * passed as an integer, not a pointer). */
extern unsigned int lwbrx(unsigned int addr);

extern __inline__ unsigned int lwbrx(unsigned int addr)
{
	unsigned int result;
	__asm__ volatile("lwbrx %0, 0, %1" : "=r" (result) : "r" (addr));
	return result;
}
360
/* Byte-reversed 32-bit store of data to addr (addr is a 32-bit
 * address passed as an integer, not a pointer). */
extern void stwbrx(unsigned int data, unsigned int addr);

extern __inline__ void stwbrx(unsigned int data, unsigned int addr)
{
	__asm__ volatile("stwbrx %0, 0, %1" : : "r" (data), "r" (addr));
}
367
/* Performance Monitor Register access routines */
/* NOTE(review): implemented elsewhere; from the names these appear to
 * cover the monitor control (mmcr0-2), counter (pmc1-4) and sampled
 * address (sia/sda) registers -- confirm against the implementation. */
extern unsigned long mfmmcr0(void);
extern void mtmmcr0(unsigned long);
extern unsigned long mfmmcr1(void);
extern void mtmmcr1(unsigned long);
extern unsigned long mfmmcr2(void);
extern void mtmmcr2(unsigned long);
extern unsigned long mfpmc1(void);
extern void mtpmc1(unsigned long);
extern unsigned long mfpmc2(void);
extern void mtpmc2(unsigned long);
extern unsigned long mfpmc3(void);
extern void mtpmc3(unsigned long);
extern unsigned long mfpmc4(void);
extern void mtpmc4(unsigned long);
extern unsigned long mfsia(void);
extern unsigned long mfsda(void);

/* macros since the argument n is a hard-coded constant */
/* n / spr is stringized into the asm template, so it must be a bare
 * literal register number -- not a variable or expression. */

#define mtsprg(n, reg) __asm__ volatile("mtsprg " # n ", %0" : : "r" (reg))
#define mfsprg(reg, n) __asm__ volatile("mfsprg %0, " # n : "=r" (reg))

#define mtspr(spr, val) __asm__ volatile("mtspr " # spr ", %0" : : "r" (val))
#define mfspr(reg, spr) __asm__ volatile("mfspr %0, " # spr : "=r" (reg))
393
394#endif /* __GNUC__ */
395#endif /* !ASSEMBLER */
396
397#endif /* _PPC_PROC_REG_H_ */