/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#ifndef _PPC_PROC_REG_H_
#define _PPC_PROC_REG_H_

#include <mach/boolean.h>

/* Define some useful masks that convert from bit numbers */

#if __PPC__
#ifdef __BIG_ENDIAN__
#ifndef ENDIAN_MASK
#define ENDIAN_MASK(val,size) (1 << ((size-1) - val))
#endif
#else
#error code not ported to little endian targets yet
#endif /* __BIG_ENDIAN__ */
#endif /* __PPC__ */

#define MASK32(PART) ENDIAN_MASK(PART ## _BIT, 32)
#define MASK16(PART) ENDIAN_MASK(PART ## _BIT, 16)
#define MASK8(PART) ENDIAN_MASK(PART ## _BIT, 8)

#undef MASK
#define MASK(PART) MASK32(PART)

#define BITS_PER_WORD 32
#define BITS_PER_WORD_POW2 5

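/* Worked example (added for illustration): PowerPC numbers bits from the
 * most-significant end, so ENDIAN_MASK(0, 32) == 0x80000000 and
 * ENDIAN_MASK(31, 32) == 0x00000001. MASK(MSR_EE), with MSR_EE_BIT == 16
 * below, expands to 1 << (31 - 16) == 0x00008000.
 */
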
/* Defines for decoding the MSR bits */

#define MSR_SF_BIT 0
#define MSR_HV_BIT 3
#define MSR_RES1_BIT 1
#define MSR_RES2_BIT 2
#define MSR_RES3_BIT 3
#define MSR_RES4_BIT 4
#define MSR_RES5_BIT 5
#define MSR_VEC_BIT 6
#define MSR_RES7_BIT 7
#define MSR_RES8_BIT 8
#define MSR_RES9_BIT 9
#define MSR_RES10_BIT 10
#define MSR_RES11_BIT 11
#define MSR_KEY_BIT 12 /* Key bit on 603e (not on 603) */
#define MSR_POW_BIT 13
#define MSR_TGPR_BIT 14 /* Temporary GPR mappings on 603/603e */
#define MSR_ILE_BIT 15
#define MSR_EE_BIT 16
#define MSR_PR_BIT 17
#define MSR_FP_BIT 18
#define MSR_ME_BIT 19
#define MSR_FE0_BIT 20
#define MSR_SE_BIT 21
#define MSR_BE_BIT 22
#define MSR_FE1_BIT 23
#define MSR_RES24_BIT 24 /* AL bit in POWER architectures */
#define MSR_IP_BIT 25
#define MSR_IR_BIT 26
#define MSR_DR_BIT 27
#define MSR_RES28_BIT 28
#define MSR_PM_BIT 29
#define MSR_RI_BIT 30
#define MSR_LE_BIT 31

/* MSR for kernel mode, interrupts disabled, running in virtual mode */
#define MSR_SUPERVISOR_INT_OFF (MASK(MSR_ME) | MASK(MSR_IR) | MASK(MSR_DR))

/* MSR for above but with interrupts enabled */
#define MSR_SUPERVISOR_INT_ON (MSR_SUPERVISOR_INT_OFF | MASK(MSR_EE))

/* MSR for physical mode code */
#define MSR_VM_OFF (MASK(MSR_ME))

/* MSR for physical instruction, virtual data */
#define MSR_PHYS_INST_VIRT_DATA (MASK(MSR_ME) | MASK(MSR_IR))

/* MSR mask for user-exported bits - identify bits that must be set/reset */

/* SET - external exceptions, machine check, vm on, user-level privs */
#define MSR_EXPORT_MASK_SET (MASK(MSR_EE) | MASK(MSR_ME) | \
		MASK(MSR_IR) | MASK(MSR_DR) | MASK(MSR_PR))

/* only the following bits may be changed by a task */
#define MSR_IMPORT_BITS (MASK(MSR_FE0) | MASK(MSR_SE) | MASK(MSR_BE) | \
		MASK(MSR_FE1) | MASK(MSR_PM) | MASK(MSR_LE))

#define MSR_PREPARE_FOR_IMPORT(origmsr, newmsr) \
	((origmsr & ~MSR_IMPORT_BITS) | (newmsr & MSR_IMPORT_BITS))

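/* Worked example (added for illustration): only MSR_IMPORT_BITS may be
 * changed by a task, so if origmsr has EE and PR set and newmsr requests
 * only SE, MSR_PREPARE_FOR_IMPORT(origmsr, newmsr) keeps EE and PR from
 * origmsr and takes SE from newmsr.
 */
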
#define MSR_VEC_ON (MASK(MSR_VEC))

#define USER_MODE(msr) (msr & MASK(MSR_PR) ? TRUE : FALSE)

/* seg reg values must be simple expressions so that assembler can cope */
#define SEG_REG_INVALID 0x0000
#define KERNEL_SEG_REG0_VALUE 0x20000000 /* T=0, Ks=0, Ku=1, PPC_SID_KERNEL=0 */

/* For SEG_REG_PROT we have T=0, Ks=0, Ku=1 */
#define SEG_REG_PROT 0x20000000 /* seg regs should have these bits set */

/* SR_COPYIN is used for copyin/copyout+remapping and must be
 * saved and restored in the thread context.
 */
/* SR_UNUSED_BY_KERN is unused by the kernel, and thus contains
 * the space ID of the currently interrupted user task immediately
 * after an exception and before interrupts are reenabled. It's used
 * purely for an assert.
 */

/* SR_KERNEL used for asserts... */

#define SR_COPYIN sr14
#define SR_UNUSED_BY_KERN sr13
#define SR_KERNEL sr0

#define SR_UNUSED_BY_KERN_NUM 13
#define SR_COPYIN_NAME sr14
#define SR_COPYIN_NUM 14
#define BAT_INVALID 0


/* DSISR bits on data access exceptions */

#define DSISR_IO_BIT 0 /* NOT USED on 601 */
#define DSISR_HASH_BIT 1
#define DSISR_NOEX_BIT 3
#define DSISR_PROT_BIT 4
#define DSISR_IO_SPC_BIT 5
#define DSISR_WRITE_BIT 6
#define DSISR_WATCH_BIT 9
#define DSISR_EIO_BIT 11

#define dsiMiss 0x40000000
#define dsiMissb 1
#define dsiNoEx 0x10000000
#define dsiProt 0x08000000
#define dsiInvMode 0x04000000
#define dsiStore 0x02000000
#define dsiAC 0x00400000
#define dsiSeg 0x00200000
#define dsiValid 0x5E600000
#define dsiSpcNest 0x00010000 /* Special nest - software flag */
#define dsiSpcNestb 15 /* Special nest - software flag */
#define dsiSoftware 0x0000FFFF

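/* Usage sketch (illustrative only; dsisr is a hypothetical local holding
 * the fault status): a data-access fault handler can classify the fault
 * by masking against these bits, e.g.
 *
 *    if (dsisr & dsiProt)  ...    // protection violation
 *    if (dsisr & dsiMiss)  ...    // no PTE found
 *    if (dsisr & dsiStore) ...    // faulting access was a store
 */
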
/* SRR1 bits on data/instruction translation exceptions */

#define SRR1_TRANS_HASH_BIT 1
#define SRR1_TRANS_IO_BIT 3
#define SRR1_TRANS_PROT_BIT 4
#define SRR1_TRANS_NO_PTE_BIT 10

/* SRR1 bits on program exceptions */

#define SRR1_PRG_FE_BIT 11
#define SRR1_PRG_ILL_INS_BIT 12
#define SRR1_PRG_PRV_INS_BIT 13
#define SRR1_PRG_TRAP_BIT 14

/*
 * Virtual to physical mapping macros/structures.
 * IMPORTANT NOTE: there is one mapping per HW page, not per MACH page.
 */

#define PTE1_WIMG_GUARD_BIT 28 /* Needed for assembler */
#define PTE1_REFERENCED_BIT 23 /* ditto */
#define PTE1_CHANGED_BIT 24
#define PTE0_HASH_ID_BIT 25

#define PTE_WIMG_CB_CACHED_COHERENT 0 /* cached, writeback, coherent (default) */
#define PTE_WIMG_CB_CACHED_COHERENT_GUARDED 1 /* cached, writeback, coherent, guarded */
#define PTE_WIMG_UNCACHED_COHERENT 2 /* uncached, coherent */
#define PTE_WIMG_UNCACHED_COHERENT_GUARDED 3 /* uncached, coherent, guarded */

#define PTE_WIMG_DEFAULT PTE_WIMG_CB_CACHED_COHERENT
#define PTE_WIMG_IO PTE_WIMG_UNCACHED_COHERENT_GUARDED


#ifndef ASSEMBLER
#ifdef __GNUC__

/* Structures and types for machine registers */

/*
 * C-helper inline functions for accessing machine registers follow.
 */

/*
 * Various memory/IO synchronisation instructions
 */

/* Use eieio as a memory barrier to order stores.
 * Useful for device control and PTE maintenance.
 */

#define eieio() \
	__asm__ volatile("eieio")

/* Use sync to ensure previous stores have completed.
 * This is required when manipulating locks and/or
 * maintaining PTEs or other shared structures on SMP
 * machines.
 */

#define sync() \
	__asm__ volatile("sync")

/* Use isync to synchronize context; that is, to ensure
 * that no instructions are prefetched past it before the
 * preceding instructions complete.
 */

#define isync() \
	__asm__ volatile("isync")

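/* Usage sketch (illustrative only; mmio_write_pair and the register layout
 * are hypothetical): eieio keeps two uncached device stores in program
 * order without the cost of a full sync.
 */
extern __inline__ void mmio_write_pair(volatile unsigned int *regs,
                                       unsigned int a, unsigned int b)
{
    regs[0] = a;
    eieio();        /* first store reaches the device before the second */
    regs[1] = b;
}
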

/*
 * Access to various system registers
 */

extern unsigned int mflr(void);

extern __inline__ unsigned int mflr(void)
{
    unsigned int result;
    __asm__ volatile("mflr %0" : "=r" (result));
    return result;
}

extern unsigned int mfpvr(void);

extern __inline__ unsigned int mfpvr(void)
{
    unsigned int result;
    __asm__ ("mfpvr %0" : "=r" (result));
    return result;
}

/* mtmsr might need syncs etc. around it, so we don't provide a simple
 * inline macro for it.
 */

extern unsigned int mfmsr(void);

extern __inline__ unsigned int mfmsr(void)
{
    unsigned int result;
    __asm__ volatile("mfmsr %0" : "=r" (result));
    return result;
}


extern unsigned int mfdar(void);

extern __inline__ unsigned int mfdar(void)
{
    unsigned int result;
    __asm__ volatile("mfdar %0" : "=r" (result));
    return result;
}

extern void mtdec(unsigned int val);

extern __inline__ void mtdec(unsigned int val)
{
    __asm__ volatile("mtdec %0" : : "r" (val));
    return;
}

extern void mttb(unsigned int val);

extern __inline__ void mttb(unsigned int val)
{
    __asm__ volatile("mtspr tbl, %0" : : "r" (val));
    return;
}

extern unsigned int mftb(void);

extern __inline__ unsigned int mftb(void)
{
    unsigned int result;
    __asm__ volatile("mftb %0" : "=r" (result));
    return result;
}

extern void mttbu(unsigned int val);

extern __inline__ void mttbu(unsigned int val)
{
    __asm__ volatile("mtspr tbu, %0" : : "r" (val));
    return;
}

extern unsigned int mftbu(void);

extern __inline__ unsigned int mftbu(void)
{
    unsigned int result;
    __asm__ volatile("mftbu %0" : "=r" (result));
    return result;
}

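/* Illustrative sketch (not part of the original header; mftb64 is a
 * hypothetical helper name): the 64-bit timebase is read as two 32-bit
 * halves, so the usual idiom is to re-read TBU until it is stable across
 * the TBL read.
 */
extern __inline__ unsigned long long mftb64(void)
{
    unsigned int hi, lo, hi2;
    do {
        hi  = mftbu();      /* upper half, before */
        lo  = mftb();       /* lower half */
        hi2 = mftbu();      /* upper half, after */
    } while (hi != hi2);    /* retry if TBL carried into TBU mid-read */
    return ((unsigned long long)hi << 32) | lo;
}
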
extern unsigned int mfl2cr(void);

extern __inline__ unsigned int mfl2cr(void)
{
    unsigned int result;
    __asm__ volatile("mfspr %0, l2cr" : "=r" (result));
    return result;
}

extern unsigned int cntlzw(unsigned int num);

extern __inline__ unsigned int cntlzw(unsigned int num)
{
    unsigned int result;
    __asm__ volatile("cntlzw %0, %1" : "=r" (result) : "r" (num));
    return result;
}

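/* Worked example (added for illustration): cntlzw counts leading zero
 * bits, so cntlzw(0x80000000) == 0, cntlzw(0x00008000) == 16, and
 * cntlzw(0) == 32. For nonzero x, 31 - cntlzw(x) gives the index of the
 * highest set bit.
 */
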

/* functions for doing byte reversed loads and stores */

extern unsigned int lwbrx(unsigned int addr);

extern __inline__ unsigned int lwbrx(unsigned int addr)
{
    unsigned int result;
    __asm__ volatile("lwbrx %0, 0, %1" : "=r" (result) : "r" (addr));
    return result;
}

extern void stwbrx(unsigned int data, unsigned int addr);

extern __inline__ void stwbrx(unsigned int data, unsigned int addr)
{
    __asm__ volatile("stwbrx %0, 0, %1" : : "r" (data), "r" (addr));
}

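/* Usage sketch (illustrative only; write_le_reg is a hypothetical helper):
 * byte-reversed stores let big-endian PPC code program little-endian
 * device registers without explicit byte swapping.
 */
extern __inline__ void write_le_reg(unsigned int base, unsigned int off,
                                    unsigned int val)
{
    stwbrx(val, base + off);    /* store 32-bit value byte-reversed (LE) */
    eieio();                    /* keep device stores in program order */
}
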
/* Performance Monitor Register access routines */
extern unsigned long mfmmcr0(void);
extern void mtmmcr0(unsigned long);
extern unsigned long mfmmcr1(void);
extern void mtmmcr1(unsigned long);
extern unsigned long mfmmcr2(void);
extern void mtmmcr2(unsigned long);
extern unsigned long mfpmc1(void);
extern void mtpmc1(unsigned long);
extern unsigned long mfpmc2(void);
extern void mtpmc2(unsigned long);
extern unsigned long mfpmc3(void);
extern void mtpmc3(unsigned long);
extern unsigned long mfpmc4(void);
extern void mtpmc4(unsigned long);
extern unsigned long mfsia(void);
extern unsigned long mfsda(void);

/* macros since the argument n is a hard-coded constant */

#define mtsprg(n, reg) __asm__ volatile("mtsprg " # n ", %0" : : "r" (reg))
#define mfsprg(reg, n) __asm__ volatile("mfsprg %0, " # n : "=r" (reg))

#define mtspr(spr, val) __asm__ volatile("mtspr " # spr ", %0" : : "r" (val))
#define mfspr(reg, spr) __asm__ volatile("mfspr %0, " # spr : "=r" (reg))

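/* Usage sketch (illustrative only): because the SPR number is pasted into
 * the instruction text with # n, n must be a literal constant, never a
 * variable, e.g.:
 *
 *    unsigned int saved;
 *    mfsprg(saved, 0);    // read SPRG0
 *    mtsprg(0, saved);    // write SPRG0
 */
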
#endif /* __GNUC__ */
#endif /* !ASSEMBLER */

#endif /* _PPC_PROC_REG_H_ */