/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#ifndef _PPC_PROC_REG_H_
#define _PPC_PROC_REG_H_

#include <mach/boolean.h>

/* Define some useful masks that convert from bit numbers */

#if __PPC__
#ifdef __BIG_ENDIAN__
#ifndef ENDIAN_MASK
#define ENDIAN_MASK(val,size) (1 << ((size-1) - val))
#endif
#else
#error code not ported to little endian targets yet
#endif /* __BIG_ENDIAN__ */
#endif /* __PPC__ */

#define MASK32(PART)	ENDIAN_MASK(PART ## _BIT, 32)
#define MASK16(PART)	ENDIAN_MASK(PART ## _BIT, 16)
#define MASK8(PART)	ENDIAN_MASK(PART ## _BIT, 8)

#undef MASK
#define MASK(PART)	MASK32(PART)

#define BITS_PER_WORD		32
#define BITS_PER_WORD_POW2	5

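/*
 * Illustrative note (not part of the original header): ENDIAN_MASK uses
 * IBM/big-endian bit numbering, where bit 0 is the most significant bit
 * of the field. For a 32-bit word that means:
 *
 *	ENDIAN_MASK(0, 32)  -> 1 << 31 -> 0x80000000
 *	ENDIAN_MASK(31, 32) -> 1 << 0  -> 0x00000001
 *
 * so, with MSR_EE_BIT defined as 16 below, MASK(MSR_EE) expands to
 * 1 << (31 - 16) == 0x00008000.
 */
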
/* Defines for decoding the MSR bits */

#define MSR_SF_BIT	0
#define MSR_HV_BIT	3
#define MSR_RES1_BIT	1
#define MSR_RES2_BIT	2
#define MSR_RES3_BIT	3
#define MSR_RES4_BIT	4
#define MSR_RES5_BIT	5
#define MSR_VEC_BIT	6
#define MSR_RES7_BIT	7
#define MSR_RES8_BIT	8
#define MSR_RES9_BIT	9
#define MSR_RES10_BIT	10
#define MSR_RES11_BIT	11
#define MSR_KEY_BIT	12	/* Key bit on 603e (not on 603) */
#define MSR_POW_BIT	13
#define MSR_TGPR_BIT	14	/* Temporary GPR mappings on 603/603e */
#define MSR_ILE_BIT	15
#define MSR_EE_BIT	16
#define MSR_PR_BIT	17
#define MSR_FP_BIT	18
#define MSR_ME_BIT	19
#define MSR_FE0_BIT	20
#define MSR_SE_BIT	21
#define MSR_BE_BIT	22
#define MSR_FE1_BIT	23
#define MSR_RES24_BIT	24	/* AL bit in POWER architecture */
#define MSR_IP_BIT	25
#define MSR_IR_BIT	26
#define MSR_DR_BIT	27
#define MSR_RES28_BIT	28
#define MSR_PM_BIT	29
#define MSR_RI_BIT	30
#define MSR_LE_BIT	31

/* MSR for kernel mode, interrupts disabled, running in virtual mode */
#define MSR_SUPERVISOR_INT_OFF (MASK(MSR_ME) | MASK(MSR_IR) | MASK(MSR_DR))

/* MSR for above but with interrupts enabled */
#define MSR_SUPERVISOR_INT_ON (MSR_SUPERVISOR_INT_OFF | MASK(MSR_EE))

/* MSR for physical mode code */
#define MSR_VM_OFF (MASK(MSR_ME))

/* MSR for physical instruction, virtual data (IR off, DR on) */
#define MSR_PHYS_INST_VIRT_DATA (MASK(MSR_ME) | MASK(MSR_DR))

/* MSR mask for user-exported bits - identify bits that must be set/reset */

/* SET - external exceptions, machine check, vm on, user-level privs */
#define MSR_EXPORT_MASK_SET (MASK(MSR_EE) | MASK(MSR_ME) | \
	MASK(MSR_IR) | MASK(MSR_DR) | MASK(MSR_PR))

/* only the following bits may be changed by a task */
#define MSR_IMPORT_BITS (MASK(MSR_FE0) | MASK(MSR_SE) | MASK(MSR_BE) | \
	MASK(MSR_FE1) | MASK(MSR_PM) | MASK(MSR_LE))

#define MSR_PREPARE_FOR_IMPORT(origmsr, newmsr) \
	((origmsr & ~MSR_IMPORT_BITS) | (newmsr & MSR_IMPORT_BITS))

#define MSR_VEC_ON (MASK(MSR_VEC))

#define USER_MODE(msr) (msr & MASK(MSR_PR) ? TRUE : FALSE)

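/*
 * Example (illustrative, not part of the original header; current_msr
 * and user_msr are hypothetical variables): when a task supplies a new
 * MSR image, only the MSR_IMPORT_BITS may actually change:
 *
 *	newmsr = MSR_PREPARE_FOR_IMPORT(current_msr, user_msr);
 *
 * takes FE0/FE1/SE/BE/PM/LE from user_msr and every remaining bit,
 * such as EE/PR/IR/DR, from current_msr.
 */
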
/* seg reg values must be simple expressions so that assembler can cope */
#define SEG_REG_INVALID		0x0000
#define KERNEL_SEG_REG0_VALUE	0x20000000	/* T=0, Ks=0, Ku=1, PPC_SID_KERNEL=0 */

/* For SEG_REG_PROT we have T=0, Ks=0, Ku=1 */
#define SEG_REG_PROT	0x20000000	/* seg regs should have these bits set */

/* SR_COPYIN is used for copyin/copyout+remapping and must be
 * saved and restored in the thread context.
 */
/* SR_UNUSED_BY_KERN is unused by the kernel, and thus contains
 * the space ID of the currently interrupted user task immediately
 * after an exception and before interrupts are reenabled. It's used
 * purely for an assert.
 */

/* SR_KERNEL is used for asserts... */

#define SR_COPYIN		sr14
#define SR_UNUSED_BY_KERN	sr13
#define SR_KERNEL		sr0

#define SR_UNUSED_BY_KERN_NUM	13
#define SR_COPYIN_NAME		sr14
#define SR_COPYIN_NUM		14
#define BAT_INVALID		0


/* DSISR bits on data access exceptions */

#define DSISR_IO_BIT		0	/* NOT USED on 601 */
#define DSISR_HASH_BIT		1
#define DSISR_NOEX_BIT		3
#define DSISR_PROT_BIT		4
#define DSISR_IO_SPC_BIT	5
#define DSISR_WRITE_BIT		6
#define DSISR_WATCH_BIT		9
#define DSISR_EIO_BIT		11

#define dsiMiss		0x40000000
#define dsiMissb	1
#define dsiNoEx		0x10000000
#define dsiProt		0x08000000
#define dsiInvMode	0x04000000
#define dsiStore	0x02000000
#define dsiAC		0x00400000
#define dsiSeg		0x00200000
#define dsiValid	0x5E600000
#define dsiLinkage	0x00010000	/* Linkage mapping type - software flag */
#define dsiLinkageb	15		/* Linkage mapping type - software flag */
#define dsiSoftware	0x0000FFFF

/* SRR1 bits on data/instruction translation exceptions */

#define SRR1_TRANS_HASH_BIT	1
#define SRR1_TRANS_IO_BIT	3
#define SRR1_TRANS_PROT_BIT	4
#define SRR1_TRANS_NO_PTE_BIT	10

/* SRR1 bits on program exceptions */

#define SRR1_PRG_FE_BIT		11
#define SRR1_PRG_ILL_INS_BIT	12
#define SRR1_PRG_PRV_INS_BIT	13
#define SRR1_PRG_TRAP_BIT	14

/*
 * Virtual to physical mapping macros/structures.
 * IMPORTANT NOTE: there is one mapping per HW page, not per MACH page.
 */

#define PTE1_WIMG_GUARD_BIT	28	/* Needed for assembler */
#define PTE1_REFERENCED_BIT	23	/* ditto */
#define PTE1_CHANGED_BIT	24
#define PTE0_HASH_ID_BIT	25

#define PTE_WIMG_CB_CACHED_COHERENT		0	/* cached, writeback, coherent (default) */
#define PTE_WIMG_CB_CACHED_COHERENT_GUARDED	1	/* cached, writeback, coherent, guarded */
#define PTE_WIMG_UNCACHED_COHERENT		2	/* uncached, coherent */
#define PTE_WIMG_UNCACHED_COHERENT_GUARDED	3	/* uncached, coherent, guarded */

#define PTE_WIMG_DEFAULT	PTE_WIMG_CB_CACHED_COHERENT
#define PTE_WIMG_IO		PTE_WIMG_UNCACHED_COHERENT_GUARDED



#ifndef ASSEMBLER
#ifdef __GNUC__

/* Structures and types for machine registers */

/*
 * C-helper inline functions for accessing machine registers follow.
 */

/*
 * Various memory/IO synchronisation instructions
 */

/* Use eieio as a memory barrier to order stores.
 * Useful for device control and PTE maintenance.
 */

#define eieio() \
	__asm__ volatile("eieio")

/* Use sync to ensure previous stores have completed.
 * This is required when manipulating locks and/or
 * maintaining PTEs or other shared structures on SMP
 * machines.
 */

#define sync() \
	__asm__ volatile("sync")

/* Use isync to synchronize context; that is, to ensure
 * that no instructions are prefetched past the isync
 * before it completes.
 */

#define isync() \
	__asm__ volatile("isync")

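/*
 * Example (illustrative, not part of the original header): a typical
 * memory-mapped I/O sequence, where 'cmd' and 'go' are hypothetical
 * pointers to uncached device registers:
 *
 *	*cmd = value;	-- program the command register
 *	eieio();	-- keep the two stores in order
 *	*go = 1;	-- ring the doorbell
 *	sync();		-- wait until both stores have been performed
 */
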
/*
 * Access to various system registers
 */

extern unsigned int mflr(void);

extern __inline__ unsigned int mflr(void)
{
	unsigned int result;
	__asm__ volatile("mflr %0" : "=r" (result));
	return result;
}

extern unsigned int mfpvr(void);

extern __inline__ unsigned int mfpvr(void)
{
	unsigned int result;
	__asm__ ("mfpvr %0" : "=r" (result));
	return result;
}

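/*
 * Example (illustrative, not part of the original header): the PVR
 * packs the processor version in the upper halfword and the revision
 * level in the lower halfword:
 *
 *	unsigned int pvr      = mfpvr();
 *	unsigned int version  = pvr >> 16;	-- e.g. 0x0008 for a 750
 *	unsigned int revision = pvr & 0xFFFF;
 */
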
/* mtmsr might need syncs etc. around it, so no simple
 * inline macro is provided for it.
 */

extern unsigned int mfmsr(void);

extern __inline__ unsigned int mfmsr(void)
{
	unsigned int result;
	__asm__ volatile("mfmsr %0" : "=r" (result));
	return result;
}

extern unsigned int mfdar(void);

extern __inline__ unsigned int mfdar(void)
{
	unsigned int result;
	__asm__ volatile("mfdar %0" : "=r" (result));
	return result;
}

extern void mtdec(unsigned int val);

extern __inline__ void mtdec(unsigned int val)
{
	__asm__ volatile("mtdec %0" : : "r" (val));
	return;
}

extern void mttb(unsigned int val);

extern __inline__ void mttb(unsigned int val)
{
	__asm__ volatile("mtspr tbl, %0" : : "r" (val));
	return;
}

extern unsigned int mftb(void);

extern __inline__ unsigned int mftb(void)
{
	unsigned int result;
	__asm__ volatile("mftb %0" : "=r" (result));
	return result;
}

extern void mttbu(unsigned int val);

extern __inline__ void mttbu(unsigned int val)
{
	__asm__ volatile("mtspr tbu, %0" : : "r" (val));
	return;
}

extern unsigned int mftbu(void);

extern __inline__ unsigned int mftbu(void)
{
	unsigned int result;
	__asm__ volatile("mftbu %0" : "=r" (result));
	return result;
}

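/*
 * Illustrative sketch (not part of the original header; mftb64 is a
 * hypothetical helper): on 32-bit PowerPC the two timebase halves
 * cannot be read atomically, so a consistent 64-bit value requires the
 * classic tbu/tbl/tbu retry loop.
 */
extern unsigned long long mftb64(void);

extern __inline__ unsigned long long mftb64(void)
{
	unsigned int hi, lo, hi2;

	do {
		hi  = mftbu();	/* upper 32 bits */
		lo  = mftb();	/* lower 32 bits */
		hi2 = mftbu();	/* re-read upper half */
	} while (hi != hi2);	/* retry if TBL carried into TBU */

	return ((unsigned long long)hi << 32) | lo;
}
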
extern unsigned int mfl2cr(void);

extern __inline__ unsigned int mfl2cr(void)
{
	unsigned int result;
	__asm__ volatile("mfspr %0, l2cr" : "=r" (result));
	return result;
}

extern unsigned int cntlzw(unsigned int num);

extern __inline__ unsigned int cntlzw(unsigned int num)
{
	unsigned int result;
	__asm__ volatile("cntlzw %0, %1" : "=r" (result) : "r" (num));
	return result;
}

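/*
 * Example (illustrative, not part of the original header): cntlzw
 * counts leading zero bits, so for nonzero x the index of the highest
 * set bit (floor(log2(x))) is 31 - cntlzw(x); cntlzw(0) returns 32.
 */
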
/* functions for doing byte-reversed loads and stores */

extern unsigned int lwbrx(unsigned int addr);

extern __inline__ unsigned int lwbrx(unsigned int addr)
{
	unsigned int result;
	__asm__ volatile("lwbrx %0, 0, %1" : "=r" (result) : "r" (addr));
	return result;
}

extern void stwbrx(unsigned int data, unsigned int addr);

extern __inline__ void stwbrx(unsigned int data, unsigned int addr)
{
	__asm__ volatile("stwbrx %0, 0, %1" : : "r" (data), "r" (addr));
}

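/*
 * Example (illustrative, not part of the original header): the
 * byte-reversed pair gives cheap endian conversion when accessing
 * little-endian hardware; 'regaddr' is a hypothetical uncached device
 * register address:
 *
 *	unsigned int v = lwbrx(regaddr);	-- little-endian load
 *	stwbrx(v | 0x1, regaddr);		-- little-endian store
 */
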
/* Performance Monitor Register access routines */
extern unsigned long mfmmcr0(void);
extern void mtmmcr0(unsigned long);
extern unsigned long mfmmcr1(void);
extern void mtmmcr1(unsigned long);
extern unsigned long mfmmcr2(void);
extern void mtmmcr2(unsigned long);
extern unsigned long mfpmc1(void);
extern void mtpmc1(unsigned long);
extern unsigned long mfpmc2(void);
extern void mtpmc2(unsigned long);
extern unsigned long mfpmc3(void);
extern void mtpmc3(unsigned long);
extern unsigned long mfpmc4(void);
extern void mtpmc4(unsigned long);
extern unsigned long mfsia(void);
extern unsigned long mfsda(void);

/* these are macros because the argument n must be a hard-coded constant */

#define mtsprg(n, reg)	__asm__ volatile("mtsprg " # n ", %0" : : "r" (reg))
#define mfsprg(reg, n)	__asm__ volatile("mfsprg %0, " # n : "=r" (reg))

#define mtspr(spr, val)	__asm__ volatile("mtspr " # spr ", %0" : : "r" (val))
#define mfspr(reg, spr)	__asm__ volatile("mfspr %0, " # spr : "=r" (reg))

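/*
 * Example (illustrative, not part of the original header): because the
 * register number is pasted into the instruction text with the '#'
 * stringizing operator, it must be a literal constant:
 *
 *	unsigned int save;
 *	mfsprg(save, 0);	-- read SPRG0
 *	mtsprg(0, save);	-- write SPRG0
 *
 * Passing a variable for n/spr will not assemble.
 */
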
#endif /* __GNUC__ */
#endif /* !ASSEMBLER */

#endif /* _PPC_PROC_REG_H_ */