/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#ifndef _PPC_PROC_REG_H_
#define _PPC_PROC_REG_H_

#include <mach/boolean.h>
/* Define some useful masks that convert from bit numbers */

#if _BIG_ENDIAN
#define ENDIAN_MASK(val,size) (1 << ((size-1) - val))
#else
#error code not ported to little endian targets yet
#endif /* _BIG_ENDIAN */
#define MASK32(PART)    ENDIAN_MASK(PART ## _BIT, 32)
#define MASK16(PART)    ENDIAN_MASK(PART ## _BIT, 16)
#define MASK8(PART)     ENDIAN_MASK(PART ## _BIT, 8)

#define MASK(PART)      MASK32(PART)

#define BITS_PER_WORD   32
#define BITS_PER_WORD_POW2 5
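
/* Worked example (illustrative): these macros use big-endian (IBM) bit
 * numbering, counting from the most-significant bit. For a bit number
 * defined below such as MSR_EE_BIT (16), MASK(MSR_EE) expands to
 * ENDIAN_MASK(MSR_EE_BIT, 32) == (1 << (31 - 16)) == 0x00008000.
 */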
/* Defines for decoding the MSR bits */

#define MSR_RES1_BIT    1
#define MSR_RES2_BIT    2
#define MSR_RES3_BIT    3
#define MSR_RES4_BIT    4
#define MSR_RES5_BIT    5
#define MSR_VEC_BIT     6
#define MSR_RES7_BIT    7
#define MSR_RES8_BIT    8
#define MSR_RES9_BIT    9
#define MSR_RES10_BIT   10
#define MSR_RES11_BIT   11
#define MSR_KEY_BIT     12      /* Key bit on 603e (not on 603) */
#define MSR_POW_BIT     13
#define MSR_TGPR_BIT    14      /* Temporary GPR mappings on 603/603e */
#define MSR_ILE_BIT     15
#define MSR_EE_BIT      16
#define MSR_PR_BIT      17
#define MSR_FP_BIT      18
#define MSR_ME_BIT      19
#define MSR_FE0_BIT     20
#define MSR_SE_BIT      21
#define MSR_BE_BIT      22
#define MSR_FE1_BIT     23
#define MSR_RES24_BIT   24      /* AL bit in power architectures */
#define MSR_IP_BIT      25
#define MSR_IR_BIT      26
#define MSR_DR_BIT      27
#define MSR_RES28_BIT   28
#define MSR_PM_BIT      29
#define MSR_RI_BIT      30
#define MSR_LE_BIT      31
/* MSR for kernel mode, interrupts disabled, running in virtual mode */
#define MSR_SUPERVISOR_INT_OFF (MASK(MSR_ME) | MASK(MSR_IR) | MASK(MSR_DR))

/* MSR for above but with interrupts enabled */
#define MSR_SUPERVISOR_INT_ON (MSR_SUPERVISOR_INT_OFF | MASK(MSR_EE))

/* MSR for physical mode code */
#define MSR_VM_OFF (MASK(MSR_ME))

/* MSR for physical instruction, virtual data */
#define MSR_PHYS_INST_VIRT_DATA (MASK(MSR_ME) | MASK(MSR_IR))

/* MSR mask for user-exported bits - identify bits that must be set/reset */

/* SET - external exceptions, machine check, vm on, user-level privs */
#define MSR_EXPORT_MASK_SET (MASK(MSR_EE) | MASK(MSR_ME) | \
                             MASK(MSR_IR) | MASK(MSR_DR) | MASK(MSR_PR))

/* only the following bits may be changed by a task */
#define MSR_IMPORT_BITS (MASK(MSR_FE0) | MASK(MSR_SE) | MASK(MSR_BE) | \
                         MASK(MSR_FE1) | MASK(MSR_PM) | MASK(MSR_LE))

#define MSR_PREPARE_FOR_IMPORT(origmsr, newmsr) \
        ((origmsr & ~MSR_IMPORT_BITS) | (newmsr & MSR_IMPORT_BITS))
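
/* Usage sketch (illustrative, not from the original header): sanitizing a
 * task-supplied MSR image. "task_msr" is a hypothetical variable; only the
 * MSR_IMPORT_BITS are taken from it, everything else is kept from the
 * current supervisor MSR.
 *
 *      unsigned int cur  = mfmsr();
 *      unsigned int next = MSR_PREPARE_FOR_IMPORT(cur, task_msr);
 */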
#define MSR_VEC_ON      (MASK(MSR_VEC))

#define USER_MODE(msr) (msr & MASK(MSR_PR) ? TRUE : FALSE)
/* seg reg values must be simple expressions so that assembler can cope */
#define SEG_REG_INVALID 0x0000
#define KERNEL_SEG_REG0_VALUE   0x20000000 /* T=0,Ks=0,Ku=1 PPC_SID_KERNEL=0 */

/* the following segment register values are only used prior to the probe,
 * they map the various device areas 1-1 on 601 machines
 */
#define KERNEL_SEG_REG5_VALUE   0xa7F00005 /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=5 */
#define KERNEL_SEG_REG8_VALUE   0xa7F00008 /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=8 */
#define KERNEL_SEG_REG9_VALUE   0xa7F00009 /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=9 */
#define KERNEL_SEG_REG10_VALUE  0xa7F0000a /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=a */
#define KERNEL_SEG_REG11_VALUE  0xa7F0000b /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=b */
#define KERNEL_SEG_REG12_VALUE  0xa7F0000c /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=c */
#define KERNEL_SEG_REG13_VALUE  0xa7F0000d /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=d */
#define KERNEL_SEG_REG14_VALUE  0xa7F0000e /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=e */
#define KERNEL_SEG_REG15_VALUE  0xa7F0000f /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=f */

/* For SEG_REG_PROT we have T=0, Ks=0, Ku=1 */
#define SEG_REG_PROT    0x20000000 /* seg regs should have these bits set */
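
/* Decoding sketch (illustrative): with T=0, the key bits sit at the top of
 * the segment register in big-endian numbering: T is bit 0, Ks bit 1, Ku
 * bit 2. Hence 0x20000000 sets only Ku, matching the T=0,Ks=0,Ku=1 noted
 * above; the low 24 bits hold the VSID.
 */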
/* SR_COPYIN is used for copyin/copyout+remapping and must be
 * saved and restored in the thread context.
 */
/* SR_UNUSED_BY_KERN is unused by the kernel, and thus contains
 * the space ID of the currently interrupted user task immediately
 * after an exception and before interrupts are reenabled. It's used
 * purely for an assert.
 */

/* SR_KERNEL used for asserts... */

#define SR_COPYIN       sr14
#define SR_UNUSED_BY_KERN sr13
#define SR_KERNEL       sr0

#define SR_UNUSED_BY_KERN_NUM 13
#define SR_COPYIN_NAME  sr14
#define SR_COPYIN_NUM   14
/* DSISR bits on data access exceptions */

#define DSISR_IO_BIT            0       /* NOT USED on 601 */
#define DSISR_HASH_BIT          1
#define DSISR_PROT_BIT          4
#define DSISR_IO_SPC_BIT        5
#define DSISR_WRITE_BIT         6
#define DSISR_WATCH_BIT         9
#define DSISR_EIO_BIT           11

/* SRR1 bits on data/instruction translation exceptions */

#define SRR1_TRANS_HASH_BIT     1
#define SRR1_TRANS_IO_BIT       3
#define SRR1_TRANS_PROT_BIT     4
#define SRR1_TRANS_NO_PTE_BIT   10

/* SRR1 bits on program exceptions */

#define SRR1_PRG_FE_BIT         11
#define SRR1_PRG_ILL_INS_BIT    12
#define SRR1_PRG_PRV_INS_BIT    13
#define SRR1_PRG_TRAP_BIT       14
/* BAT information */

/* Constants used when setting mask values */

#define BAT_INVALID 0

/*
 * Virtual to physical mapping macros/structures.
 * IMPORTANT NOTE: there is one mapping per HW page, not per MACH page.
 */

#define CACHE_LINE_SIZE 32
#define CACHE_LINE_POW2 5
#define cache_align(x)  (((x) + CACHE_LINE_SIZE-1) & ~(CACHE_LINE_SIZE - 1))
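
/* Worked example (illustrative): cache_align rounds up to the next
 * 32-byte boundary, e.g. cache_align(0x20) == 0x20 and
 * cache_align(0x21) == 0x40.
 */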
#define PTE1_WIMG_GUARD_BIT     28      /* Needed for assembler */
#define PTE1_REFERENCED_BIT     23      /* ditto */
#define PTE1_CHANGED_BIT        24
#define PTE0_HASH_ID_BIT        25

#define PPC_HASHSIZE            2048    /* size of hash table */
#define PPC_HASHSIZE_LOG2       11
#define PPC_MIN_MPP             2       /* min # of mappings per phys page */
#ifndef ASSEMBLER
#ifdef __GNUC__

#if _BIG_ENDIAN == 0
#error - bitfield structures are not checked for bit ordering in words
#endif /* _BIG_ENDIAN */

/* Structures and types for machine registers */

typedef union {
        unsigned int word;
        struct {
                unsigned int htaborg    : 16;
                unsigned int reserved   : 7;
                unsigned int htabmask   : 9;
        } bits;
} sdr1_t;
/* Block mapping registers. These values are model dependent.
 * Eventually, we will need to up these to 64 bit values.
 */

#define blokValid 0x1FFE0000
#define batMin 0x00020000
#define batMax 0x10000000
/* BAT register structures.
 * Not used for standard mappings, but may be used
 * for mapping devices. Note that the 601 has a
 * different BAT layout than the other PowerPC processors
 */

typedef union {
        unsigned int word;
        struct {
                unsigned int blpi       : 15;
                unsigned int reserved   : 10;
                unsigned int wim        : 3;
                unsigned int ks         : 1;
                unsigned int ku         : 1;
                unsigned int pp         : 2;
        } bits;
} bat601u_t;

typedef union {
        unsigned int word;
        struct {
                unsigned int pbn        : 15;
                unsigned int reserved   : 10;
                unsigned int valid      : 1;
                unsigned int bsm        : 6;
        } bits;
} bat601l_t;

typedef struct bat601_t {
        bat601u_t       upper;
        bat601l_t       lower;
} bat601_t;

typedef union {
        unsigned int word;
        struct {
                unsigned int bepi       : 15;
                unsigned int reserved   : 4;
                unsigned int bl         : 11;
                unsigned int vs         : 1;
                unsigned int vp         : 1;
        } bits;
} batu_t;

typedef union {
        unsigned int word;
        struct {
                unsigned int brpn       : 15;
                unsigned int reserved   : 10;
                unsigned int wimg       : 4;
                unsigned int reserved2  : 1;
                unsigned int pp         : 2;
        } bits;
} batl_t;

typedef struct bat_t {
        batu_t  upper;
        batl_t  lower;
} bat_t;
/*
 * Used extensively for standard mappings
 */

typedef union {
        unsigned int word;
        struct {
                unsigned int valid      : 1;
                unsigned int segment_id : 24;
                unsigned int hash_id    : 1;
                unsigned int page_index : 6;    /* Abbreviated */
        } bits;
        struct {
                unsigned int valid      : 1;
                unsigned int not_used   : 5;
                unsigned int segment_id : 19;   /* Least Sig 19 bits */
                unsigned int hash_id    : 1;
                unsigned int page_index : 6;
        } hash_bits;
} pte0_t;

typedef union {
        unsigned int word;
        struct {
                unsigned int phys_page  : 20;
                unsigned int reserved3  : 3;
                unsigned int referenced : 1;
                unsigned int changed    : 1;
                unsigned int wimg       : 4;
                unsigned int reserved1  : 1;
                unsigned int protection : 2;
        } bits;
} pte1_t;

typedef struct pte_t {
        pte0_t  pte0;
        pte1_t  pte1;
} pte_t;
#define PTE_NULL        ((pte_t*) NULL) /* No pte found/associated with this */
#define PTE_EMPTY       0x7fffffbf      /* Value in the pte0.word of a free pte */

#define PTE_WIMG_CB_CACHED                      0  /* cached, writeback */
#define PTE_WIMG_CB_CACHED_GUARDED              1  /* cached, writeback, guarded */
#define PTE_WIMG_CB_CACHED_COHERENT             2  /* cached, writeback, coherent (default) */
#define PTE_WIMG_CB_CACHED_COHERENT_GUARDED     3  /* cached, writeback, coherent, guarded */
#define PTE_WIMG_UNCACHED                       4  /* uncached */
#define PTE_WIMG_UNCACHED_GUARDED               5  /* uncached, guarded */
#define PTE_WIMG_UNCACHED_COHERENT              6  /* uncached, coherent */
#define PTE_WIMG_UNCACHED_COHERENT_GUARDED      7  /* uncached, coherent, guarded */
#define PTE_WIMG_WT_CACHED                      8  /* cached, writethru */
#define PTE_WIMG_WT_CACHED_GUARDED              9  /* cached, writethru, guarded */
#define PTE_WIMG_WT_CACHED_COHERENT             10 /* cached, writethru, coherent */
#define PTE_WIMG_WT_CACHED_COHERENT_GUARDED     11 /* cached, writethru, coherent, guarded */

#define PTE_WIMG_DEFAULT        PTE_WIMG_CB_CACHED_COHERENT
#define PTE_WIMG_IO             PTE_WIMG_UNCACHED_COHERENT_GUARDED
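
/* Note (illustrative): the PTE_WIMG_* values above are the raw 4-bit WIMG
 * encodings, with W (writethru) = 8, I (cache-inhibit) = 4, M (coherent) = 2
 * and G (guarded) = 1; e.g. PTE_WIMG_IO == 7 == I | M | G.
 */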
/*
 * A virtual address is decoded into various parts when looking for its PTE
 */

typedef struct va_full_t {
        unsigned int seg_num    : 4;
        unsigned int page_index : 16;
        unsigned int byte_ofs   : 12;
} va_full_t;

typedef struct va_abbrev_t {    /* use bits.abbrev for abbreviated page index */
        unsigned int seg_num    : 4;
        unsigned int page_index : 6;
        unsigned int junk       : 10;
        unsigned int byte_ofs   : 12;
} va_abbrev_t;

typedef union {
        unsigned int word;
        va_full_t    full;
        va_abbrev_t  abbrev;
} virtual_addr_t;

/* A physical address can be split up into page and offset */

typedef struct pa_t {
        unsigned int page_no    : 20;
        unsigned int offset     : 12;
} pa_t;

typedef union {
        unsigned int word;
        pa_t         bits;
} physical_addr_t;
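
/* Decoding sketch (illustrative, assuming the unions above): the bitfields
 * let a VA be picked apart without shifts and masks.
 *
 *      virtual_addr_t va;
 *      va.word = 0x20003123;
 *      // va.full.seg_num == 0x2, va.full.page_index == 0x0003,
 *      // va.full.byte_ofs == 0x123
 */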
/*
 * C-helper inline functions for accessing machine registers follow.
 */

#ifdef __ELF__
#define __CASMNL__ ";"
#else
#define __CASMNL__ "@"
#endif

/* Return the current GOT pointer */

extern unsigned int get_got(void);

extern __inline__ unsigned int get_got(void)
{
        unsigned int result;
#ifndef __ELF__
        __asm__ volatile("mr %0, r2" : "=r" (result));
#else
        __asm__ volatile("mr %0, 2" : "=r" (result));
#endif
        return result;
}
/*
 * Various memory/IO synchronisation instructions
 */

/* Use eieio as a memory barrier to order stores.
 * Useful for device control and PTE maintenance.
 */
#define eieio() \
        __asm__ volatile("eieio")

/* Use sync to ensure previous stores have completed.
 * This is required when manipulating locks and/or
 * maintaining PTEs or other shared structures on SMP
 * machines.
 */
#define sync() \
        __asm__ volatile("sync")

/* Use isync to synchronize context; that is, to ensure
 * no instructions are prefetched before the context
 * is synchronized.
 */
#define isync() \
        __asm__ volatile("isync")

/*
 * This guy will make sure all tlbs on all processors finish their tlbies
 */
#define tlbsync() \
        __asm__ volatile("tlbsync")
/* Invalidate TLB entry. Caution, requires context synchronization.
 */
extern void tlbie(unsigned int val);

extern __inline__ void tlbie(unsigned int val)
{
        __asm__ volatile("tlbie %0" : : "r" (val));
        return;
}
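
/* Usage sketch (illustrative, not part of the original interface): a
 * minimal single-entry invalidation using the primitives above. The
 * sync/tlbsync bracketing is the conventional PowerPC SMP sequence;
 * real kernel paths add locking around it. "tlbie_sketch" is a
 * hypothetical helper, not an original function.
 */
static __inline__ void tlbie_sketch(unsigned int ea)
{
        sync();         /* ensure prior PTE stores are visible */
        tlbie(ea);      /* invalidate the TLB entry for this EA */
        tlbsync();      /* wait for other processors' invalidates */
        sync();         /* ensure tlbsync completion */
}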
/*
 * Access to various system registers
 */

extern unsigned int mflr(void);

extern __inline__ unsigned int mflr(void)
{
        unsigned int result;
        __asm__ volatile("mflr %0" : "=r" (result));
        return result;
}

extern unsigned int mfpvr(void);

extern __inline__ unsigned int mfpvr(void)
{
        unsigned int result;
        __asm__ ("mfpvr %0" : "=r" (result));
        return result;
}

/* mtmsr might need syncs etc around it, don't provide a simple
 * inline version for it.
 */

extern unsigned int mfmsr(void);

extern __inline__ unsigned int mfmsr(void)
{
        unsigned int result;
        __asm__ volatile("mfmsr %0" : "=r" (result));
        return result;
}
/* mtsr and mfsr must be macros since SR must be hardcoded */

#ifdef __ELF__
#define mtsr(SR, REG) \
        __asm__ volatile("sync" __CASMNL__ "mtsr %0, %1 " __CASMNL__ "isync" : : "i" (SR), "r" (REG));
#define mfsr(REG, SR) \
        __asm__ volatile("mfsr %0, %1" : "=r" (REG) : "i" (SR));
#else
#define mtsr(SR, REG) \
        __asm__ volatile("sync" __CASMNL__ "mtsr sr%0, %1 " __CASMNL__ "isync" : : "i" (SR), "r" (REG));
#define mfsr(REG, SR) \
        __asm__ volatile("mfsr %0, sr%1" : "=r" (REG) : "i" (SR));
#endif
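
/* Usage sketch (illustrative): SR must be a compile-time constant because
 * it is pasted into the instruction, e.g. with the numbers defined above:
 *
 *      mtsr(SR_COPYIN_NUM, sid);       // load segment register 14
 *      mfsr(val, SR_COPYIN_NUM);       // read it back
 *
 * where "sid" and "val" are hypothetical unsigned int variables.
 */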
extern void mtsrin(unsigned int val, unsigned int reg);

extern __inline__ void mtsrin(unsigned int val, unsigned int reg)
{
        __asm__ volatile("sync" __CASMNL__ "mtsrin %0, %1" __CASMNL__ " isync" : : "r" (val), "r" (reg));
        return;
}

extern unsigned int mfsrin(unsigned int reg);

extern __inline__ unsigned int mfsrin(unsigned int reg)
{
        unsigned int result;
        __asm__ volatile("mfsrin %0, %1" : "=r" (result) : "r" (reg));
        return result;
}

extern void mtsdr1(unsigned int val);

extern __inline__ void mtsdr1(unsigned int val)
{
        __asm__ volatile("mtsdr1 %0" : : "r" (val));
        return;
}
extern void mtdar(unsigned int val);

extern __inline__ void mtdar(unsigned int val)
{
        __asm__ volatile("mtdar %0" : : "r" (val));
        return;
}

extern unsigned int mfdar(void);

extern __inline__ unsigned int mfdar(void)
{
        unsigned int result;
        __asm__ volatile("mfdar %0" : "=r" (result));
        return result;
}

extern void mtdec(unsigned int val);

extern __inline__ void mtdec(unsigned int val)
{
        __asm__ volatile("mtdec %0" : : "r" (val));
        return;
}

/* Read the decrementer, with the read ordered by a preceding isync */

extern int isync_mfdec(void);

extern __inline__ int isync_mfdec(void)
{
        int result;
        __asm__ volatile("isync" __CASMNL__ "mfdec %0" : "=r" (result));
        return result;
}
/* Read and write the value from the real-time clock
 * or time base registers. Note that you have to
 * use the right ones depending upon being on
 * 601 or 603/604. Care about carries between
 * the words and using the right registers must be
 * done by the calling function.
 */
extern void mttb(unsigned int val);

extern __inline__ void mttb(unsigned int val)
{
        __asm__ volatile("mtspr tbl, %0" : : "r" (val));
        return;
}

extern unsigned int mftb(void);

extern __inline__ unsigned int mftb(void)
{
        unsigned int result;
        __asm__ volatile("mftb %0" : "=r" (result));
        return result;
}

extern void mttbu(unsigned int val);

extern __inline__ void mttbu(unsigned int val)
{
        __asm__ volatile("mtspr tbu, %0" : : "r" (val));
        return;
}

extern unsigned int mftbu(void);

extern __inline__ unsigned int mftbu(void)
{
        unsigned int result;
        __asm__ volatile("mftbu %0" : "=r" (result));
        return result;
}
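
/* Usage sketch (illustrative, not part of the original interface): reading
 * the full 64-bit time base on 603/604-class parts, handling the carry
 * between the words as the comment above requires of the caller.
 * "mftb64_sketch" is a hypothetical helper.
 */
static __inline__ unsigned long long mftb64_sketch(void)
{
        unsigned int hi, lo, hi2;
        do {
                hi  = mftbu();  /* upper word */
                lo  = mftb();   /* lower word */
                hi2 = mftbu();  /* reread upper word to detect a carry */
        } while (hi != hi2);
        return ((unsigned long long)hi << 32) | lo;
}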
extern void mtrtcl(unsigned int val);

extern __inline__ void mtrtcl(unsigned int val)
{
        __asm__ volatile("mtspr 21,%0" : : "r" (val));
        return;
}

extern unsigned int mfrtcl(void);

extern __inline__ unsigned int mfrtcl(void)
{
        unsigned int result;
        __asm__ volatile("mfspr %0,5" : "=r" (result));
        return result;
}

extern void mtrtcu(unsigned int val);

extern __inline__ void mtrtcu(unsigned int val)
{
        __asm__ volatile("mtspr 20,%0" : : "r" (val));
        return;
}

extern unsigned int mfrtcu(void);

extern __inline__ unsigned int mfrtcu(void)
{
        unsigned int result;
        __asm__ volatile("mfspr %0,4" : "=r" (result));
        return result;
}
extern void mtl2cr(unsigned int val);

extern __inline__ void mtl2cr(unsigned int val)
{
        __asm__ volatile("mtspr l2cr, %0" : : "r" (val));
        return;
}

extern unsigned int mfl2cr(void);

extern __inline__ unsigned int mfl2cr(void)
{
        unsigned int result;
        __asm__ volatile("mfspr %0, l2cr" : "=r" (result));
        return result;
}
extern unsigned int cntlzw(unsigned int num);

extern __inline__ unsigned int cntlzw(unsigned int num)
{
        unsigned int result;
        __asm__ volatile("cntlzw %0, %1" : "=r" (result) : "r" (num));
        return result;
}
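
/* Worked example (illustrative): cntlzw counts leading zeros, so
 * cntlzw(0x80000000) == 0, cntlzw(1) == 31 and cntlzw(0) == 32;
 * for nonzero x, 31 - cntlzw(x) is floor(log2(x)).
 */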
/* functions for doing byte reversed loads and stores */

extern unsigned int lwbrx(unsigned int addr);

extern __inline__ unsigned int lwbrx(unsigned int addr)
{
        unsigned int result;
        __asm__ volatile("lwbrx %0, 0, %1" : "=r" (result) : "r" (addr));
        return result;
}

extern void stwbrx(unsigned int data, unsigned int addr);

extern __inline__ void stwbrx(unsigned int data, unsigned int addr)
{
        __asm__ volatile("stwbrx %0, 0, %1" : : "r" (data), "r" (addr));
        return;
}
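
/* Usage sketch (illustrative): byte-reversed access is handy for
 * little-endian device registers. With a hypothetical register address
 * "regaddr":
 *
 *      stwbrx(0x12345678, regaddr);     // memory bytes: 78 56 34 12
 *      unsigned int v = lwbrx(regaddr); // v == 0x12345678 again
 */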
/* Performance Monitor Register access routines */
extern unsigned long mfmmcr0(void);
extern void mtmmcr0(unsigned long);
extern unsigned long mfmmcr1(void);
extern void mtmmcr1(unsigned long);
extern unsigned long mfmmcr2(void);
extern void mtmmcr2(unsigned long);
extern unsigned long mfpmc1(void);
extern void mtpmc1(unsigned long);
extern unsigned long mfpmc2(void);
extern void mtpmc2(unsigned long);
extern unsigned long mfpmc3(void);
extern void mtpmc3(unsigned long);
extern unsigned long mfpmc4(void);
extern void mtpmc4(unsigned long);
extern unsigned long mfsia(void);
extern unsigned long mfsda(void);
/* macros since the argument n is a hard-coded constant */

#define mtibatu(n, reg) __asm__ volatile("mtibatu " # n ", %0" : : "r" (reg))
#define mtibatl(n, reg) __asm__ volatile("mtibatl " # n ", %0" : : "r" (reg))

#define mtdbatu(n, reg) __asm__ volatile("mtdbatu " # n ", %0" : : "r" (reg))
#define mtdbatl(n, reg) __asm__ volatile("mtdbatl " # n ", %0" : : "r" (reg))

#define mfibatu(reg, n) __asm__ volatile("mfibatu %0, " # n : "=r" (reg))
#define mfibatl(reg, n) __asm__ volatile("mfibatl %0, " # n : "=r" (reg))

#define mfdbatu(reg, n) __asm__ volatile("mfdbatu %0, " # n : "=r" (reg))
#define mfdbatl(reg, n) __asm__ volatile("mfdbatl %0, " # n : "=r" (reg))

#define mtsprg(n, reg)  __asm__ volatile("mtsprg " # n ", %0" : : "r" (reg))
#define mfsprg(reg, n)  __asm__ volatile("mfsprg %0, " # n : "=r" (reg))

#define mtspr(spr, val) __asm__ volatile("mtspr " # spr ", %0" : : "r" (val))
#define mfspr(reg, spr) __asm__ volatile("mfspr %0, " # spr : "=r" (reg))
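
/* Usage sketch (illustrative): n and spr are pasted into the instruction,
 * so they must be literal tokens, e.g.
 *
 *      mtsprg(0, x);           // write SPRG0
 *      mfsprg(x, 0);           // read SPRG0
 *      mfspr(x, 1008);         // read an SPR by number (1008 is HID0 on
 *                              //  some parts; hypothetical example)
 *
 * where "x" is a hypothetical unsigned int variable.
 */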
#endif /* __GNUC__ */
#endif /* !ASSEMBLER */

#endif /* _PPC_PROC_REG_H_ */