/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#ifndef _PPC_PROC_REG_H_
#define _PPC_PROC_REG_H_

#include <mach/boolean.h>

/* Define some useful masks that convert from bit numbers */

#if __PPC__
#if _BIG_ENDIAN
#ifndef ENDIAN_MASK
#define ENDIAN_MASK(val,size) (1 << ((size-1) - val))
#endif
#else
#error code not ported to little endian targets yet
#endif /* _BIG_ENDIAN */
#endif /* __PPC__ */

#define MASK32(PART)	ENDIAN_MASK(PART ## _BIT, 32)
#define MASK16(PART)	ENDIAN_MASK(PART ## _BIT, 16)
#define MASK8(PART)	ENDIAN_MASK(PART ## _BIT, 8)

#undef MASK
#define MASK(PART)	MASK32(PART)

#define BITS_PER_WORD		32
#define BITS_PER_WORD_POW2	5
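
/* Worked example (editor's illustration, not original code): PowerPC
 * numbers bits from the most-significant end, so ENDIAN_MASK converts
 * an IBM-style bit number into a conventional mask. With MSR_EE_BIT
 * defined as 16 below:
 *
 *	MASK(MSR_EE) == ENDIAN_MASK(16, 32) == 1 << (31 - 16) == 0x00008000
 */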

/* Defines for decoding the MSR bits */

#define MSR_SF_BIT	0
#define MSR_RES1_BIT	1
#define MSR_RES2_BIT	2
#define MSR_RES3_BIT	3
#define MSR_RES4_BIT	4
#define MSR_RES5_BIT	5
#define MSR_VEC_BIT	6
#define MSR_RES7_BIT	7
#define MSR_RES8_BIT	8
#define MSR_RES9_BIT	9
#define MSR_RES10_BIT	10
#define MSR_RES11_BIT	11
#define MSR_KEY_BIT	12	/* Key bit on 603e (not on 603) */
#define MSR_POW_BIT	13
#define MSR_TGPR_BIT	14	/* Temporary GPR mappings on 603/603e */
#define MSR_ILE_BIT	15
#define MSR_EE_BIT	16
#define MSR_PR_BIT	17
#define MSR_FP_BIT	18
#define MSR_ME_BIT	19
#define MSR_FE0_BIT	20
#define MSR_SE_BIT	21
#define MSR_BE_BIT	22
#define MSR_FE1_BIT	23
#define MSR_RES24_BIT	24	/* AL bit in POWER architectures */
#define MSR_IP_BIT	25
#define MSR_IR_BIT	26
#define MSR_DR_BIT	27
#define MSR_RES28_BIT	28
#define MSR_PM_BIT	29
#define MSR_RI_BIT	30
#define MSR_LE_BIT	31

/* MSR for kernel mode, interrupts disabled, running in virtual mode */
#define MSR_SUPERVISOR_INT_OFF (MASK(MSR_ME) | MASK(MSR_IR) | MASK(MSR_DR))

/* MSR for above but with interrupts enabled */
#define MSR_SUPERVISOR_INT_ON (MSR_SUPERVISOR_INT_OFF | MASK(MSR_EE))

/* MSR for physical mode code */
#define MSR_VM_OFF (MASK(MSR_ME))

/* MSR for physical instruction, virtual data */
#define MSR_PHYS_INST_VIRT_DATA (MASK(MSR_ME) | MASK(MSR_DR))

/* MSR mask for user-exported bits - identify bits that must be set/reset */

/* SET - external exceptions, machine check, vm on, user-level privs */
#define MSR_EXPORT_MASK_SET (MASK(MSR_EE)|MASK(MSR_ME)| \
			MASK(MSR_IR)|MASK(MSR_DR)|MASK(MSR_PR))

/* only the following bits may be changed by a task */
#define MSR_IMPORT_BITS (MASK(MSR_FE0)|MASK(MSR_SE)|MASK(MSR_BE)| \
			MASK(MSR_FE1)|MASK(MSR_PM)|MASK(MSR_LE))

#define MSR_PREPARE_FOR_IMPORT(origmsr, newmsr) \
	((origmsr & ~MSR_IMPORT_BITS) | (newmsr & MSR_IMPORT_BITS))

#define MSR_VEC_ON	(MASK(MSR_VEC))

#define USER_MODE(msr) (msr & MASK(MSR_PR) ? TRUE : FALSE)
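
/* Usage sketch (editor's illustration, not original code; user_msr is
 * a hypothetical caller-supplied value): a handler returning to user
 * space can let the task change only MSR_IMPORT_BITS while keeping
 * every other bit from the current MSR.
 *
 *	unsigned int msr = MSR_PREPARE_FOR_IMPORT(mfmsr(), user_msr);
 *	if (USER_MODE(msr)) {
 *		... resulting context is user-privileged ...
 *	}
 */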

/* seg reg values must be simple expressions so that assembler can cope */
#define SEG_REG_INVALID 0x0000
#define KERNEL_SEG_REG0_VALUE	0x20000000 /* T=0,Ks=0,Ku=1 PPC_SID_KERNEL=0 */

/* The following segment register values are only used prior to the probe;
 * they map the various device areas 1-1 on 601 machines.
 */
#define KERNEL_SEG_REG5_VALUE	0xa7F00005 /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=5 */
#define KERNEL_SEG_REG8_VALUE	0xa7F00008 /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=8 */
#define KERNEL_SEG_REG9_VALUE	0xa7F00009 /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=9 */
#define KERNEL_SEG_REG10_VALUE	0xa7F0000a /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=a */
#define KERNEL_SEG_REG11_VALUE	0xa7F0000b /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=b */
#define KERNEL_SEG_REG12_VALUE	0xa7F0000c /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=c */
#define KERNEL_SEG_REG13_VALUE	0xa7F0000d /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=d */
#define KERNEL_SEG_REG14_VALUE	0xa7F0000e /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=e */
#define KERNEL_SEG_REG15_VALUE	0xa7F0000f /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=f */
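
/* Decode sketch (editor's illustration): a T=1 (direct-store) value
 * above breaks down on the 32-bit segment-register layout as, e.g.
 * for KERNEL_SEG_REG5_VALUE:
 *
 *	0xa7F00005 = 1010 0111 1111 0000 0000 0000 0000 0101
 *	             T=1, Ks=0, Ku=1, BUID (bits 3-11) = 0x7F,
 *	             controller-specific field (bits 12-31) = 0x00005
 */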

/* For SEG_REG_PROT we have T=0, Ks=0, Ku=1 */
#define SEG_REG_PROT	0x20000000 /* seg regs should have these bits set */

/* SR_COPYIN is used for copyin/copyout+remapping and must be
 * saved and restored in the thread context.
 */
/* SR_UNUSED_BY_KERN is unused by the kernel, and thus contains
 * the space ID of the currently interrupted user task immediately
 * after an exception and before interrupts are reenabled. It's used
 * purely for an assert.
 */

/* SR_KERNEL used for asserts... */

#define SR_COPYIN	sr14
#define SR_UNUSED_BY_KERN sr13
#define SR_KERNEL	sr0

#define SR_UNUSED_BY_KERN_NUM 13
#define SR_COPYIN_NAME	sr14
#define SR_COPYIN_NUM	14


/* DSISR bits on data access exceptions */

#define DSISR_IO_BIT		0	/* NOT USED on 601 */
#define DSISR_HASH_BIT		1
#define DSISR_PROT_BIT		4
#define DSISR_IO_SPC_BIT	5
#define DSISR_WRITE_BIT		6
#define DSISR_WATCH_BIT		9
#define DSISR_EIO_BIT		11

/* SRR1 bits on data/instruction translation exceptions */

#define SRR1_TRANS_HASH_BIT	1
#define SRR1_TRANS_IO_BIT	3
#define SRR1_TRANS_PROT_BIT	4
#define SRR1_TRANS_NO_PTE_BIT	10

/* SRR1 bits on program exceptions */

#define SRR1_PRG_FE_BIT		11
#define SRR1_PRG_ILL_INS_BIT	12
#define SRR1_PRG_PRV_INS_BIT	13
#define SRR1_PRG_TRAP_BIT	14

/* BAT information */

/* Constants used when setting mask values */

#define BAT_INVALID 0

/*
 * Virtual to physical mapping macros/structures.
 * IMPORTANT NOTE: there is one mapping per HW page, not per MACH page.
 */

#define CACHE_LINE_SIZE	32
#define CACHE_LINE_POW2	5
#define cache_align(x)	(((x) + CACHE_LINE_SIZE-1) & ~(CACHE_LINE_SIZE - 1))
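
/* Example (editor's illustration): cache_align rounds up to the next
 * 32-byte boundary, e.g. cache_align(0x21) == 0x40, while an already
 * aligned value is unchanged: cache_align(0x40) == 0x40.
 */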

#define PTE1_WIMG_GUARD_BIT	28	/* Needed for assembler */
#define PTE1_REFERENCED_BIT	23	/* ditto */
#define PTE1_CHANGED_BIT	24
#define PTE0_HASH_ID_BIT	25

#define PPC_HASHSIZE		2048	/* size of hash table */
#define PPC_HASHSIZE_LOG2	11
#define PPC_MIN_MPP		2	/* min # of mappings per phys page */

#ifndef ASSEMBLER
#ifdef __GNUC__

#if _BIG_ENDIAN == 0
#error - bitfield structures are not checked for bit ordering in words
#endif /* _BIG_ENDIAN */

/* Structures and types for machine registers */

typedef union {
	unsigned int word;
	struct {
		unsigned int htaborg  : 16;
		unsigned int reserved : 7;
		unsigned int htabmask : 9;
	} bits;
} sdr1_t;
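
/* Sketch (an assumption based on the architected 32-bit SDR1 layout,
 * not original code): htaborg holds the high-order 16 bits of the hash
 * table's physical base, and htabmask selects how many additional hash
 * bits participate beyond the 64KB minimum. For the 128KB table implied
 * by PPC_HASHSIZE (2048 PTEGs of 64 bytes) at physical 0x00100000:
 *
 *	sdr1_t sdr1;
 *	sdr1.bits.htaborg  = 0x0010;	// physical base >> 16
 *	sdr1.bits.htabmask = 0x001;	// 128KB table: one extra hash bit
 *	mtsdr1(sdr1.word);		// mtsdr1() is defined below
 */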

/* Block mapping registers. These values are model dependent.
 * Eventually, we will need to up these to 64 bit values.
 */

#define blokValid 0x1FFE0000
#define batMin 0x00020000
#define batMax 0x10000000
#define batICnt 4
#define batDCnt 4

/* BAT register structures.
 * Not used for standard mappings, but may be used
 * for mapping devices. Note that the 601 has a
 * different BAT layout than the other PowerPC processors.
 */

typedef union {
	unsigned int word;
	struct {
		unsigned int blpi     : 15;
		unsigned int reserved : 10;
		unsigned int wim      : 3;
		unsigned int ks       : 1;
		unsigned int ku       : 1;
		unsigned int pp       : 2;
	} bits;
} bat601u_t;

typedef union {
	unsigned int word;
	struct {
		unsigned int pbn      : 15;
		unsigned int reserved : 10;
		unsigned int valid    : 1;
		unsigned int bsm      : 6;
	} bits;
} bat601l_t;

typedef struct bat601_t {
	bat601u_t upper;
	bat601l_t lower;
} bat601_t;

typedef union {
	unsigned int word;
	struct {
		unsigned int bepi     : 15;
		unsigned int reserved : 4;
		unsigned int bl       : 11;
		unsigned int vs       : 1;
		unsigned int vp       : 1;
	} bits;
} batu_t;

typedef union {
	unsigned int word;
	struct {
		unsigned int brpn      : 15;
		unsigned int reserved  : 10;
		unsigned int wimg      : 4;
		unsigned int reserved2 : 1;
		unsigned int pp        : 2;
	} bits;
} batl_t;

typedef struct bat_t {
	batu_t upper;
	batl_t lower;
} bat_t;
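
/* Sketch (editor's illustration; field meanings per the standard
 * non-601 BAT layout, values hypothetical): a 128KB (batMin)
 * supervisor-only, cache-inhibited, guarded mapping of physical
 * 0x80000000 at the same effective address.
 *
 *	bat_t bat;
 *	bat.upper.bits.bepi = 0x80000000 >> 17;	// effective block index
 *	bat.upper.bits.bl   = 0;		// 0 => 128KB block
 *	bat.upper.bits.vs   = 1;		// valid in supervisor state
 *	bat.upper.bits.vp   = 0;		// not valid in user state
 *	bat.lower.bits.brpn = 0x80000000 >> 17;	// physical block number
 *	bat.lower.bits.wimg = 5;		// I and G: uncached, guarded
 *	bat.lower.bits.pp   = 2;		// read/write
 *	mtdbatu(0, bat.upper.word);		// macros defined below
 *	mtdbatl(0, bat.lower.word);
 */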

/* PTE entries
 * Used extensively for standard mappings
 */

typedef union {
	unsigned int word;
	struct {
		unsigned int valid      : 1;
		unsigned int segment_id : 24;
		unsigned int hash_id    : 1;
		unsigned int page_index : 6;	/* Abbreviated */
	} bits;
	struct {
		unsigned int valid      : 1;
		unsigned int not_used   : 5;
		unsigned int segment_id : 19;	/* Least Sig 19 bits */
		unsigned int hash_id    : 1;
		unsigned int page_index : 6;
	} hash_bits;
} pte0_t;

typedef union {
	unsigned int word;
	struct {
		unsigned int phys_page  : 20;
		unsigned int reserved3  : 3;
		unsigned int referenced : 1;
		unsigned int changed    : 1;
		unsigned int wimg       : 4;
		unsigned int reserved1  : 1;
		unsigned int protection : 2;
	} bits;
} pte1_t;

typedef struct pte_t {
	pte0_t pte0;
	pte1_t pte1;
} pte_t;

#define PTE_NULL	((pte_t*) NULL)	/* No pte found/associated with this */
#define PTE_EMPTY	0x7fffffbf	/* Value in the pte0.word of a free pte */

#define PTE_WIMG_CB_CACHED			0 /* cached, writeback */
#define PTE_WIMG_CB_CACHED_GUARDED		1 /* cached, writeback, guarded */
#define PTE_WIMG_CB_CACHED_COHERENT		2 /* cached, writeback, coherent (default) */
#define PTE_WIMG_CB_CACHED_COHERENT_GUARDED	3 /* cached, writeback, coherent, guarded */
#define PTE_WIMG_UNCACHED			4 /* uncached */
#define PTE_WIMG_UNCACHED_GUARDED		5 /* uncached, guarded */
#define PTE_WIMG_UNCACHED_COHERENT		6 /* uncached, coherent */
#define PTE_WIMG_UNCACHED_COHERENT_GUARDED	7 /* uncached, coherent, guarded */
#define PTE_WIMG_WT_CACHED			8 /* cached, writethru */
#define PTE_WIMG_WT_CACHED_GUARDED		9 /* cached, writethru, guarded */
#define PTE_WIMG_WT_CACHED_COHERENT		10 /* cached, writethru, coherent */
#define PTE_WIMG_WT_CACHED_COHERENT_GUARDED	11 /* cached, writethru, coherent, guarded */

#define PTE_WIMG_DEFAULT	PTE_WIMG_CB_CACHED_COHERENT
#define PTE_WIMG_IO		PTE_WIMG_UNCACHED_COHERENT_GUARDED
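
/* Note (editor's observation): the values above are the WIMG bits
 * themselves - W (writethrough) = 8, I (cache-inhibit) = 4,
 * M (memory coherence) = 2, G (guarded) = 1. For example,
 * PTE_WIMG_IO == 7 == I | M | G: cache-inhibited, coherent, guarded.
 */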

/*
 * A virtual address is decoded into various parts when looking for its PTE
 */

typedef struct va_full_t {
	unsigned int seg_num    : 4;
	unsigned int page_index : 16;
	unsigned int byte_ofs   : 12;
} va_full_t;

typedef struct va_abbrev_t { /* use bits.abbrev for abbreviated page index */
	unsigned int seg_num    : 4;
	unsigned int page_index : 6;
	unsigned int junk       : 10;
	unsigned int byte_ofs   : 12;
} va_abbrev_t;

typedef union {
	unsigned int word;
	va_full_t full;
	va_abbrev_t abbrev;
} virtual_addr_t;
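
/* Worked example (editor's illustration): decoding the effective
 * address 0x12345678 through virtual_addr_t.full gives
 *
 *	seg_num    = 0x1	(bits 0-3, selects sr1)
 *	page_index = 0x2345	(bits 4-19)
 *	byte_ofs   = 0x678	(bits 20-31)
 */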

/* A physical address can be split up into page and offset */

typedef struct pa_t {
	unsigned int page_no : 20;
	unsigned int offset  : 12;
} pa_t;

typedef union {
	unsigned int word;
	pa_t bits;
} physical_addr_t;

/*
 * C-helper inline functions for accessing machine registers follow.
 */


#ifdef __ELF__
#define __CASMNL__ ";"
#else
#define __CASMNL__ "@"
#endif

/* Return the current GOT pointer */

extern unsigned int get_got(void);

extern __inline__ unsigned int get_got(void)
{
	unsigned int result;
#ifndef __ELF__
	__asm__ volatile("mr %0, r2" : "=r" (result));
#else
	__asm__ volatile("mr %0, 2" : "=r" (result));
#endif
	return result;
}

/*
 * Various memory/IO synchronisation instructions
 */

/* Use eieio as a memory barrier to order stores.
 * Useful for device control and PTE maintenance.
 */

#define eieio() \
	__asm__ volatile("eieio")

/* Use sync to ensure previous stores have completed.
 * This is required when manipulating locks and/or
 * maintaining PTEs or other shared structures on SMP
 * machines.
 */

#define sync() \
	__asm__ volatile("sync")

/* Use isync to synchronize context; that is, to ensure that no
 * instructions following the isync are prefetched or executed
 * before all preceding instructions have completed.
 */

#define isync() \
	__asm__ volatile("isync")


/*
 * This guy will make sure all tlbs on all processors finish their tlbies
 */
#define tlbsync() \
	__asm__ volatile("tlbsync")


/* Invalidate TLB entry. Caution: requires context synchronization.
 */
extern void tlbie(unsigned int val);

extern __inline__ void tlbie(unsigned int val)
{
	__asm__ volatile("tlbie %0" : : "r" (val));
	return;
}
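
/* Usage sketch (an assumption about a typical sequence, not original
 * code): on SMP machines a PTE invalidation generally brackets tlbie
 * with synchronization so every processor sees a consistent TLB.
 *
 *	sync();		// push out the PTE update
 *	tlbie(ea);	// invalidate the entry for effective address ea
 *	tlbsync();	// wait for other processors' invalidates
 *	sync();		// ensure tlbsync has completed
 *
 * ea is a hypothetical effective address within the unmapped page.
 */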

/*
 * Access to various system registers
 */

extern unsigned int mflr(void);

extern __inline__ unsigned int mflr(void)
{
	unsigned int result;
	__asm__ volatile("mflr %0" : "=r" (result));
	return result;
}

extern unsigned int mfpvr(void);

extern __inline__ unsigned int mfpvr(void)
{
	unsigned int result;
	__asm__ ("mfpvr %0" : "=r" (result));
	return result;
}

/* mtmsr might need syncs etc around it, don't provide simple
 * inline macro
 */

extern unsigned int mfmsr(void);

extern __inline__ unsigned int mfmsr(void)
{
	unsigned int result;
	__asm__ volatile("mfmsr %0" : "=r" (result));
	return result;
}

/* mtsr and mfsr must be macros since SR must be hardcoded */

#if __ELF__
#define mtsr(SR, REG) \
	__asm__ volatile("sync" __CASMNL__ "mtsr %0, %1 " __CASMNL__ "isync" : : "i" (SR), "r" (REG));
#define mfsr(REG, SR) \
	__asm__ volatile("mfsr %0, %1" : "=r" (REG) : "i" (SR));
#else
#define mtsr(SR, REG) \
	__asm__ volatile("sync" __CASMNL__ "mtsr sr%0, %1 " __CASMNL__ "isync" : : "i" (SR), "r" (REG));

#define mfsr(REG, SR) \
	__asm__ volatile("mfsr %0, sr%1" : "=r" (REG) : "i" (SR));
#endif
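
/* Usage sketch (editor's illustration): because the segment-register
 * number must be an immediate, callers pass a literal or a #define,
 * e.g. with SR_COPYIN_NUM from above (new_sr is hypothetical):
 *
 *	unsigned int old_sr;
 *	mfsr(old_sr, SR_COPYIN_NUM);	// read sr14
 *	mtsr(SR_COPYIN_NUM, new_sr);	// write sr14, with sync/isync
 */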

extern void mtsrin(unsigned int val, unsigned int reg);

extern __inline__ void mtsrin(unsigned int val, unsigned int reg)
{
	__asm__ volatile("sync" __CASMNL__ "mtsrin %0, %1" __CASMNL__ " isync" : : "r" (val), "r" (reg));
	return;
}

extern unsigned int mfsrin(unsigned int reg);

extern __inline__ unsigned int mfsrin(unsigned int reg)
{
	unsigned int result;
	__asm__ volatile("mfsrin %0, %1" : "=r" (result) : "r" (reg));
	return result;
}

extern void mtsdr1(unsigned int val);

extern __inline__ void mtsdr1(unsigned int val)
{
	__asm__ volatile("mtsdr1 %0" : : "r" (val));
	return;
}

extern void mtdar(unsigned int val);

extern __inline__ void mtdar(unsigned int val)
{
	__asm__ volatile("mtdar %0" : : "r" (val));
	return;
}

extern unsigned int mfdar(void);

extern __inline__ unsigned int mfdar(void)
{
	unsigned int result;
	__asm__ volatile("mfdar %0" : "=r" (result));
	return result;
}

extern void mtdec(unsigned int val);

extern __inline__ void mtdec(unsigned int val)
{
	__asm__ volatile("mtdec %0" : : "r" (val));
	return;
}

extern int isync_mfdec(void);

extern __inline__ int isync_mfdec(void)
{
	int result;
	__asm__ volatile("isync" __CASMNL__ "mfdec %0" : "=r" (result));
	return result;
}

/* Read and write the real-time clock (601) or time base (603/604)
 * registers. The caller is responsible for choosing the registers
 * that match the processor, and for handling carries between the
 * upper and lower words.
 */

extern void mttb(unsigned int val);

extern __inline__ void mttb(unsigned int val)
{
	__asm__ volatile("mtspr tbl, %0" : : "r" (val));
	return;
}

extern unsigned int mftb(void);

extern __inline__ unsigned int mftb(void)
{
	unsigned int result;
	__asm__ volatile("mftb %0" : "=r" (result));
	return result;
}

extern void mttbu(unsigned int val);

extern __inline__ void mttbu(unsigned int val)
{
	__asm__ volatile("mtspr tbu, %0" : : "r" (val));
	return;
}

extern unsigned int mftbu(void);

extern __inline__ unsigned int mftbu(void)
{
	unsigned int result;
	__asm__ volatile("mftbu %0" : "=r" (result));
	return result;
}
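
/* Sketch (an assumption, not original code): reading the full 64-bit
 * time base on 603/604-class processors without losing a carry, per
 * the caution above - re-read the upper half until it is stable.
 *
 *	unsigned int hi, lo;
 *	do {
 *		hi = mftbu();
 *		lo = mftb();
 *	} while (hi != mftbu());
 */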

extern void mtrtcl(unsigned int val);

extern __inline__ void mtrtcl(unsigned int val)
{
	__asm__ volatile("mtspr 21,%0" : : "r" (val));
	return;
}

extern unsigned int mfrtcl(void);

extern __inline__ unsigned int mfrtcl(void)
{
	unsigned int result;
	__asm__ volatile("mfspr %0,5" : "=r" (result));
	return result;
}

extern void mtrtcu(unsigned int val);

extern __inline__ void mtrtcu(unsigned int val)
{
	__asm__ volatile("mtspr 20,%0" : : "r" (val));
	return;
}

extern unsigned int mfrtcu(void);

extern __inline__ unsigned int mfrtcu(void)
{
	unsigned int result;
	__asm__ volatile("mfspr %0,4" : "=r" (result));
	return result;
}

extern void mtl2cr(unsigned int val);

extern __inline__ void mtl2cr(unsigned int val)
{
	__asm__ volatile("mtspr l2cr, %0" : : "r" (val));
	return;
}

extern unsigned int mfl2cr(void);

extern __inline__ unsigned int mfl2cr(void)
{
	unsigned int result;
	__asm__ volatile("mfspr %0, l2cr" : "=r" (result));
	return result;
}

extern unsigned int cntlzw(unsigned int num);

extern __inline__ unsigned int cntlzw(unsigned int num)
{
	unsigned int result;
	__asm__ volatile("cntlzw %0, %1" : "=r" (result) : "r" (num));
	return result;
}
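
/* Example (editor's illustration): cntlzw counts leading zero bits,
 * so cntlzw(0x00008000) == 16 and cntlzw(0) == 32; for nonzero x,
 * 31 - cntlzw(x) is floor(log2(x)).
 */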


/* functions for doing byte reversed loads and stores */

extern unsigned int lwbrx(unsigned int addr);

extern __inline__ unsigned int lwbrx(unsigned int addr)
{
	unsigned int result;
	__asm__ volatile("lwbrx %0, 0, %1" : "=r" (result) : "r" (addr));
	return result;
}

extern void stwbrx(unsigned int data, unsigned int addr);

extern __inline__ void stwbrx(unsigned int data, unsigned int addr)
{
	__asm__ volatile("stwbrx %0, 0, %1" : : "r" (data), "r" (addr));
}
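
/* Usage sketch (editor's illustration; dev_reg is a hypothetical
 * little-endian device register address): the byte-reversed forms let
 * big-endian kernel code access little-endian hardware directly.
 *
 *	unsigned int status = lwbrx(dev_reg);	// byte-swapped load
 *	stwbrx(status | 0x1, dev_reg);		// byte-swapped store
 *	eieio();				// order the device store
 */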

/* Performance Monitor Register access routines */
extern unsigned long mfmmcr0(void);
extern void mtmmcr0(unsigned long);
extern unsigned long mfmmcr1(void);
extern void mtmmcr1(unsigned long);
extern unsigned long mfmmcr2(void);
extern void mtmmcr2(unsigned long);
extern unsigned long mfpmc1(void);
extern void mtpmc1(unsigned long);
extern unsigned long mfpmc2(void);
extern void mtpmc2(unsigned long);
extern unsigned long mfpmc3(void);
extern void mtpmc3(unsigned long);
extern unsigned long mfpmc4(void);
extern void mtpmc4(unsigned long);
extern unsigned long mfsia(void);
extern unsigned long mfsda(void);

/* macros since the argument n is a hard-coded constant */

#define mtibatu(n, reg) __asm__ volatile("mtibatu " # n ", %0" : : "r" (reg))
#define mtibatl(n, reg) __asm__ volatile("mtibatl " # n ", %0" : : "r" (reg))

#define mtdbatu(n, reg) __asm__ volatile("mtdbatu " # n ", %0" : : "r" (reg))
#define mtdbatl(n, reg) __asm__ volatile("mtdbatl " # n ", %0" : : "r" (reg))

#define mfibatu(reg, n) __asm__ volatile("mfibatu %0, " # n : "=r" (reg))
#define mfibatl(reg, n) __asm__ volatile("mfibatl %0, " # n : "=r" (reg))

#define mfdbatu(reg, n) __asm__ volatile("mfdbatu %0, " # n : "=r" (reg))
#define mfdbatl(reg, n) __asm__ volatile("mfdbatl %0, " # n : "=r" (reg))

#define mtsprg(n, reg) __asm__ volatile("mtsprg " # n ", %0" : : "r" (reg))
#define mfsprg(reg, n) __asm__ volatile("mfsprg %0, " # n : "=r" (reg))

#define mtspr(spr, val) __asm__ volatile("mtspr " # spr ", %0" : : "r" (val))
#define mfspr(reg, spr) __asm__ volatile("mfspr %0, " # spr : "=r" (reg))
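
/* Usage sketch (editor's illustration): the register number must be a
 * compile-time token, so these macros are used with literals, e.g.
 * saving a scratch value in SPRG0 (val is hypothetical):
 *
 *	mtsprg(0, val);		// write SPRG0
 *	mfsprg(val, 0);		// read it back
 */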

#endif /* __GNUC__ */
#endif /* !ASSEMBLER */

#endif /* _PPC_PROC_REG_H_ */