/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#ifndef _PPC_PROC_REG_H_
#define _PPC_PROC_REG_H_

#include <mach/boolean.h>

/* Define some useful masks that convert from bit numbers */

#if __PPC__
#if _BIG_ENDIAN
#ifndef ENDIAN_MASK
#define ENDIAN_MASK(val,size) (1 << ((size-1) - val))
#endif
#else
#error code not ported to little endian targets yet
#endif /* _BIG_ENDIAN */
#endif /* __PPC__ */

#define MASK32(PART) ENDIAN_MASK(PART ## _BIT, 32)
#define MASK16(PART) ENDIAN_MASK(PART ## _BIT, 16)
#define MASK8(PART) ENDIAN_MASK(PART ## _BIT, 8)

#undef MASK
#define MASK(PART) MASK32(PART)

#define BITS_PER_WORD 32
#define BITS_PER_WORD_POW2 5

/* Defines for decoding the MSR bits */

#define MSR_SF_BIT 0
#define MSR_RES1_BIT 1
#define MSR_RES2_BIT 2
#define MSR_RES3_BIT 3
#define MSR_RES4_BIT 4
#define MSR_RES5_BIT 5
#define MSR_VEC_BIT 6
#define MSR_RES7_BIT 7
#define MSR_RES8_BIT 8
#define MSR_RES9_BIT 9
#define MSR_RES10_BIT 10
#define MSR_RES11_BIT 11
#define MSR_KEY_BIT 12 /* Key bit on 603e (not on 603) */
#define MSR_POW_BIT 13
#define MSR_TGPR_BIT 14 /* Temporary GPR mappings on 603/603e */
#define MSR_ILE_BIT 15
#define MSR_EE_BIT 16
#define MSR_PR_BIT 17
#define MSR_FP_BIT 18
#define MSR_ME_BIT 19
#define MSR_FE0_BIT 20
#define MSR_SE_BIT 21
#define MSR_BE_BIT 22
#define MSR_FE1_BIT 23
#define MSR_RES24_BIT 24 /* AL bit in POWER architecture */
#define MSR_IP_BIT 25
#define MSR_IR_BIT 26
#define MSR_DR_BIT 27
#define MSR_RES28_BIT 28
#define MSR_PM_BIT 29
#define MSR_RI_BIT 30
#define MSR_LE_BIT 31

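/* Worked example (illustrative, not part of the original header): MASK()
 * turns an IBM-style bit number (bit 0 is the most significant bit) into a
 * 32-bit mask, so MASK(MSR_EE) == 1 << (31 - 16) == 0x00008000.
 */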
/* MSR for kernel mode, interrupts disabled, running in virtual mode */
#define MSR_SUPERVISOR_INT_OFF (MASK(MSR_ME) | MASK(MSR_IR) | MASK(MSR_DR))

/* MSR for above but with interrupts enabled */
#define MSR_SUPERVISOR_INT_ON (MSR_SUPERVISOR_INT_OFF | MASK(MSR_EE))

/* MSR for physical mode code */
#define MSR_VM_OFF (MASK(MSR_ME))

/* MSR for physical instruction, virtual data */
#define MSR_PHYS_INST_VIRT_DATA (MASK(MSR_ME) | MASK(MSR_IR))

/* MSR mask for user-exported bits - identify bits that must be set/reset */

/* SET - external exceptions, machine check, vm on, user-level privs */
#define MSR_EXPORT_MASK_SET (MASK(MSR_EE) | MASK(MSR_ME) | \
                             MASK(MSR_IR) | MASK(MSR_DR) | MASK(MSR_PR))

/* only the following bits may be changed by a task */
#define MSR_IMPORT_BITS (MASK(MSR_FE0) | MASK(MSR_SE) | MASK(MSR_BE) | \
                         MASK(MSR_FE1) | MASK(MSR_PM) | MASK(MSR_LE))

#define MSR_PREPARE_FOR_IMPORT(origmsr, newmsr) \
        ((origmsr & ~MSR_IMPORT_BITS) | (newmsr & MSR_IMPORT_BITS))

#define MSR_VEC_ON (MASK(MSR_VEC))

#define USER_MODE(msr) (msr & MASK(MSR_PR) ? TRUE : FALSE)

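/* Illustrative sketch (example only, not from the original sources): using
 * MSR_PREPARE_FOR_IMPORT to merge a user-supplied MSR into an existing one,
 * keeping the kernel-controlled bits intact, then testing the privilege
 * level of the result.
 */
#if 0 /* example only */
static unsigned int
example_sanitize_msr(unsigned int origmsr, unsigned int usermsr)
{
    /* only FE0/SE/BE/FE1/PM/LE are taken from usermsr */
    unsigned int newmsr = MSR_PREPARE_FOR_IMPORT(origmsr, usermsr);

    if (USER_MODE(newmsr)) {
        /* PR is set: the context will run unprivileged */
    }
    return newmsr;
}
#endif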
/* seg reg values must be simple expressions so that the assembler can cope */
#define SEG_REG_INVALID 0x0000
#define KERNEL_SEG_REG0_VALUE 0x20000000 /* T=0,Ks=0,Ku=1 PPC_SID_KERNEL=0 */

/* For SEG_REG_PROT we have T=0, Ks=0, Ku=1 */
#define SEG_REG_PROT 0x20000000 /* seg regs should have these bits set */

/* SR_COPYIN is used for copyin/copyout+remapping and must be
 * saved and restored in the thread context.
 */
/* SR_UNUSED_BY_KERN is unused by the kernel, and thus contains
 * the space ID of the currently interrupted user task immediately
 * after an exception and before interrupts are reenabled. It's used
 * purely for an assert.
 */

/* SR_KERNEL is used for asserts... */

#define SR_COPYIN sr14
#define SR_UNUSED_BY_KERN sr13
#define SR_KERNEL sr0

#define SR_UNUSED_BY_KERN_NUM 13
#define SR_COPYIN_NAME sr14
#define SR_COPYIN_NUM 14


/* DSISR bits on data access exceptions */

#define DSISR_IO_BIT 0 /* NOT USED on 601 */
#define DSISR_HASH_BIT 1
#define DSISR_PROT_BIT 4
#define DSISR_IO_SPC_BIT 5
#define DSISR_WRITE_BIT 6
#define DSISR_WATCH_BIT 9
#define DSISR_EIO_BIT 11

/* SRR1 bits on data/instruction translation exceptions */

#define SRR1_TRANS_HASH_BIT 1
#define SRR1_TRANS_IO_BIT 3
#define SRR1_TRANS_PROT_BIT 4
#define SRR1_TRANS_NO_PTE_BIT 10

/* SRR1 bits on program exceptions */

#define SRR1_PRG_FE_BIT 11
#define SRR1_PRG_ILL_INS_BIT 12
#define SRR1_PRG_PRV_INS_BIT 13
#define SRR1_PRG_TRAP_BIT 14

/* BAT information */

/* Constants used when setting mask values */

#define BAT_INVALID 0

/*
 * Virtual to physical mapping macros/structures.
 * IMPORTANT NOTE: there is one mapping per HW page, not per MACH page.
 */

#define CACHE_LINE_SIZE 32
#define CACHE_LINE_POW2 5
#define cache_align(x) (((x) + CACHE_LINE_SIZE-1) & ~(CACHE_LINE_SIZE - 1))

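/* Worked example (illustrative): cache_align() rounds up to the next
 * 32-byte boundary, e.g. cache_align(33) == 64 and cache_align(64) == 64.
 */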
#define PTE1_WIMG_GUARD_BIT 28 /* Needed for assembler */
#define PTE1_REFERENCED_BIT 23 /* ditto */
#define PTE1_CHANGED_BIT 24
#define PTE0_HASH_ID_BIT 25

#define PTE_NULL ((pte_t*) NULL) /* No pte found/associated with this */
#define PTE_EMPTY 0x7fffffbf /* Value in the pte0.word of a free pte */

#define PTE_WIMG_CB_CACHED 0 /* cached, writeback */
#define PTE_WIMG_CB_CACHED_GUARDED 1 /* cached, writeback, guarded */
#define PTE_WIMG_CB_CACHED_COHERENT 2 /* cached, writeback, coherent (default) */
#define PTE_WIMG_CB_CACHED_COHERENT_GUARDED 3 /* cached, writeback, coherent, guarded */
#define PTE_WIMG_UNCACHED 4 /* uncached */
#define PTE_WIMG_UNCACHED_GUARDED 5 /* uncached, guarded */
#define PTE_WIMG_UNCACHED_COHERENT 6 /* uncached, coherent */
#define PTE_WIMG_UNCACHED_COHERENT_GUARDED 7 /* uncached, coherent, guarded */
#define PTE_WIMG_WT_CACHED 8 /* cached, writethru */
#define PTE_WIMG_WT_CACHED_GUARDED 9 /* cached, writethru, guarded */
#define PTE_WIMG_WT_CACHED_COHERENT 10 /* cached, writethru, coherent */
#define PTE_WIMG_WT_CACHED_COHERENT_GUARDED 11 /* cached, writethru, coherent, guarded */

#define PTE_WIMG_DEFAULT PTE_WIMG_CB_CACHED_COHERENT
#define PTE_WIMG_IO PTE_WIMG_UNCACHED_COHERENT_GUARDED



#ifndef ASSEMBLER
#ifdef __GNUC__

#if _BIG_ENDIAN == 0
#error - bitfield structures are not checked for bit ordering in words
#endif /* _BIG_ENDIAN */

/* Structures and types for machine registers */

typedef union {
    unsigned int word;
    struct {
        unsigned int htaborg  : 16;
        unsigned int reserved : 7;
        unsigned int htabmask : 9;
    } bits;
} sdr1_t;

/* Block mapping registers. These values are model dependent.
 * Eventually, we will need to up these to 64-bit values.
 */

#define blokValid 0x1FFE0000
#define batMin 0x00020000
#define batMax 0x10000000
#define batICnt 4
#define batDCnt 4

/* BAT register structures.
 * Not used for standard mappings, but may be used
 * for mapping devices. Note that the 601 has a
 * different BAT layout than the other PowerPC processors.
 */

typedef union {
    unsigned int word;
    struct {
        unsigned int blpi     : 15;
        unsigned int reserved : 10;
        unsigned int wim      : 3;
        unsigned int ks       : 1;
        unsigned int ku       : 1;
        unsigned int pp       : 2;
    } bits;
} bat601u_t;

typedef union {
    unsigned int word;
    struct {
        unsigned int pbn      : 15;
        unsigned int reserved : 10;
        unsigned int valid    : 1;
        unsigned int bsm      : 6;
    } bits;
} bat601l_t;

typedef struct bat601_t {
    bat601u_t upper;
    bat601l_t lower;
} bat601_t;

typedef union {
    unsigned int word;
    struct {
        unsigned int bepi     : 15;
        unsigned int reserved : 4;
        unsigned int bl       : 11;
        unsigned int vs       : 1;
        unsigned int vp       : 1;
    } bits;
} batu_t;

typedef union {
    unsigned int word;
    struct {
        unsigned int brpn      : 15;
        unsigned int reserved  : 10;
        unsigned int wimg      : 4;
        unsigned int reserved2 : 1;
        unsigned int pp        : 2;
    } bits;
} batl_t;

typedef struct bat_t {
    batu_t upper;
    batl_t lower;
} bat_t;

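/* Illustrative sketch (example only, not kernel policy): programming DBAT3
 * to map a hypothetical 256MB I/O range one-to-one at 0x80000000 using the
 * structures above. All field values here are examples; the mtdbatu/mtdbatl
 * and isync primitives are defined later in this header.
 */
#if 0 /* example only */
static void example_map_io_bat(void)
{
    bat_t bat;

    bat.upper.word = 0;
    bat.upper.bits.bepi = 0x80000000 >> 17; /* effective address bits */
    bat.upper.bits.bl   = 0x7FF;            /* 256MB block length */
    bat.upper.bits.vs   = 1;                /* valid in supervisor state */

    bat.lower.word = 0;
    bat.lower.bits.brpn = 0x80000000 >> 17; /* identity mapping */
    bat.lower.bits.wimg = PTE_WIMG_IO;      /* uncached, coherent, guarded */
    bat.lower.bits.pp   = 2;                /* read/write */

    mtdbatu(3, 0);              /* invalidate before changing */
    mtdbatl(3, bat.lower.word);
    mtdbatu(3, bat.upper.word); /* validate last */
    isync();
}
#endif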
/* PTE entries
 * Used extensively for standard mappings
 */

typedef union {
    unsigned int word;
    struct {
        unsigned int valid      : 1;
        unsigned int segment_id : 24;
        unsigned int hash_id    : 1;
        unsigned int page_index : 6; /* Abbreviated */
    } bits;
    struct {
        unsigned int valid      : 1;
        unsigned int not_used   : 5;
        unsigned int segment_id : 19; /* Least Sig 19 bits */
        unsigned int hash_id    : 1;
        unsigned int page_index : 6;
    } hash_bits;
} pte0_t;

typedef union {
    unsigned int word;
    struct {
        unsigned int phys_page  : 20;
        unsigned int reserved3  : 3;
        unsigned int referenced : 1;
        unsigned int changed    : 1;
        unsigned int wimg       : 4;
        unsigned int reserved1  : 1;
        unsigned int protection : 2;
    } bits;
} pte1_t;

typedef struct pte_t {
    pte0_t pte0;
    pte1_t pte1;
} pte_t;

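/* Illustrative sketch (example only): filling in the second word of a PTE
 * with the default WIMG attributes. The protection value 2 (read/write) is
 * an example, not kernel policy.
 */
#if 0 /* example only */
static void example_fill_pte1(pte_t *pte, unsigned int phys_page)
{
    pte->pte1.word = 0;
    pte->pte1.bits.phys_page  = phys_page;
    pte->pte1.bits.wimg       = PTE_WIMG_DEFAULT; /* cached, writeback, coherent */
    pte->pte1.bits.protection = 2;
}
#endif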
/*
 * A virtual address is decoded into various parts when looking for its PTE
 */

typedef struct va_full_t {
    unsigned int seg_num    : 4;
    unsigned int page_index : 16;
    unsigned int byte_ofs   : 12;
} va_full_t;

typedef struct va_abbrev_t { /* use bits.abbrev for abbreviated page index */
    unsigned int seg_num    : 4;
    unsigned int page_index : 6;
    unsigned int junk       : 10;
    unsigned int byte_ofs   : 12;
} va_abbrev_t;

typedef union {
    unsigned int word;
    va_full_t full;
    va_abbrev_t abbrev;
} virtual_addr_t;

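/* Worked example (illustrative): decoding the address 0x20003456 with the
 * union above gives full.seg_num == 0x2, full.page_index == 0x0003 and
 * full.byte_ofs == 0x456.
 */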
/* A physical address can be split up into page and offset */

typedef struct pa_t {
    unsigned int page_no : 20;
    unsigned int offset  : 12;
} pa_t;

typedef union {
    unsigned int word;
    pa_t bits;
} physical_addr_t;

/*
 * C-helper inline functions for accessing machine registers follow.
 */


#ifdef __ELF__
#define __CASMNL__ ";"
#else
#define __CASMNL__ "@"
#endif

/* Return the current GOT pointer */

extern unsigned int get_got(void);

extern __inline__ unsigned int get_got(void)
{
    unsigned int result;
#ifndef __ELF__
    __asm__ volatile("mr %0, r2" : "=r" (result));
#else
    __asm__ volatile("mr %0, 2" : "=r" (result));
#endif
    return result;
}

/*
 * Various memory/IO synchronisation instructions
 */

/* Use eieio as a memory barrier to order stores.
 * Useful for device control and PTE maintenance.
 */

#define eieio() \
    __asm__ volatile("eieio")

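/* Illustrative usage (example only; DEV_REG_CMD and DEV_REG_GO are
 * hypothetical device register addresses): eieio keeps the two stores
 * ordered as seen by the device.
 */
#if 0 /* example only */
*(volatile unsigned int *)DEV_REG_CMD = cmd; /* first store... */
eieio();                                     /* ...is ordered before... */
*(volatile unsigned int *)DEV_REG_GO = 1;    /* ...the second store */
#endif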
/* Use sync to ensure previous stores have completed.
 * This is required when manipulating locks and/or
 * maintaining PTEs or other shared structures on SMP
 * machines.
 */

#define sync() \
    __asm__ volatile("sync")

/* Use isync to synchronize context; that is, to ensure that
 * instructions following the isync are not prefetched or executed
 * until all instructions before it have completed.
 */

#define isync() \
    __asm__ volatile("isync")


/*
 * tlbsync waits until all processors have completed any outstanding
 * tlbie operations.
 */
#define tlbsync() \
    __asm__ volatile("tlbsync")

/* Invalidate TLB entry. Caution, requires context synchronization.
 */
extern void tlbie(unsigned int val);

extern __inline__ void tlbie(unsigned int val)
{
    __asm__ volatile("tlbie %0" : : "r" (val));
    return;
}

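/* Illustrative sequence (example only; the exact sequence is model
 * dependent): invalidating the TLB entry for effective address ea on an
 * SMP machine, combining the primitives above.
 */
#if 0 /* example only */
sync();    /* make prior PTE updates visible */
tlbie(ea); /* invalidate the entry for ea */
sync();
tlbsync(); /* wait for all processors to complete the invalidate */
sync();
#endif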


/*
 * Access to various system registers
 */

extern unsigned int mflr(void);

extern __inline__ unsigned int mflr(void)
{
    unsigned int result;
    __asm__ volatile("mflr %0" : "=r" (result));
    return result;
}

extern unsigned int mfpvr(void);

extern __inline__ unsigned int mfpvr(void)
{
    unsigned int result;
    __asm__ ("mfpvr %0" : "=r" (result));
    return result;
}

/* mtmsr might need syncs etc. around it, so we don't provide a
 * simple inline macro for it.
 */

extern unsigned int mfmsr(void);

extern __inline__ unsigned int mfmsr(void)
{
    unsigned int result;
    __asm__ volatile("mfmsr %0" : "=r" (result));
    return result;
}

/* mtsr and mfsr must be macros since SR must be hardcoded */

#if __ELF__
#define mtsr(SR, REG) \
    __asm__ volatile("sync" __CASMNL__ "mtsr %0, %1 " __CASMNL__ "isync" : : "i" (SR), "r" (REG));
#define mfsr(REG, SR) \
    __asm__ volatile("mfsr %0, %1" : "=r" (REG) : "i" (SR));
#else
#define mtsr(SR, REG) \
    __asm__ volatile("sync" __CASMNL__ "mtsr sr%0, %1 " __CASMNL__ "isync" : : "i" (SR), "r" (REG));

#define mfsr(REG, SR) \
    __asm__ volatile("mfsr %0, sr%1" : "=r" (REG) : "i" (SR));
#endif

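/* Illustrative usage (example only): the segment register number must be a
 * compile-time constant, which is why mtsr/mfsr are macros. The values
 * shown are examples.
 */
#if 0 /* example only */
unsigned int srval;
mfsr(srval, 14);                /* read SR14 (SR_COPYIN) */
mtsr(14, srval | SEG_REG_PROT); /* write it back with the Ku bit set */
#endif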

extern void mtsrin(unsigned int val, unsigned int reg);

extern __inline__ void mtsrin(unsigned int val, unsigned int reg)
{
    __asm__ volatile("sync" __CASMNL__ "mtsrin %0, %1" __CASMNL__ " isync" : : "r" (val), "r" (reg));
    return;
}

extern unsigned int mfsrin(unsigned int reg);

extern __inline__ unsigned int mfsrin(unsigned int reg)
{
    unsigned int result;
    __asm__ volatile("mfsrin %0, %1" : "=r" (result) : "r" (reg));
    return result;
}

extern void mtsdr1(unsigned int val);

extern __inline__ void mtsdr1(unsigned int val)
{
    __asm__ volatile("mtsdr1 %0" : : "r" (val));
    return;
}

extern void mtdar(unsigned int val);

extern __inline__ void mtdar(unsigned int val)
{
    __asm__ volatile("mtdar %0" : : "r" (val));
    return;
}

extern unsigned int mfdar(void);

extern __inline__ unsigned int mfdar(void)
{
    unsigned int result;
    __asm__ volatile("mfdar %0" : "=r" (result));
    return result;
}

extern void mtdec(unsigned int val);

extern __inline__ void mtdec(unsigned int val)
{
    __asm__ volatile("mtdec %0" : : "r" (val));
    return;
}

extern int isync_mfdec(void);

extern __inline__ int isync_mfdec(void)
{
    int result;
    __asm__ volatile("isync" __CASMNL__ "mfdec %0" : "=r" (result));
    return result;
}

/* Read and write the value of the real-time clock
 * or time base registers. Note that you have to
 * use the right ones depending upon whether you are
 * on a 601 or a 603/604. The calling function must
 * take care of carries between the words and of
 * using the right registers.
 */

extern void mttb(unsigned int val);

extern __inline__ void mttb(unsigned int val)
{
    __asm__ volatile("mtspr tbl, %0" : : "r" (val));
    return;
}

extern unsigned int mftb(void);

extern __inline__ unsigned int mftb(void)
{
    unsigned int result;
    __asm__ volatile("mftb %0" : "=r" (result));
    return result;
}

extern void mttbu(unsigned int val);

extern __inline__ void mttbu(unsigned int val)
{
    __asm__ volatile("mtspr tbu, %0" : : "r" (val));
    return;
}

extern unsigned int mftbu(void);

extern __inline__ unsigned int mftbu(void)
{
    unsigned int result;
    __asm__ volatile("mftbu %0" : "=r" (result));
    return result;
}

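/* Illustrative sketch (example only): reading the 64-bit time base on a
 * 603/604 with the carry handling the comment above asks for; re-read the
 * upper word until no carry occurred in between.
 */
#if 0 /* example only */
static unsigned long long example_read_timebase(void)
{
    unsigned int hi, lo, hi2;

    do {
        hi  = mftbu();
        lo  = mftb();
        hi2 = mftbu();
    } while (hi != hi2); /* low word wrapped: try again */

    return ((unsigned long long)hi << 32) | lo;
}
#endif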
extern void mtrtcl(unsigned int val);

extern __inline__ void mtrtcl(unsigned int val)
{
    __asm__ volatile("mtspr 21,%0" : : "r" (val));
    return;
}

extern unsigned int mfrtcl(void);

extern __inline__ unsigned int mfrtcl(void)
{
    unsigned int result;
    __asm__ volatile("mfspr %0,5" : "=r" (result));
    return result;
}

extern void mtrtcu(unsigned int val);

extern __inline__ void mtrtcu(unsigned int val)
{
    __asm__ volatile("mtspr 20,%0" : : "r" (val));
    return;
}

extern unsigned int mfrtcu(void);

extern __inline__ unsigned int mfrtcu(void)
{
    unsigned int result;
    __asm__ volatile("mfspr %0,4" : "=r" (result));
    return result;
}

extern void mtl2cr(unsigned int val);

extern __inline__ void mtl2cr(unsigned int val)
{
    __asm__ volatile("mtspr l2cr, %0" : : "r" (val));
    return;
}

extern unsigned int mfl2cr(void);

extern __inline__ unsigned int mfl2cr(void)
{
    unsigned int result;
    __asm__ volatile("mfspr %0, l2cr" : "=r" (result));
    return result;
}

extern unsigned int cntlzw(unsigned int num);

extern __inline__ unsigned int cntlzw(unsigned int num)
{
    unsigned int result;
    __asm__ volatile("cntlzw %0, %1" : "=r" (result) : "r" (num));
    return result;
}

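/* Worked example (illustrative): cntlzw counts leading zero bits, so
 * cntlzw(0x00008000) == 16, cntlzw(1) == 31 and cntlzw(0) == 32.
 */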

/* functions for doing byte reversed loads and stores */

extern unsigned int lwbrx(unsigned int addr);

extern __inline__ unsigned int lwbrx(unsigned int addr)
{
    unsigned int result;
    __asm__ volatile("lwbrx %0, 0, %1" : "=r" (result) : "r" (addr));
    return result;
}

extern void stwbrx(unsigned int data, unsigned int addr);

extern __inline__ void stwbrx(unsigned int data, unsigned int addr)
{
    __asm__ volatile("stwbrx %0, 0, %1" : : "r" (data), "r" (addr));
}

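/* Worked example (illustrative): lwbrx/stwbrx perform little-endian
 * accesses on this big-endian machine. If the four bytes at addr are
 * 12 34 56 78, then lwbrx(addr) returns 0x78563412; this is handy for
 * little-endian device registers.
 */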
/* Performance Monitor Register access routines */
extern unsigned long mfmmcr0(void);
extern void mtmmcr0(unsigned long);
extern unsigned long mfmmcr1(void);
extern void mtmmcr1(unsigned long);
extern unsigned long mfmmcr2(void);
extern void mtmmcr2(unsigned long);
extern unsigned long mfpmc1(void);
extern void mtpmc1(unsigned long);
extern unsigned long mfpmc2(void);
extern void mtpmc2(unsigned long);
extern unsigned long mfpmc3(void);
extern void mtpmc3(unsigned long);
extern unsigned long mfpmc4(void);
extern void mtpmc4(unsigned long);
extern unsigned long mfsia(void);
extern unsigned long mfsda(void);

/* macros since the argument n is a hard-coded constant */

#define mtibatu(n, reg) __asm__ volatile("mtibatu " # n ", %0" : : "r" (reg))
#define mtibatl(n, reg) __asm__ volatile("mtibatl " # n ", %0" : : "r" (reg))

#define mtdbatu(n, reg) __asm__ volatile("mtdbatu " # n ", %0" : : "r" (reg))
#define mtdbatl(n, reg) __asm__ volatile("mtdbatl " # n ", %0" : : "r" (reg))

#define mfibatu(reg, n) __asm__ volatile("mfibatu %0, " # n : "=r" (reg))
#define mfibatl(reg, n) __asm__ volatile("mfibatl %0, " # n : "=r" (reg))

#define mfdbatu(reg, n) __asm__ volatile("mfdbatu %0, " # n : "=r" (reg))
#define mfdbatl(reg, n) __asm__ volatile("mfdbatl %0, " # n : "=r" (reg))

#define mtsprg(n, reg) __asm__ volatile("mtsprg " # n ", %0" : : "r" (reg))
#define mfsprg(reg, n) __asm__ volatile("mfsprg %0, " # n : "=r" (reg))

#define mtspr(spr, val) __asm__ volatile("mtspr " # spr ", %0" : : "r" (val))
#define mfspr(reg, spr) __asm__ volatile("mfspr %0, " # spr : "=r" (reg))

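/* Illustrative usage (example only): the SPR name or number is pasted
 * directly into the instruction, so it must be a literal constant.
 * 287 is the PVR SPR number.
 */
#if 0 /* example only */
unsigned int pvr;
mfspr(pvr, 287); /* same value mfpvr() returns */
mtsprg(0, pvr);  /* stash it in SPRG0 */
#endif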
#endif /* __GNUC__ */
#endif /* !ASSEMBLER */

#endif /* _PPC_PROC_REG_H_ */