/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#ifndef _PPC_PROC_REG_H_
#define _PPC_PROC_REG_H_

#include <mach/boolean.h>

/* Define some useful masks that convert from bit numbers */

#if __PPC__
#if _BIG_ENDIAN
#ifndef ENDIAN_MASK
#define ENDIAN_MASK(val,size) (1 << ((size-1) - val))
#endif
#else
#error code not ported to little endian targets yet
#endif /* _BIG_ENDIAN */
#endif /* __PPC__ */

#define MASK32(PART)	ENDIAN_MASK(PART ## _BIT, 32)
#define MASK16(PART)	ENDIAN_MASK(PART ## _BIT, 16)
#define MASK8(PART)	ENDIAN_MASK(PART ## _BIT, 8)

#undef MASK
#define MASK(PART)	MASK32(PART)

#define BITS_PER_WORD	32
#define BITS_PER_WORD_POW2	5
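
/* Illustrative example (not in the original header): bit 0 is the most
 * significant bit under this big-endian convention, so for MSR_EE
 * (bit 16, defined below):
 *
 *	MASK(MSR_EE) == ENDIAN_MASK(16, 32) == (1 << (31 - 16)) == 0x00008000
 */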

/* Defines for decoding the MSR bits */

#define MSR_SF_BIT	0
#define MSR_RES1_BIT	1
#define MSR_RES2_BIT	2
#define MSR_RES3_BIT	3
#define MSR_RES4_BIT	4
#define MSR_RES5_BIT	5
#define MSR_VEC_BIT	6
#define MSR_RES7_BIT	7
#define MSR_RES8_BIT	8
#define MSR_RES9_BIT	9
#define MSR_RES10_BIT	10
#define MSR_RES11_BIT	11
#define MSR_KEY_BIT	12	/* Key bit on 603e (not on 603) */
#define MSR_POW_BIT	13
#define MSR_TGPR_BIT	14	/* Temporary GPR mappings on 603/603e */
#define MSR_ILE_BIT	15
#define MSR_EE_BIT	16
#define MSR_PR_BIT	17
#define MSR_FP_BIT	18
#define MSR_ME_BIT	19
#define MSR_FE0_BIT	20
#define MSR_SE_BIT	21
#define MSR_BE_BIT	22
#define MSR_FE1_BIT	23
#define MSR_RES24_BIT	24	/* AL bit in POWER architectures */
#define MSR_IP_BIT	25
#define MSR_IR_BIT	26
#define MSR_DR_BIT	27
#define MSR_RES28_BIT	28
#define MSR_PM_BIT	29
#define MSR_RI_BIT	30
#define MSR_LE_BIT	31

/* MSR for kernel mode, interrupts disabled, running in virtual mode */
#define MSR_SUPERVISOR_INT_OFF (MASK(MSR_ME) | MASK(MSR_IR) | MASK(MSR_DR))

/* MSR for above but with interrupts enabled */
#define MSR_SUPERVISOR_INT_ON (MSR_SUPERVISOR_INT_OFF | MASK(MSR_EE))

/* MSR for physical mode code */
#define MSR_VM_OFF (MASK(MSR_ME))

/* MSR for physical instruction, virtual data */
#define MSR_PHYS_INST_VIRT_DATA (MASK(MSR_ME) | MASK(MSR_IR))

/* MSR mask for user-exported bits - identify bits that must be set/reset */

/* SET - external exceptions, machine check, vm on, user-level privs */
#define MSR_EXPORT_MASK_SET (MASK(MSR_EE) | MASK(MSR_ME) | \
			MASK(MSR_IR) | MASK(MSR_DR) | MASK(MSR_PR))

/* Only the following bits may be changed by a task */
#define MSR_IMPORT_BITS (MASK(MSR_FE0) | MASK(MSR_SE) | MASK(MSR_BE) | \
			MASK(MSR_FE1) | MASK(MSR_PM) | MASK(MSR_LE))

#define MSR_PREPARE_FOR_IMPORT(origmsr, newmsr) \
	((origmsr & ~MSR_IMPORT_BITS) | (newmsr & MSR_IMPORT_BITS))

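/* Illustrative sketch (not in the original header): a task asking to set
 * single-step (MSR_SE) cannot disturb privileged bits, because only
 * MSR_IMPORT_BITS are taken from the new value:
 *
 *	unsigned int cur  = mfmsr();
 *	unsigned int req  = cur | MASK(MSR_SE);
 *	unsigned int safe = MSR_PREPARE_FOR_IMPORT(cur, req);
 *	// safe differs from cur only in user-importable bits
 */
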
#define MSR_VEC_ON	(MASK(MSR_VEC))

#define USER_MODE(msr) (msr & MASK(MSR_PR) ? TRUE : FALSE)

/* Segment register values must be simple expressions so that the assembler can cope */
#define SEG_REG_INVALID 0x0000
#define KERNEL_SEG_REG0_VALUE 0x20000000 /* T=0,Ks=0,Ku=1 PPC_SID_KERNEL=0 */

/* For SEG_REG_PROT we have T=0, Ks=0, Ku=1 */
#define SEG_REG_PROT 0x20000000 /* seg regs should have these bits set */

/* SR_COPYIN is used for copyin/copyout+remapping and must be
 * saved and restored in the thread context.
 */
/* SR_UNUSED_BY_KERN is unused by the kernel, and thus contains
 * the space ID of the currently interrupted user task immediately
 * after an exception and before interrupts are reenabled. It's used
 * purely for an assert.
 */

/* SR_KERNEL is used for asserts... */

#define SR_COPYIN	sr14
#define SR_UNUSED_BY_KERN	sr13
#define SR_KERNEL	sr0

#define SR_UNUSED_BY_KERN_NUM	13
#define SR_COPYIN_NAME	sr14
#define SR_COPYIN_NUM	14


/* DSISR bits on data access exceptions */

#define DSISR_IO_BIT		0	/* NOT USED on 601 */
#define DSISR_HASH_BIT		1
#define DSISR_PROT_BIT		4
#define DSISR_IO_SPC_BIT	5
#define DSISR_WRITE_BIT		6
#define DSISR_WATCH_BIT		9
#define DSISR_EIO_BIT		11

/* SRR1 bits on data/instruction translation exceptions */

#define SRR1_TRANS_HASH_BIT	1
#define SRR1_TRANS_IO_BIT	3
#define SRR1_TRANS_PROT_BIT	4
#define SRR1_TRANS_NO_PTE_BIT	10

/* SRR1 bits on program exceptions */

#define SRR1_PRG_FE_BIT		11
#define SRR1_PRG_ILL_INS_BIT	12
#define SRR1_PRG_PRV_INS_BIT	13
#define SRR1_PRG_TRAP_BIT	14

/* BAT information */

/* Constants used when setting mask values */

#define BAT_INVALID 0

/*
 * Virtual to physical mapping macros/structures.
 * IMPORTANT NOTE: there is one mapping per HW page, not per MACH page.
 */

#define CACHE_LINE_SIZE	32
#define CACHE_LINE_POW2	5
#define cache_align(x)	(((x) + CACHE_LINE_SIZE-1) & ~(CACHE_LINE_SIZE - 1))

#define PTE1_WIMG_GUARD_BIT	28	/* Needed for assembler */
#define PTE1_REFERENCED_BIT	23	/* ditto */
#define PTE1_CHANGED_BIT	24
#define PTE0_HASH_ID_BIT	25

#define PTE_NULL	((pte_t*) NULL)	/* No pte found/associated with this */
#define PTE_EMPTY	0x7fffffbf	/* Value in the pte0.word of a free pte */

#define PTE_WIMG_CB_CACHED			0	/* cached, writeback */
#define PTE_WIMG_CB_CACHED_GUARDED		1	/* cached, writeback, guarded */
#define PTE_WIMG_CB_CACHED_COHERENT		2	/* cached, writeback, coherent (default) */
#define PTE_WIMG_CB_CACHED_COHERENT_GUARDED	3	/* cached, writeback, coherent, guarded */
#define PTE_WIMG_UNCACHED			4	/* uncached */
#define PTE_WIMG_UNCACHED_GUARDED		5	/* uncached, guarded */
#define PTE_WIMG_UNCACHED_COHERENT		6	/* uncached, coherent */
#define PTE_WIMG_UNCACHED_COHERENT_GUARDED	7	/* uncached, coherent, guarded */
#define PTE_WIMG_WT_CACHED			8	/* cached, writethru */
#define PTE_WIMG_WT_CACHED_GUARDED		9	/* cached, writethru, guarded */
#define PTE_WIMG_WT_CACHED_COHERENT		10	/* cached, writethru, coherent */
#define PTE_WIMG_WT_CACHED_COHERENT_GUARDED	11	/* cached, writethru, coherent, guarded */

#define PTE_WIMG_DEFAULT	PTE_WIMG_CB_CACHED_COHERENT
#define PTE_WIMG_IO		PTE_WIMG_UNCACHED_COHERENT_GUARDED


#ifndef ASSEMBLER
#ifdef __GNUC__

#if _BIG_ENDIAN == 0
#error - bitfield structures are not checked for bit ordering in words
#endif /* _BIG_ENDIAN */

/* Structures and types for machine registers */

typedef union {
	unsigned int word;
	struct {
		unsigned int htaborg	: 16;
		unsigned int reserved	: 7;
		unsigned int htabmask	: 9;
	} bits;
} sdr1_t;
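
/* Illustrative sketch (not in the original header): the union view lets
 * the hashed page table base and mask be pulled out of a raw SDR1 value:
 *
 *	sdr1_t s;
 *	s.word = 0x00300003;			// hypothetical SDR1 contents
 *	unsigned int htab_base = s.bits.htaborg << 16;
 *	unsigned int hash_mask = s.bits.htabmask;
 */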

/* Block mapping registers. These values are model dependent.
 * Eventually, we will need to up these to 64 bit values.
 */

#define blokValid	0x1FFE0000
#define batMin		0x00020000
#define batMax		0x10000000
#define batICnt		4
#define batDCnt		4

/* BAT register structures.
 * Not used for standard mappings, but may be used
 * for mapping devices. Note that the 601 has a
 * different BAT layout than the other PowerPC processors.
 */

typedef union {
	unsigned int word;
	struct {
		unsigned int blpi	: 15;
		unsigned int reserved	: 10;
		unsigned int wim	: 3;
		unsigned int ks		: 1;
		unsigned int ku		: 1;
		unsigned int pp		: 2;
	} bits;
} bat601u_t;

typedef union {
	unsigned int word;
	struct {
		unsigned int pbn	: 15;
		unsigned int reserved	: 10;
		unsigned int valid	: 1;
		unsigned int bsm	: 6;
	} bits;
} bat601l_t;

typedef struct bat601_t {
	bat601u_t upper;
	bat601l_t lower;
} bat601_t;

typedef union {
	unsigned int word;
	struct {
		unsigned int bepi	: 15;
		unsigned int reserved	: 4;
		unsigned int bl		: 11;
		unsigned int vs		: 1;
		unsigned int vp		: 1;
	} bits;
} batu_t;

typedef union {
	unsigned int word;
	struct {
		unsigned int brpn	: 15;
		unsigned int reserved	: 10;
		unsigned int wimg	: 4;
		unsigned int reserved2	: 1;
		unsigned int pp		: 2;
	} bits;
} batl_t;

typedef struct bat_t {
	batu_t upper;
	batl_t lower;
} bat_t;

/* PTE entries
 * Used extensively for standard mappings
 */

typedef union {
	unsigned int word;
	struct {
		unsigned int valid	: 1;
		unsigned int segment_id	: 24;
		unsigned int hash_id	: 1;
		unsigned int page_index	: 6;	/* Abbreviated */
	} bits;
	struct {
		unsigned int valid	: 1;
		unsigned int not_used	: 5;
		unsigned int segment_id	: 19;	/* Least Sig 19 bits */
		unsigned int hash_id	: 1;
		unsigned int page_index	: 6;
	} hash_bits;
} pte0_t;

typedef union {
	unsigned int word;
	struct {
		unsigned int phys_page	: 20;
		unsigned int reserved3	: 3;
		unsigned int referenced	: 1;
		unsigned int changed	: 1;
		unsigned int wimg	: 4;
		unsigned int reserved1	: 1;
		unsigned int protection	: 2;
	} bits;
} pte1_t;

typedef struct pte_t {
	pte0_t pte0;
	pte1_t pte1;
} pte_t;

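/* Illustrative sketch (not in the original header): building the second
 * word of a PTE for a hypothetical physical page with default WIMG bits:
 *
 *	pte1_t p1;
 *	p1.word = 0;
 *	p1.bits.phys_page  = 0x12345;		// hypothetical page number
 *	p1.bits.wimg       = PTE_WIMG_DEFAULT;
 *	p1.bits.protection = 2;			// hypothetical read/write PP value
 */
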
/*
 * A virtual address is decoded into various parts when looking for its PTE
 */

typedef struct va_full_t {
	unsigned int seg_num	: 4;
	unsigned int page_index	: 16;
	unsigned int byte_ofs	: 12;
} va_full_t;

typedef struct va_abbrev_t {	/* use bits.abbrev for abbreviated page index */
	unsigned int seg_num	: 4;
	unsigned int page_index	: 6;
	unsigned int junk	: 10;
	unsigned int byte_ofs	: 12;
} va_abbrev_t;

typedef union {
	unsigned int word;
	va_full_t full;
	va_abbrev_t abbrev;
} virtual_addr_t;

/* A physical address can be split up into page and offset */

typedef struct pa_t {
	unsigned int page_no	: 20;
	unsigned int offset	: 12;
} pa_t;

typedef union {
	unsigned int word;
	pa_t bits;
} physical_addr_t;

/*
 * C-helper inline functions for accessing machine registers follow.
 */


#ifdef __ELF__
#define __CASMNL__ ";"
#else
#define __CASMNL__ "@"
#endif

/* Return the current GOT pointer */

extern unsigned int get_got(void);

extern __inline__ unsigned int get_got(void)
{
	unsigned int result;
#ifndef __ELF__
	__asm__ volatile("mr %0, r2" : "=r" (result));
#else
	__asm__ volatile("mr %0, 2" : "=r" (result));
#endif
	return result;
}

/*
 * Various memory/IO synchronisation instructions
 */

/* Use eieio as a memory barrier to order stores.
 * Useful for device control and PTE maintenance.
 */

#define eieio() \
	__asm__ volatile("eieio")

/* Use sync to ensure previous stores have completed.
 * This is required when manipulating locks and/or
 * maintaining PTEs or other shared structures on SMP
 * machines.
 */

#define sync() \
	__asm__ volatile("sync")

/* Use isync to synchronize context; that is, to ensure
 * that no instructions are prefetched past it until all
 * preceding instructions have completed.
 */

#define isync() \
	__asm__ volatile("isync")

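/* Illustrative sketch (not in the original header): a typical
 * device-register write sequence using the barriers above, with a
 * hypothetical address and status bit:
 *
 *	volatile unsigned int *dev_ctl = (volatile unsigned int *)0x80000000;
 *	*dev_ctl = 1;			// start the device
 *	eieio();			// order the store before later accesses
 *	while ((*dev_ctl & 2) == 0)	// poll a hypothetical status bit
 *		continue;
 */
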
/*
 * tlbsync ensures that all processors have completed their
 * outstanding tlbie invalidations
 */
#define tlbsync() \
	__asm__ volatile("tlbsync")


/* Invalidate a TLB entry. Caution: requires context synchronization.
 */
extern void tlbie(unsigned int val);

extern __inline__ void tlbie(unsigned int val)
{
	__asm__ volatile("tlbie %0" : : "r" (val));
	return;
}

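/* Illustrative sketch (not in the original header): a conventional
 * PowerPC MP-safe invalidation of the TLB entry for a virtual address
 * vaddr (a hypothetical variable), combining the primitives above;
 * exact ordering requirements are model dependent:
 *
 *	sync();		// make prior PTE stores visible
 *	tlbie(vaddr);	// invalidate the entry on this processor
 *	eieio();	// order the tlbie before the tlbsync
 *	tlbsync();	// wait for other processors to complete it
 *	sync();		// complete the sequence
 */
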
/*
 * Access to various system registers
 */

extern unsigned int mflr(void);

extern __inline__ unsigned int mflr(void)
{
	unsigned int result;
	__asm__ volatile("mflr %0" : "=r" (result));
	return result;
}

extern unsigned int mfpvr(void);

extern __inline__ unsigned int mfpvr(void)
{
	unsigned int result;
	__asm__ ("mfpvr %0" : "=r" (result));
	return result;
}

/* mtmsr may need syncs etc. around it, so no simple
 * inline macro is provided
 */

extern unsigned int mfmsr(void);

extern __inline__ unsigned int mfmsr(void)
{
	unsigned int result;
	__asm__ volatile("mfmsr %0" : "=r" (result));
	return result;
}

/* mtsr and mfsr must be macros since the SR number must be hardcoded */

#if __ELF__
#define mtsr(SR, REG) \
	__asm__ volatile("sync" __CASMNL__ "mtsr %0, %1 " __CASMNL__ "isync" : : "i" (SR), "r" (REG));
#define mfsr(REG, SR) \
	__asm__ volatile("mfsr %0, %1" : "=r" (REG) : "i" (SR));
#else
#define mtsr(SR, REG) \
	__asm__ volatile("sync" __CASMNL__ "mtsr sr%0, %1 " __CASMNL__ "isync" : : "i" (SR), "r" (REG));

#define mfsr(REG, SR) \
	__asm__ volatile("mfsr %0, sr%1" : "=r" (REG) : "i" (SR));
#endif

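/* Illustrative sketch (not in the original header): reading and writing
 * segment register 14 with the hardcoded-number macros above:
 *
 *	unsigned int sr_val;
 *	mfsr(sr_val, 14);			// read SR 14 into sr_val
 *	mtsr(14, sr_val | SEG_REG_PROT);	// write it back with Ku set
 */
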
extern void mtsrin(unsigned int val, unsigned int reg);

extern __inline__ void mtsrin(unsigned int val, unsigned int reg)
{
	__asm__ volatile("sync" __CASMNL__ "mtsrin %0, %1" __CASMNL__ " isync" : : "r" (val), "r" (reg));
	return;
}

extern unsigned int mfsrin(unsigned int reg);

extern __inline__ unsigned int mfsrin(unsigned int reg)
{
	unsigned int result;
	__asm__ volatile("mfsrin %0, %1" : "=r" (result) : "r" (reg));
	return result;
}

extern void mtsdr1(unsigned int val);

extern __inline__ void mtsdr1(unsigned int val)
{
	__asm__ volatile("mtsdr1 %0" : : "r" (val));
	return;
}

extern void mtdar(unsigned int val);

extern __inline__ void mtdar(unsigned int val)
{
	__asm__ volatile("mtdar %0" : : "r" (val));
	return;
}

extern unsigned int mfdar(void);

extern __inline__ unsigned int mfdar(void)
{
	unsigned int result;
	__asm__ volatile("mfdar %0" : "=r" (result));
	return result;
}

extern void mtdec(unsigned int val);

extern __inline__ void mtdec(unsigned int val)
{
	__asm__ volatile("mtdec %0" : : "r" (val));
	return;
}

extern int isync_mfdec(void);

extern __inline__ int isync_mfdec(void)
{
	int result;
	__asm__ volatile("isync" __CASMNL__ "mfdec %0" : "=r" (result));
	return result;
}

/* Read and write the value of the real-time clock
 * or time base registers. Note that you must use
 * the right ones depending on whether you are on a
 * 601 or a 603/604. Handling carries between the
 * words, and choosing the right registers, is up
 * to the calling function.
 */

extern void mttb(unsigned int val);

extern __inline__ void mttb(unsigned int val)
{
	__asm__ volatile("mtspr tbl, %0" : : "r" (val));
	return;
}

extern unsigned int mftb(void);

extern __inline__ unsigned int mftb(void)
{
	unsigned int result;
	__asm__ volatile("mftb %0" : "=r" (result));
	return result;
}

extern void mttbu(unsigned int val);

extern __inline__ void mttbu(unsigned int val)
{
	__asm__ volatile("mtspr tbu, %0" : : "r" (val));
	return;
}

extern unsigned int mftbu(void);

extern __inline__ unsigned int mftbu(void)
{
	unsigned int result;
	__asm__ volatile("mftbu %0" : "=r" (result));
	return result;
}

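/* Illustrative sketch (not in the original header): the carry-safe
 * 64-bit time base read that the comment above alludes to, for
 * 603/604-class processors; reread the upper word until it is stable
 * across the lower-word read:
 *
 *	unsigned int hi, lo;
 *	do {
 *		hi = mftbu();
 *		lo = mftb();
 *	} while (hi != mftbu());
 *	// (hi, lo) now form a consistent 64-bit time base value
 */
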
extern void mtrtcl(unsigned int val);

extern __inline__ void mtrtcl(unsigned int val)
{
	__asm__ volatile("mtspr 21,%0" : : "r" (val));
	return;
}

extern unsigned int mfrtcl(void);

extern __inline__ unsigned int mfrtcl(void)
{
	unsigned int result;
	__asm__ volatile("mfspr %0,5" : "=r" (result));
	return result;
}

extern void mtrtcu(unsigned int val);

extern __inline__ void mtrtcu(unsigned int val)
{
	__asm__ volatile("mtspr 20,%0" : : "r" (val));
	return;
}

extern unsigned int mfrtcu(void);

extern __inline__ unsigned int mfrtcu(void)
{
	unsigned int result;
	__asm__ volatile("mfspr %0,4" : "=r" (result));
	return result;
}

extern void mtl2cr(unsigned int val);

extern __inline__ void mtl2cr(unsigned int val)
{
	__asm__ volatile("mtspr l2cr, %0" : : "r" (val));
	return;
}

extern unsigned int mfl2cr(void);

extern __inline__ unsigned int mfl2cr(void)
{
	unsigned int result;
	__asm__ volatile("mfspr %0, l2cr" : "=r" (result));
	return result;
}

extern unsigned int cntlzw(unsigned int num);

extern __inline__ unsigned int cntlzw(unsigned int num)
{
	unsigned int result;
	__asm__ volatile("cntlzw %0, %1" : "=r" (result) : "r" (num));
	return result;
}


/* Functions for doing byte-reversed loads and stores */

extern unsigned int lwbrx(unsigned int addr);

extern __inline__ unsigned int lwbrx(unsigned int addr)
{
	unsigned int result;
	__asm__ volatile("lwbrx %0, 0, %1" : "=r" (result) : "r" (addr));
	return result;
}

extern void stwbrx(unsigned int data, unsigned int addr);

extern __inline__ void stwbrx(unsigned int data, unsigned int addr)
{
	__asm__ volatile("stwbrx %0, 0, %1" : : "r" (data), "r" (addr));
}

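/* Illustrative sketch (not in the original header): byte-reversed
 * accesses are handy for little-endian device registers; the address
 * below is hypothetical:
 *
 *	unsigned int le_reg = 0xF0000000;	// hypothetical LE device register
 *	unsigned int v = lwbrx(le_reg);		// load, swapping byte order
 *	stwbrx(v | 1, le_reg);			// store back, swapped again
 */
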
/* Performance Monitor Register access routines */
extern unsigned long mfmmcr0(void);
extern void mtmmcr0(unsigned long);
extern unsigned long mfmmcr1(void);
extern void mtmmcr1(unsigned long);
extern unsigned long mfmmcr2(void);
extern void mtmmcr2(unsigned long);
extern unsigned long mfpmc1(void);
extern void mtpmc1(unsigned long);
extern unsigned long mfpmc2(void);
extern void mtpmc2(unsigned long);
extern unsigned long mfpmc3(void);
extern void mtpmc3(unsigned long);
extern unsigned long mfpmc4(void);
extern void mtpmc4(unsigned long);
extern unsigned long mfsia(void);
extern unsigned long mfsda(void);

/* Macros, since the argument n is a hard-coded constant */

#define mtibatu(n, reg) __asm__ volatile("mtibatu " # n ", %0" : : "r" (reg))
#define mtibatl(n, reg) __asm__ volatile("mtibatl " # n ", %0" : : "r" (reg))

#define mtdbatu(n, reg) __asm__ volatile("mtdbatu " # n ", %0" : : "r" (reg))
#define mtdbatl(n, reg) __asm__ volatile("mtdbatl " # n ", %0" : : "r" (reg))

#define mfibatu(reg, n) __asm__ volatile("mfibatu %0, " # n : "=r" (reg))
#define mfibatl(reg, n) __asm__ volatile("mfibatl %0, " # n : "=r" (reg))

#define mfdbatu(reg, n) __asm__ volatile("mfdbatu %0, " # n : "=r" (reg))
#define mfdbatl(reg, n) __asm__ volatile("mfdbatl %0, " # n : "=r" (reg))

#define mtsprg(n, reg)	__asm__ volatile("mtsprg " # n ", %0" : : "r" (reg))
#define mfsprg(reg, n)	__asm__ volatile("mfsprg %0, " # n : "=r" (reg))

#define mtspr(spr, val)	__asm__ volatile("mtspr " # spr ", %0" : : "r" (val))
#define mfspr(reg, spr)	__asm__ volatile("mfspr %0, " # spr : "=r" (reg))

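/* Illustrative sketch (not in the original header): the stringified
 * constant is pasted straight into the instruction, so the SPR or BAT
 * number must be a literal:
 *
 *	unsigned int batval, scratch;
 *	mfdbatu(batval, 0);	// read upper DBAT0
 *	mfsprg(scratch, 3);	// read SPRG3
 *	mtspr(272, scratch);	// 272 == SPRG0, by way of example
 */
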
#endif /* __GNUC__ */
#endif /* !ASSEMBLER */

#endif /* _PPC_PROC_REG_H_ */