]> git.saurik.com Git - apple/xnu.git/blob - osfmk/ppc/proc_reg.h
68fb49dba2d78f1a78a3c5816941e46de4aeed4e
[apple/xnu.git] / osfmk / ppc / proc_reg.h
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25
#ifndef _PPC_PROC_REG_H_
#define _PPC_PROC_REG_H_

#include <mach/boolean.h>

/* Define some useful masks that convert from bit numbers.
 *
 * PowerPC numbers bits big-endian style: bit 0 is the most significant
 * bit of the word, so a bit number must be flipped relative to the word
 * width before it can be used as a shift count.
 *
 * NOTE(review): for val == 0, size == 32 this expands to (1 << 31),
 * which overflows a signed int (undefined behavior in strict C); it
 * relies on the compiler producing the obvious two's-complement value.
 * Left as-is because these macros may also be expanded in assembly
 * sources, where a 1U suffix could break the assembler -- confirm
 * before changing.
 */

#if __PPC__
#if _BIG_ENDIAN
#ifndef ENDIAN_MASK
#define ENDIAN_MASK(val,size) (1 << ((size-1) - val))
#endif
#else
#error code not ported to little endian targets yet
#endif /* _BIG_ENDIAN */
#endif /* __PPC__ */

/* Build a single-bit mask from a <NAME>_BIT bit-number define, for a
 * 32-, 16-, or 8-bit wide register respectively. */
#define MASK32(PART)	ENDIAN_MASK(PART ## _BIT, 32)
#define MASK16(PART)	ENDIAN_MASK(PART ## _BIT, 16)
#define MASK8(PART)	ENDIAN_MASK(PART ## _BIT, 8)

/* Default MASK() operates on 32-bit registers */
#undef MASK
#define MASK(PART)	MASK32(PART)

#define BITS_PER_WORD	32
#define BITS_PER_WORD_POW2 5
52
/* Defines for decoding the MSR bits.
 * Bit numbers use PowerPC (big-endian) numbering: bit 0 is the MSB.
 * Convert a bit number to a mask with MASK(MSR_xxx).
 */

#define MSR_SF_BIT	0
#define MSR_RES1_BIT	1
#define MSR_RES2_BIT	2
#define MSR_RES3_BIT	3
#define MSR_RES4_BIT	4
#define MSR_RES5_BIT	5
#define MSR_VEC_BIT	6
#define MSR_RES7_BIT	7
#define MSR_RES8_BIT	8
#define MSR_RES9_BIT	9
#define MSR_RES10_BIT	10
#define MSR_RES11_BIT	11
#define MSR_KEY_BIT	12	/* Key bit on 603e (not on 603) */
#define MSR_POW_BIT	13
#define MSR_TGPR_BIT	14	/* Temporary GPR mappings on 603/603e */
#define MSR_ILE_BIT	15
#define MSR_EE_BIT	16
#define MSR_PR_BIT	17
#define MSR_FP_BIT	18
#define MSR_ME_BIT	19
#define MSR_FE0_BIT	20
#define MSR_SE_BIT	21
#define MSR_BE_BIT	22
#define MSR_FE1_BIT	23
#define MSR_RES24_BIT	24	/* AL bit in power architectures */
#define MSR_IP_BIT	25
#define MSR_IR_BIT	26
#define MSR_DR_BIT	27
#define MSR_RES28_BIT	28
#define MSR_PM_BIT	29
#define MSR_RI_BIT	30
#define MSR_LE_BIT	31

/* MSR for kernel mode, interrupts disabled, running in virtual mode */
#define MSR_SUPERVISOR_INT_OFF (MASK(MSR_ME) | MASK(MSR_IR) | MASK(MSR_DR))

/* MSR for above but with interrupts enabled */
#define MSR_SUPERVISOR_INT_ON (MSR_SUPERVISOR_INT_OFF | MASK(MSR_EE))

/* MSR for physical mode code */
#define MSR_VM_OFF     (MASK(MSR_ME))

/* MSR for physical instruction, virtual data */
#define MSR_PHYS_INST_VIRT_DATA     (MASK(MSR_ME) | MASK(MSR_IR))

/* MSR mask for user-exported bits - identify bits that must be set/reset */

/* SET - external exceptions, machine check, vm on, user-level privs */
#define MSR_EXPORT_MASK_SET	(MASK(MSR_EE)| MASK(MSR_ME)| \
				 MASK(MSR_IR)|MASK(MSR_DR)|MASK(MSR_PR))

/* only the following bits may be changed by a task */
#define MSR_IMPORT_BITS (MASK(MSR_FE0)|MASK(MSR_SE)|MASK(MSR_BE)| \
			 MASK(MSR_FE1)| MASK(MSR_PM) | MASK(MSR_LE))

/* Merge the task-importable bits of newmsr into origmsr.
 * Arguments are fully parenthesized so callers may pass arbitrary
 * expressions (e.g. a|b) without precedence surprises.
 */
#define MSR_PREPARE_FOR_IMPORT(origmsr, newmsr) \
	(((origmsr) & ~MSR_IMPORT_BITS) | ((newmsr) & MSR_IMPORT_BITS))

#define MSR_VEC_ON	(MASK(MSR_VEC))

/* TRUE if the given MSR value has the problem-state (PR) bit set,
 * i.e. the saved context came from user mode.  Argument parenthesized
 * for macro hygiene.
 */
#define USER_MODE(msr) (((msr) & MASK(MSR_PR)) ? TRUE : FALSE)
116
/* seg reg values must be simple expressions so that assembler can cope */
#define SEG_REG_INVALID 0x0000
#define KERNEL_SEG_REG0_VALUE 0x20000000 /* T=0,Ks=0,Ku=1 PPC_SID_KERNEL=0*/

/* the following segment register values are only used prior to the probe,
 * they map the various device areas 1-1 on 601 machines
 */
#define KERNEL_SEG_REG5_VALUE 0xa7F00005 /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=5 */
#define KERNEL_SEG_REG8_VALUE 0xa7F00008 /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=8 */
#define KERNEL_SEG_REG9_VALUE 0xa7F00009 /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=9 */
#define KERNEL_SEG_REG10_VALUE 0xa7F0000a /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=a */
#define KERNEL_SEG_REG11_VALUE 0xa7F0000b /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=b */
#define KERNEL_SEG_REG12_VALUE 0xa7F0000c /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=c */
#define KERNEL_SEG_REG13_VALUE 0xa7F0000d /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=d */
#define KERNEL_SEG_REG14_VALUE 0xa7F0000e /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=e */
#define KERNEL_SEG_REG15_VALUE 0xa7F0000f /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=f */

/* For SEG_REG_PROT we have T=0, Ks=0, Ku=1 */
#define SEG_REG_PROT	0x20000000 /* seg regs should have these bits set */

/* SR_COPYIN is used for copyin/copyout+remapping and must be
 * saved and restored in the thread context.
 */
/* SR_UNUSED_BY_KERN is unused by the kernel, and thus contains
 * the space ID of the currently interrupted user task immediately
 * after an exception and before interrupts are reenabled. It's used
 * purely for an assert.
 */

/* SR_KERNEL used for asserts... */

/* Symbolic (assembler) names for the dedicated segment registers,
 * and their numeric indices for use with the mtsr/mfsr macros below. */
#define SR_COPYIN	sr14
#define SR_UNUSED_BY_KERN sr13
#define SR_KERNEL	sr0

#define SR_UNUSED_BY_KERN_NUM 13
#define SR_COPYIN_NAME	sr14
#define SR_COPYIN_NUM	14
155
156
/* DSISR bits on data access exceptions */

#define DSISR_IO_BIT		0	/* NOT USED on 601 */
#define DSISR_HASH_BIT		1
#define DSISR_PROT_BIT		4
#define DSISR_IO_SPC_BIT	5
#define DSISR_WRITE_BIT		6
#define DSISR_WATCH_BIT		9
#define DSISR_EIO_BIT		11

/* SRR1 bits on data/instruction translation exceptions */

#define SRR1_TRANS_HASH_BIT	1
#define SRR1_TRANS_IO_BIT	3
#define SRR1_TRANS_PROT_BIT	4
#define SRR1_TRANS_NO_PTE_BIT	10

/* SRR1 bits on program exceptions */

#define SRR1_PRG_FE_BIT		11	/* floating-point enabled exception */
#define SRR1_PRG_ILL_INS_BIT	12	/* illegal instruction */
#define SRR1_PRG_PRV_INS_BIT	13	/* privileged instruction in user mode */
#define SRR1_PRG_TRAP_BIT	14	/* trap instruction */

/* BAT information */

/* Constants used when setting mask values */

#define BAT_INVALID 0

/*
 * Virtual to physical mapping macros/structures.
 * IMPORTANT NOTE: there is one mapping per HW page, not per MACH page.
 */

#define CACHE_LINE_SIZE	32	/* bytes; CACHE_LINE_POW2 is its log2 */
#define CACHE_LINE_POW2	5
/* Round x up to the next cache-line boundary */
#define cache_align(x)	(((x) + CACHE_LINE_SIZE-1) & ~(CACHE_LINE_SIZE - 1))

/* Bit numbers within the PTE words, duplicated here because assembler
 * code cannot use the bitfield structures below. */
#define PTE1_WIMG_GUARD_BIT	28	/* Needed for assembler */
#define PTE1_REFERENCED_BIT	23	/* ditto */
#define PTE1_CHANGED_BIT	24
#define PTE0_HASH_ID_BIT	25

#define PPC_HASHSIZE		2048	/* size of hash table */
#define PPC_HASHSIZE_LOG2	11
#define PPC_MIN_MPP		2	/* min # of mappings per phys page */

/* macros to help decide processor type: version numbers as reported by
 * the processor version register (presumably its upper halfword; cf.
 * mfpvr() below -- confirm against callers). */
#define PROCESSOR_VERSION_601		1
#define PROCESSOR_VERSION_603		3
#define PROCESSOR_VERSION_604		4
#define PROCESSOR_VERSION_603e		6
#define PROCESSOR_VERSION_750		8
#define PROCESSOR_VERSION_604e		9
#define PROCESSOR_VERSION_604ev		10	/* ? */
#define PROCESSOR_VERSION_7400		12	/* ? */
#define PROCESSOR_VERSION_7410		0x800C	/* ? */
#define PROCESSOR_VERSION_7450		0x8000	/* ? */
216
#ifndef ASSEMBLER
#ifdef __GNUC__

/* The bitfield layouts below assume big-endian bit ordering within a
 * word; refuse to build on little-endian targets. */
#if _BIG_ENDIAN == 0
#error - bitfield structures are not checked for bit ordering in words
#endif /* _BIG_ENDIAN */

/* Structures and types for machine registers */

/* SDR1, the page table base register:
 *   htaborg  - high-order bits of the physical page table base address
 *   htabmask - mask applied to the hash, determines the table size
 */
typedef union {
	unsigned int word;
	struct {
		unsigned int htaborg : 16;
		unsigned int reserved : 7;
		unsigned int htabmask : 9;
	} bits;
} sdr1_t;
234
/* Block mapping registers. These values are model dependent.
 * Eventually, we will need to up these to 64 bit values.
 */

#define blokValid 0x1FFE0000	/* valid size-mask bits of a BAT upper word */
#define batMin 0x00020000	/* minimum block size: 128KB */
#define batMax 0x10000000	/* maximum block size: 256MB */
#define batICnt 4		/* number of instruction BAT pairs */
#define batDCnt 4		/* number of data BAT pairs */
244
/* BAT register structures.
 * Not used for standard mappings, but may be used
 * for mapping devices. Note that the 601 has a
 * different BAT layout than the other PowerPC processors
 */

/* 601 upper BAT word */
typedef union {
	unsigned int word;
	struct {
		unsigned int blpi : 15;		/* block logical page index */
		unsigned int reserved : 10;
		unsigned int wim : 3;		/* cache attribute bits */
		unsigned int ks : 1;		/* supervisor key */
		unsigned int ku : 1;		/* user key */
		unsigned int pp : 2;		/* page protection */
	} bits;
} bat601u_t;

/* 601 lower BAT word */
typedef union {
	unsigned int word;
	struct {
		unsigned int pbn : 15;		/* physical block number */
		unsigned int reserved : 10;
		unsigned int valid : 1;
		unsigned int bsm : 6;		/* block size mask */
	} bits;
} bat601l_t;

/* A 601 BAT register pair */
typedef struct bat601_t {
	bat601u_t upper;
	bat601l_t lower;
} bat601_t;

/* Upper BAT word, non-601 processors */
typedef union {
	unsigned int word;
	struct {
		unsigned int bepi : 15;		/* block effective page index */
		unsigned int reserved : 4;
		unsigned int bl : 11;		/* block length mask */
		unsigned int vs : 1;		/* supervisor-state valid */
		unsigned int vp : 1;		/* problem(user)-state valid */
	} bits;
} batu_t;

/* Lower BAT word, non-601 processors */
typedef union {
	unsigned int word;
	struct {
		unsigned int brpn : 15;		/* block real page number */
		unsigned int reserved : 10;
		unsigned int wimg : 4;		/* cache/guard attribute bits */
		unsigned int reserved2 : 1;
		unsigned int pp : 2;		/* page protection */
	} bits;
} batl_t;

/* A BAT register pair, non-601 processors */
typedef struct bat_t {
	batu_t upper;
	batl_t lower;
} bat_t;
304
/* PTE entries
 * Used extensively for standard mappings
 */

/* pte0: first word of a PTE.  Two overlapping views: "bits" is the
 * architectural layout, "hash_bits" re-splits the segment id to expose
 * only the low 19 bits that participate in the hash function. */
typedef union {
	unsigned int word;
	struct {
		unsigned int valid : 1;
		unsigned int segment_id : 24;
		unsigned int hash_id : 1;
		unsigned int page_index : 6; /* Abbreviated */
	} bits;
	struct {
		unsigned int valid : 1;
		unsigned int not_used : 5;
		unsigned int segment_id : 19; /* Least Sig 19 bits */
		unsigned int hash_id : 1;
		unsigned int page_index : 6;
	} hash_bits;
} pte0_t;

/* pte1: second word of a PTE: physical page number plus the
 * referenced/changed status bits and WIMG/protection attributes. */
typedef union {
	unsigned int word;
	struct {
		unsigned int phys_page : 20;
		unsigned int reserved3 : 3;
		unsigned int referenced : 1;
		unsigned int changed : 1;
		unsigned int wimg : 4;
		unsigned int reserved1 : 1;
		unsigned int protection : 2;
	} bits;
} pte1_t;

typedef struct pte_t {
	pte0_t pte0;
	pte1_t pte1;
} pte_t;

#define PTE_NULL	((pte_t*) NULL) /* No pte found/associated with this */
#define PTE_EMPTY	0x7fffffbf	/* Value in the pte0.word of a free pte */

/* WIMG attribute encodings (W=writethru, I=cache-inhibit,
 * M=memory coherent, G=guarded) */
#define PTE_WIMG_CB_CACHED			0	/* cached, writeback */
#define PTE_WIMG_CB_CACHED_GUARDED		1	/* cached, writeback, guarded */
#define PTE_WIMG_CB_CACHED_COHERENT		2	/* cached, writeback, coherent (default) */
#define PTE_WIMG_CB_CACHED_COHERENT_GUARDED	3	/* cached, writeback, coherent, guarded */
#define PTE_WIMG_UNCACHED			4	/* uncached */
#define PTE_WIMG_UNCACHED_GUARDED		5	/* uncached, guarded */
#define PTE_WIMG_UNCACHED_COHERENT		6	/* uncached, coherent */
#define PTE_WIMG_UNCACHED_COHERENT_GUARDED	7	/* uncached, coherent, guarded */
#define PTE_WIMG_WT_CACHED			8	/* cached, writethru */
#define PTE_WIMG_WT_CACHED_GUARDED		9	/* cached, writethru, guarded */
#define PTE_WIMG_WT_CACHED_COHERENT		10	/* cached, writethru, coherent */
#define PTE_WIMG_WT_CACHED_COHERENT_GUARDED	11	/* cached, writethru, coherent, guarded */

#define PTE_WIMG_DEFAULT	PTE_WIMG_CB_CACHED_COHERENT
#define PTE_WIMG_IO		PTE_WIMG_UNCACHED_COHERENT_GUARDED
362
/*
 * A virtual address is decoded into various parts when looking for its PTE
 */

/* Full 32-bit virtual address: segment / page index / byte offset */
typedef struct va_full_t {
	unsigned int seg_num : 4;
	unsigned int page_index : 16;
	unsigned int byte_ofs : 12;
} va_full_t;

/* Same address exposing only the abbreviated (6-bit) page index that is
 * stored in a PTE; "junk" covers the remaining page-index bits. */
typedef struct va_abbrev_t { /* use bits.abbrev for abbreviated page index */
	unsigned int seg_num : 4;
	unsigned int page_index : 6;
	unsigned int junk : 10;
	unsigned int byte_ofs : 12;
} va_abbrev_t;

typedef union {
	unsigned int word;
	va_full_t full;
	va_abbrev_t abbrev;
} virtual_addr_t;

/* A physical address can be split up into page and offset */

typedef struct pa_t {
	unsigned int page_no : 20;
	unsigned int offset : 12;
} pa_t;

typedef union {
	unsigned int word;
	pa_t bits;
} physical_addr_t;
397
/*
 * C-helper inline functions for accessing machine registers follow.
 */


/* Statement separator used inside multi-instruction asm strings: ELF
 * assemblers use ';', the traditional (Mach-O) assembler uses '@'. */
#ifdef __ELF__
#define __CASMNL__	";"
#else
#define __CASMNL__ "@"
#endif

/* Return the current GOT pointer */

extern unsigned int get_got(void);

/* Read the GOT pointer kept in r2; the register is spelled "r2" or
 * bare "2" depending on the assembler dialect. */
extern __inline__ unsigned int get_got(void)
{
	unsigned int result;
#ifndef __ELF__
	__asm__ volatile("mr %0, r2" : "=r" (result));
#else
	__asm__ volatile("mr %0, 2" : "=r" (result));
#endif
	return result;
}
423
/*
 * Various memory/IO synchronisation instructions
 */

/* Use eieio as a memory barrier to order stores.
 * Useful for device control and PTE maintenance.
 */

#define eieio() \
	__asm__ volatile("eieio")

/* Use sync to ensure previous stores have completed.
   This is required when manipulating locks and/or
   maintaining PTEs or other shared structures on SMP
   machines.
*/

#define sync() \
	__asm__ volatile("sync")

/* Use isync to synchronize context; that is, to ensure
   no prefetching of instructions happens before the
   instruction.
*/

#define isync() \
	__asm__ volatile("isync")


/*
 * This guy will make sure all tlbs on all processors finish their tlbies
 */
#define tlbsync() \
	__asm__ volatile("tlbsync")
458
459
/* Invalidate TLB entry. Caution, requires context synchronization.
 */
extern void tlbie(unsigned int val);

/* Issue tlbie with the given effective address; the caller must supply
 * the surrounding sync/tlbsync sequence (see macros above). */
extern __inline__ void tlbie(unsigned int val)
{
	__asm__ volatile("tlbie %0" : : "r" (val));
	return;
}
469
470
471
/*
 * Access to various system registers
 */

extern unsigned int mflr(void);

/* Read the link register */
extern __inline__ unsigned int mflr(void)
{
	unsigned int result;
	__asm__ volatile("mflr %0" : "=r" (result));
	return result;
}

extern unsigned int mfpvr(void);

/* Read the processor version register.  Deliberately not volatile:
 * the PVR never changes, so the compiler may combine repeated reads. */
extern __inline__ unsigned int mfpvr(void)
{
	unsigned int result;
	__asm__ ("mfpvr %0" : "=r" (result));
	return result;
}

/* mtmsr might need syncs etc around it, don't provide simple
 * inline macro
 */

extern unsigned int mfmsr(void);

/* Read the machine state register */
extern __inline__ unsigned int mfmsr(void)
{
	unsigned int result;
	__asm__ volatile("mfmsr %0" : "=r" (result));
	return result;
}
506
/* mtsr and mfsr must be macros since SR must be hardcoded */

/* Write/read segment register SR, a compile-time constant index.
 * The ELF assembler takes a bare number, the Mach-O assembler wants
 * the srN symbolic form.  mtsr is bracketed with sync/isync because
 * changing a segment register requires context synchronization. */
#if __ELF__
#define mtsr(SR, REG) \
	__asm__ volatile("sync" __CASMNL__ "mtsr %0, %1 " __CASMNL__ "isync" : : "i" (SR), "r" (REG));
#define mfsr(REG, SR) \
	__asm__ volatile("mfsr %0, %1" : "=r" (REG) : "i" (SR));
#else
#define mtsr(SR, REG) \
	__asm__ volatile("sync" __CASMNL__ "mtsr sr%0, %1 " __CASMNL__ "isync" : : "i" (SR), "r" (REG));

#define mfsr(REG, SR) \
	__asm__ volatile("mfsr %0, sr%1" : "=r" (REG) : "i" (SR));
#endif
521
522
extern void mtsrin(unsigned int val, unsigned int reg);

/* Write the segment register selected by reg (register-indirect form),
 * bracketed with sync/isync for context synchronization. */
extern __inline__ void mtsrin(unsigned int val, unsigned int reg)
{
	__asm__ volatile("sync" __CASMNL__ "mtsrin %0, %1" __CASMNL__ " isync" : : "r" (val), "r" (reg));
	return;
}

extern unsigned int mfsrin(unsigned int reg);

/* Read the segment register selected by reg (register-indirect form) */
extern __inline__ unsigned int mfsrin(unsigned int reg)
{
	unsigned int result;
	__asm__ volatile("mfsrin %0, %1" : "=r" (result) : "r" (reg));
	return result;
}

extern void mtsdr1(unsigned int val);

/* Write SDR1, the page table base register (see sdr1_t above) */
extern __inline__ void mtsdr1(unsigned int val)
{
	__asm__ volatile("mtsdr1 %0" : : "r" (val));
	return;
}

extern void mtdar(unsigned int val);

/* Write the data address register */
extern __inline__ void mtdar(unsigned int val)
{
	__asm__ volatile("mtdar %0" : : "r" (val));
	return;
}

extern unsigned int mfdar(void);

/* Read the data address register */
extern __inline__ unsigned int mfdar(void)
{
	unsigned int result;
	__asm__ volatile("mfdar %0" : "=r" (result));
	return result;
}

extern void mtdec(unsigned int val);

/* Write the decrementer register */
extern __inline__ void mtdec(unsigned int val)
{
	__asm__ volatile("mtdec %0" : : "r" (val));
	return;
}

extern int isync_mfdec(void);

/* Read the decrementer, with a leading isync so the read cannot be
 * moved ahead of preceding instructions. */
extern __inline__ int isync_mfdec(void)
{
	int result;
	__asm__ volatile("isync" __CASMNL__ "mfdec %0" : "=r" (result));
	return result;
}
581
/* Read and write the value from the real-time clock
 * or time base registers. Note that you have to
 * use the right ones depending upon being on
 * 601 or 603/604. Care about carries between
 * the words and using the right registers must be
 * done by the calling function.
 */

extern void mttb(unsigned int val);

/* Write the time base lower word */
extern __inline__ void mttb(unsigned int val)
{
	__asm__ volatile("mtspr tbl, %0" : : "r" (val));
	return;
}

extern unsigned int mftb(void);

/* Read the time base lower word */
extern __inline__ unsigned int mftb(void)
{
	unsigned int result;
	__asm__ volatile("mftb %0" : "=r" (result));
	return result;
}

extern void mttbu(unsigned int val);

/* Write the time base upper word */
extern __inline__ void mttbu(unsigned int val)
{
	__asm__ volatile("mtspr tbu, %0" : : "r" (val));
	return;
}

extern unsigned int mftbu(void);

/* Read the time base upper word */
extern __inline__ unsigned int mftbu(void)
{
	unsigned int result;
	__asm__ volatile("mftbu %0" : "=r" (result));
	return result;
}

extern void mtrtcl(unsigned int val);

/* Write the 601 real-time clock lower word (write SPR 21) */
extern __inline__ void mtrtcl(unsigned int val)
{
	__asm__ volatile("mtspr 21,%0" : : "r" (val));
	return;
}

extern unsigned int mfrtcl(void);

/* Read the 601 real-time clock lower word (read SPR 5) */
extern __inline__ unsigned int mfrtcl(void)
{
	unsigned int result;
	__asm__ volatile("mfspr %0,5" : "=r" (result));
	return result;
}

extern void mtrtcu(unsigned int val);

/* Write the 601 real-time clock upper word (write SPR 20) */
extern __inline__ void mtrtcu(unsigned int val)
{
	__asm__ volatile("mtspr 20,%0" : : "r" (val));
	return;
}

extern unsigned int mfrtcu(void);

/* Read the 601 real-time clock upper word (read SPR 4) */
extern __inline__ unsigned int mfrtcu(void)
{
	unsigned int result;
	__asm__ volatile("mfspr %0,4" : "=r" (result));
	return result;
}
657
extern void mtl2cr(unsigned int val);

/* Write the L2 cache control register */
extern __inline__ void mtl2cr(unsigned int val)
{
	__asm__ volatile("mtspr l2cr, %0" : : "r" (val));
	return;
}

extern unsigned int mfl2cr(void);

/* Read the L2 cache control register */
extern __inline__ unsigned int mfl2cr(void)
{
	unsigned int result;
	__asm__ volatile("mfspr %0, l2cr" : "=r" (result));
	return result;
}

extern unsigned int cntlzw(unsigned int num);

/* Count the leading zero bits of num (the cntlzw instruction) */
extern __inline__ unsigned int cntlzw(unsigned int num)
{
	unsigned int result;
	__asm__ volatile("cntlzw %0, %1" : "=r" (result) : "r" (num));
	return result;
}
683
684
/* functions for doing byte reversed loads and stores */

extern unsigned int lwbrx(unsigned int addr);

/* Load the word at addr with its bytes reversed */
extern __inline__ unsigned int lwbrx(unsigned int addr)
{
	unsigned int result;
	__asm__ volatile("lwbrx %0, 0, %1" : "=r" (result) : "r" (addr));
	return result;
}

extern void stwbrx(unsigned int data, unsigned int addr);

/* Store data at addr with its bytes reversed */
extern __inline__ void stwbrx(unsigned int data, unsigned int addr)
{
	__asm__ volatile("stwbrx %0, 0, %1" : : "r" (data), "r" (addr));
}
702
/* Performance Monitor Register access routines.
 * Declarations only; the definitions live elsewhere.  mmcr0-2 are the
 * monitor control registers, pmc1-4 the event counters, and sia/sda
 * the sampled instruction/data address registers. */
extern unsigned long   mfmmcr0(void);
extern void			mtmmcr0(unsigned long);
extern unsigned long   mfmmcr1(void);
extern void			mtmmcr1(unsigned long);
extern unsigned long   mfmmcr2(void);
extern void			mtmmcr2(unsigned long);
extern unsigned long   mfpmc1(void);
extern void			mtpmc1(unsigned long);
extern unsigned long   mfpmc2(void);
extern void			mtpmc2(unsigned long);
extern unsigned long   mfpmc3(void);
extern void			mtpmc3(unsigned long);
extern unsigned long   mfpmc4(void);
extern void			mtpmc4(unsigned long);
extern unsigned long   mfsia(void);
extern unsigned long   mfsda(void);
720
/* macros since the argument n is a hard-coded constant */

/* Write/read the instruction and data BAT registers; n must be a
 * literal index because it is pasted into the instruction mnemonic. */
#define mtibatu(n, reg) __asm__ volatile("mtibatu " # n ", %0" : : "r" (reg))
#define mtibatl(n, reg) __asm__ volatile("mtibatl " # n ", %0" : : "r" (reg))

#define mtdbatu(n, reg) __asm__ volatile("mtdbatu " # n ", %0" : : "r" (reg))
#define mtdbatl(n, reg) __asm__ volatile("mtdbatl " # n ", %0" : : "r" (reg))

#define mfibatu(reg, n) __asm__ volatile("mfibatu %0, " # n : "=r" (reg))
#define mfibatl(reg, n) __asm__ volatile("mfibatl %0, " # n : "=r" (reg))

#define mfdbatu(reg, n) __asm__ volatile("mfdbatu %0, " # n : "=r" (reg))
#define mfdbatl(reg, n) __asm__ volatile("mfdbatl %0, " # n : "=r" (reg))

/* Write/read the SPRGn scratch registers; n is a literal index */
#define mtsprg(n, reg)	__asm__ volatile("mtsprg  " # n ", %0" : : "r" (reg))
#define mfsprg(reg, n)	__asm__ volatile("mfsprg	%0, " # n : "=r" (reg))

/* Write/read an arbitrary SPR; spr is a literal name or number */
#define mtspr(spr, val)	__asm__ volatile("mtspr	" # spr ", %0" : : "r" (val))
#define mfspr(reg, spr)	__asm__ volatile("mfspr	%0, " # spr : "=r" (reg))

#endif /* __GNUC__ */
#endif /* !ASSEMBLER */

#endif /* _PPC_PROC_REG_H_ */