/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <ppc/exception.h>
#include <assym.s>

/* These routines run in 32- or 64-bit addressing, and handle both
 * 32-byte and 128-byte cache lines.  They do not use compare instructions
 * on addresses, since compares are 32/64-bit-mode-specific.
 */

#define	kDcbf	0x1
#define	kDcbfb	31
#define	kDcbi	0x2
#define	kDcbib	30
#define	kIcbi	0x4
#define	kIcbib	29
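
// (Descriptive note, not in the original:) kDcbf, kDcbi, and kIcbi are
// op-select bits passed in r0, and kDcbfb, kDcbib, and kIcbib are the
// corresponding bit numbers in IBM bit order (kDcbf = 0x1 is bit 31, etc).
// The common code moves these bits into CR7 with "mtcrf 0x01,r0" so each
// operation can be tested cheaply with bt/bf.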

/*
 * extern void flush_dcache(vm_offset_t addr, unsigned count, boolean phys);
 * extern void flush_dcache64(addr64_t addr, unsigned count, boolean phys);
 *
 * flush_dcache takes a virtual or physical address and a byte count to
 * flush; it can be called for ranges spanning multiple virtual pages.
 *
 * It flushes (writes back) the data cache for the address range
 * in question.
 *
 * If 'phys' is non-zero then physical addresses are used.
 */

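// A minimal caller sketch (illustrative only, not part of the original
// file), assuming the r3/r4/r5 argument convention the code below reads:
//	lis	r3,0x1234		// r3 <- physical address 0x12340000
//	li	r4,4096			// r4 <- byte count (one page)
//	li	r5,1			// r5 <- phys flag: address is physical
//	bl	_flush_dcache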

	.text
	.align	5
	.globl	_flush_dcache
_flush_dcache:
	li	r0,kDcbf			// use DCBF instruction
	rlwinm	r3,r3,0,0,31		// truncate address in case this is a 64-bit machine
	b	cache_op_join		// join common code

	.align	5
	.globl	_flush_dcache64
_flush_dcache64:
	rlwinm	r3,r3,0,1,0			; Duplicate high half of long long paddr into top of reg
	li	r0,kDcbf			// use DCBF instruction
	rlwimi	r3,r4,0,0,31		; Combine bottom of long long to full 64-bits
	mr	r4,r5				; Move count
	mr	r5,r6				; Move physical flag
	b	cache_op_join		// join common code
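
// (Descriptive note, not in the original:) the rlwinm/rlwimi pair above
// assembles a 64-bit address from the two 32-bit halves of the long long:
// rlwinm with wrapping mask (1,0) replicates the low word of r3 (the high
// half of the address) into the upper word, then rlwimi inserts r4 into
// bits 32-63.  The same idiom recurs in the other *64 entry points below.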

/*
 * extern void invalidate_dcache(vm_offset_t va, unsigned count, boolean phys);
 * extern void invalidate_dcache64(addr64_t va, unsigned count, boolean phys);
 *
 * invalidate_dcache takes a virtual or physical address and a byte count
 * to invalidate; it can be called for ranges spanning multiple virtual
 * pages.
 *
 * It invalidates the data cache for the address range in question.
 * Note that dcbi discards dirty data without writing it back to memory.
 */

	.globl	_invalidate_dcache
_invalidate_dcache:
	li	r0,kDcbi			// use DCBI instruction
	rlwinm	r3,r3,0,0,31		// truncate address in case this is a 64-bit machine
	b	cache_op_join		// join common code

	.align	5
	.globl	_invalidate_dcache64
_invalidate_dcache64:
	rlwinm	r3,r3,0,1,0			; Duplicate high half of long long paddr into top of reg
	li	r0,kDcbi			// use DCBI instruction
	rlwimi	r3,r4,0,0,31		; Combine bottom of long long to full 64-bits
	mr	r4,r5				; Move count
	mr	r5,r6				; Move physical flag
	b	cache_op_join		// join common code

/*
 * extern void invalidate_icache(vm_offset_t addr, unsigned cnt, boolean phys);
 * extern void invalidate_icache64(addr64_t addr, unsigned cnt, boolean phys);
 *
 * invalidate_icache takes a virtual or physical address and a byte count
 * to invalidate; it can be called for ranges spanning multiple virtual
 * pages.
 *
 * It invalidates the instruction cache for the address range in question.
 */

	.globl	_invalidate_icache
_invalidate_icache:
	li	r0,kIcbi			// use ICBI instruction
	rlwinm	r3,r3,0,0,31		// truncate address in case this is a 64-bit machine
	b	cache_op_join		// join common code

	.align	5
	.globl	_invalidate_icache64
_invalidate_icache64:
	rlwinm	r3,r3,0,1,0			; Duplicate high half of long long paddr into top of reg
	li	r0,kIcbi			// use ICBI instruction
	rlwimi	r3,r4,0,0,31		; Combine bottom of long long to full 64-bits
	mr	r4,r5				; Move count
	mr	r5,r6				; Move physical flag
	b	cache_op_join		// join common code

/*
 * extern void sync_ppage(ppnum_t pa);
 *
 * sync_ppage takes a physical page number.
 *
 * It writes out the data cache and invalidates the instruction
 * cache for the address range in question.
 */

	.globl	_sync_ppage
	.align	5
_sync_ppage:					// Should be the most commonly called routine, by far
	mfsprg	r2,2				// r2 <- processor feature flags
	li	r0,kDcbf+kIcbi		// we need to dcbf and then icbi
	mtcrf	0x02,r2				; Move pf64Bit to cr6
	li	r5,1				// set flag for physical addresses
	li	r4,4096				; Set page size
	bt++	pf64Bitb,spp64		; Skip if 64-bit (only they take the hint)
	rlwinm	r3,r3,12,0,19		; Convert page number to physical address - 32-bit
	b	cache_op_join		; Join up....

spp64:	sldi	r3,r3,12			; Convert page number to physical address - 64-bit
	b	cache_op_join		; Join up....
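
// Worked example (illustrative, not from the original source): page
// number 0x000F1 shifted left by 12 yields physical address 0x000F1000,
// and the count of 4096 covers exactly that one 4KB page.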

/*
 * extern void sync_cache_virtual(vm_offset_t addr, unsigned count);
 *
 * Like "sync_cache", except that it takes a virtual address and byte
 * count.  It flushes the data cache, invalidates the I cache, and syncs.
 */

	.globl	_sync_cache_virtual
	.align	5
_sync_cache_virtual:
	li	r0,kDcbf+kIcbi		// we need to dcbf and then icbi
	li	r5,0				// set flag for virtual addresses
	b	cache_op_join		// join common code


/*
 * extern void sync_cache(vm_offset_t pa, unsigned count);
 * extern void sync_cache64(addr64_t pa, unsigned count);
 *
 * sync_cache takes a physical address and a byte count to sync; since
 * the range must be physically contiguous, it must not be called for
 * ranges spanning multiple virtual pages.
 *
 * It writes out the data cache and invalidates the instruction
 * cache for the address range in question.
 */

	.globl	_sync_cache
	.align	5
_sync_cache:
	li	r0,kDcbf+kIcbi		// we need to dcbf and then icbi
	li	r5,1				// set flag for physical addresses
	rlwinm	r3,r3,0,0,31		// truncate address in case this is a 64-bit machine
	b	cache_op_join		// join common code

	.globl	_sync_cache64
	.align	5
_sync_cache64:
	rlwinm	r3,r3,0,1,0			; Duplicate high half of long long paddr into top of reg
	li	r0,kDcbf+kIcbi		// we need to dcbf and then icbi
	rlwimi	r3,r4,0,0,31		; Combine bottom of long long to full 64-bits
	mr	r4,r5				; Copy over the length
	li	r5,1				// set flag for physical addresses
						// ... and fall through into the common code


	// Common code to handle the cache operations.

cache_op_join:					// here with r3=addr, r4=count, r5=phys flag, r0=bits
	mfsprg	r10,2				// r10 <- processor feature flags
	cmpwi	cr5,r5,0			// using physical addresses?
	mtcrf	0x01,r0				// move kDcbf, kDcbi, and kIcbi bits to CR7
	andi.	r9,r10,pf32Byte+pf128Byte	// r9 <- cache line size
	mtcrf	0x02,r10			// move pf64Bit bit to CR6
	subi	r8,r9,1				// r8 <- (linesize-1)
	beq--	cr5,cache_op_2			// skip if using virtual addresses

	bf--	pf64Bitb,cache_op_not64		// This is not a 64-bit machine

	srdi	r12,r3,31			// Slide bit 32 to bit 63
	cmpldi	r12,1				// Are we in the I/O mapped area?
	beqlr--					// No cache ops allowed here...
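
// (Descriptive note, not in the original:) (r3 >> 31) == 1 exactly when
// the address falls in 0x80000000-0xFFFFFFFF, the I/O mapped area on
// 64-bit machines, so the routine returns without touching the caches.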

cache_op_not64:
	mflr	r12				// save return address
	bl	EXT(ml_set_physical)		// turn on physical addressing
	mtlr	r12				// restore return address

	// get r3=first cache line, r4=first line not in set, r6=byte count
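	// Worked example (illustrative, not from the original source): with
	// 32-byte lines (r8 = 0x1F), addr = 0x1005 and count = 0x0F give
	// r7 = 0x1014, r3 = 0x1000 (first line), r4 = 0x1020 (first line
	// past the range), and r6 = 0x20 (one line to operate on).
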
cache_op_2:
	add	r7,r3,r4			// point to 1st byte not to operate on
	andc	r3,r3,r8			// r3 <- 1st line to operate on
	add	r4,r7,r8			// round up
	andc	r4,r4,r8			// r4 <- 1st line not to operate on
	sub.	r6,r4,r3			// r6 <- byte count to operate on
	beq--	cache_op_exit			// nothing to do
	bf--	kDcbfb,cache_op_6		// no need to dcbf


	// DCBF loop
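	// (Descriptive note, not in the original:) r6 counts down from the
	// byte count by one line per iteration, so "dcbf r6,r3" (EA = r3+r6)
	// touches the last line first and walks backward to r3; this avoids
	// the mode-specific address compares mentioned at the top of the file.
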
cache_op_5:
	sub.	r6,r6,r9			// more to go?
	dcbf	r6,r3				// flush next line to RAM
	bne	cache_op_5			// loop if more to go
	sync					// make sure the data reaches RAM
	sub	r6,r4,r3			// reset count


	// ICBI loop

cache_op_6:
	bf--	kIcbib,cache_op_8		// no need to icbi
cache_op_7:
	sub.	r6,r6,r9			// more to go?
	icbi	r6,r3				// invalidate next line
	bne	cache_op_7			// loop if more to go
	sub	r6,r4,r3			// reset count
	isync					// discard any prefetched instructions
	sync


	// DCBI loop

cache_op_8:
	bf++	kDcbib,cache_op_exit		// no need to dcbi
cache_op_9:
	sub.	r6,r6,r9			// more to go?
	dcbi	r6,r3				// invalidate next line
	bne	cache_op_9			// loop if more to go
	sync					// make sure the invalidates complete


	// restore MSR iff necessary and done

cache_op_exit:
	beqlr--	cr5				// if using virtual addresses, no need to restore MSR
	b	EXT(ml_restore)			// restore MSR and return