/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <ppc/exception.h>
#include <assym.s>

/* These routines run in 32- or 64-bit addressing, and handle
 * 32-byte and 128-byte cache lines.  They do not use compare
 * instructions on addresses, since compares are 32/64-bit-mode-specific.
 */

#define kDcbf	0x1
#define kDcbfb	31
#define kDcbi	0x2
#define kDcbib	30
#define kIcbi	0x4
#define kIcbib	29

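/* The kXxx masks select which cache operations to perform; the kXxxb values
 * are the corresponding CR bit numbers.  cache_op_join below does
 * "mtcrf 0x01,r0", which copies r0 bits 28-31 into CR7 (CR bits 28-31), so
 * mask 0x1 lands on CR bit 31 (kDcbfb), 0x2 on bit 30 (kDcbib), and 0x4 on
 * bit 29 (kIcbib), where bf/bt can test them.
 */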

/*
 * extern void flush_dcache(vm_offset_t addr, unsigned count, boolean phys);
 * extern void flush_dcache64(addr64_t addr, unsigned count, boolean phys);
 *
 * flush_dcache takes a virtual or physical address and a byte count to
 * flush, and can be called for a range spanning multiple virtual pages.
 *
 * It flushes the data cache for the address range in question.
 *
 * If 'phys' is non-zero then physical addresses are used.
 */

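/* A minimal usage sketch (hypothetical caller; 'buf' and 'pa' are
 * illustrative only), matching the prototypes above:
 *
 *	flush_dcache((vm_offset_t)buf, 4096, 0);	// virtual address, phys == 0
 *	flush_dcache64(pa, 4096, 1);			// 64-bit physical address
 */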

	.text
	.align	5
	.globl	_flush_dcache
_flush_dcache:
	li	r0,kDcbf			// use DCBF instruction
	rlwinm	r3,r3,0,0,31			// truncate address in case this is a 64-bit machine
	b	cache_op_join			// join common code

	.align	5
	.globl	_flush_dcache64
_flush_dcache64:
	rlwinm	r3,r3,0,1,0			; Duplicate high half of long long paddr into top of reg
	li	r0,kDcbf			// use DCBF instruction
	rlwimi	r3,r4,0,0,31			; Combine bottom of long long to full 64-bits
	mr	r4,r5				; Move count
	mr	r5,r6				; Move physical flag
	b	cache_op_join			// join common code
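
/* Note on the rlwinm/rlwimi pair above: the 32-bit PowerPC ABI passes an
 * addr64_t in two GPRs (high word in r3, low word in r4).  rlwinm r3,r3,0,1,0
 * replicates r3's low word into the upper half of the 64-bit register, and
 * rlwimi r3,r4,0,0,31 then inserts r4 into the lower half, leaving the full
 * 64-bit address in r3.  Every *64 entry point in this file opens with the
 * same idiom.
 */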


/*
 * extern void invalidate_dcache(vm_offset_t va, unsigned count, boolean phys);
 * extern void invalidate_dcache64(addr64_t va, unsigned count, boolean phys);
 *
 * invalidate_dcache takes a virtual or physical address and a byte count
 * to invalidate, and can be called for a range spanning multiple virtual pages.
 *
 * It invalidates the data cache for the address range in question.
 */

	.globl	_invalidate_dcache
_invalidate_dcache:
	li	r0,kDcbi			// use DCBI instruction
	rlwinm	r3,r3,0,0,31			// truncate address in case this is a 64-bit machine
	b	cache_op_join			// join common code


	.align	5
	.globl	_invalidate_dcache64
_invalidate_dcache64:
	rlwinm	r3,r3,0,1,0			; Duplicate high half of long long paddr into top of reg
	li	r0,kDcbi			// use DCBI instruction
	rlwimi	r3,r4,0,0,31			; Combine bottom of long long to full 64-bits
	mr	r4,r5				; Move count
	mr	r5,r6				; Move physical flag
	b	cache_op_join			// join common code

/*
 * extern void invalidate_icache(vm_offset_t addr, unsigned cnt, boolean phys);
 * extern void invalidate_icache64(addr64_t addr, unsigned cnt, boolean phys);
 *
 * invalidate_icache takes a virtual or physical address and a byte count
 * to invalidate, and can be called for a range spanning multiple virtual pages.
 *
 * It invalidates the instruction cache for the address range in question.
 */
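/* Caution: this invalidates only the I-cache.  If the bytes in question were
 * just written, they may still be dirty in the D-cache; to make newly written
 * instructions visible, the sync_cache* routines below (dcbf followed by icbi)
 * are the appropriate tool.
 */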

	.globl	_invalidate_icache
_invalidate_icache:
	li	r0,kIcbi			// use ICBI instruction
	rlwinm	r3,r3,0,0,31			// truncate address in case this is a 64-bit machine
	b	cache_op_join			// join common code


	.align	5
	.globl	_invalidate_icache64
_invalidate_icache64:
	rlwinm	r3,r3,0,1,0			; Duplicate high half of long long paddr into top of reg
	li	r0,kIcbi			// use ICBI instruction
	rlwimi	r3,r4,0,0,31			; Combine bottom of long long to full 64-bits
	mr	r4,r5				; Move count
	mr	r5,r6				; Move physical flag
	b	cache_op_join			// join common code

/*
 * extern void sync_ppage(ppnum_t pa);
 *
 * sync_ppage takes a physical page number.
 *
 * It writes out the data cache and invalidates the instruction
 * cache for the address range in question.
 */

	.globl	_sync_ppage
	.align	5
_sync_ppage:					// Should be the most commonly called routine, by far
	mfsprg	r2,2				; Get the feature flags
	li	r0,kDcbf+kIcbi			// we need to dcbf and then icbi
	mtcrf	0x02,r2				; Move pf64Bit to cr6
	li	r5,1				// set flag for physical addresses
	li	r4,4096				; Set page size
	bt++	pf64Bitb,spp64			; Skip if 64-bit (only they take the hint)
	rlwinm	r3,r3,12,0,19			; Convert to physical address - 32-bit
	b	cache_op_join			; Join up....

spp64:	sldi	r3,r3,12			; Convert to physical address - 64-bit
	b	cache_op_join			; Join up....

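/* The conversion above is just "page number << 12": e.g. ppnum 0x12345
 * becomes physical address 0x12345000.  The 32-bit path uses rlwinm
 * (rotate left 12, keep bits 0-19) to the same effect as the 64-bit sldi.
 */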

/*
 * extern void sync_cache_virtual(vm_offset_t addr, unsigned count);
 *
 * Like "sync_cache", except it takes a virtual address and byte count.
 * It flushes the data cache, invalidates the I cache, and syncs.
 */
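/* A typical use is making freshly generated instructions visible, e.g.
 * (hypothetical caller; 'code' and 'codeLen' are illustrative only):
 *
 *	sync_cache_virtual((vm_offset_t)code, codeLen);
 *
 * which pushes the new bytes out of the D-cache and discards any stale
 * copies in the I-cache.
 */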

	.globl	_sync_cache_virtual
	.align	5
_sync_cache_virtual:
	li	r0,kDcbf+kIcbi			// we need to dcbf and then icbi
	li	r5,0				// set flag for virtual addresses
	b	cache_op_join			// join common code


/*
 * extern void sync_cache(vm_offset_t pa, unsigned count);
 * extern void sync_cache64(addr64_t pa, unsigned count);
 *
 * sync_cache takes a physical address and a byte count to sync; because the
 * range must be physically contiguous, it must not span multiple virtual pages.
 *
 * It writes out the data cache and invalidates the instruction
 * cache for the address range in question.
 */

	.globl	_sync_cache
	.align	5
_sync_cache:
	li	r0,kDcbf+kIcbi			// we need to dcbf and then icbi
	li	r5,1				// set flag for physical addresses
	rlwinm	r3,r3,0,0,31			// truncate address in case this is a 64-bit machine
	b	cache_op_join			// join common code

	.globl	_sync_cache64
	.align	5
_sync_cache64:
	rlwinm	r3,r3,0,1,0			; Duplicate high half of long long paddr into top of reg
	li	r0,kDcbf+kIcbi			// we need to dcbf and then icbi
	rlwimi	r3,r4,0,0,31			; Combine bottom of long long to full 64-bits
	mr	r4,r5				; Copy over the length
	li	r5,1				// set flag for physical addresses
						// ...and fall through into the common code below


	// Common code to handle the cache operations.

cache_op_join:					// here with r3=addr, r4=count, r5=phys flag, r0=bits
	mfsprg	r10,2				// r10 <- processor feature flags
	cmpwi	cr5,r5,0			// using physical addresses?
	mtcrf	0x01,r0				// move kDcbf, kDcbi, and kIcbi bits to CR7
	andi.	r9,r10,pf32Byte+pf128Byte	// r9 <- cache line size
	mtcrf	0x02,r10			// move pf64Bit bit to CR6
	subi	r8,r9,1				// r8 <- (linesize-1)
	beq--	cr5,cache_op_2			// skip if using virtual addresses
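	// At this point: cr5.eq means "virtual addresses", CR6 holds pf64Bit,
	// CR7 holds the kDcbf/kDcbi/kIcbi selector bits, r9 is the line size,
	// and r8 is the line-offset mask (linesize-1).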

	bf--	pf64Bitb,cache_op_not64		// This is not a 64-bit machine

	srdi	r12,r3,31			// Slide bit 32 to bit 63
	cmpldi	r12,1				// Are we in the I/O mapped area?
	beqlr--					// No cache ops allowed here...

cache_op_not64:
	mflr	r12				// save return address
	bl	EXT(ml_set_physical)		// turn on physical addressing
	mtlr	r12				// restore return address

	// get r3=first cache line, r4=first line not in set, r6=byte count

cache_op_2:
	add	r7,r3,r4			// point to 1st byte not to operate on
	andc	r3,r3,r8			// r3 <- 1st line to operate on
	add	r4,r7,r8			// round up
	andc	r4,r4,r8			// r4 <- 1st line not to operate on
	sub.	r6,r4,r3			// r6 <- byte count to operate on
	beq--	cache_op_exit			// nothing to do
	bf--	kDcbfb,cache_op_6		// no need to dcbf
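
	// Worked example of the rounding above, assuming 32-byte lines (r8 = 31):
	// addr = 0x1005, count = 0x10  =>  r7 = 0x1015 (1st byte past the range),
	// r3 = 0x1000 (1st line), r4 = 0x1020 (1st line past the range), and
	// r6 = 0x20, i.e. a single cache line covers the whole request.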

	// DCBF loop.  Note the indexing trick used by all three loops: r6 is
	// pre-decremented by the line size and used as the offset in "dcbf r6,r3"
	// (EA = r3+r6), so the range is walked from its last line down to offset 0.

cache_op_5:
	sub.	r6,r6,r9			// more to go?
	dcbf	r6,r3				// flush next line to RAM
	bne	cache_op_5			// loop if more to go
	sync					// make sure the data reaches RAM
	sub	r6,r4,r3			// reset count


	// ICBI loop

cache_op_6:
	bf--	kIcbib,cache_op_8		// no need to icbi
cache_op_7:
	sub.	r6,r6,r9			// more to go?
	icbi	r6,r3				// invalidate next line
	bne	cache_op_7
	sub	r6,r4,r3			// reset count
	isync
	sync


	// DCBI loop

cache_op_8:
	bf++	kDcbib,cache_op_exit		// no need to dcbi
cache_op_9:
	sub.	r6,r6,r9			// more to go?
	dcbi	r6,r3				// invalidate next line
	bne	cache_op_9
	sync

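	// ml_restore (reached below with a plain branch; the LR saved around
	// ml_set_physical was already restored) switches the MSR back and then
	// returns directly to our original caller.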

	// restore MSR iff necessary and done

cache_op_exit:
	beqlr--	cr5				// if using virtual addresses, no need to restore MSR
	b	EXT(ml_restore)			// restore MSR and return


////////////////////////////////////////////////////

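/* dcache_incoherent_io_store64 pushes a physically addressed range out of
 * the data cache ahead of incoherent (non-snooping) I/O.  Judging from the
 * register shuffle below, the C prototype presumably takes an addr64_t
 * (split across r3/r4 by the 32-bit ABI) followed by a byte count, like
 * sync_cache64.  All lines but the last are dcbst'd; the last line is
 * dcbf'd and then read back so the data is known to have reached RAM.
 */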
	.align	5
	.globl	_dcache_incoherent_io_store64
_dcache_incoherent_io_store64:
	rlwinm	r3,r3,0,1,0			; Duplicate high half of long long paddr into top of reg
	rlwimi	r3,r4,0,0,31			; Combine bottom of long long to full 64-bits
	mr	r4,r5				; Move count

	// here with r3=addr, r4=count
	mfsprg	r10,2				// r10 <- processor feature flags
	andi.	r9,r10,pf32Byte+pf128Byte	// r9 <- cache line size
	mtcrf	0x02,r10			// move pf64Bit bit to CR6
	subi	r8,r9,1				// r8 <- (linesize-1)

	bf--	pf64Bitb,cache_ios_not64	// This is not a 64-bit machine

	srdi	r12,r3,31			// Slide bit 32 to bit 63
	cmpldi	r12,1				// Are we in the I/O mapped area?
	beqlr--					// No cache ops allowed here...

cache_ios_not64:
	mflr	r12				// save return address
	bl	EXT(ml_set_physical)		// turn on physical addressing
	mtlr	r12				// restore return address

	// get r3=first cache line, r4=first line not in set, r6=byte count
	add	r7,r3,r4			// point to 1st byte not to operate on
	andc	r3,r3,r8			// r3 <- 1st line to operate on
	add	r4,r7,r8			// round up
	andc	r4,r4,r8			// r4 <- 1st line not to operate on
	sub.	r6,r4,r3			// r6 <- byte count to operate on
	beq--	cache_ios_exit			// nothing to do

	sub.	r6,r6,r9			// >1 line?
	beq	cache_ios_last_line		// use dcbst on all lines but last

	// DCBST loop
cache_ios_5:
	sub.	r6,r6,r9			// more to go?
	dcbst	r6,r3				// store next line
	bne	cache_ios_5			// loop if more to go

cache_ios_last_line:
	sync					// wait for the dcbst stores to complete
	isync
	dcbf	r6,r3				// flush the last line
	sync
	isync
	add	r6,r6,r3
	lwz	r0,0(r6)			// make sure the data reaches RAM (not just the memory controller)
	isync

	// restore MSR
cache_ios_exit:
	b	EXT(ml_restore)			// restore MSR and return


////////////////////////////////////////////////////

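/* dcache_incoherent_io_flush64 is the companion flush routine: same entry
 * convention (presumably a two-word addr64_t plus a byte count), but every
 * line is dcbf'd, writing back any dirty data and invalidating the lines so
 * the CPU will refetch from RAM after incoherent I/O has modified memory.
 * No read-back of the last line is done here.
 */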
	.align	5
	.globl	_dcache_incoherent_io_flush64
_dcache_incoherent_io_flush64:
	rlwinm	r3,r3,0,1,0			; Duplicate high half of long long paddr into top of reg
	rlwimi	r3,r4,0,0,31			; Combine bottom of long long to full 64-bits
	mr	r4,r5				; Move count

	// here with r3=addr, r4=count
	mfsprg	r10,2				// r10 <- processor feature flags
	andi.	r9,r10,pf32Byte+pf128Byte	// r9 <- cache line size
	mtcrf	0x02,r10			// move pf64Bit bit to CR6
	subi	r8,r9,1				// r8 <- (linesize-1)

	bf--	pf64Bitb,cache_iof_not64	// This is not a 64-bit machine

	srdi	r12,r3,31			// Slide bit 32 to bit 63
	cmpldi	r12,1				// Are we in the I/O mapped area?
	beqlr--					// No cache ops allowed here...

cache_iof_not64:
	mflr	r12				// save return address
	bl	EXT(ml_set_physical)		// turn on physical addressing
	mtlr	r12				// restore return address

	// get r3=first cache line, r4=first line not in set, r6=byte count
	add	r7,r3,r4			// point to 1st byte not to operate on
	andc	r3,r3,r8			// r3 <- 1st line to operate on
	add	r4,r7,r8			// round up
	andc	r4,r4,r8			// r4 <- 1st line not to operate on
	sub.	r6,r4,r3			// r6 <- byte count to operate on
	beq--	cache_iof_exit			// nothing to do

	// DCBF loop
cache_iof_5:
	sub.	r6,r6,r9			// more to go?
	dcbf	r6,r3				// flush next line
	bne	cache_iof_5			// loop if more to go

cache_iof_last_line:
	sync					// wait for the flushes to complete
	isync

	// restore MSR
cache_iof_exit:
	b	EXT(ml_restore)			// restore MSR and return