/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <ppc/exception.h>
#include <assym.s>

/* These routines run in 32- or 64-bit addressing, and handle
 * 32- and 128-byte cache lines.  They do not use compare instructions
 * on addresses, since compares are 32/64-bit-mode-specific.
 */

#define kDcbf   0x1
#define kDcbfb  31
#define kDcbi   0x2
#define kDcbib  30
#define kIcbi   0x4
#define kIcbib  29

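/*
 * The kXxx values are operation-select bits passed to the common code in
 * r0; the kXxxb values are their bit numbers (IBM numbering, bit 31 =
 * least significant).  "mtcrf 0x01,r0" in cache_op_join copies bits 28-31
 * of r0 into CR7, so "bt/bf kDcbfb" etc. then test the flags directly.
 * Roughly, in C terms (a sketch, not part of the original source):
 *
 *	unsigned op = kDcbf + kIcbi;		// e.g. the sync_cache entries
 *	if (op & kDcbf) { ... dcbf loop ... }	// flush data cache to RAM
 *	if (op & kIcbi) { ... icbi loop ... }	// invalidate I-cache
 *	if (op & kDcbi) { ... dcbi loop ... }	// invalidate D-cache
 */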

/*
 * extern void flush_dcache(vm_offset_t addr, unsigned count, boolean phys);
 * extern void flush_dcache64(addr64_t addr, unsigned count, boolean phys);
 *
 * flush_dcache takes a virtual or physical address and a count to flush
 * (it can be called for multiple virtual pages).
 *
 * It flushes the data cache for the address range in question.
 *
 * If 'phys' is non-zero then physical addresses are used.
 */

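/*
 * Usage sketch (hypothetical caller, not part of this file; 'buf' and
 * 'len' are illustrative names):
 *
 *	extern void flush_dcache(vm_offset_t addr, unsigned count, boolean phys);
 *
 *	flush_dcache((vm_offset_t)buf, len, 0);	// 0 => 'buf' is a virtual address
 */
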
        .text
        .align  5
        .globl  _flush_dcache
_flush_dcache:
        li      r0,kDcbf                // use DCBF instruction
        rlwinm  r3,r3,0,0,31            // truncate address in case this is a 64-bit machine
        b       cache_op_join           // join common code

        .align  5
        .globl  _flush_dcache64
_flush_dcache64:
        rlwinm  r3,r3,0,1,0             ; Duplicate high half of long long paddr into top of reg
        li      r0,kDcbf                // use DCBF instruction
        rlwimi  r3,r4,0,0,31            ; Combine bottom of long long to full 64-bits
        mr      r4,r5                   ; Move count
        mr      r5,r6                   ; Move physical flag
        b       cache_op_join           // join common code
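
/*
 * Note on the ...64 entry points: in the 32-bit PowerPC calling
 * convention a 64-bit argument arrives as a register pair (high half in
 * r3, low half in r4), shifting the later arguments down one register.
 * The rlwinm/rlwimi pair above glues the two halves into a single
 * 64-bit value in r3, and the mr instructions move the count and the
 * physical flag back into r4 and r5 for the common code.
 */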

/*
 * extern void invalidate_dcache(vm_offset_t va, unsigned count, boolean phys);
 * extern void invalidate_dcache64(addr64_t va, unsigned count, boolean phys);
 *
 * invalidate_dcache takes a virtual or physical address and a count to
 * invalidate (it can be called for multiple virtual pages).
 *
 * It invalidates the data cache for the address range in question.
 */

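/*
 * Caution: dcbi discards modified lines without writing them back, so
 * any dirty data in the range is lost.  Use flush_dcache instead when
 * the contents must reach memory.
 */
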
        .globl  _invalidate_dcache
_invalidate_dcache:
        li      r0,kDcbi                // use DCBI instruction
        rlwinm  r3,r3,0,0,31            // truncate address in case this is a 64-bit machine
        b       cache_op_join           // join common code


        .align  5
        .globl  _invalidate_dcache64
_invalidate_dcache64:
        rlwinm  r3,r3,0,1,0             ; Duplicate high half of long long paddr into top of reg
        li      r0,kDcbi                // use DCBI instruction
        rlwimi  r3,r4,0,0,31            ; Combine bottom of long long to full 64-bits
        mr      r4,r5                   ; Move count
        mr      r5,r6                   ; Move physical flag
        b       cache_op_join           // join common code

/*
 * extern void invalidate_icache(vm_offset_t addr, unsigned cnt, boolean phys);
 * extern void invalidate_icache64(addr64_t addr, unsigned cnt, boolean phys);
 *
 * invalidate_icache takes a virtual or physical address and a count to
 * invalidate (it can be called for multiple virtual pages).
 *
 * It invalidates the instruction cache for the address range in question.
 */

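/*
 * Usage sketch (hypothetical, illustrative names): after writing
 * instructions to memory, push them out of the D-cache and drop any
 * stale I-cache lines before executing them.
 *
 *	memcpy(dst, new_code, len);
 *	flush_dcache((vm_offset_t)dst, len, 0);		// data to memory
 *	invalidate_icache((vm_offset_t)dst, len, 0);	// drop stale I-cache lines
 */
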
        .globl  _invalidate_icache
_invalidate_icache:
        li      r0,kIcbi                // use ICBI instruction
        rlwinm  r3,r3,0,0,31            // truncate address in case this is a 64-bit machine
        b       cache_op_join           // join common code


        .align  5
        .globl  _invalidate_icache64
_invalidate_icache64:
        rlwinm  r3,r3,0,1,0             ; Duplicate high half of long long paddr into top of reg
        li      r0,kIcbi                // use ICBI instruction
        rlwimi  r3,r4,0,0,31            ; Combine bottom of long long to full 64-bits
        mr      r4,r5                   ; Move count
        mr      r5,r6                   ; Move physical flag
        b       cache_op_join           // join common code

/*
 * extern void sync_ppage(ppnum_t pa);
 *
 * sync_ppage takes a physical page number.
 *
 * It writes out the data cache and invalidates the instruction
 * cache for the address range in question.
 */

        .globl  _sync_ppage
        .align  5
_sync_ppage:                            // Should be the most commonly called routine, by far
        mfsprg  r2,2
        li      r0,kDcbf+kIcbi          // we need to dcbf and then icbi
        mtcrf   0x02,r2                 ; Move pf64Bit to cr6
        li      r5,1                    // set flag for physical addresses
        li      r4,4096                 ; Set page size
        bt++    pf64Bitb,spp64          ; Skip if 64-bit (only they take the hint)
        rlwinm  r3,r3,12,0,19           ; Convert to physical address - 32-bit
        b       cache_op_join           ; Join up....

spp64:  sldi    r3,r3,12                ; Convert to physical address - 64-bit
        b       cache_op_join           ; Join up....
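
/*
 * Example: page number 0x12345 becomes physical address 0x12345000
 * (pa << 12).  The 64-bit path (sldi) keeps every bit of the page
 * number; the 32-bit path (rlwinm ...,12,0,19) keeps only its low
 * 20 bits, which is all a 32-bit physical address can hold.
 */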
161 | ||
de355530 | 162 | |
de355530 | 163 | |
55e303ae A |
164 | /* |
165 | * extern void sync_cache_virtual(vm_offset_t addr, unsigned count); | |
166 | * | |
167 | * Like "sync_cache", except it takes a virtual address and byte count. | |
168 | * It flushes the data cache, invalidates the I cache, and sync's. | |
169 | */ | |
170 | ||
        .globl  _sync_cache_virtual
        .align  5
_sync_cache_virtual:
        li      r0,kDcbf+kIcbi          // we need to dcbf and then icbi
        li      r5,0                    // set flag for virtual addresses
        b       cache_op_join           // join common code

/*
 * extern void sync_cache(vm_offset_t pa, unsigned count);
 * extern void sync_cache64(addr64_t pa, unsigned count);
 *
 * sync_cache takes a physical address and a count to sync, and thus
 * must not be called for multiple virtual pages.
 *
 * It writes out the data cache and invalidates the instruction
 * cache for the address range in question.
 */

        .globl  _sync_cache
        .align  5
_sync_cache:
        li      r0,kDcbf+kIcbi          // we need to dcbf and then icbi
        li      r5,1                    // set flag for physical addresses
        rlwinm  r3,r3,0,0,31            // truncate address in case this is a 64-bit machine
        b       cache_op_join           // join common code

        .globl  _sync_cache64
        .align  5
_sync_cache64:
        rlwinm  r3,r3,0,1,0             ; Duplicate high half of long long paddr into top of reg
        li      r0,kDcbf+kIcbi          // we need to dcbf and then icbi
        rlwimi  r3,r4,0,0,31            ; Combine bottom of long long to full 64-bits
        mr      r4,r5                   ; Copy over the length
        li      r5,1                    // set flag for physical addresses
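
        // Note: _sync_cache64 deliberately falls through into
        // cache_op_join below rather than branching to it.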

// Common code to handle the cache operations.

cache_op_join:                          // here with r3=addr, r4=count, r5=phys flag, r0=bits
        mfsprg  r10,2                   // r10 <- processor feature flags
        cmpwi   cr5,r5,0                // using physical addresses?
        mtcrf   0x01,r0                 // move kDcbf, kDcbi, and kIcbi bits to CR7
        andi.   r9,r10,pf32Byte+pf128Byte // r9 <- cache line size
        mtcrf   0x02,r10                // move pf64Bit bit to CR6
        subi    r8,r9,1                 // r8 <- (linesize-1)
        beq--   cr5,cache_op_2          // skip if using virtual addresses
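        // The pf32Byte and pf128Byte feature-flag bits are defined so
        // that their values equal the line sizes they denote, which is
        // why the andi. above yields the cache line size directly.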

        bf--    pf64Bitb,cache_op_not64 // This is not a 64-bit machine

        srdi    r12,r3,31               // Slide bit 32 to bit 63
        cmpldi  r12,1                   // Are we in the I/O mapped area?
        beqlr--                         // No cache ops allowed here...

cache_op_not64:
        mflr    r12                     // save return address
        bl      EXT(ml_set_physical)    // turn on physical addressing
        mtlr    r12                     // restore return address

// get r3=first cache line, r4=first line not in set, r6=byte count

cache_op_2:
        add     r7,r3,r4                // point to 1st byte not to operate on
        andc    r3,r3,r8                // r3 <- 1st line to operate on
        add     r4,r7,r8                // round up
        andc    r4,r4,r8                // r4 <- 1st line not to operate on
        sub.    r6,r4,r3                // r6 <- byte count to operate on
        beq--   cache_op_exit           // nothing to do
        bf--    kDcbfb,cache_op_6       // no need to dcbf
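
        // Worked example of the rounding, assuming 32-byte lines
        // (r8 = 0x1f): addr = 0x1005, count = 0x30 gives r7 = 0x1035;
        // r3 rounds down to 0x1000 and r4 rounds up to 0x1040, so
        // r6 = 0x40 and two lines are operated on.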

// DCBF loop

cache_op_5:
        sub.    r6,r6,r9                // more to go?
        dcbf    r6,r3                   // flush next line to RAM
        bne     cache_op_5              // loop if more to go
        sync                            // make sure the data reaches RAM
        sub     r6,r4,r3                // reset count
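
        // Note that these loops index downward: r6 starts at the byte
        // count and is decremented by the line size before each cache
        // op, so the effective address "r6,r3" walks from the last
        // line back to the first, and the loop exits when r6 hits 0.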

// ICBI loop

cache_op_6:
        bf--    kIcbib,cache_op_8       // no need to icbi
cache_op_7:
        sub.    r6,r6,r9                // more to go?
        icbi    r6,r3                   // invalidate next line
        bne     cache_op_7
        sub     r6,r4,r3                // reset count
        isync
        sync


// DCBI loop

cache_op_8:
        bf++    kDcbib,cache_op_exit    // no need to dcbi
cache_op_9:
        sub.    r6,r6,r9                // more to go?
        dcbi    r6,r3                   // invalidate next line
        bne     cache_op_9
        sync


// restore MSR iff necessary and done

cache_op_exit:
        beqlr-- cr5                     // if using virtual addresses, no need to restore MSR
        b       EXT(ml_restore)         // restore MSR and return


////////////////////////////////////////////////////

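/*
 * dcache_incoherent_io_store64 - push a physically addressed range out
 * of the data cache for a device that does not snoop it.  (Prototype
 * inferred from the register usage below: a 64-bit physical address in
 * r3/r4 and a byte count in r5.)
 */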
        .align  5
        .globl  _dcache_incoherent_io_store64
_dcache_incoherent_io_store64:
        rlwinm  r3,r3,0,1,0             ; Duplicate high half of long long paddr into top of reg
        rlwimi  r3,r4,0,0,31            ; Combine bottom of long long to full 64-bits
        mr      r4,r5                   ; Move count

// here with r3=addr, r4=count
        mfsprg  r10,2                   // r10 <- processor feature flags
        andi.   r9,r10,pf32Byte+pf128Byte // r9 <- cache line size
        mtcrf   0x02,r10                // move pf64Bit bit to CR6
        subi    r8,r9,1                 // r8 <- (linesize-1)

        bf--    pf64Bitb,cache_ios_not64 // This is not a 64-bit machine

        srdi    r12,r3,31               // Slide bit 32 to bit 63
        cmpldi  r12,1                   // Are we in the I/O mapped area?
        beqlr--                         // No cache ops allowed here...

cache_ios_not64:
        mflr    r12                     // save return address
        bl      EXT(ml_set_physical)    // turn on physical addressing
        mtlr    r12                     // restore return address

// get r3=first cache line, r4=first line not in set, r6=byte count
        add     r7,r3,r4                // point to 1st byte not to operate on
        andc    r3,r3,r8                // r3 <- 1st line to operate on
        add     r4,r7,r8                // round up
        andc    r4,r4,r8                // r4 <- 1st line not to operate on
        sub.    r6,r4,r3                // r6 <- byte count to operate on
        beq--   cache_ios_exit          // nothing to do

        sub.    r6,r6,r9                // >1 line?
        beq     cache_ios_last_line     // use dcbst on all lines but last

// DCBST loop
cache_ios_5:
        sub.    r6,r6,r9                // more to go?
        dcbst   r6,r3                   // store next line
        bne     cache_ios_5             // loop if more to go

cache_ios_last_line:
        sync                            // flush last line
        isync
        dcbf    r6,r3
        sync
        isync
        add     r6,r6,r3
        lwz     r0,0(r6)                // make sure the data reaches RAM (not just the memory controller)
        isync
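
        // All lines but the last use dcbst (write back, line stays
        // valid); the last uses dcbf, and the lwz above then reads that
        // just-flushed line back, stalling until the written data is
        // truly in RAM rather than queued in the memory controller.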

// restore MSR
cache_ios_exit:
        b       EXT(ml_restore)         // restore MSR and return


////////////////////////////////////////////////////

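/*
 * dcache_incoherent_io_flush64 - flush and invalidate a physically
 * addressed range from the data cache.  (Prototype inferred from the
 * register usage below: a 64-bit physical address in r3/r4 and a byte
 * count in r5.)
 */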
        .align  5
        .globl  _dcache_incoherent_io_flush64
_dcache_incoherent_io_flush64:
        rlwinm  r3,r3,0,1,0             ; Duplicate high half of long long paddr into top of reg
        rlwimi  r3,r4,0,0,31            ; Combine bottom of long long to full 64-bits
        mr      r4,r5                   ; Move count

// here with r3=addr, r4=count
        mfsprg  r10,2                   // r10 <- processor feature flags
        andi.   r9,r10,pf32Byte+pf128Byte // r9 <- cache line size
        mtcrf   0x02,r10                // move pf64Bit bit to CR6
        subi    r8,r9,1                 // r8 <- (linesize-1)

        bf--    pf64Bitb,cache_iof_not64 // This is not a 64-bit machine

        srdi    r12,r3,31               // Slide bit 32 to bit 63
        cmpldi  r12,1                   // Are we in the I/O mapped area?
        beqlr--                         // No cache ops allowed here...

cache_iof_not64:
        mflr    r12                     // save return address
        bl      EXT(ml_set_physical)    // turn on physical addressing
        mtlr    r12                     // restore return address

// get r3=first cache line, r4=first line not in set, r6=byte count
        add     r7,r3,r4                // point to 1st byte not to operate on
        andc    r3,r3,r8                // r3 <- 1st line to operate on
        add     r4,r7,r8                // round up
        andc    r4,r4,r8                // r4 <- 1st line not to operate on
        sub.    r6,r4,r3                // r6 <- byte count to operate on
        beq--   cache_iof_exit          // nothing to do

// DCBF loop
cache_iof_5:
        sub.    r6,r6,r9                // more to go?
        dcbf    r6,r3                   // flush next line
        bne     cache_iof_5             // loop if more to go

cache_iof_last_line:
        sync                            // make sure the flushes complete
        isync

// restore MSR
cache_iof_exit:
        b       EXT(ml_restore)         // restore MSR and return