/* osfmk/ppc/cache.s — PowerPC cache maintenance routines (Apple xnu-344) */
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
25
26 #include <cpus.h>
27
28 #include <ppc/asm.h>
29 #include <ppc/proc_reg.h>
30 #include <cpus.h>
31 #include <assym.s>
32 #include <mach_debug.h>
33 #include <mach/ppc/vm_param.h>
34
/*
 * extern void sync_cache(vm_offset_t pa, unsigned count);
 *
 * sync_cache takes a physical address and count to sync, thus
 * must not be called for multiple virtual pages.
 *
 * it writes out the data cache and invalidates the instruction
 * cache for the address range in question
 *
 * In:       r3 = physical address, r4 = byte count
 * Scratch:  r6 (saved MSR), r7, r8, ctr
 */

ENTRY(sync_cache, TAG_NO_FRAME_USED)

	/* Switch off data translations so r3/r4 are treated as a
	   physical range.  FP and vectors are forced off in the saved
	   copy too, since they must not be live across this window.
	   r6 keeps the MSR value restored on exit. */
	mfmsr	r6
	rlwinm	r6,r6,0,MSR_FP_BIT+1,MSR_FP_BIT-1	; Force floating point off
	rlwinm	r6,r6,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1	; Force vectors off
	rlwinm	r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1	; clear MSR[DR] in the working copy
	mtmsr	r7
	isync						; context-synchronize the MSR change

	/* Check to see if the range is cache-line aligned; if not,
	   round r3 down and r4 up to whole lines. */
	add	r8, r3,r4
	andi.	r8,r8,(CACHE_LINE_SIZE-1)
	beq-	.L_sync_check
	addi	r4,r4,CACHE_LINE_SIZE
	li	r7,(CACHE_LINE_SIZE-1)	/* Align buffer & count - avoid overflow problems */
	andc	r4,r4,r7
	andc	r3,r3,r7

.L_sync_check:
	cmpwi	r4, CACHE_LINE_SIZE
	ble	.L_sync_one_line		; at most one line: short path

	/* Make ctr hold count of how many times we should loop */
	addi	r8, r4, (CACHE_LINE_SIZE-1)
	srwi	r8, r8, CACHE_LINE_POW2
	mtctr	r8

	/* loop to flush the data cache; r4 walks down so dcbf hits
	   r3+r4-CACHE_LINE_SIZE ... r3+0 */
.L_sync_data_loop:
	subic	r4, r4, CACHE_LINE_SIZE
	dcbf	r3, r4
	bdnz	.L_sync_data_loop

	sync				; wait for the flushed lines to reach memory
	mtctr	r8			; reload line count; r4 has counted back down to 0

	/* loop to invalidate the instruction cache over the same
	   range, walking r4 back up from 0 */
.L_sync_inval_loop:
	icbi	r3, r4
	addic	r4, r4, CACHE_LINE_SIZE
	bdnz	.L_sync_inval_loop

.L_sync_cache_done:
	sync			/* Finish physical writes */
	mtmsr	r6		/* Restore original translations */
	isync			/* Ensure data translations are on */
	blr

.L_sync_one_line:
	dcbf	0,r3		; flush the single data-cache line...
	sync			; ...wait for it to reach memory...
	icbi	0,r3		; ...then invalidate it in the i-cache
	b	.L_sync_cache_done
/*
 * extern void flush_dcache(vm_offset_t addr, unsigned count, boolean phys);
 *
 * flush_dcache takes a virtual or physical address and count to flush
 * and (can be called for multiple virtual pages).
 *
 * it flushes the data cache for the address range in question
 *
 * if 'phys' is non-zero then physical addresses will be used
 *
 * In:       r3 = address, r4 = byte count, r5 = phys flag
 * Scratch:  r6 (saved MSR), r7, r8, ctr
 */

ENTRY(flush_dcache, TAG_NO_FRAME_USED)

	/* optionally switch off data translations: only when 'phys' is
	   non-zero is MSR[DR] cleared (with FP/VEC forced off in the
	   saved copy first); r6 keeps the value restored on exit */

	cmpwi	r5, 0
	mfmsr	r6
	beq+	0f				; virtual address: leave MSR alone
	rlwinm	r6,r6,0,MSR_FP_BIT+1,MSR_FP_BIT-1	; Force floating point off
	rlwinm	r6,r6,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1	; Force vectors off
	rlwinm	r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1	; clear MSR[DR] in the working copy
	mtmsr	r7
	isync					; context-synchronize the MSR change
0:

	/* Check to see if the range is cache-line aligned; if not,
	   round r3 down and r4 up to whole lines. */
	add	r8, r3,r4
	andi.	r8,r8,(CACHE_LINE_SIZE-1)
	beq-	.L_flush_dcache_check
	addi	r4,r4,CACHE_LINE_SIZE
	li	r7,(CACHE_LINE_SIZE-1)	/* Align buffer & count - avoid overflow problems */
	andc	r4,r4,r7
	andc	r3,r3,r7

.L_flush_dcache_check:
	cmpwi	r4, CACHE_LINE_SIZE
	ble	.L_flush_dcache_one_line	; at most one line: short path

	/* Make ctr hold count of how many times we should loop */
	addi	r8, r4, (CACHE_LINE_SIZE-1)
	srwi	r8, r8, CACHE_LINE_POW2
	mtctr	r8

	/* flush one line per iteration; r4 walks down so dcbf hits
	   r3+r4-CACHE_LINE_SIZE ... r3+0 */
.L_flush_dcache_flush_loop:
	subic	r4, r4, CACHE_LINE_SIZE
	dcbf	r3, r4
	bdnz	.L_flush_dcache_flush_loop

.L_flush_dcache_done:
	/* Sync, then restore msr only if it was modified (phys != 0) */
	cmpwi	r5, 0
	sync			/* make sure invalidates have completed */
	beq+	0f		; MSR was never changed; skip the restore
	mtmsr	r6		/* Restore original translations */
	isync			/* Ensure data translations are on */
0:
	blr

.L_flush_dcache_one_line:
	xor	r4,r4,r4	; r4 is not read again; zeroed for tidiness
	dcbf	0,r3		; flush the single line
	b	.L_flush_dcache_done
/*
 * extern void invalidate_dcache(vm_offset_t va, unsigned count, boolean phys);
 *
 * invalidate_dcache takes a virtual or physical address and count to
 * invalidate and (can be called for multiple virtual pages).
 *
 * it invalidates the data cache for the address range in question
 *
 * NOTE: dcbi discards cached data WITHOUT writing dirty lines back;
 * any modified data in the range is lost (use flush_dcache to write
 * back instead).
 *
 * In:       r3 = address, r4 = byte count, r5 = phys flag
 * Scratch:  r6 (saved MSR), r7, r8, ctr
 */

ENTRY(invalidate_dcache, TAG_NO_FRAME_USED)

	/* optionally switch off data translations: only when 'phys' is
	   non-zero is MSR[DR] cleared (with FP/VEC forced off in the
	   saved copy first); r6 keeps the value restored on exit */

	cmpwi	r5, 0
	mfmsr	r6
	beq+	0f				; virtual address: leave MSR alone
	rlwinm	r6,r6,0,MSR_FP_BIT+1,MSR_FP_BIT-1	; Force floating point off
	rlwinm	r6,r6,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1	; Force vectors off
	rlwinm	r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1	; clear MSR[DR] in the working copy
	mtmsr	r7
	isync					; context-synchronize the MSR change
0:

	/* Check to see if the range is cache-line aligned; if not,
	   round r3 down and r4 up to whole lines. */
	add	r8, r3,r4
	andi.	r8,r8,(CACHE_LINE_SIZE-1)
	beq-	.L_invalidate_dcache_check
	addi	r4,r4,CACHE_LINE_SIZE
	li	r7,(CACHE_LINE_SIZE-1)	/* Align buffer & count - avoid overflow problems */
	andc	r4,r4,r7
	andc	r3,r3,r7

.L_invalidate_dcache_check:
	cmpwi	r4, CACHE_LINE_SIZE
	ble	.L_invalidate_dcache_one_line	; at most one line: short path

	/* Make ctr hold count of how many times we should loop */
	addi	r8, r4, (CACHE_LINE_SIZE-1)
	srwi	r8, r8, CACHE_LINE_POW2
	mtctr	r8

	/* invalidate one line per iteration; r4 walks down so dcbi hits
	   r3+r4-CACHE_LINE_SIZE ... r3+0 */
.L_invalidate_dcache_invalidate_loop:
	subic	r4, r4, CACHE_LINE_SIZE
	dcbi	r3, r4
	bdnz	.L_invalidate_dcache_invalidate_loop

.L_invalidate_dcache_done:
	/* Sync, then restore msr only if it was modified (phys != 0) */
	cmpwi	r5, 0
	sync			/* make sure invalidates have completed */
	beq+	0f		; MSR was never changed; skip the restore
	mtmsr	r6		/* Restore original translations */
	isync			/* Ensure data translations are on */
0:
	blr

.L_invalidate_dcache_one_line:
	xor	r4,r4,r4	; r4 is not read again; zeroed for tidiness
	dcbi	0,r3		; discard the single line
	b	.L_invalidate_dcache_done
/*
 * extern void invalidate_icache(vm_offset_t addr, unsigned cnt, boolean phys);
 *
 * invalidate_icache takes a virtual or physical address and
 * count to invalidate, (can be called for multiple virtual pages).
 *
 * it invalidates the instruction cache for the address range in question.
 *
 * if 'phys' is non-zero then physical addresses will be used
 *
 * In:       r3 = address, r4 = byte count, r5 = phys flag
 * Scratch:  r6 (saved MSR), r7, r8, ctr
 */

ENTRY(invalidate_icache, TAG_NO_FRAME_USED)

	/* optionally switch off data translations: only when 'phys' is
	   non-zero is MSR[DR] cleared (with FP/VEC forced off in the
	   saved copy first); r6 keeps the value restored on exit */
	cmpwi	r5, 0
	mfmsr	r6
	beq+	0f				; virtual address: leave MSR alone
	rlwinm	r6,r6,0,MSR_FP_BIT+1,MSR_FP_BIT-1	; Force floating point off
	rlwinm	r6,r6,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1	; Force vectors off
	rlwinm	r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1	; clear MSR[DR] in the working copy
	mtmsr	r7
	isync					; context-synchronize the MSR change
0:

	/* Check to see if the range is cache-line aligned; if not,
	   round r3 down and r4 up to whole lines. */
	add	r8, r3,r4
	andi.	r8,r8,(CACHE_LINE_SIZE-1)
	beq-	.L_invalidate_icache_check
	addi	r4,r4,CACHE_LINE_SIZE
	li	r7,(CACHE_LINE_SIZE-1)	/* Align buffer & count - avoid overflow problems */
	andc	r4,r4,r7
	andc	r3,r3,r7

.L_invalidate_icache_check:
	cmpwi	r4, CACHE_LINE_SIZE
	ble	.L_invalidate_icache_one_line	; at most one line: short path

	/* Make ctr hold count of how many times we should loop */
	addi	r8, r4, (CACHE_LINE_SIZE-1)
	srwi	r8, r8, CACHE_LINE_POW2
	mtctr	r8

	/* invalidate one line per iteration; r4 walks down so icbi hits
	   r3+r4-CACHE_LINE_SIZE ... r3+0 */
.L_invalidate_icache_invalidate_loop:
	subic	r4, r4, CACHE_LINE_SIZE
	icbi	r3, r4
	bdnz	.L_invalidate_icache_invalidate_loop

.L_invalidate_icache_done:
	/* Restore the MSR only if we actually modified it (phys != 0),
	   matching flush_dcache/invalidate_dcache; it was previously
	   rewritten unconditionally, which was redundant work on the
	   common virtual-address path.  isync is kept on BOTH paths so
	   the architectural icbi ... sync ... isync sequence that
	   discards prefetched instructions is preserved. */
	cmpwi	r5, 0
	sync			/* make sure invalidates have completed */
	beq+	0f		; MSR was never changed; skip the restore
	mtmsr	r6		/* Restore original translations */
0:
	isync			/* complete MSR write / discard prefetched instructions */
	blr

.L_invalidate_icache_one_line:
	xor	r4,r4,r4	; r4 is not read again; zeroed for tidiness
	icbi	0,r3		; invalidate the single line
	b	.L_invalidate_icache_done