]> git.saurik.com Git - apple/xnu.git/blob - osfmk/ppc/cache.s
xnu-123.5.tar.gz
[apple/xnu.git] / osfmk / ppc / cache.s
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25
26 #include <cpus.h>
27
28 #include <ppc/asm.h>
29 #include <ppc/proc_reg.h>
30 #include <cpus.h>
31 #include <assym.s>
32 #include <mach_debug.h>
33 #include <mach/ppc/vm_param.h>
34
35 /*
36 * extern void sync_cache(vm_offset_t pa, unsigned count);
37 *
38 * sync_cache takes a physical address and count to sync, thus
39 * must not be called for multiple virtual pages.
40 *
41 * it writes out the data cache and invalidates the instruction
42 * cache for the address range in question
43 */
44
45 ENTRY(sync_cache, TAG_NO_FRAME_USED)
46 
47 /* Switch off data translations -- r3 is a physical address, so the cache ops below must run with MSR[DR] clear */
48 mfmsr r6 /* r6 = caller's MSR, restored at exit */
49 rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1 /* wrap-around mask keeps every bit except DR */
50 mtmsr r7
51 isync /* context-synchronize the MSR change */
52 
53 /* Check to see if the address is aligned. NOTE(review): this tests (pa+count) mod CACHE_LINE_SIZE, not pa itself -- confirm that is the intended alignment predicate */
54 add r8, r3,r4
55 andi. r8,r8,(CACHE_LINE_SIZE-1)
56 beq- .L_sync_check /* line-aligned: skip the fixup */
57 addi r4,r4,CACHE_LINE_SIZE /* pad count so the rounding below cannot drop the trailing partial line */
58 li r7,(CACHE_LINE_SIZE-1) /* Align buffer & count - avoid overflow problems */
59 andc r4,r4,r7 /* round count down to a line multiple */
60 andc r3,r3,r7 /* round pa down to a line boundary */
61 
62 .L_sync_check:
63 cmpwi r4, CACHE_LINE_SIZE
64 ble .L_sync_one_line /* at most one line: take the short path */
65 
66 /* Make ctr hold count of how many times we should loop: ceil(count / CACHE_LINE_SIZE) */
67 addi r8, r4, (CACHE_LINE_SIZE-1)
68 srwi r8, r8, CACHE_LINE_POW2
69 mtctr r8
70 
71 /* loop to flush the data cache -- r4 walks line offsets from the top of the range downward */
72 .L_sync_data_loop:
73 subic r4, r4, CACHE_LINE_SIZE /* r4 = offset of next line to push out */
74 dcbst r3, r4 /* write dirty line at r3+r4 back to memory (no invalidate) */
75 bdnz .L_sync_data_loop
76 
77 sync /* all dcbst stores must reach memory before any icbi */
78 mtctr r8 /* reuse the same iteration count for the icache pass */
79 
80 /* loop to invalidate the instruction cache -- r4 re-walks the same offsets, this time ascending */
81 .L_sync_inval_loop:
82 icbi r3, r4 /* discard any stale icache copy of line r3+r4 */
83 addic r4, r4, CACHE_LINE_SIZE
84 bdnz .L_sync_inval_loop
85 
86 .L_sync_cache_done:
87 sync /* Finish physical writes */
88 mtmsr r6 /* Restore original translations */
89 isync /* Ensure data translations are on */
90 blr
91 
92 .L_sync_one_line:
93 dcbst 0,r3 /* single line: push to memory, order with sync, then kill icache copy */
94 sync
95 icbi 0,r3
96 b .L_sync_cache_done
97
98 /*
99 * extern void flush_dcache(vm_offset_t addr, unsigned count, boolean phys);
100 *
101 * flush_dcache takes a virtual or physical address and count to flush
102 * (it can be called for multiple virtual pages).
103 *
104 * it flushes the data cache
105 * for the address range in question
106 *
107 * if 'phys' is non-zero then physical addresses will be used
108 */
109
110 ENTRY(flush_dcache, TAG_NO_FRAME_USED)
111 
112 /* optionally switch off data translations -- only when 'phys' (r5) is non-zero */
113 
114 cmpwi r5, 0
115 mfmsr r6 /* r6 = caller's MSR, restored at exit if we modify it */
116 beq+ 0f /* virtual address: leave data translation on */
117 rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1 /* wrap-around mask keeps every bit except DR */
118 mtmsr r7
119 isync /* context-synchronize the MSR change */
120 0:
121 
122 /* Check to see if the address is aligned. NOTE(review): this tests (addr+count) mod CACHE_LINE_SIZE, not addr itself -- confirm that is the intended alignment predicate */
123 add r8, r3,r4
124 andi. r8,r8,(CACHE_LINE_SIZE-1)
125 beq- .L_flush_dcache_check /* line-aligned: skip the fixup */
126 addi r4,r4,CACHE_LINE_SIZE /* pad count so the rounding below cannot drop the trailing partial line */
127 li r7,(CACHE_LINE_SIZE-1) /* Align buffer & count - avoid overflow problems */
128 andc r4,r4,r7 /* round count down to a line multiple */
129 andc r3,r3,r7 /* round addr down to a line boundary */
130 
131 .L_flush_dcache_check:
132 cmpwi r4, CACHE_LINE_SIZE
133 ble .L_flush_dcache_one_line /* at most one line: take the short path */
134 
135 /* Make ctr hold count of how many times we should loop: ceil(count / CACHE_LINE_SIZE) */
136 addi r8, r4, (CACHE_LINE_SIZE-1)
137 srwi r8, r8, CACHE_LINE_POW2
138 mtctr r8
139 
140 .L_flush_dcache_flush_loop: /* r4 walks line offsets from the top of the range downward */
141 subic r4, r4, CACHE_LINE_SIZE /* r4 = offset of next line to flush */
142 dcbf r3, r4 /* write back (if dirty) and invalidate line at r3+r4 */
143 bdnz .L_flush_dcache_flush_loop
144 
145 .L_flush_dcache_done:
146 /* Sync restore msr if it was modified */
147 cmpwi r5, 0
148 sync /* make sure invalidates have completed */
149 beq+ 0f /* translation was never turned off: nothing to restore */
150 mtmsr r6 /* Restore original translations */
151 isync /* Ensure data translations are on */
152 0:
153 blr
154 
155 .L_flush_dcache_one_line:
156 xor r4,r4,r4 /* NOTE(review): r4 is never read again on this path -- this zeroing looks redundant; confirm before removing */
157 dcbf 0,r3
158 b .L_flush_dcache_done
159
160
161 /*
162 * extern void invalidate_dcache(vm_offset_t va, unsigned count, boolean phys);
163 *
164 * invalidate_dcache takes a virtual or physical address and count to
165 * invalidate (it can be called for multiple virtual pages).
166 *
167 * it invalidates the data cache for the address range in question
168 */
169
170 ENTRY(invalidate_dcache, TAG_NO_FRAME_USED)
171 
172 /* optionally switch off data translations -- only when 'phys' (r5) is non-zero */
173 
174 cmpwi r5, 0
175 mfmsr r6 /* r6 = caller's MSR, restored at exit if we modify it */
176 beq+ 0f /* virtual address: leave data translation on */
177 rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1 /* wrap-around mask keeps every bit except DR */
178 mtmsr r7
179 isync /* context-synchronize the MSR change */
180 0:
181 
182 /* Check to see if the address is aligned. NOTE(review): this tests (addr+count) mod CACHE_LINE_SIZE, not addr itself -- confirm that is the intended alignment predicate */
183 add r8, r3,r4
184 andi. r8,r8,(CACHE_LINE_SIZE-1)
185 beq- .L_invalidate_dcache_check /* line-aligned: skip the fixup */
186 addi r4,r4,CACHE_LINE_SIZE /* pad count so the rounding below cannot drop the trailing partial line */
187 li r7,(CACHE_LINE_SIZE-1) /* Align buffer & count - avoid overflow problems */
188 andc r4,r4,r7 /* round count down to a line multiple */
189 andc r3,r3,r7 /* round addr down to a line boundary */
190 
191 .L_invalidate_dcache_check:
192 cmpwi r4, CACHE_LINE_SIZE
193 ble .L_invalidate_dcache_one_line /* at most one line: take the short path */
194 
195 /* Make ctr hold count of how many times we should loop: ceil(count / CACHE_LINE_SIZE) */
196 addi r8, r4, (CACHE_LINE_SIZE-1)
197 srwi r8, r8, CACHE_LINE_POW2
198 mtctr r8
199 
200 .L_invalidate_dcache_invalidate_loop: /* r4 walks line offsets from the top of the range downward */
201 subic r4, r4, CACHE_LINE_SIZE /* r4 = offset of next line to invalidate */
202 dcbi r3, r4 /* discard line at r3+r4 WITHOUT writing it back -- dirty data is lost by design */
203 dcbi r3, r4 /* NOTE(review): repeated dcbi (also in the one-line path below); idempotent and therefore harmless, but confirm whether it is an errata workaround before removing */
204 bdnz .L_invalidate_dcache_invalidate_loop
205 
206 .L_invalidate_dcache_done:
207 /* Sync restore msr if it was modified */
208 cmpwi r5, 0
209 sync /* make sure invalidates have completed */
210 beq+ 0f /* translation was never turned off: nothing to restore */
211 mtmsr r6 /* Restore original translations */
212 isync /* Ensure data translations are on */
213 0:
214 blr
215 
216 .L_invalidate_dcache_one_line:
217 xor r4,r4,r4 /* NOTE(review): r4 is never read again on this path -- this zeroing looks redundant; confirm before removing */
218 dcbi 0,r3
219 dcbi 0,r3 /* NOTE(review): duplicated, matching the loop above -- see note there */
220 b .L_invalidate_dcache_done
221
222 /*
223 * extern void invalidate_icache(vm_offset_t addr, unsigned cnt, boolean phys);
224 *
225 * invalidate_icache takes a virtual or physical address and
226 * count to invalidate (it can be called for multiple virtual pages).
227 *
228 * it invalidates the instruction cache for the address range in question.
229 */
230
231 ENTRY(invalidate_icache, TAG_NO_FRAME_USED)
232 
233 /* optionally switch off data translations -- only when 'phys' (r5) is non-zero */
234 cmpwi r5, 0
235 mfmsr r6 /* r6 = caller's MSR, restored at exit if we modify it */
236 beq+ 0f /* virtual address: leave data translation on */
237 rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1 /* wrap-around mask keeps every bit except DR */
238 mtmsr r7
239 isync /* context-synchronize the MSR change */
240 0:
241 
242 /* Check to see if the address is aligned. NOTE(review): this tests (addr+count) mod CACHE_LINE_SIZE, not addr itself -- confirm that is the intended alignment predicate */
243 add r8, r3,r4
244 andi. r8,r8,(CACHE_LINE_SIZE-1)
245 beq- .L_invalidate_icache_check /* line-aligned: skip the fixup */
246 addi r4,r4,CACHE_LINE_SIZE /* pad count so the rounding below cannot drop the trailing partial line */
247 li r7,(CACHE_LINE_SIZE-1) /* Align buffer & count - avoid overflow problems */
248 andc r4,r4,r7 /* round count down to a line multiple */
249 andc r3,r3,r7 /* round addr down to a line boundary */
250 
251 .L_invalidate_icache_check:
252 cmpwi r4, CACHE_LINE_SIZE
253 ble .L_invalidate_icache_one_line /* at most one line: take the short path */
254 
255 /* Make ctr hold count of how many times we should loop: ceil(count / CACHE_LINE_SIZE) */
256 addi r8, r4, (CACHE_LINE_SIZE-1)
257 srwi r8, r8, CACHE_LINE_POW2
258 mtctr r8
259 
260 .L_invalidate_icache_invalidate_loop: /* r4 walks line offsets from the top of the range downward */
261 subic r4, r4, CACHE_LINE_SIZE /* r4 = offset of next line to invalidate */
262 icbi r3, r4 /* discard any icache copy of line r3+r4 */
263 icbi r3, r4 /* NOTE(review): repeated icbi (also in the one-line path below); idempotent and therefore harmless, but confirm whether it is an errata workaround before removing */
264 bdnz .L_invalidate_icache_invalidate_loop
265 
266 .L_invalidate_icache_done:
267 /* Sync restore msr if it was modified */
268 cmpwi r5, 0
269 sync /* make sure invalidates have completed */
270 beq+ 0f /* translation was never turned off: nothing to restore */
271 mtmsr r6 /* Restore original translations */
272 isync /* Ensure data translations are on */
273 0:
274 blr
275 
276 .L_invalidate_icache_one_line:
277 xor r4,r4,r4 /* NOTE(review): r4 is never read again on this path -- this zeroing looks redundant; confirm before removing */
278 icbi 0,r3
279 icbi 0,r3 /* NOTE(review): duplicated, matching the loop above -- see note there */
280 b .L_invalidate_icache_done