/* (git-blame export header removed during cleanup; content attributed to commit 5ba3f43e) */
1 | /* |
2 | * Copyright (c) 2010 Apple Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
27 | */ | |
28 | ||
29 | #include <machine/asm.h> | |
30 | #include <arm/proc_reg.h> | |
31 | #include <arm/pmap.h> | |
32 | #include <sys/errno.h> | |
33 | #include "assym.s" | |
34 | ||
35 | ||
/*
 * void invalidate_mmu_cache(void)
 *
 * Invalidate the entire d-cache and i-cache in one operation.
 *
 * NOTE(review): CP15 c7, c7, 0 is the legacy ARMv6 "invalidate both
 * caches" encoding, which was removed in ARMv7 — confirm this entry
 * point is only reached on pre-v7 cores.
 *
 * Clobbers: r0.
 */
	.text
	.align 2
	.globl EXT(invalidate_mmu_cache)
LEXT(invalidate_mmu_cache)
	mov		r0, #0				// SBZ argument for the CP15 op
	mcr		p15, 0, r0, c7, c7, 0		// Invalidate caches (I+D, entire)
	bx		lr
48 | ||
/*
 * void invalidate_mmu_dcache(void)
 *
 * Invalidate the entire d-cache WITHOUT cleaning it first: any dirty
 * lines are discarded, so callers must guarantee no unwritten data is
 * still live in the cache.
 *
 * Clobbers: r0.
 */
	.text
	.align 2
	.globl EXT(invalidate_mmu_dcache)
LEXT(invalidate_mmu_dcache)
	mov		r0, #0				// SBZ argument for the CP15 op
	mcr		p15, 0, r0, c7, c6, 0		// Invalidate dcache (entire, no clean)
	bx		lr
61 | ||
/*
 * void invalidate_mmu_dcache_region(vm_offset_t va, unsigned length)
 *
 * Invalidate the d-cache lines covering [va, va+length).
 *
 * In:  r0 = va (any alignment), r1 = length in bytes
 * Clobbers: r0, r1, r2, flags.
 *
 * NOTE(review): the CP15 op used (c7, c14, 1) is the combined
 * clean-AND-invalidate-by-MVA encoding (the same op FlushPoC_DcacheRegion
 * uses), not a pure invalidate — presumably deliberate so dirty lines in
 * the region are not silently discarded; confirm against callers.
 * NOTE(review): the routine ends with isb but no dsb — confirm the data
 * barrier is supplied by callers.
 */
	.text
	.align 2
	.globl EXT(invalidate_mmu_dcache_region)
LEXT(invalidate_mmu_dcache_region)
	and		r2, r0, #((1<<MMU_CLINE)-1)	// r2 = va's offset within its cache line
	bic		r0, r0, #((1<<MMU_CLINE)-1)	// Align va down to a cache-line boundary
	add		r1, r1, r2			// Grow length to cover the partial first line
	sub		r1, r1, #1
	mov		r1, r1, LSR #MMU_CLINE		// r1 = (number of lines) - 1; loop runs r1+1 times
fmdr_loop:
	mcr		p15, 0, r0, c7, c14, 1		// Clean & invalidate dcache line by MVA
	add		r0, r0, #1<<MMU_CLINE		// Advance to next cache line
	subs		r1, r1, #1			// Decrement cache-line counter
	bpl		fmdr_loop			// Loop while lines remain (counter >= 0)
	isb
	bx		lr
83 | ||
/*
 * void InvalidatePoU_Icache(void)
 * void invalidate_mmu_icache(void)        (alias entry point)
 *
 * Invalidate the entire i-cache to the Point of Unification.
 *
 * Clobbers: r0.
 */
	.text
	.align 2
	.globl EXT(InvalidatePoU_Icache)
	.globl EXT(invalidate_mmu_icache)
LEXT(InvalidatePoU_Icache)
LEXT(invalidate_mmu_icache)
	mov		r0, #0				// SBZ argument for the CP15 op
	mcr		p15, 0, r0, c7, c5, 0		// Invalidate icache (entire)
	bx		lr
98 | ||
/*
 * void InvalidatePoU_IcacheRegion(vm_offset_t va, unsigned length)
 *
 * Invalidate the i-cache lines covering [va, va+length) to the
 * Point of Unification.
 *
 * In:  r0 = va (any alignment), r1 = length in bytes
 * Clobbers: r0, r1, r2, flags.
 *
 * NOTE(review): no dsb/isb before return — presumably callers insert the
 * required barriers before executing the invalidated range; confirm.
 */
	.text
	.align 2
	.globl EXT(InvalidatePoU_IcacheRegion)
LEXT(InvalidatePoU_IcacheRegion)
	and		r2, r0, #((1<<MMU_I_CLINE)-1)	// r2 = va's offset within its cache line
	bic		r0, r0, #((1<<MMU_I_CLINE)-1)	// Align va down to a cache-line boundary
	add		r1, r1, r2			// Grow length to cover the partial first line
	sub		r1, r1, #1
	mov		r1, r1, LSR #MMU_I_CLINE	// r1 = (number of lines) - 1; loop runs r1+1 times
fmir_loop:
	mcr		p15, 0, r0, c7, c5, 1		// Invalidate icache line by MVA to PoU
	add		r0, r0, #1<<MMU_I_CLINE		// Advance to next cache line
	subs		r1, r1, #1			// Decrement cache-line counter
	bpl		fmir_loop			// Loop while lines remain (counter >= 0)
	bx		lr
119 | ||
/*
 * void CleanPoC_Dcache(void)
 * void clean_mmu_dcache(void)             (alias entry point)
 *
 * Clean (write back, without invalidating) all d-cache lines to the
 * Point of Coherency, walking every set and way of L1 and, when
 * configured, L2.
 *
 * The L1 pass is skipped on write-through L1 caches (nothing dirty to
 * write back). Set/way register layout: set index at bit MMU_I7SET,
 * way index at bit MMU_I7WAY; overflow out of the set field advances
 * the way, and carry out of the way field terminates the walk.
 *
 * Clobbers: r0, flags.
 */
	.text
	.align 2
	.globl EXT(CleanPoC_Dcache)
	.globl EXT(clean_mmu_dcache)
LEXT(CleanPoC_Dcache)
LEXT(clean_mmu_dcache)
#if !defined(__ARM_L1_WT_CACHE__)
	mov		r0, #0				// Start at set 0, way 0, level bits 0 (L1)
clean_dcacheway:
clean_dcacheline:
	mcr		p15, 0, r0, c7, c10, 2		// clean dcache line by way/set
	add		r0, r0, #1 << MMU_I7SET		// increment set index
	tst		r0, #1 << (MMU_NSET + MMU_I7SET)	// look for overflow
	beq		clean_dcacheline
	bic		r0, r0, #1 << (MMU_NSET + MMU_I7SET)	// clear set overflow
	adds		r0, r0, #1 << MMU_I7WAY		// increment way
	bcc		clean_dcacheway			// loop until way field carries out
#endif
#if __ARM_L2CACHE__
	dsb						// L1 clean must complete before L2 walk
	mov		r0, #2				// Level field selects cache level 2
clean_l2dcacheway:
clean_l2dcacheline:
	mcr		p15, 0, r0, c7, c10, 2		// clean dcache line by way/set
	add		r0, r0, #1 << L2_I7SET		// increment set index
	tst		r0, #1 << (L2_NSET + L2_I7SET)	// look for overflow
	beq		clean_l2dcacheline
	bic		r0, r0, #1 << (L2_NSET + L2_I7SET)	// clear set overflow
	adds		r0, r0, #1 << L2_I7WAY		// increment way
	bcc		clean_l2dcacheway		// loop until way field carries out
#endif
	dsb						// Ensure all cleans complete before return
	bx		lr
158 | ||
/*
 * void CleanPoU_Dcache(void)
 *
 * Clean the d-cache to the Point of Unification by walking every
 * set and way of L1. The walk is skipped entirely when the cache is
 * write-through up to the PoU (nothing dirty to write back); only the
 * dsb remains.
 *
 * Clobbers: r0, flags.
 */
	.text
	.align 2
	.globl EXT(CleanPoU_Dcache)
LEXT(CleanPoU_Dcache)
#if !defined(__ARM_PoU_WT_CACHE__)
	mov		r0, #0				// Start at set 0, way 0 (L1)
clean_dcacheway_idle:
clean_dcacheline_idle:
	mcr		p15, 0, r0, c7, c10, 2		// clean dcache line by way/set
	add		r0, r0, #1 << MMU_I7SET		// increment set index
	tst		r0, #1 << (MMU_NSET + MMU_I7SET)	// look for overflow
	beq		clean_dcacheline_idle
	bic		r0, r0, #1 << (MMU_NSET + MMU_I7SET)	// clear set overflow
	adds		r0, r0, #1 << MMU_I7WAY		// increment way
	bcc		clean_dcacheway_idle		// loop until way field carries out
#endif
	dsb						// Ensure cleans complete before return
	bx		lr
182 | ||
/*
 * void CleanPoU_DcacheRegion(vm_offset_t va, unsigned length)
 *
 * Clean (write back) the d-cache lines covering [va, va+length) to the
 * Point of Unification. Reduced to a bare dsb when the cache is
 * write-through up to the PoU.
 *
 * In:  r0 = va (any alignment), r1 = length in bytes
 * Clobbers: r0, r1, r2, flags.
 */
	.text
	.align 2
	.globl EXT(CleanPoU_DcacheRegion)
LEXT(CleanPoU_DcacheRegion)
#if !defined(__ARM_PoU_WT_CACHE__)

	and		r2, r0, #((1<<MMU_CLINE)-1)	// r2 = va's offset within its cache line
	bic		r0, r0, #((1<<MMU_CLINE)-1)	// Align va down to a cache-line boundary
	add		r1, r1, r2			// Grow length to cover the partial first line
	sub		r1, r1, #1
	mov		r1, r1, LSR #MMU_CLINE		// r1 = (number of lines) - 1; loop runs r1+1 times
cudr_loop:
	mcr		p15, 0, r0, c7, c11, 1		// Clean dcache line by MVA to PoU
	add		r0, r0, #1<<MMU_CLINE		// Advance to next cache line
	subs		r1, r1, #1			// Decrement cache-line counter
	bpl		cudr_loop			// Loop while lines remain (counter >= 0)

#endif
	dsb						// Ensure cleans complete before return
	bx		lr
208 | ||
/*
 * void CleanPoC_DcacheRegion(vm_offset_t va, unsigned length)
 * void CleanPoC_DcacheRegion_Force(vm_offset_t va, unsigned length)
 *
 * Clean (write back) the d-cache lines covering [va, va+length) to the
 * Point of Coherency. On this implementation the "_Force" variant is
 * the same code path — both labels resolve to the same entry.
 *
 * In:  r0 = va (any alignment), r1 = length in bytes
 * Clobbers: r0, r1, r2, flags.
 */
	.text
	.align 2
	.globl EXT(CleanPoC_DcacheRegion)
	.globl EXT(CleanPoC_DcacheRegion_Force)
LEXT(CleanPoC_DcacheRegion)
LEXT(CleanPoC_DcacheRegion_Force)
	and		r2, r0, #((1<<MMU_CLINE)-1)	// r2 = va's offset within its cache line
	bic		r0, r0, #((1<<MMU_CLINE)-1)	// Align va down to a cache-line boundary
	add		r1, r1, r2			// Grow length to cover the partial first line
	sub		r1, r1, #1
	mov		r1, r1, LSR #MMU_CLINE		// r1 = (number of lines) - 1; loop runs r1+1 times
ccdr_loop:
	mcr		p15, 0, r0, c7, c10, 1		// Clean dcache line by MVA to PoC
	add		r0, r0, #1<<MMU_CLINE		// Advance to next cache line
	subs		r1, r1, #1			// Decrement cache-line counter
	bpl		ccdr_loop			// Loop while lines remain (counter >= 0)
	dsb						// Ensure cleans complete before return
	bx		lr
232 | ||
/*
 * void FlushPoC_Dcache(void)
 *
 * Clean AND invalidate all d-cache lines to the Point of Coherency,
 * walking every set and way of L1 and, when configured, L2. After
 * return the d-cache holds no valid lines for previously cached data.
 *
 * Set/way register layout mirrors CleanPoC_Dcache: set index at bit
 * MMU_I7SET / L2_I7SET, way index at bit MMU_I7WAY / L2_I7WAY; carry
 * out of the way field terminates each walk.
 *
 * Clobbers: r0, flags.
 */
	.text
	.align 2
	.globl EXT(FlushPoC_Dcache)
LEXT(FlushPoC_Dcache)
	mov		r0, #0				// Start at set 0, way 0, level bits 0 (L1)
cleanflush_dcacheway:
cleanflush_dcacheline:
	mcr		p15, 0, r0, c7, c14, 2		// cleanflush dcache line by way/set
	add		r0, r0, #1 << MMU_I7SET		// increment set index
	tst		r0, #1 << (MMU_NSET + MMU_I7SET)	// look for overflow
	beq		cleanflush_dcacheline
	bic		r0, r0, #1 << (MMU_NSET + MMU_I7SET)	// clear set overflow
	adds		r0, r0, #1 << MMU_I7WAY		// increment way
	bcc		cleanflush_dcacheway		// loop until way field carries out
#if __ARM_L2CACHE__
	dsb						// L1 flush must complete before L2 walk
	mov		r0, #2				// Level field selects cache level 2
cleanflush_l2dcacheway:
cleanflush_l2dcacheline:
	mcr		p15, 0, r0, c7, c14, 2		// cleanflush dcache line by way/set
	add		r0, r0, #1 << L2_I7SET		// increment set index
	tst		r0, #1 << (L2_NSET + L2_I7SET)	// look for overflow
	beq		cleanflush_l2dcacheline
	bic		r0, r0, #1 << (L2_NSET + L2_I7SET)	// clear set overflow
	adds		r0, r0, #1 << L2_I7WAY		// increment way
	bcc		cleanflush_l2dcacheway		// loop until way field carries out
#endif
	dsb						// Ensure all maintenance completes before return
	bx		lr
267 | ||
/*
 * void FlushPoU_Dcache(void)
 *
 * Flush (clean and invalidate) the d-cache to the Point of Unification
 * by walking every set and way of L1.
 *
 * NOTE(review): set/way maintenance operates on a specific cache level
 * rather than "to PoU"; flushing L1 only is presumably how this target
 * reaches the PoU — confirm for configurations where L2 sits below the
 * PoU.
 *
 * Clobbers: r0, flags.
 */
	.text
	.align 2
	.globl EXT(FlushPoU_Dcache)
LEXT(FlushPoU_Dcache)
	mov		r0, #0				// Start at set 0, way 0 (L1)
fpud_way:
fpud_line:
	mcr		p15, 0, r0, c7, c14, 2		// cleanflush dcache line by way/set
	add		r0, r0, #1 << MMU_I7SET		// increment set index
	tst		r0, #1 << (MMU_NSET + MMU_I7SET)	// look for overflow
	beq		fpud_line
	bic		r0, r0, #1 << (MMU_NSET + MMU_I7SET)	// clear set overflow
	adds		r0, r0, #1 << MMU_I7WAY		// increment way
	bcc		fpud_way			// loop until way field carries out
	dsb						// Ensure maintenance completes before return
	bx		lr
289 | ||
/*
 * void FlushPoC_DcacheRegion(vm_offset_t va, unsigned length)
 *
 * Clean AND invalidate the d-cache lines covering [va, va+length) to
 * the Point of Coherency.
 *
 * In:  r0 = va (any alignment), r1 = length in bytes
 * Clobbers: r0, r1, r2, flags.
 */
	.text
	.align 2
	.globl EXT(FlushPoC_DcacheRegion)
LEXT(FlushPoC_DcacheRegion)
	and		r2, r0, #((1<<MMU_CLINE)-1)	// r2 = va's offset within its cache line
	bic		r0, r0, #((1<<MMU_CLINE)-1)	// Align va down to a cache-line boundary
	add		r1, r1, r2			// Grow length to cover the partial first line
	sub		r1, r1, #1
	mov		r1, r1, LSR #MMU_CLINE		// r1 = (number of lines) - 1; loop runs r1+1 times
cfmdr_loop:
	mcr		p15, 0, r0, c7, c14, 1		// Clean & invalidate dcache line by MVA
	add		r0, r0, #1<<MMU_CLINE		// Advance to next cache line
	subs		r1, r1, #1			// Decrement cache-line counter
	bpl		cfmdr_loop			// Loop while lines remain (counter >= 0)
	dsb						// Ensure maintenance completes before return
	bx		lr
311 | ||
/*
 * void flush_dcache64(addr64_t addr, unsigned length, boolean_t phys)
 *
 * 64-bit-address shim: per the 32-bit AAPCS the addr64_t occupies
 * r0:r1, so length arrives in r2 and phys in r3. Shuffle them into the
 * (vm_offset_t, unsigned, boolean_t) convention — r0 = low 32 bits of
 * addr (high word in r1 is discarded), r1 = length, r2 = phys — then
 * tail-jump to flush_dcache via an address literal.
 */
	.text
	.align 2
	.globl EXT(flush_dcache64)
LEXT(flush_dcache64)
	mov		r1, r2				// r1 = length (drops addr high word)
	mov		r2, r3				// r2 = phys
	LOAD_ADDR_PC(flush_dcache)			// Tail-call flush_dcache(addr, length, phys)
322 | ||
/*
 * void clean_dcache64(addr64_t addr, unsigned length, boolean_t phys)
 *
 * 64-bit-address shim: per the 32-bit AAPCS the addr64_t occupies
 * r0:r1, so length arrives in r2 and phys in r3. Shuffle them into the
 * (vm_offset_t, unsigned, boolean_t) convention — r0 = low 32 bits of
 * addr (high word in r1 is discarded), r1 = length, r2 = phys — then
 * tail-jump to clean_dcache via an address literal.
 */
	.text
	.align 2
	.globl EXT(clean_dcache64)
LEXT(clean_dcache64)
	mov		r1, r2				// r1 = length (drops addr high word)
	mov		r2, r3				// r2 = phys
	LOAD_ADDR_PC(clean_dcache)			// Tail-call clean_dcache(addr, length, phys)
333 | ||
/*
 * void invalidate_icache(vm_offset_t va, unsigned length, boolean_t phys)
 * void invalidate_icache64(addr64_t va, unsigned length, boolean_t phys)
 *
 * Invalidate the i-cache for [va, va+length). The 64-bit entry point
 * re-shuffles its AAPCS registers (addr64_t in r0:r1 pushes length to
 * r2 and phys to r3; the address high word is discarded) and falls
 * through to the 32-bit entry.
 *
 * If phys (r2) is zero the address is already virtual and we branch
 * straight to InvalidatePoU_IcacheRegion. Otherwise translate the
 * physical address into the kernel's static mapping
 * (va = pa - gPhysBase + gVirtBase) before the tail-call.
 *
 * Clobbers: r0, r1, r2, flags (plus whatever InvalidatePoU_IcacheRegion clobbers).
 */
	.text
	.align 2
	.globl EXT(invalidate_icache64)
	.globl EXT(invalidate_icache)
LEXT(invalidate_icache64)
	mov		r1, r2				// r1 = length (drops addr high word)
	mov		r2, r3				// r2 = phys
LEXT(invalidate_icache)
	cmp		r2, #0				// Is it physical?
	COND_EXTERN_BEQ(InvalidatePoU_IcacheRegion)	// No: invalidate virtual range directly
	LOAD_ADDR(r2, gPhysBase)
	ldr		r2, [r2]
	sub		r0, r0, r2			// r0 = pa - gPhysBase
	LOAD_ADDR(r2, gVirtBase)
	ldr		r2, [r2]
	add		r0, r0, r2			// r0 = va in the kernel static mapping
	b		EXT(InvalidatePoU_IcacheRegion)	// Tail-call with translated address
355 | ||
356 | ||
#include "globals_asm.h"

/*
 * Emit the address-literal definitions consumed by the LOAD_ADDR_PC
 * tail-calls above (presumably PC-relative literal pools — see the
 * LOAD_ADDR* macro definitions for the exact mechanism).
 */
LOAD_ADDR_GEN_DEF(flush_dcache)
LOAD_ADDR_GEN_DEF(clean_dcache)
361 | ||
362 | /* vim: set ts=4: */ |