/*
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/asm.h>
#include <arm/proc_reg.h>
#include <arm/pmap.h>
#include <sys/errno.h>
#include "assym.s"
#include "caches_macros.s"

/*
 * void invalidate_mmu_cache(void)
 *
 * Invalidate d-cache and i-cache
 */
    .text
    .align 2
    .globl EXT(invalidate_mmu_cache)
LEXT(invalidate_mmu_cache)
    mov     r0, #0
    dsb
    mcr     p15, 0, r0, c7, c7, 0       // Invalidate both I and D caches
    dsb
    isb
    bx      lr

/*
 * void invalidate_mmu_dcache(void)
 *
 * Invalidate d-cache
 */
    .text
    .align 2
    .globl EXT(invalidate_mmu_dcache)
LEXT(invalidate_mmu_dcache)
    mov     r0, #0
    dsb
    mcr     p15, 0, r0, c7, c6, 0       // Invalidate dcache
    dsb
    bx      lr

/*
 * void invalidate_mmu_dcache_region(vm_offset_t va, unsigned length)
 *
 * Invalidate d-cache region
 */
    .text
    .align 2
    .globl EXT(invalidate_mmu_dcache_region)
LEXT(invalidate_mmu_dcache_region)
    and     r2, r0, #((1<<MMU_CLINE)-1)
    bic     r0, r0, #((1<<MMU_CLINE)-1) // Cache-line align the address
    add     r1, r1, r2
    sub     r1, r1, #1
    mov     r1, r1, LSR #MMU_CLINE      // Set cache line counter
    dsb
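    /*
     * At this point r1 holds (number of cache lines spanned - 1), i.e.
     * ((va & (linesize-1)) + length - 1) >> MMU_CLINE, so the subs/bpl
     * loop below runs once per line touched by [va, va + length),
     * including partially covered lines at either end.
     */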
fmdr_loop:
    mcr     p15, 0, r0, c7, c14, 1      // Clean & invalidate dcache line by MVA
    add     r0, r0, #1<<MMU_CLINE       // Get next cache-aligned addr
    subs    r1, r1, #1                  // Decrement cache line counter
    bpl     fmdr_loop                   // Loop while counter is non-negative
    dsb
    bx      lr

/*
 * void InvalidatePoU_Icache(void)
 *
 * Invalidate i-cache
 */
    .text
    .align 2
    .globl EXT(InvalidatePoU_Icache)
    .globl EXT(invalidate_mmu_icache)
LEXT(InvalidatePoU_Icache)
LEXT(invalidate_mmu_icache)
    mov     r0, #0
    dsb
    mcr     p15, 0, r0, c7, c5, 0       // Invalidate icache
    dsb
    isb
    bx      lr

/*
 * void InvalidatePoU_IcacheRegion(vm_offset_t va, unsigned length)
 *
 * Invalidate icache region
 */
    .text
    .align 2
    .globl EXT(InvalidatePoU_IcacheRegion)
LEXT(InvalidatePoU_IcacheRegion)
    push    {r7,lr}
    mov     r7, sp
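    /*
     * Clean the region to the PoU first so the I-cache refill below
     * observes the newly written instructions.  CleanPoU_DcacheRegion
     * works in r2/r3/r12 and leaves the va/length arguments in r0/r1
     * intact across the call.
     */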
    bl      EXT(CleanPoU_DcacheRegion)
    and     r2, r0, #((1<<MMU_I_CLINE)-1)
    bic     r0, r0, #((1<<MMU_I_CLINE)-1)   // Cache-line align the address
    add     r1, r1, r2
    sub     r1, r1, #1
    mov     r1, r1, LSR #MMU_I_CLINE    // Set cache line counter
fmir_loop:
    mcr     p15, 0, r0, c7, c5, 1       // Invalidate icache line
    add     r0, r0, #1<<MMU_I_CLINE     // Get next cache-aligned addr
    subs    r1, r1, #1                  // Decrement cache line counter
    bpl     fmir_loop                   // Loop while counter is non-negative
    dsb
    isb
    pop     {r7,pc}

/*
 * void CleanPoC_Dcache(void)
 *
 * Clean all d-caches
 */
    .text
    .align 2
    .globl EXT(CleanPoC_Dcache)
    .globl EXT(clean_mmu_dcache)
LEXT(CleanPoC_Dcache)
LEXT(clean_mmu_dcache)
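/*
 * When the L1 cache is write-through (__ARM_L1_WT_CACHE__) it never
 * holds dirty lines, so there is nothing to clean at L1 and the
 * set/way walk below is compiled out.
 */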
#if !defined(__ARM_L1_WT_CACHE__)
    mov     r0, #0                      // Select the L1 cache for GET_CACHE_CONFIG
    GET_CACHE_CONFIG r0, r1, r2, r3
    mov     r0, #0
    dsb
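    /*
     * Set/way walk.  Inferred from how the registers are consumed
     * below, GET_CACHE_CONFIG (caches_macros.s) leaves the set-index
     * increment in r1, the set-field overflow mask in r2, and the
     * way-field increment in r3.  The inner loop steps r0 through
     * every set; on set overflow the set bits are cleared and the way
     * field is bumped, and a carry out of the way field ends the
     * walk.  Every set/way loop in this file follows this pattern.
     */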
clean_dcacheway:
clean_dcacheline:
    mcr     p15, 0, r0, c7, c10, 2      // clean dcache line by way/set
    add     r0, r0, r1                  // increment set index
    tst     r0, r2                      // look for overflow
    beq     clean_dcacheline
    bic     r0, r0, r2                  // clear set overflow
    adds    r0, r0, r3                  // increment way
    bcc     clean_dcacheway             // loop
#endif
    HAS_L2_CACHE r0
    cmp     r0, #0
    beq     clean_skipl2dcache
    mov     r0, #1                      // Select the L2 cache for GET_CACHE_CONFIG
    GET_CACHE_CONFIG r0, r1, r2, r3
    dsb
    mov     r0, #2                      // Level field = 1 (L2) in the set/way operand
clean_l2dcacheway:
clean_l2dcacheline:
    mcr     p15, 0, r0, c7, c10, 2      // clean dcache line by way/set
    add     r0, r0, r1                  // increment set index
    tst     r0, r2                      // look for overflow
    beq     clean_l2dcacheline
    bic     r0, r0, r2                  // clear set overflow
    adds    r0, r0, r3                  // increment way
    bcc     clean_l2dcacheway           // loop
clean_skipl2dcache:
    dsb
    bx      lr

/*
 * void CleanPoU_Dcache(void)
 *
 * Clean D-cache to Point of Unification
 */
    .text
    .align 2
    .globl EXT(CleanPoU_Dcache)
LEXT(CleanPoU_Dcache)
#if !defined(__ARM_PoU_WT_CACHE__)
    mov     r0, #0
    GET_CACHE_CONFIG r0, r1, r2, r3
    mov     r0, #0
    dsb
clean_dcacheway_idle:
clean_dcacheline_idle:
    mcr     p15, 0, r0, c7, c10, 2      // clean dcache line by way/set
    add     r0, r0, r1                  // increment set index
    tst     r0, r2                      // look for overflow
    beq     clean_dcacheline_idle
    bic     r0, r0, r2                  // clear set overflow
    adds    r0, r0, r3                  // increment way
    bcc     clean_dcacheway_idle        // loop
#endif
    dsb
    bx      lr

/*
 * void CleanPoU_DcacheRegion(vm_offset_t va, unsigned length)
 *
 * Clean d-cache region to Point of Unification
 */
    .text
    .align 2
    .globl EXT(CleanPoU_DcacheRegion)
LEXT(CleanPoU_DcacheRegion)
#if !defined(__ARM_PoU_WT_CACHE__)

    and     r2, r0, #((1<<MMU_CLINE)-1)
    bic     r3, r0, #((1<<MMU_CLINE)-1) // Cache-line align the address; r0/r1 stay intact for the caller
    add     r12, r1, r2
    sub     r12, r12, #1
    mov     r12, r12, LSR #MMU_CLINE    // Set cache line counter
    dsb
cudr_loop:
    mcr     p15, 0, r3, c7, c11, 1      // Clean dcache line to PoU
    add     r3, r3, #1<<MMU_CLINE       // Get next cache-aligned addr
    subs    r12, r12, #1                // Decrement cache line counter
    bpl     cudr_loop                   // Loop while counter is non-negative

#endif
    dsb
    bx      lr

/*
 * void CleanPoC_DcacheRegion(vm_offset_t va, size_t length)
 *
 * Clean d-cache region to Point of Coherency
 */
    .text
    .align 2
    .globl EXT(CleanPoC_DcacheRegion)
    .globl EXT(CleanPoC_DcacheRegion_Force)
LEXT(CleanPoC_DcacheRegion)
LEXT(CleanPoC_DcacheRegion_Force)       // Both entry points share this body
    and     r2, r0, #((1<<MMU_CLINE)-1)
    bic     r0, r0, #((1<<MMU_CLINE)-1) // Cache-line align the address
    add     r1, r1, r2
    sub     r1, r1, #1
    mov     r1, r1, LSR #MMU_CLINE      // Set cache line counter
ccdr_loop:
    mcr     p15, 0, r0, c7, c10, 1      // Clean dcache line to PoC
    add     r0, r0, #1<<MMU_CLINE       // Get next cache-aligned addr
    subs    r1, r1, #1                  // Decrement cache line counter
    bpl     ccdr_loop                   // Loop while counter is non-negative
    dsb
    bx      lr

/*
 * void FlushPoC_Dcache(void)
 *
 * Clean and Invalidate dcaches to Point of Coherency
 */
    .text
    .align 2
    .globl EXT(FlushPoC_Dcache)
LEXT(FlushPoC_Dcache)
    mov     r0, #0                      // Select the L1 cache for GET_CACHE_CONFIG
    GET_CACHE_CONFIG r0, r1, r2, r3
    mov     r0, #0
    dsb
cleanflush_dcacheway:
cleanflush_dcacheline:
    mcr     p15, 0, r0, c7, c14, 2      // cleanflush dcache line by way/set
    add     r0, r0, r1                  // increment set index
    tst     r0, r2                      // look for overflow
    beq     cleanflush_dcacheline
    bic     r0, r0, r2                  // clear set overflow
    adds    r0, r0, r3                  // increment way
    bcc     cleanflush_dcacheway        // loop
    HAS_L2_CACHE r0
    cmp     r0, #0
    beq     cleanflush_skipl2dcache
    mov     r0, #1                      // Select the L2 cache for GET_CACHE_CONFIG
    GET_CACHE_CONFIG r0, r1, r2, r3
    dsb
    mov     r0, #2                      // Level field = 1 (L2) in the set/way operand
cleanflush_l2dcacheway:
cleanflush_l2dcacheline:
    mcr     p15, 0, r0, c7, c14, 2      // cleanflush dcache line by way/set
    add     r0, r0, r1                  // increment set index
    tst     r0, r2                      // look for overflow
    beq     cleanflush_l2dcacheline
    bic     r0, r0, r2                  // clear set overflow
    adds    r0, r0, r3                  // increment way
    bcc     cleanflush_l2dcacheway      // loop
cleanflush_skipl2dcache:
    dsb
    bx      lr

/*
 * void FlushPoU_Dcache(void)
 *
 * Flush D-cache to Point of Unification
 */
    .text
    .align 2
    .globl EXT(FlushPoU_Dcache)
LEXT(FlushPoU_Dcache)
    mov     r0, #0
    GET_CACHE_CONFIG r0, r1, r2, r3
    mov     r0, #0
    dsb
fpud_way:
fpud_line:
    mcr     p15, 0, r0, c7, c14, 2      // cleanflush dcache line by way/set
    add     r0, r0, r1                  // increment set index
    tst     r0, r2                      // look for overflow
    beq     fpud_line
    bic     r0, r0, r2                  // clear set overflow
    adds    r0, r0, r3                  // increment way
    bcc     fpud_way                    // loop
    dsb
    bx      lr

/*
 * void FlushPoC_DcacheRegion(vm_offset_t va, unsigned length)
 *
 * Clean and Invalidate d-cache region to Point of Coherency
 */
    .text
    .align 2
    .globl EXT(FlushPoC_DcacheRegion)
LEXT(FlushPoC_DcacheRegion)
    and     r2, r0, #((1<<MMU_CLINE)-1)
    bic     r0, r0, #((1<<MMU_CLINE)-1) // Cache-line align the address
    add     r1, r1, r2
    sub     r1, r1, #1
    mov     r1, r1, LSR #MMU_CLINE      // Set cache line counter
    dsb
cfmdr_loop:
    mcr     p15, 0, r0, c7, c14, 1      // Clean & invalidate dcache line
    add     r0, r0, #1<<MMU_CLINE       // Get next cache-aligned addr
    subs    r1, r1, #1                  // Decrement cache line counter
    bpl     cfmdr_loop                  // Loop while counter is non-negative
    dsb
    bx      lr

/*
 * void flush_dcache64(addr64_t addr, unsigned length, boolean_t phys)
 */
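/*
 * Under the 32-bit ARM EABI the 64-bit addr arrives in r0/r1 (low
 * word in r0 on this little-endian configuration), length in r2, and
 * phys in r3.  The shim below shifts length and phys down into r1/r2,
 * discarding the high word of the address, and tail-calls the 32-bit
 * flush_dcache().  clean_dcache64() below follows the same pattern.
 */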
    .text
    .align 2
    .globl EXT(flush_dcache64)
LEXT(flush_dcache64)
    mov     r1, r2                      // length into the second 32-bit arg slot
    mov     r2, r3                      // phys flag into the third; high word of addr dropped
    LOAD_ADDR_PC(flush_dcache)

/*
 * void clean_dcache64(addr64_t addr, unsigned length, boolean_t phys)
 */
    .text
    .align 2
    .globl EXT(clean_dcache64)
LEXT(clean_dcache64)
    mov     r1, r2                      // Same argument shuffle as flush_dcache64 above
    mov     r2, r3
    LOAD_ADDR_PC(clean_dcache)

/*
 * void invalidate_icache(vm_offset_t va, unsigned length, boolean_t phys)
 * void invalidate_icache64(addr64_t va, unsigned length, boolean_t phys)
 */
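/*
 * Both entry points funnel into InvalidatePoU_IcacheRegion.  When phys
 * is set, the physical address is rebased into the kernel's static
 * mapping as va = pa - gPhysBase + gVirtBase, which assumes the region
 * lies inside that single linearly-mapped window.
 */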
    .text
    .align 2
    .globl EXT(invalidate_icache64)
    .globl EXT(invalidate_icache)
LEXT(invalidate_icache64)
    mov     r1, r2                      // Shift length/phys down; high word of va dropped
    mov     r2, r3
LEXT(invalidate_icache)
    cmp     r2, #0                      // Is it physical?
    COND_EXTERN_BEQ(InvalidatePoU_IcacheRegion)
    LOAD_ADDR(r2, gPhysBase)
    ldr     r2, [r2]
    sub     r0, r0, r2                  // Offset from the physical base...
    LOAD_ADDR(r2, gVirtBase)
    ldr     r2, [r2]
    add     r0, r0, r2                  // ...rebased onto the kernel virtual base
    b       EXT(InvalidatePoU_IcacheRegion)


#include "globals_asm.h"

LOAD_ADDR_GEN_DEF(flush_dcache)
LOAD_ADDR_GEN_DEF(clean_dcache)

/* vim: set ts=4: */