/*
 * osfmk/arm/caches_asm.s — ARM (32-bit) cache maintenance primitives
 * (source: apple/xnu, tag xnu-6153.11.26)
 */
1 /*
2 * Copyright (c) 2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <machine/asm.h>
30 #include <arm/proc_reg.h>
31 #include <arm/pmap.h>
32 #include <sys/errno.h>
33 #include "assym.s"
34
35
36 /*
37 * void invalidate_mmu_cache(void)
38 *
39 * Invalidate d-cache and i-cache
 *
 * Whole-cache combined I+D invalidate via CP15 c7, c7, 0.
 * Clobbers: r0.
40 */
41 .text
42 .align 2
43 .globl EXT(invalidate_mmu_cache)
LEXT(invalidate_mmu_cache)
45 mov r0, #0 // CP15 c7 maintenance ops take an SBZ (should-be-zero) Rt
46 dsb // complete all prior memory accesses first
47 mcr p15, 0, r0, c7, c7, 0 // Invalidate both caches
48 dsb // wait for the invalidate to complete
49 isb // flush the pipeline before returning
50 bx lr
51
52 /*
53 * void invalidate_mmu_dcache(void)
54 *
55 * Invalidate d-cache
 *
 * Whole d-cache invalidate via CP15 c7, c6, 0.
 * Clobbers: r0.
56 */
57 .text
58 .align 2
59 .globl EXT(invalidate_mmu_dcache)
LEXT(invalidate_mmu_dcache)
61 mov r0, #0 // SBZ operand for the CP15 write
62 dsb // complete all prior memory accesses first
63 mcr p15, 0, r0, c7, c6, 0 // Invalidate dcache
64 dsb // wait for the invalidate to complete
65 bx lr
66
67 /*
68 * void invalidate_mmu_dcache_region(vm_offset_t va, unsigned length)
69 *
70 * Invalidate d-cache region
 *
 * r0 = va (any alignment), r1 = length in bytes.
 * NOTE: the MCR used is c7, c14, 1 (clean AND invalidate dcache line by
 * MVA), not a pure invalidate — this avoids discarding dirty data that
 * merely shares a cache line with the edges of the region.
 * Clobbers: r0, r1, r2, flags.
71 */
72 .text
73 .align 2
74 .globl EXT(invalidate_mmu_dcache_region)
LEXT(invalidate_mmu_dcache_region)
76 and r2, r0, #((1<<MMU_CLINE)-1) // r2 = va's offset within its cache line
77 bic r0, r0, #((1<<MMU_CLINE)-1) // Cache-line align the start address
78 add r1, r1, r2 // extend length to cover the leading partial line
79 sub r1, r1, #1
80 mov r1, r1, LSR #MMU_CLINE // line counter = (length+offset-1) >> line shift
81 dsb
fmdr_loop:
83 mcr p15, 0, r0, c7, c14, 1 // Clean+invalidate dcache line by MVA
84 add r0, r0, #1<<MMU_CLINE // Advance to next cache-aligned addr
85 subs r1, r1, #1 // Decrement cache line counter
86 bpl fmdr_loop // Loop while counter >= 0 (counter+1 lines total)
87 dsb
88 bx lr
89
90 /*
91 * void InvalidatePoU_Icache(void)
92 *
93 * Invalidate i-cache
 *
 * Entire i-cache invalidate to Point of Unification (CP15 c7, c5, 0).
 * invalidate_mmu_icache is an alias entry point for the same code.
 * Clobbers: r0.
94 */
95 .text
96 .align 2
97 .globl EXT(InvalidatePoU_Icache)
98 .globl EXT(invalidate_mmu_icache)
LEXT(InvalidatePoU_Icache)
LEXT(invalidate_mmu_icache)
101 mov r0, #0 // SBZ operand for the CP15 write
102 dsb // ensure prior stores (e.g. new code) are visible
103 mcr p15, 0, r0, c7, c5, 0 // Invalidate icache
104 dsb // wait for the invalidate to complete
105 isb // flush pipeline so stale prefetched insns are dropped
106 bx lr
107
108 /*
109 * void InvalidatePoU_IcacheRegion(vm_offset_t va, unsigned length)
110 *
111 * Invalidate icache region
 *
 * First cleans the same d-cache region to the Point of Unification so that
 * newly written instructions are visible to instruction fetch, then
 * invalidates the i-cache lines by MVA (CP15 c7, c5, 1).
 * Depends on CleanPoU_DcacheRegion preserving r0/r1 (it works entirely in
 * r2/r3/r12 — see its definition in this file).
 * Clobbers: r0, r1, r2, flags (r7 saved/restored).
112 */
113 .text
114 .align 2
115 .globl EXT(InvalidatePoU_IcacheRegion)
LEXT(InvalidatePoU_IcacheRegion)
117 push {r7,lr}
118 mov r7, sp // maintain frame-pointer chain
119 bl EXT(CleanPoU_DcacheRegion) // push new code to PoU first; keeps r0/r1
120 and r2, r0, #((1<<MMU_I_CLINE)-1) // r2 = va's offset within its icache line
121 bic r0, r0, #((1<<MMU_I_CLINE)-1) // Icache-line align the start address
122 add r1, r1, r2 // extend length to cover the leading partial line
123 sub r1, r1, #1
124 mov r1, r1, LSR #MMU_I_CLINE // line counter = (length+offset-1) >> line shift
fmir_loop:
126 mcr p15, 0, r0, c7, c5, 1 // Invalidate icache line by MVA
127 add r0, r0, #1<<MMU_I_CLINE // Advance to next cache-aligned addr
128 subs r1, r1, #1 // Decrement cache line counter
129 bpl fmir_loop // Loop while counter >= 0 (counter+1 lines total)
130 dsb
131 isb // flush pipeline before executing refreshed code
132 pop {r7,pc}
133
134 /*
135 * void CleanPoC_Dcache(void)
136 *
137 * Clean all d-caches
 *
 * Walks every set/way of the L1 d-cache (and the L2, when configured) with
 * DCCSW (CP15 c7, c10, 2).  Skipped entirely for write-through L1 caches,
 * which never hold dirty lines.  clean_mmu_dcache is an alias entry point.
 * Set/way operand layout (set index, way index, cache level) comes from the
 * MMU_I7SET/MMU_NSET/MMU_I7WAY and L2_* constants in arm/proc_reg.h.
 * Clobbers: r0, flags.
138 */
139 .text
140 .align 2
141 .globl EXT(CleanPoC_Dcache)
142 .globl EXT(clean_mmu_dcache)
LEXT(CleanPoC_Dcache)
LEXT(clean_mmu_dcache)
#if !defined(__ARM_L1_WT_CACHE__)
146 mov r0, #0 // start at set 0, way 0, level 0 (L1)
147 dsb
clean_dcacheway:
clean_dcacheline:
150 mcr p15, 0, r0, c7, c10, 2 // clean dcache line by way/set
151 add r0, r0, #1 << MMU_I7SET // increment set index
152 tst r0, #1 << (MMU_NSET + MMU_I7SET) // look for set-index overflow
153 beq clean_dcacheline
154 bic r0, r0, #1 << (MMU_NSET + MMU_I7SET) // clear set overflow
155 adds r0, r0, #1 << MMU_I7WAY // increment way; carry out = all ways done
156 bcc clean_dcacheway // loop until way counter wraps
#endif
#if __ARM_L2CACHE__
159 dsb // L1 clean must finish before starting L2
160 mov r0, #2 // level field = 1 in set/way format, i.e. the L2 cache
clean_l2dcacheway:
clean_l2dcacheline:
163 mcr p15, 0, r0, c7, c10, 2 // clean dcache line by way/set
164 add r0, r0, #1 << L2_I7SET // increment set index
165 tst r0, #1 << (L2_NSET + L2_I7SET) // look for set-index overflow
166 beq clean_l2dcacheline
167 bic r0, r0, #1 << (L2_NSET + L2_I7SET) // clear set overflow
168 adds r0, r0, #1 << L2_I7WAY // increment way; carry out = all ways done
169 bcc clean_l2dcacheway // loop until way counter wraps
#endif
171 dsb
172 bx lr
173
174 /*
175 * void CleanPoU_Dcache(void)
176 *
177 * Clean D-cache to Point of Unification
 *
 * Set/way clean of the L1 d-cache (DCCSW, CP15 c7, c10, 2).  When the cache
 * up to the PoU is write-through (__ARM_PoU_WT_CACHE__) there is nothing
 * dirty to clean and this reduces to a barrier + return.
 * Clobbers: r0, flags.
178 */
179 .text
180 .align 2
181 .globl EXT(CleanPoU_Dcache)
LEXT(CleanPoU_Dcache)
#if !defined(__ARM_PoU_WT_CACHE__)
184 mov r0, #0 // start at set 0, way 0, level 0 (L1)
185 dsb
clean_dcacheway_idle:
clean_dcacheline_idle:
188 mcr p15, 0, r0, c7, c10, 2 // clean dcache line by way/set
189 add r0, r0, #1 << MMU_I7SET // increment set index
190 tst r0, #1 << (MMU_NSET + MMU_I7SET) // look for set-index overflow
191 beq clean_dcacheline_idle
192 bic r0, r0, #1 << (MMU_NSET + MMU_I7SET) // clear set overflow
193 adds r0, r0, #1 << MMU_I7WAY // increment way; carry out = all ways done
194 bcc clean_dcacheway_idle // loop until way counter wraps
#endif
196 dsb
197 bx lr
198
199 /*
200 * void CleanPoU_DcacheRegion(vm_offset_t va, unsigned length)
201 *
202 * Clean d-cache region to Point of Unification
 *
 * r0 = va (any alignment), r1 = length in bytes.
 * Deliberately works only in r2/r3/r12 so that r0 and r1 survive the call —
 * InvalidatePoU_IcacheRegion in this file relies on that after its `bl` here.
 * Uses DCCMVAU (CP15 c7, c11, 1).  No-op (barrier only) when the cache to
 * the PoU is write-through.  Clobbers: r2, r3, r12, flags.
203 */
204 .text
205 .align 2
206 .globl EXT(CleanPoU_DcacheRegion)
LEXT(CleanPoU_DcacheRegion)
#if !defined(__ARM_PoU_WT_CACHE__)

210 and r2, r0, #((1<<MMU_CLINE)-1) // r2 = va's offset within its cache line
211 bic r3, r0, #((1<<MMU_CLINE)-1) // r3 = cache-line-aligned start (r0 preserved)
212 add r12, r1, r2 // extend length to cover the leading partial line
213 sub r12, r12, #1
214 mov r12, r12, LSR #MMU_CLINE // line counter = (length+offset-1) >> line shift
215 dsb
cudr_loop:
217 mcr p15, 0, r3, c7, c11, 1 // Clean dcache line to PoU
218 add r3, r3, #1<<MMU_CLINE // Advance to next cache-aligned addr
219 subs r12, r12, #1 // Decrement cache line counter
220 bpl cudr_loop // Loop while counter >= 0 (counter+1 lines total)

#endif
223 dsb
224 bx lr
225
226 /*
227 * void CleanPoC_DcacheRegion(vm_offset_t va, unsigned length)
228 *
229 * Clean d-cache region to Point of Coherency
 *
 * r0 = va (any alignment), r1 = length in bytes.
 * Uses DCCMVAC (CP15 c7, c10, 1).  On this (arm32) configuration the
 * _Force variant is simply an alias of the same code.
 * Clobbers: r0, r1, r2, flags.
230 */
231 .text
232 .align 2
233 .globl EXT(CleanPoC_DcacheRegion)
234 .globl EXT(CleanPoC_DcacheRegion_Force)
LEXT(CleanPoC_DcacheRegion)
LEXT(CleanPoC_DcacheRegion_Force)
237 and r2, r0, #((1<<MMU_CLINE)-1) // r2 = va's offset within its cache line
238 bic r0, r0, #((1<<MMU_CLINE)-1) // Cache-line align the start address
239 add r1, r1, r2 // extend length to cover the leading partial line
240 sub r1, r1, #1
241 mov r1, r1, LSR #MMU_CLINE // line counter = (length+offset-1) >> line shift
ccdr_loop:
243 mcr p15, 0, r0, c7, c10, 1 // Clean dcache line to PoC
244 add r0, r0, #1<<MMU_CLINE // Advance to next cache-aligned addr
245 subs r1, r1, #1 // Decrement cache line counter
246 bpl ccdr_loop // Loop while counter >= 0 (counter+1 lines total)
247 dsb
248 bx lr
249
250 /*
251 * void FlushPoC_Dcache(void)
252 *
253 * Clean and Invalidate dcaches to Point of Coherency
 *
 * Walks every set/way of the L1 d-cache (and L2 when configured) with
 * DCCISW (CP15 c7, c14, 2).  Clobbers: r0, flags.
254 */
255 .text
256 .align 2
257 .globl EXT(FlushPoC_Dcache)
LEXT(FlushPoC_Dcache)
259 mov r0, #0 // start at set 0, way 0, level 0 (L1)
260 dsb
cleanflush_dcacheway:
cleanflush_dcacheline:
263 mcr p15, 0, r0, c7, c14, 2 // clean+invalidate dcache line by way/set
264 add r0, r0, #1 << MMU_I7SET // increment set index
265 tst r0, #1 << (MMU_NSET + MMU_I7SET) // look for set-index overflow
266 beq cleanflush_dcacheline
267 bic r0, r0, #1 << (MMU_NSET + MMU_I7SET) // clear set overflow
268 adds r0, r0, #1 << MMU_I7WAY // increment way; carry out = all ways done
269 bcc cleanflush_dcacheway // loop until way counter wraps
#if __ARM_L2CACHE__
271 dsb // L1 flush must finish before starting L2
272 mov r0, #2 // level field = 1 in set/way format, i.e. the L2 cache
cleanflush_l2dcacheway:
cleanflush_l2dcacheline:
275 mcr p15, 0, r0, c7, c14, 2 // clean+invalidate dcache line by way/set
276 add r0, r0, #1 << L2_I7SET // increment set index
277 tst r0, #1 << (L2_NSET + L2_I7SET) // look for set-index overflow
278 beq cleanflush_l2dcacheline
279 bic r0, r0, #1 << (L2_NSET + L2_I7SET) // clear set overflow
280 adds r0, r0, #1 << L2_I7WAY // increment way; carry out = all ways done
281 bcc cleanflush_l2dcacheway // loop until way counter wraps
#endif
283 dsb
284 bx lr
285
286 /*
287 * void FlushPoU_Dcache(void)
288 *
289 * Flush D-cache to Point of Unification
 *
 * Set/way clean+invalidate of the L1 d-cache only (DCCISW, CP15 c7, c14, 2);
 * unlike FlushPoC_Dcache it never touches the L2.  Clobbers: r0, flags.
290 */
291 .text
292 .align 2
293 .globl EXT(FlushPoU_Dcache)
LEXT(FlushPoU_Dcache)
295 mov r0, #0 // start at set 0, way 0, level 0 (L1)
296 dsb
fpud_way:
fpud_line:
299 mcr p15, 0, r0, c7, c14, 2 // clean+invalidate dcache line by way/set
300 add r0, r0, #1 << MMU_I7SET // increment set index
301 tst r0, #1 << (MMU_NSET + MMU_I7SET) // look for set-index overflow
302 beq fpud_line
303 bic r0, r0, #1 << (MMU_NSET + MMU_I7SET) // clear set overflow
304 adds r0, r0, #1 << MMU_I7WAY // increment way; carry out = all ways done
305 bcc fpud_way // loop until way counter wraps
306 dsb
307 bx lr
308
309 /*
310 * void FlushPoC_DcacheRegion(vm_offset_t va, unsigned length)
311 *
312 * Clean and Invalidate d-cache region to Point of Coherency
 *
 * r0 = va (any alignment), r1 = length in bytes.
 * Uses DCCIMVAC (CP15 c7, c14, 1).  Clobbers: r0, r1, r2, flags.
313 */
314 .text
315 .align 2
316 .globl EXT(FlushPoC_DcacheRegion)
LEXT(FlushPoC_DcacheRegion)
318 and r2, r0, #((1<<MMU_CLINE)-1) // r2 = va's offset within its cache line
319 bic r0, r0, #((1<<MMU_CLINE)-1) // Cache-line align the start address
320 add r1, r1, r2 // extend length to cover the leading partial line
321 sub r1, r1, #1
322 mov r1, r1, LSR #MMU_CLINE // line counter = (length+offset-1) >> line shift
323 dsb
cfmdr_loop:
325 mcr p15, 0, r0, c7, c14, 1 // Clean & invalidate dcache line by MVA
326 add r0, r0, #1<<MMU_CLINE // Advance to next cache-aligned addr
327 subs r1, r1, #1 // Decrement cache line counter
328 bpl cfmdr_loop // Loop while counter >= 0 (counter+1 lines total)
329 dsb
330 bx lr
331
332 /*
333 * void flush_dcache64(addr64_t addr, unsigned length, boolean_t phys)
 *
 * 64-bit-address wrapper: per the 32-bit ARM calling convention the
 * addr64_t arrives in the r0/r1 pair, so the high word (r1) is discarded
 * and length/phys are shifted down before tail-calling flush_dcache
 * (defined elsewhere; reached via the address literal emitted at the
 * bottom of this file).
334 */
335 .text
336 .align 2
337 .globl EXT(flush_dcache64)
LEXT(flush_dcache64)
339 mov r1, r2 // r1 = length (drops addr high word)
340 mov r2, r3 // r2 = phys
341 LOAD_ADDR_PC(flush_dcache) // tail call flush_dcache(addr_lo, length, phys)
342
343 /*
344 * void clean_dcache64(addr64_t addr, unsigned length, boolean_t phys)
 *
 * 64-bit-address wrapper: the addr64_t arrives in the r0/r1 register pair,
 * so the high word (r1) is discarded and length/phys are shifted down
 * before tail-calling clean_dcache (defined elsewhere; reached via the
 * address literal emitted at the bottom of this file).
345 */
346 .text
347 .align 2
348 .globl EXT(clean_dcache64)
LEXT(clean_dcache64)
350 mov r1, r2 // r1 = length (drops addr high word)
351 mov r2, r3 // r2 = phys
352 LOAD_ADDR_PC(clean_dcache) // tail call clean_dcache(addr_lo, length, phys)
353
354 /*
355 * void invalidate_icache(vm_offset_t va, unsigned length, boolean_t phys)
356 * void invalidate_icache64(addr64_t va, unsigned length, boolean_t phys)
 *
 * invalidate_icache64 repacks its arguments (addr64_t occupies r0/r1; the
 * high word is discarded) and falls through into invalidate_icache.
 * If phys is FALSE the address is already virtual and we branch straight
 * to InvalidatePoU_IcacheRegion; otherwise the physical address is
 * translated into the kernel's static mapping: va = pa - gPhysBase + gVirtBase.
357 */
358 .text
359 .align 2
360 .globl EXT(invalidate_icache64)
361 .globl EXT(invalidate_icache)
LEXT(invalidate_icache64)
363 mov r1, r2 // r1 = length (drops addr high word)
364 mov r2, r3 // r2 = phys; fall through
LEXT(invalidate_icache)
366 cmp r2, #0 // Is it physical?
367 COND_EXTERN_BEQ(InvalidatePoU_IcacheRegion) // no: va is usable as-is
368 LOAD_ADDR(r2, gPhysBase)
369 ldr r2, [r2]
370 sub r0, r0, r2 // r0 = physical offset from gPhysBase
371 LOAD_ADDR(r2, gVirtBase)
372 ldr r2, [r2]
373 add r0, r0, r2 // r0 = equivalent kernel virtual address
374 b EXT(InvalidatePoU_IcacheRegion) // tail call with translated va
375
376
377 #include "globals_asm.h"

/* Emit the address literals consumed by the LOAD_ADDR/LOAD_ADDR_PC
 * macro uses above (tail calls to flush_dcache / clean_dcache). */
379 LOAD_ADDR_GEN_DEF(flush_dcache)
380 LOAD_ADDR_GEN_DEF(clean_dcache)

382 /* vim: set ts=4: */
382 /* vim: set ts=4: */