/*
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/asm.h>
#include <arm/proc_reg.h>
#include <arm/pmap.h>
#include <sys/errno.h>
#include "assym.s"

/*
 * void invalidate_mmu_cache(void)
 *
 * Invalidate d-cache and i-cache
 */
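/*
 * Note: the CP15 c7, c7, 0 operation invalidates both the instruction
 * and data caches in one shot (an ARMv6-style "invalidate both caches"
 * encoding). The dsb before the mcr presumably lets prior stores drain
 * first; the trailing dsb/isb ensure the invalidation has completed and
 * the fetch stream is resynchronized before returning.
 */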
	.text
	.align 2
	.globl EXT(invalidate_mmu_cache)
LEXT(invalidate_mmu_cache)
	mov	r0, #0
	dsb
	mcr	p15, 0, r0, c7, c7, 0		// Invalidate caches
	dsb
	isb
	bx	lr

/*
 * void invalidate_mmu_dcache(void)
 *
 * Invalidate d-cache
 */
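/*
 * Note: c7, c6, 0 invalidates the entire data cache without cleaning
 * it first, so any dirty lines are discarded rather than written back;
 * callers must not have modified data they still need in the cache.
 */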
	.text
	.align 2
	.globl EXT(invalidate_mmu_dcache)
LEXT(invalidate_mmu_dcache)
	mov	r0, #0
	dsb
	mcr	p15, 0, r0, c7, c6, 0		// Invalidate dcache
	dsb
	bx	lr

/*
 * void invalidate_mmu_dcache_region(vm_offset_t va, unsigned length)
 *
 * Invalidate d-cache region
 */
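/*
 * The line counter below is ((va & mask) + length - 1) >> MMU_CLINE,
 * and the subs/bpl loop runs that value plus one times. A worked
 * example, assuming a 32-byte line (MMU_CLINE == 5): va = 0x1234,
 * length = 0x40 gives an in-line offset of 0x14 and a counter of
 * (0x14 + 0x40 - 1) >> 5 = 2, so three lines are touched, at 0x1220,
 * 0x1240 and 0x1260, covering the requested bytes 0x1234..0x1273.
 */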
	.text
	.align 2
	.globl EXT(invalidate_mmu_dcache_region)
LEXT(invalidate_mmu_dcache_region)
	and	r2, r0, #((1<<MMU_CLINE)-1)
	bic	r0, r0, #((1<<MMU_CLINE)-1)	// Cache line aligned
	add	r1, r1, r2
	sub	r1, r1, #1
	mov	r1, r1, LSR #MMU_CLINE		// Set cache line counter
	dsb
fmdr_loop:
	mcr	p15, 0, r0, c7, c14, 1		// Clean & invalidate dcache line
	add	r0, r0, #1<<MMU_CLINE		// Get next cache aligned addr
	subs	r1, r1, #1			// Decrement cache line counter
	bpl	fmdr_loop			// Loop while counter is not negative
	dsb
	bx	lr

/*
 * void InvalidatePoU_Icache(void)
 *
 * Invalidate i-cache
 */
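/*
 * Note: c7, c5, 0 is ICIALLU, which invalidates the entire instruction
 * cache to the Point of Unification (and on some implementations also
 * flushes the branch predictor). The isb discards instructions already
 * fetched, so subsequent fetches observe the invalidated state.
 */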
	.text
	.align 2
	.globl EXT(InvalidatePoU_Icache)
	.globl EXT(invalidate_mmu_icache)
LEXT(InvalidatePoU_Icache)
LEXT(invalidate_mmu_icache)
	mov	r0, #0
	dsb
	mcr	p15, 0, r0, c7, c5, 0		// Invalidate icache
	dsb
	isb
	bx	lr

/*
 * void InvalidatePoU_IcacheRegion(vm_offset_t va, unsigned length)
 *
 * Invalidate icache region
 */
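/*
 * The D-cache is cleaned to the PoU first so that newly written
 * instructions are visible at the point where the instruction and data
 * streams unify; only then is it safe to invalidate the I-cache lines.
 * This relies on CleanPoU_DcacheRegion preserving r0/r1 (it works in
 * r3/r12), since va and length are reused after the bl below.
 */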
	.text
	.align 2
	.globl EXT(InvalidatePoU_IcacheRegion)
LEXT(InvalidatePoU_IcacheRegion)
	push	{r7,lr}
	mov	r7, sp
	bl	EXT(CleanPoU_DcacheRegion)
	and	r2, r0, #((1<<MMU_I_CLINE)-1)
	bic	r0, r0, #((1<<MMU_I_CLINE)-1)	// Cache line aligned
	add	r1, r1, r2
	sub	r1, r1, #1
	mov	r1, r1, LSR #MMU_I_CLINE	// Set cache line counter
fmir_loop:
	mcr	p15, 0, r0, c7, c5, 1		// Invalidate icache line
	add	r0, r0, #1<<MMU_I_CLINE		// Get next cache aligned addr
	subs	r1, r1, #1			// Decrement cache line counter
	bpl	fmir_loop			// Loop while counter is not negative
	dsb
	isb
	pop	{r7,pc}

/*
 * void CleanPoC_Dcache(void)
 *
 * Clean all d-caches
 */
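/*
 * The set/way loops below pack the DCCSW operand into r0: the set index
 * lives at bit MMU_I7SET, the way index at bit MMU_I7WAY, and (for the
 * L2 pass) the cache-level field occupies bits [3:1], which is why that
 * pass starts from r0 = 2 (level field 1, i.e. the second cache level).
 * Each inner iteration bumps the set; on set overflow the set bits are
 * cleared and the way is bumped. MMU_I7WAY sits high enough in the word
 * that incrementing past the last way carries out of bit 31, so bcc
 * falls through once every set/way pair has been visited.
 */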
	.text
	.align 2
	.globl EXT(CleanPoC_Dcache)
	.globl EXT(clean_mmu_dcache)
LEXT(CleanPoC_Dcache)
LEXT(clean_mmu_dcache)
#if !defined(__ARM_L1_WT_CACHE__)
	mov	r0, #0
	dsb
clean_dcacheway:
clean_dcacheline:
	mcr	p15, 0, r0, c7, c10, 2			// clean dcache line by way/set
	add	r0, r0, #1 << MMU_I7SET			// increment set index
	tst	r0, #1 << (MMU_NSET + MMU_I7SET)	// look for overflow
	beq	clean_dcacheline
	bic	r0, r0, #1 << (MMU_NSET + MMU_I7SET)	// clear set overflow
	adds	r0, r0, #1 << MMU_I7WAY			// increment way
	bcc	clean_dcacheway				// loop
#endif
#if __ARM_L2CACHE__
	dsb
	mov	r0, #2
clean_l2dcacheway:
clean_l2dcacheline:
	mcr	p15, 0, r0, c7, c10, 2			// clean dcache line by way/set
	add	r0, r0, #1 << L2_I7SET			// increment set index
	tst	r0, #1 << (L2_NSET + L2_I7SET)		// look for overflow
	beq	clean_l2dcacheline
	bic	r0, r0, #1 << (L2_NSET + L2_I7SET)	// clear set overflow
	adds	r0, r0, #1 << L2_I7WAY			// increment way
	bcc	clean_l2dcacheway			// loop
#endif
	dsb
	bx	lr

/*
 * void CleanPoU_Dcache(void)
 *
 * Clean D-cache to Point of Unification
 */
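/*
 * Note: when __ARM_PoU_WT_CACHE__ is defined the cache is write-through
 * up to the PoU, so there is never dirty data to clean and the routine
 * reduces to a bare dsb.
 */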
	.text
	.align 2
	.globl EXT(CleanPoU_Dcache)
LEXT(CleanPoU_Dcache)
#if !defined(__ARM_PoU_WT_CACHE__)
	mov	r0, #0
	dsb
clean_dcacheway_idle:
clean_dcacheline_idle:
	mcr	p15, 0, r0, c7, c10, 2			// clean dcache line by way/set
	add	r0, r0, #1 << MMU_I7SET			// increment set index
	tst	r0, #1 << (MMU_NSET + MMU_I7SET)	// look for overflow
	beq	clean_dcacheline_idle
	bic	r0, r0, #1 << (MMU_NSET + MMU_I7SET)	// clear set overflow
	adds	r0, r0, #1 << MMU_I7WAY			// increment way
	bcc	clean_dcacheway_idle			// loop
#endif
	dsb
	bx	lr

/*
 * void CleanPoU_DcacheRegion(vm_offset_t va, unsigned length)
 *
 * Clean d-cache region to Point of Unification
 */
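/*
 * c7, c11, 1 is DCCMVAU (clean by MVA to the PoU). Unlike the other
 * region routines this one deliberately works in r3/r12 and leaves
 * r0/r1 untouched, so that InvalidatePoU_IcacheRegion can reuse the
 * original va/length after calling it.
 */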
	.text
	.align 2
	.globl EXT(CleanPoU_DcacheRegion)
LEXT(CleanPoU_DcacheRegion)
#if !defined(__ARM_PoU_WT_CACHE__)

	and	r2, r0, #((1<<MMU_CLINE)-1)
	bic	r3, r0, #((1<<MMU_CLINE)-1)	// Cache line aligned
	add	r12, r1, r2
	sub	r12, r12, #1
	mov	r12, r12, LSR #MMU_CLINE	// Set cache line counter
	dsb
cudr_loop:
	mcr	p15, 0, r3, c7, c11, 1		// Clean dcache line to PoU
	add	r3, r3, #1<<MMU_CLINE		// Get next cache aligned addr
	subs	r12, r12, #1			// Decrement cache line counter
	bpl	cudr_loop			// Loop while counter is not negative

#endif
	dsb
	bx	lr

/*
 * void CleanPoC_DcacheRegion(vm_offset_t va, size_t length)
 *
 * Clean d-cache region to Point of Coherency
 */
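/*
 * Note: in this file CleanPoC_DcacheRegion and
 * CleanPoC_DcacheRegion_Force are aliases for the same instructions;
 * the separate _Force entry point presumably exists for compatibility
 * with configurations where the two behave differently.
 */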
	.text
	.align 2
	.globl EXT(CleanPoC_DcacheRegion)
	.globl EXT(CleanPoC_DcacheRegion_Force)
LEXT(CleanPoC_DcacheRegion)
LEXT(CleanPoC_DcacheRegion_Force)
	and	r2, r0, #((1<<MMU_CLINE)-1)
	bic	r0, r0, #((1<<MMU_CLINE)-1)	// Cache line aligned
	add	r1, r1, r2
	sub	r1, r1, #1
	mov	r1, r1, LSR #MMU_CLINE		// Set cache line counter
	dsb
ccdr_loop:
	mcr	p15, 0, r0, c7, c10, 1		// Clean dcache line to PoC
	add	r0, r0, #1<<MMU_CLINE		// Get next cache aligned addr
	subs	r1, r1, #1			// Decrement cache line counter
	bpl	ccdr_loop			// Loop while counter is not negative
	dsb
	bx	lr

/*
 * void FlushPoC_Dcache(void)
 *
 * Clean and Invalidate dcaches to Point of Coherency
 */
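/*
 * Same set/way walk as CleanPoC_Dcache, but with c7, c14, 2 (DCCISW),
 * which cleans and invalidates each line in one operation. The L1 pass
 * always runs here, and the optional L2 pass again starts from r0 = 2
 * to select the second cache level.
 */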
	.text
	.align 2
	.globl EXT(FlushPoC_Dcache)
LEXT(FlushPoC_Dcache)
	mov	r0, #0
	dsb
cleanflush_dcacheway:
cleanflush_dcacheline:
	mcr	p15, 0, r0, c7, c14, 2			// cleanflush dcache line by way/set
	add	r0, r0, #1 << MMU_I7SET			// increment set index
	tst	r0, #1 << (MMU_NSET + MMU_I7SET)	// look for overflow
	beq	cleanflush_dcacheline
	bic	r0, r0, #1 << (MMU_NSET + MMU_I7SET)	// clear set overflow
	adds	r0, r0, #1 << MMU_I7WAY			// increment way
	bcc	cleanflush_dcacheway			// loop
#if __ARM_L2CACHE__
	dsb
	mov	r0, #2
cleanflush_l2dcacheway:
cleanflush_l2dcacheline:
	mcr	p15, 0, r0, c7, c14, 2			// cleanflush dcache line by way/set
	add	r0, r0, #1 << L2_I7SET			// increment set index
	tst	r0, #1 << (L2_NSET + L2_I7SET)		// look for overflow
	beq	cleanflush_l2dcacheline
	bic	r0, r0, #1 << (L2_NSET + L2_I7SET)	// clear set overflow
	adds	r0, r0, #1 << L2_I7WAY			// increment way
	bcc	cleanflush_l2dcacheway			// loop
#endif
	dsb
	bx	lr

/*
 * void FlushPoU_Dcache(void)
 *
 * Flush D-cache to Point of Unification
 */
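/*
 * Only the L1 D-cache is cleaned and invalidated here; pushing dirty
 * lines out of L1 suffices because the Point of Unification is assumed
 * to lie beyond L1 on these configurations.
 */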
	.text
	.align 2
	.globl EXT(FlushPoU_Dcache)
LEXT(FlushPoU_Dcache)
	mov	r0, #0
	dsb
fpud_way:
fpud_line:
	mcr	p15, 0, r0, c7, c14, 2			// cleanflush dcache line by way/set
	add	r0, r0, #1 << MMU_I7SET			// increment set index
	tst	r0, #1 << (MMU_NSET + MMU_I7SET)	// look for overflow
	beq	fpud_line
	bic	r0, r0, #1 << (MMU_NSET + MMU_I7SET)	// clear set overflow
	adds	r0, r0, #1 << MMU_I7WAY			// increment way
	bcc	fpud_way				// loop
	dsb
	bx	lr

/*
 * void FlushPoC_DcacheRegion(vm_offset_t va, unsigned length)
 *
 * Clean and Invalidate d-cache region to Point of Coherency
 */
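/*
 * c7, c14, 1 is DCCIMVAC: clean and invalidate a single line by MVA to
 * the Point of Coherency, i.e. the by-address counterpart of the DCCISW
 * walk in FlushPoC_Dcache above.
 */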
	.text
	.align 2
	.globl EXT(FlushPoC_DcacheRegion)
LEXT(FlushPoC_DcacheRegion)
	and	r2, r0, #((1<<MMU_CLINE)-1)
	bic	r0, r0, #((1<<MMU_CLINE)-1)	// Cache line aligned
	add	r1, r1, r2
	sub	r1, r1, #1
	mov	r1, r1, LSR #MMU_CLINE		// Set cache line counter
	dsb
cfmdr_loop:
	mcr	p15, 0, r0, c7, c14, 1		// Clean & invalidate dcache line
	add	r0, r0, #1<<MMU_CLINE		// Get next cache aligned addr
	subs	r1, r1, #1			// Decrement cache line counter
	bpl	cfmdr_loop			// Loop while counter is not negative
	dsb
	bx	lr

/*
 * void flush_dcache64(addr64_t addr, unsigned length, boolean_t phys)
 */
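/*
 * Under the 32-bit AAPCS the 64-bit addr occupies r0/r1 (low word in
 * r0), pushing length into r2 and phys into r3. The two movs below
 * discard the high half of the address and restore the 32-bit
 * flush_dcache(va, length, phys) argument layout before tail-calling;
 * clean_dcache64 below performs the same shuffle.
 */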
	.text
	.align 2
	.globl EXT(flush_dcache64)
LEXT(flush_dcache64)
	mov	r1, r2
	mov	r2, r3
	LOAD_ADDR_PC(flush_dcache)

/*
 * void clean_dcache64(addr64_t addr, unsigned length, boolean_t phys)
 */
	.text
	.align 2
	.globl EXT(clean_dcache64)
LEXT(clean_dcache64)
	mov	r1, r2
	mov	r2, r3
	LOAD_ADDR_PC(clean_dcache)

/*
 * void invalidate_icache(vm_offset_t va, unsigned length, boolean_t phys)
 * void invalidate_icache64(addr64_t va, unsigned length, boolean_t phys)
 */
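/*
 * When phys is true, the physical address is translated to its mapping
 * in the kernel's static physmap as va = pa - gPhysBase + gVirtBase
 * before the region invalidate; this assumes the target page is covered
 * by that linear mapping.
 */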
	.text
	.align 2
	.globl EXT(invalidate_icache64)
	.globl EXT(invalidate_icache)
LEXT(invalidate_icache64)
	mov	r1, r2
	mov	r2, r3
LEXT(invalidate_icache)
	cmp	r2, #0				// Is it physical?
	COND_EXTERN_BEQ(InvalidatePoU_IcacheRegion)
	LOAD_ADDR(r2, gPhysBase)
	ldr	r2, [r2]
	sub	r0, r0, r2
	LOAD_ADDR(r2, gVirtBase)
	ldr	r2, [r2]
	add	r0, r0, r2
	b	EXT(InvalidatePoU_IcacheRegion)


#include "globals_asm.h"

LOAD_ADDR_GEN_DEF(flush_dcache)
LOAD_ADDR_GEN_DEF(clean_dcache)

/* vim: set ts=4: */