]> git.saurik.com Git - apple/xnu.git/blame - osfmk/arm/caches_asm.s
xnu-7195.101.1.tar.gz
[apple/xnu.git] / osfmk / arm / caches_asm.s
CommitLineData
5ba3f43e
A
1/*
2 * Copyright (c) 2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <machine/asm.h>
30#include <arm/proc_reg.h>
31#include <arm/pmap.h>
32#include <sys/errno.h>
33#include "assym.s"
34
35
/*
 * void invalidate_mmu_cache(void)
 *
 * Invalidate the entire d-cache and i-cache.
 * Clobbers r0 only (used as the SBZ operand the cp15 c7 op requires).
 */
	.text
	.align 2
	.globl EXT(invalidate_mmu_cache)
LEXT(invalidate_mmu_cache)
	mov	r0, #0					// cp15 c7 ops take a should-be-zero register operand
	dsb						// drain prior memory accesses before touching the caches
	mcr	p15, 0, r0, c7, c7, 0			// invalidate both caches (I + D)
	dsb						// wait for the invalidate to complete
	isb						// then resynchronize the instruction stream
	bx	lr
51
/*
 * void invalidate_mmu_dcache(void)
 *
 * Invalidate the entire d-cache.  No clean is performed first, so any
 * dirty lines are discarded rather than written back.
 */
	.text
	.align 2
	.globl EXT(invalidate_mmu_dcache)
LEXT(invalidate_mmu_dcache)
	mov	r0, #0					// SBZ operand for the cp15 op
	dsb						// drain prior memory accesses first
	mcr	p15, 0, r0, c7, c6, 0			// invalidate entire dcache
	dsb						// wait for the invalidate to complete
	bx	lr
66
/*
 * void invalidate_mmu_dcache_region(vm_offset_t va, unsigned length)
 *
 * Invalidate the d-cache lines covering [va, va + length).
 * In:  r0 = va, r1 = length in bytes.  Clobbers r0-r2.
 */
	.text
	.align 2
	.globl EXT(invalidate_mmu_dcache_region)
LEXT(invalidate_mmu_dcache_region)
	and	r2, r0, #((1<<MMU_CLINE)-1)		// r2 = va's offset within its cache line
	bic	r0, r0, #((1<<MMU_CLINE)-1)		// align start address down to a line boundary
	add	r1, r1, r2				// grow length so the partial first line is covered
	sub	r1, r1, #1
	mov	r1, r1, LSR #MMU_CLINE			// r1 = number of lines - 1 (loop counter)
	dsb						// complete prior accesses before the line ops
fmdr_loop:
	mcr	p15, 0, r0, c7, c14, 1			// clean & invalidate dcache line by MVA
							// NOTE(review): c7,c14,1 is the clean+invalidate op
							// (same encoding commented "Clean & invalidate" in
							// FlushPoC_DcacheRegion), not a pure invalidate --
							// presumably deliberate so dirty data sharing a line
							// with the region is not silently discarded
	add	r0, r0, #1<<MMU_CLINE			// advance to the next cache line
	subs	r1, r1, #1				// decrement line counter
	bpl	fmdr_loop				// loop while counter >= 0
	dsb						// wait for all line ops to complete
	bx	lr
89
/*
 * void InvalidatePoU_Icache(void)
 * void invalidate_mmu_icache(void)
 *
 * Invalidate the entire i-cache (two entry points, one implementation).
 */
	.text
	.align 2
	.globl EXT(InvalidatePoU_Icache)
	.globl EXT(invalidate_mmu_icache)
LEXT(InvalidatePoU_Icache)
LEXT(invalidate_mmu_icache)
	mov	r0, #0					// SBZ operand for the cp15 op
	dsb						// ensure prior stores (e.g. newly written code) are visible
	mcr	p15, 0, r0, c7, c5, 0			// invalidate entire icache
	dsb						// wait for the invalidate to complete
	isb						// flush the pipeline so stale prefetched code is dropped
	bx	lr
107
/*
 * void InvalidatePoU_IcacheRegion(vm_offset_t va, unsigned length)
 *
 * Invalidate the i-cache lines covering [va, va + length).
 * In:  r0 = va, r1 = length in bytes.
 */
	.text
	.align 2
	.globl EXT(InvalidatePoU_IcacheRegion)
LEXT(InvalidatePoU_IcacheRegion)
	push	{r7,lr}
	mov	r7, sp
	bl	EXT(CleanPoU_DcacheRegion)		// first push any new code from the d-cache to the
							// PoU so the i-cache refill sees it; that routine
							// uses only r2/r3/r12 as scratch, so r0 (va) and
							// r1 (length) arrive back here intact
	and	r2, r0, #((1<<MMU_I_CLINE)-1)		// r2 = va's offset within its icache line
	bic	r0, r0, #((1<<MMU_I_CLINE)-1)		// align start address down to a line boundary
	add	r1, r1, r2				// grow length so the partial first line is covered
	sub	r1, r1, #1
	mov	r1, r1, LSR #MMU_I_CLINE		// r1 = number of lines - 1 (loop counter)
fmir_loop:
	mcr	p15, 0, r0, c7, c5, 1			// invalidate icache line by MVA
	add	r0, r0, #1<<MMU_I_CLINE			// advance to the next cache line
	subs	r1, r1, #1				// decrement line counter
	bpl	fmir_loop				// loop while counter >= 0
	dsb						// wait for the line ops to complete
	isb						// resynchronize the instruction stream
	pop	{r7,pc}
5ba3f43e
A
133
/*
 * void CleanPoC_Dcache(void)
 * void clean_mmu_dcache(void)
 *
 * Clean (write back, without invalidating) all d-caches, walking every
 * set/way of the L1 and, when __ARM_L2CACHE__ is configured, the L2.
 * The L1 pass is compiled out when the L1 is write-through (nothing dirty
 * to write back).
 */
	.text
	.align 2
	.globl EXT(CleanPoC_Dcache)
	.globl EXT(clean_mmu_dcache)
LEXT(CleanPoC_Dcache)
LEXT(clean_mmu_dcache)
#if !defined(__ARM_L1_WT_CACHE__)
	mov	r0, #0					// start at set 0 / way 0
	dsb						// complete prior accesses before the walk
clean_dcacheway:
clean_dcacheline:
	mcr	p15, 0, r0, c7, c10, 2			// clean dcache line by way/set
	add	r0, r0, #1 << MMU_I7SET			// increment set index
	tst	r0, #1 << (MMU_NSET + MMU_I7SET)	// look for set-field overflow
	beq	clean_dcacheline
	bic	r0, r0, #1 << (MMU_NSET + MMU_I7SET)	// clear set overflow
	adds	r0, r0, #1 << MMU_I7WAY			// increment way; carry set when ways wrap
	bcc	clean_dcacheway				// loop until the way field overflows
#endif
#if __ARM_L2CACHE__
	dsb						// finish L1 cleans before starting on the L2
	mov	r0, #2					// NOTE(review): #2 presumably selects the L2 in the
							// set/way register's cache-level field -- confirm
							// against proc_reg.h
clean_l2dcacheway:
clean_l2dcacheline:
	mcr	p15, 0, r0, c7, c10, 2			// clean dcache line by way/set
	add	r0, r0, #1 << L2_I7SET			// increment set index
	tst	r0, #1 << (L2_NSET + L2_I7SET)		// look for set-field overflow
	beq	clean_l2dcacheline
	bic	r0, r0, #1 << (L2_NSET + L2_I7SET)	// clear set overflow
	adds	r0, r0, #1 << L2_I7WAY			// increment way; carry set when ways wrap
	bcc	clean_l2dcacheway			// loop until the way field overflows
#endif
	dsb						// wait for all cleans to complete
	bx	lr
173
/*
 * void CleanPoU_Dcache(void)
 *
 * Clean the d-cache to the Point of Unification by walking every set/way.
 * Reduces to a bare dsb when the cache is write-through at the PoU
 * (nothing dirty to write back).
 */
	.text
	.align 2
	.globl EXT(CleanPoU_Dcache)
LEXT(CleanPoU_Dcache)
#if !defined(__ARM_PoU_WT_CACHE__)
	mov	r0, #0					// start at set 0 / way 0
	dsb						// complete prior accesses before the walk
clean_dcacheway_idle:
clean_dcacheline_idle:
	mcr	p15, 0, r0, c7, c10, 2			// clean dcache line by way/set
	add	r0, r0, #1 << MMU_I7SET			// increment set index
	tst	r0, #1 << (MMU_NSET + MMU_I7SET)	// look for set-field overflow
	beq	clean_dcacheline_idle
	bic	r0, r0, #1 << (MMU_NSET + MMU_I7SET)	// clear set overflow
	adds	r0, r0, #1 << MMU_I7WAY			// increment way; carry set when ways wrap
	bcc	clean_dcacheway_idle			// loop until the way field overflows
#endif
	dsb						// wait for all cleans to complete
	bx	lr
198
/*
 * void CleanPoU_DcacheRegion(vm_offset_t va, unsigned length)
 *
 * Clean the d-cache lines covering [va, va + length) to the Point of
 * Unification.  Uses only r2/r3/r12 as scratch, leaving r0/r1 untouched --
 * InvalidatePoU_IcacheRegion depends on that to reuse its arguments after
 * calling here.
 */
	.text
	.align 2
	.globl EXT(CleanPoU_DcacheRegion)
LEXT(CleanPoU_DcacheRegion)
#if !defined(__ARM_PoU_WT_CACHE__)
	and	r2, r0, #((1<<MMU_CLINE)-1)		// r2 = va's offset within its cache line
	bic	r3, r0, #((1<<MMU_CLINE)-1)		// r3 = line-aligned start address (r0 preserved)
	add	r12, r1, r2				// grow length so the partial first line is covered
	sub	r12, r12, #1
	mov	r12, r12, LSR #MMU_CLINE		// r12 = number of lines - 1 (loop counter)
	dsb						// complete prior accesses before the line ops
cudr_loop:
	mcr	p15, 0, r3, c7, c11, 1			// clean dcache line to PoU by MVA
	add	r3, r3, #1<<MMU_CLINE			// advance to the next cache line
	subs	r12, r12, #1				// decrement line counter
	bpl	cudr_loop				// loop while counter >= 0
#endif
	dsb						// wait for the line ops to complete
	bx	lr
225
/*
 * void CleanPoC_DcacheRegion(vm_offset_t va, size_t length)
 * void CleanPoC_DcacheRegion_Force(vm_offset_t va, size_t length)
 *
 * Clean the d-cache lines covering [va, va + length) to the Point of
 * Coherency.  Both entry points share this one implementation here.
 * In:  r0 = va, r1 = length in bytes.  Clobbers r0-r2.
 */
	.text
	.align 2
	.globl EXT(CleanPoC_DcacheRegion)
	.globl EXT(CleanPoC_DcacheRegion_Force)
LEXT(CleanPoC_DcacheRegion)
LEXT(CleanPoC_DcacheRegion_Force)
	and	r2, r0, #((1<<MMU_CLINE)-1)		// r2 = va's offset within its cache line
	bic	r0, r0, #((1<<MMU_CLINE)-1)		// align start address down to a line boundary
	add	r1, r1, r2				// grow length so the partial first line is covered
	sub	r1, r1, #1
	mov	r1, r1, LSR #MMU_CLINE			// r1 = number of lines - 1 (loop counter)
	dsb						// complete prior accesses before the line ops
ccdr_loop:
	mcr	p15, 0, r0, c7, c10, 1			// clean dcache line to PoC by MVA
	add	r0, r0, #1<<MMU_CLINE			// advance to the next cache line
	subs	r1, r1, #1				// decrement line counter
	bpl	ccdr_loop				// loop while counter >= 0
	dsb						// wait for the line ops to complete
	bx	lr
250
/*
 * void FlushPoC_Dcache(void)
 *
 * Clean and invalidate all d-caches to the Point of Coherency, walking
 * every set/way of the L1 and, when __ARM_L2CACHE__ is configured, the L2.
 */
	.text
	.align 2
	.globl EXT(FlushPoC_Dcache)
LEXT(FlushPoC_Dcache)
	mov	r0, #0					// start at set 0 / way 0
	dsb						// complete prior accesses before the walk
cleanflush_dcacheway:
cleanflush_dcacheline:
	mcr	p15, 0, r0, c7, c14, 2			// clean+invalidate dcache line by way/set
	add	r0, r0, #1 << MMU_I7SET			// increment set index
	tst	r0, #1 << (MMU_NSET + MMU_I7SET)	// look for set-field overflow
	beq	cleanflush_dcacheline
	bic	r0, r0, #1 << (MMU_NSET + MMU_I7SET)	// clear set overflow
	adds	r0, r0, #1 << MMU_I7WAY			// increment way; carry set when ways wrap
	bcc	cleanflush_dcacheway			// loop until the way field overflows
#if __ARM_L2CACHE__
	dsb						// finish L1 ops before starting on the L2
	mov	r0, #2					// NOTE(review): #2 presumably selects the L2 in the
							// set/way register's cache-level field -- confirm
							// against proc_reg.h
cleanflush_l2dcacheway:
cleanflush_l2dcacheline:
	mcr	p15, 0, r0, c7, c14, 2			// clean+invalidate dcache line by way/set
	add	r0, r0, #1 << L2_I7SET			// increment set index
	tst	r0, #1 << (L2_NSET + L2_I7SET)		// look for set-field overflow
	beq	cleanflush_l2dcacheline
	bic	r0, r0, #1 << (L2_NSET + L2_I7SET)	// clear set overflow
	adds	r0, r0, #1 << L2_I7WAY			// increment way; carry set when ways wrap
	bcc	cleanflush_l2dcacheway			// loop until the way field overflows
#endif
	dsb						// wait for all ops to complete
	bx	lr
286
/*
 * void FlushPoU_Dcache(void)
 *
 * Clean and invalidate the d-cache to the Point of Unification by walking
 * every set/way of the L1 (no L2 pass, unlike FlushPoC_Dcache).
 */
	.text
	.align 2
	.globl EXT(FlushPoU_Dcache)
LEXT(FlushPoU_Dcache)
	mov	r0, #0					// start at set 0 / way 0
	dsb						// complete prior accesses before the walk
fpud_way:
fpud_line:
	mcr	p15, 0, r0, c7, c14, 2			// clean+invalidate dcache line by way/set
	add	r0, r0, #1 << MMU_I7SET			// increment set index
	tst	r0, #1 << (MMU_NSET + MMU_I7SET)	// look for set-field overflow
	beq	fpud_line
	bic	r0, r0, #1 << (MMU_NSET + MMU_I7SET)	// clear set overflow
	adds	r0, r0, #1 << MMU_I7WAY			// increment way; carry set when ways wrap
	bcc	fpud_way				// loop until the way field overflows
	dsb						// wait for all ops to complete
	bx	lr
309
/*
 * void FlushPoC_DcacheRegion(vm_offset_t va, unsigned length)
 *
 * Clean and invalidate the d-cache lines covering [va, va + length) to the
 * Point of Coherency.
 * In:  r0 = va, r1 = length in bytes.  Clobbers r0-r2.
 */
	.text
	.align 2
	.globl EXT(FlushPoC_DcacheRegion)
LEXT(FlushPoC_DcacheRegion)
	and	r2, r0, #((1<<MMU_CLINE)-1)		// r2 = va's offset within its cache line
	bic	r0, r0, #((1<<MMU_CLINE)-1)		// align start address down to a line boundary
	add	r1, r1, r2				// grow length so the partial first line is covered
	sub	r1, r1, #1
	mov	r1, r1, LSR #MMU_CLINE			// r1 = number of lines - 1 (loop counter)
	dsb						// complete prior accesses before the line ops
cfmdr_loop:
	mcr	p15, 0, r0, c7, c14, 1			// clean & invalidate dcache line by MVA
	add	r0, r0, #1<<MMU_CLINE			// advance to the next cache line
	subs	r1, r1, #1				// decrement line counter
	bpl	cfmdr_loop				// loop while counter >= 0
	dsb						// wait for the line ops to complete
	bx	lr
332
/*
 * void flush_dcache64(addr64_t addr, unsigned length, boolean_t phys)
 *
 * 64-bit-address wrapper: under the 32-bit ABI the addr64_t occupies
 * r0/r1, with length in r2 and phys in r3.  Shift length and phys down
 * into r1/r2 (discarding the high address word in r1) so the registers
 * match flush_dcache(vm_offset_t, unsigned, boolean_t), then tail-jump.
 */
	.text
	.align 2
	.globl EXT(flush_dcache64)
LEXT(flush_dcache64)
	mov	r1, r2					// r1 = length (high address word dropped)
	mov	r2, r3					// r2 = phys flag
	LOAD_ADDR_PC(flush_dcache)			// tail-jump to flush_dcache; macro presumably loads the
							// address emitted by LOAD_ADDR_GEN_DEF at end of file
343
/*
 * void clean_dcache64(addr64_t addr, unsigned length, boolean_t phys)
 *
 * 64-bit-address wrapper: under the 32-bit ABI the addr64_t occupies
 * r0/r1, with length in r2 and phys in r3.  Shift length and phys down
 * into r1/r2 (discarding the high address word in r1) so the registers
 * match clean_dcache(vm_offset_t, unsigned, boolean_t), then tail-jump.
 */
	.text
	.align 2
	.globl EXT(clean_dcache64)
LEXT(clean_dcache64)
	mov	r1, r2					// r1 = length (high address word dropped)
	mov	r2, r3					// r2 = phys flag
	LOAD_ADDR_PC(clean_dcache)			// tail-jump to clean_dcache; macro presumably loads the
							// address emitted by LOAD_ADDR_GEN_DEF at end of file
354
/*
 * void invalidate_icache(vm_offset_t va, unsigned length, boolean_t phys)
 * void invalidate_icache64(addr64_t va, unsigned length, boolean_t phys)
 *
 * Invalidate an i-cache region given either a virtual address (phys == 0)
 * or a physical address (phys != 0), which is first translated into the
 * kernel's mapping via the gPhysBase/gVirtBase globals.
 */
	.text
	.align 2
	.globl EXT(invalidate_icache64)
	.globl EXT(invalidate_icache)
LEXT(invalidate_icache64)
	mov	r1, r2					// 64-bit entry: addr64_t used r0/r1, so shift
	mov	r2, r3					// length -> r1 and phys -> r2, then fall through
LEXT(invalidate_icache)
	cmp	r2, #0					// Is it physical?
	COND_EXTERN_BEQ(InvalidatePoU_IcacheRegion)	// virtual address: operate on it directly
	LOAD_ADDR(r2, gPhysBase)			// physical: translate pa -> kernel va as
	ldr	r2, [r2]				//   va = pa - gPhysBase + gVirtBase
	sub	r0, r0, r2				// (assumes the pa lies in the kernel's linear
	LOAD_ADDR(r2, gVirtBase)			//  gVirtBase mapping -- TODO(review) confirm)
	ldr	r2, [r2]
	add	r0, r0, r2
	b	EXT(InvalidatePoU_IcacheRegion)		// tail-call with the translated address
376
377
#include "globals_asm.h"

/*
 * Emit the address literals consumed by the LOAD_ADDR / LOAD_ADDR_PC uses
 * above (macro comes from globals_asm.h -- presumably defines a
 * pc-relative pointer per named symbol; confirm there).
 */
LOAD_ADDR_GEN_DEF(flush_dcache)
LOAD_ADDR_GEN_DEF(clean_dcache)
382
383/* vim: set ts=4: */