2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
23 * @APPLE_LICENSE_HEADER_END@
32 #include <ppc/proc_reg.h>
35 #include <mach_debug.h>
36 #include <mach/ppc/vm_param.h>
39 * extern void sync_cache(vm_offset_t pa, unsigned count);
41 * sync_cache takes a physical address and count to sync, thus
42 * must not be called for multiple virtual pages.
44 * it writes out the data cache and invalidates the instruction
45 * cache for the address range in question
48 ENTRY(sync_cache, TAG_NO_FRAME_USED)

/*
 * sync_cache: write the data cache back to memory and invalidate the
 * instruction cache for a *physical* address range, so newly written
 * instructions become visible to instruction fetch.  Per the prototype
 * above, r3 presumably holds the physical address and r4 the byte
 * count (r4 is clearly the count — it is compared and adjusted against
 * CACHE_LINE_SIZE below); TODO confirm r3 usage against full source.
 *
 * NOTE(review): the embedded line numbers in this listing are
 * non-contiguous — instructions appear to have been lost.  In
 * particular: the `mfmsr r6` that must precede the rlwinm sequence
 * (r6 is otherwise read uninitialized), the `mtmsr r7`/`isync` that
 * actually disable data translation, the `mtctr r8` feeding the bdnz
 * loops, the loop labels `.L_sync_data_loop`/`.L_sync_inval_loop` and
 * their `dcbst`/`icbi` bodies, and the final `blr`.  Restore from the
 * original file before assembling.
 */

50 /* Switch off data translations */
/* Build r6 = (saved MSR) with FP and VEC forced off; r7 = r6 with DR
 * (data relocation) also cleared — presumably written to the MSR by a
 * lost `mtmsr r7` so the loop can run on physical addresses. */
52 rlwinm r6,r6,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
53 rlwinm r6,r6,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
54 rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1

58 /* Check to see if the address is aligned. */
/* r8 = address low bits; nonzero means the start is mid-line, so the
 * count is extended by one line to cover the partial first line. */
60 andi. r8,r8,(CACHE_LINE_SIZE-1)
62 addi r4,r4,CACHE_LINE_SIZE
63 li r7,(CACHE_LINE_SIZE-1) /* Align buffer & count - avoid overflow problems */
/* NOTE(review): this `li r7,...` clobbers the MSR image computed at
 * line 54 — in the original, line 54's value has presumably already
 * been consumed by a (lost) mtmsr before this point. */

68 cmpwi r4, CACHE_LINE_SIZE

71 /* Make ctr hold count of how many times we should loop */
/* r8 = ceil(count / CACHE_LINE_SIZE); a `mtctr r8` appears to be
 * missing from this listing. */
72 addi r8, r4, (CACHE_LINE_SIZE-1)
73 srwi r8, r8, CACHE_LINE_POW2

76 /* loop to flush the data cache */
/* Walks the range one cache line per iteration (loop body/label lost
 * from this listing; original presumably `dcbst 0,rX` per line). */
78 subic r4, r4, CACHE_LINE_SIZE
80 bdnz .L_sync_data_loop

85 /* loop to invalidate the instruction cache */
/* Second pass over the same range (body/label lost; presumably
 * `icbi 0,rX` per line). */
88 addic r4, r4, CACHE_LINE_SIZE
89 bdnz .L_sync_inval_loop

/* Drain stores, restore the caller's MSR (translations, FP, VEC), and
 * resynchronize the instruction stream. */
92 sync /* Finish physical writes */
93 mtmsr r6 /* Restore original translations */
94 isync /* Ensure data translations are on */
/* NOTE(review): no `blr` visible — the function return was lost from
 * this listing. */
104 * extern void flush_dcache(vm_offset_t addr, unsigned count, boolean phys);
106 * flush_dcache takes a virtual or physical address and count to flush
107 * (and can be called for multiple virtual pages).
109 * it flushes the data cache
110 * for the address range in question
112 * if 'phys' is non-zero then physical addresses will be used
115 ENTRY(flush_dcache, TAG_NO_FRAME_USED)

/*
 * flush_dcache: write back and invalidate the data cache over an
 * address range.  Per the prototype above: address, byte count, and a
 * 'phys' flag selecting physical addressing — r4 is clearly the count
 * (compared against CACHE_LINE_SIZE below); r3/r5 roles are presumed
 * from the PPC ABI — TODO confirm against full source.
 *
 * NOTE(review): lost lines in this listing (non-contiguous embedded
 * numbering): the `mfmsr r6` feeding the rlwinm sequence, the test of
 * the 'phys' flag and conditional `mtmsr r7`, the `mtctr r8`, the
 * `dcbf` in the flush loop, the conditional MSR-restore guard, the
 * single-line flush under `.L_flush_dcache_one_line`, and the final
 * `blr`.  Restore from the original file before assembling.
 */

117 /* optionally switch off data translations */
/* r6 = saved MSR with FP/VEC forced off; r7 = r6 with DR cleared,
 * presumably installed only when 'phys' is nonzero (guard lost). */
122 rlwinm r6,r6,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
123 rlwinm r6,r6,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
124 rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1

129 /* Check to see if the address is aligned. */
/* If the start address is mid-line (low bits nonzero), extend the
 * count by one line so the partial first line is covered. */
131 andi. r8,r8,(CACHE_LINE_SIZE-1)
132 beq- .L_flush_dcache_check
133 addi r4,r4,CACHE_LINE_SIZE
134 li r7,(CACHE_LINE_SIZE-1) /* Align buffer & count - avoid overflow problems */

138 .L_flush_dcache_check:
/* One line or less: take the single-line path. */
139 cmpwi r4, CACHE_LINE_SIZE
140 ble .L_flush_dcache_one_line

142 /* Make ctr hold count of how many times we should loop */
/* r8 = ceil(count / CACHE_LINE_SIZE); `mtctr r8` lost from listing. */
143 addi r8, r4, (CACHE_LINE_SIZE-1)
144 srwi r8, r8, CACHE_LINE_POW2

147 .L_flush_dcache_flush_loop:
/* One cache line per iteration (the `dcbf` body was lost). */
148 subic r4, r4, CACHE_LINE_SIZE
150 bdnz .L_flush_dcache_flush_loop

152 .L_flush_dcache_done:
153 /* Sync restore msr if it was modified */
154 sync /* make sure invalidates have completed */
/* NOTE(review): the "if it was modified" guard branch is not visible;
 * as listed, the MSR restore is unconditional. */
157 mtmsr r6 /* Restore original translations */
158 isync /* Ensure data translations are on */

162 .L_flush_dcache_one_line:
/* Single-line case; the flush instruction itself was lost — only the
 * rejoin branch remains. */
165 b .L_flush_dcache_done
169 * extern void invalidate_dcache(vm_offset_t va, unsigned count, boolean phys);
171 * invalidate_dcache takes a virtual or physical address and count to
172 * invalidate (and can be called for multiple virtual pages).
174 * it invalidates the data cache for the address range in question
177 ENTRY(invalidate_dcache, TAG_NO_FRAME_USED)

/*
 * invalidate_dcache: discard (invalidate without writeback) data-cache
 * lines over an address range.  Per the prototype above: address,
 * byte count, and a 'phys' flag — r4 is clearly the count; r3/r5
 * roles presumed from the PPC ABI — TODO confirm against full source.
 *
 * NOTE(review): lost lines in this listing: the `mfmsr r6` feeding
 * the rlwinm sequence, the 'phys' test and conditional `mtmsr r7`,
 * the `mtctr r8`, the `dcbi` loop body, the conditional MSR-restore
 * guard, the single-line invalidate, and the final `blr`.  Restore
 * from the original file before assembling.
 */

179 /* optionally switch off data translations */
/* r6 = saved MSR with FP/VEC forced off; r7 = r6 with DR cleared,
 * presumably installed only for physical-address mode (guard lost). */
184 rlwinm r6,r6,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
185 rlwinm r6,r6,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
186 rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1

191 /* Check to see if the address is aligned. */
/* Mid-line start: extend the count by one line to cover the partial
 * first line. */
193 andi. r8,r8,(CACHE_LINE_SIZE-1)
194 beq- .L_invalidate_dcache_check
195 addi r4,r4,CACHE_LINE_SIZE
196 li r7,(CACHE_LINE_SIZE-1) /* Align buffer & count - avoid overflow problems */

200 .L_invalidate_dcache_check:
/* One line or less: take the single-line path. */
201 cmpwi r4, CACHE_LINE_SIZE
202 ble .L_invalidate_dcache_one_line

204 /* Make ctr hold count of how many times we should loop */
/* r8 = ceil(count / CACHE_LINE_SIZE); `mtctr r8` lost from listing. */
205 addi r8, r4, (CACHE_LINE_SIZE-1)
206 srwi r8, r8, CACHE_LINE_POW2

209 .L_invalidate_dcache_invalidate_loop:
/* One cache line per iteration (the invalidate body was lost). */
210 subic r4, r4, CACHE_LINE_SIZE
212 bdnz .L_invalidate_dcache_invalidate_loop

214 .L_invalidate_dcache_done:
215 /* Sync restore msr if it was modified */
/* NOTE(review): the "if it was modified" guard branch is not visible;
 * as listed, the MSR restore is unconditional. */
217 sync /* make sure invalidates have completed */
219 mtmsr r6 /* Restore original translations */
220 isync /* Ensure data translations are on */

224 .L_invalidate_dcache_one_line:
/* Single-line case; the invalidate instruction itself was lost — only
 * the rejoin branch remains. */
227 b .L_invalidate_dcache_done
230 * extern void invalidate_icache(vm_offset_t addr, unsigned cnt, boolean phys);
232 * invalidate_icache takes a virtual or physical address and
233 * count to invalidate, (can be called for multiple virtual pages).
235 * it invalidates the instruction cache for the address range in question.
238 ENTRY(invalidate_icache, TAG_NO_FRAME_USED)

/*
 * invalidate_icache: invalidate instruction-cache lines over an
 * address range.  Per the prototype above: address, byte count, and a
 * 'phys' flag — r4 is clearly the count; r3/r5 roles presumed from
 * the PPC ABI — TODO confirm against full source.
 *
 * NOTE(review): lost lines in this listing: the `mfmsr r6` feeding
 * the rlwinm sequence, the 'phys' test and conditional `mtmsr r7`,
 * the `mtctr r8`, the `icbi` loop body, the single-line invalidate,
 * and the final `blr`.  Restore from the original file before
 * assembling.
 */

240 /* optionally switch off data translations */
/* r6 = saved MSR with FP/VEC forced off; r7 = r6 with DR cleared,
 * presumably installed only for physical-address mode (guard lost). */
244 rlwinm r6,r6,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
245 rlwinm r6,r6,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
246 rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1

251 /* Check to see if the address is aligned. */
/* Mid-line start: extend the count by one line to cover the partial
 * first line. */
253 andi. r8,r8,(CACHE_LINE_SIZE-1)
254 beq- .L_invalidate_icache_check
255 addi r4,r4,CACHE_LINE_SIZE
256 li r7,(CACHE_LINE_SIZE-1) /* Align buffer & count - avoid overflow problems */

260 .L_invalidate_icache_check:
/* One line or less: take the single-line path. */
261 cmpwi r4, CACHE_LINE_SIZE
262 ble .L_invalidate_icache_one_line

264 /* Make ctr hold count of how many times we should loop */
/* r8 = ceil(count / CACHE_LINE_SIZE); `mtctr r8` lost from listing. */
265 addi r8, r4, (CACHE_LINE_SIZE-1)
266 srwi r8, r8, CACHE_LINE_POW2

269 .L_invalidate_icache_invalidate_loop:
/* One cache line per iteration (the `icbi` body was lost). */
270 subic r4, r4, CACHE_LINE_SIZE
272 bdnz .L_invalidate_icache_invalidate_loop

274 .L_invalidate_icache_done:
/* Drain, restore the caller's MSR, and resync instruction fetch. */
275 sync /* make sure invalidates have completed */
276 mtmsr r6 /* Restore original translations */
277 isync /* Ensure data translations are on */

280 .L_invalidate_icache_one_line:
/* Single-line case; the invalidate instruction itself was lost — only
 * the rejoin branch remains. */
283 b .L_invalidate_icache_done