/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
25 | ||
26 | #include <cpus.h> | |
27 | ||
28 | #include <ppc/asm.h> | |
29 | #include <ppc/proc_reg.h> | |
30 | #include <cpus.h> | |
31 | #include <assym.s> | |
32 | #include <mach_debug.h> | |
33 | #include <mach/ppc/vm_param.h> | |
34 | ||
/*
 * extern void sync_cache(vm_offset_t pa, unsigned count);
 *
 * sync_cache takes a physical address and a count of bytes to sync,
 * so it must not be called for a range spanning multiple virtual pages.
 *
 * It writes out the data cache and invalidates the instruction
 * cache for the address range in question.
 */
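
/*
 * Illustrative usage sketch (hypothetical caller, assumed names): code
 * that has just written instructions to memory would typically call
 * sync_cache before executing them, so the new bytes are pushed out of
 * the data cache and stale instruction cache lines are discarded.
 *
 *      extern void sync_cache(vm_offset_t pa, unsigned count);
 *
 *      static void
 *      publish_new_code(vm_offset_t code_pa, unsigned code_len)
 *      {
 *              // code_pa is a physical address; the range must not
 *              // span multiple virtual pages (see note above)
 *              sync_cache(code_pa, code_len);
 *      }
 */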
44 | ||
45 | ENTRY(sync_cache, TAG_NO_FRAME_USED) | |
46 | ||
47 | /* Switch off data translations */ | |
48 | mfmsr r6 | |
49 | rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1 | |
50 | mtmsr r7 | |
51 | isync | |
52 | ||
53 | /* Check to see if the address is aligned. */ | |
54 | add r8, r3,r4 | |
55 | andi. r8,r8,(CACHE_LINE_SIZE-1) | |
56 | beq- .L_sync_check | |
57 | addi r4,r4,CACHE_LINE_SIZE | |
58 | li r7,(CACHE_LINE_SIZE-1) /* Align buffer & count - avoid overflow problems */ | |
59 | andc r4,r4,r7 | |
60 | andc r3,r3,r7 | |
61 | ||
62 | .L_sync_check: | |
63 | cmpwi r4, CACHE_LINE_SIZE | |
64 | ble .L_sync_one_line | |
65 | ||
66 | /* Make ctr hold count of how many times we should loop */ | |
67 | addi r8, r4, (CACHE_LINE_SIZE-1) | |
68 | srwi r8, r8, CACHE_LINE_POW2 | |
69 | mtctr r8 | |
70 | ||
71 | /* loop to flush the data cache */ | |
72 | .L_sync_data_loop: | |
73 | subic r4, r4, CACHE_LINE_SIZE | |
74 | dcbst r3, r4 | |
75 | bdnz .L_sync_data_loop | |
76 | ||
77 | sync | |
78 | mtctr r8 | |
79 | ||
80 | /* loop to invalidate the instruction cache */ | |
81 | .L_sync_inval_loop: | |
82 | icbi r3, r4 | |
83 | addic r4, r4, CACHE_LINE_SIZE | |
84 | bdnz .L_sync_inval_loop | |
85 | ||
86 | .L_sync_cache_done: | |
87 | sync /* Finish physical writes */ | |
88 | mtmsr r6 /* Restore original translations */ | |
89 | isync /* Ensure data translations are on */ | |
90 | blr | |
91 | ||
92 | .L_sync_one_line: | |
93 | dcbst 0,r3 | |
94 | sync | |
95 | icbi 0,r3 | |
96 | b .L_sync_cache_done | |
97 | ||
/*
 * extern void flush_dcache(vm_offset_t addr, unsigned count, boolean phys);
 *
 * flush_dcache takes a virtual or physical address and a count of bytes
 * to flush (it can be called for ranges spanning multiple virtual pages).
 *
 * It flushes the data cache for the address range in question.
 *
 * If 'phys' is non-zero then physical addresses are used.
 */
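
/*
 * Illustrative usage sketch (hypothetical caller, assumed names): before
 * handing a buffer to a device for an outbound DMA transfer, a driver
 * might flush the buffer's data cache lines to memory so the device sees
 * the CPU's latest writes.
 *
 *      extern void flush_dcache(vm_offset_t addr, unsigned count, boolean phys);
 *
 *      static void
 *      prepare_dma_out(vm_offset_t buf_va, unsigned buf_len)
 *      {
 *              // buf_va is a virtual address, so pass phys == 0
 *              flush_dcache(buf_va, buf_len, 0);
 *      }
 */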
109 | ||
110 | ENTRY(flush_dcache, TAG_NO_FRAME_USED) | |
111 | ||
112 | /* optionally switch off data translations */ | |
113 | ||
114 | cmpwi r5, 0 | |
115 | mfmsr r6 | |
116 | beq+ 0f | |
117 | rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1 | |
118 | mtmsr r7 | |
119 | isync | |
120 | 0: | |
121 | ||
122 | /* Check to see if the address is aligned. */ | |
123 | add r8, r3,r4 | |
124 | andi. r8,r8,(CACHE_LINE_SIZE-1) | |
125 | beq- .L_flush_dcache_check | |
126 | addi r4,r4,CACHE_LINE_SIZE | |
127 | li r7,(CACHE_LINE_SIZE-1) /* Align buffer & count - avoid overflow problems */ | |
128 | andc r4,r4,r7 | |
129 | andc r3,r3,r7 | |
130 | ||
131 | .L_flush_dcache_check: | |
132 | cmpwi r4, CACHE_LINE_SIZE | |
133 | ble .L_flush_dcache_one_line | |
134 | ||
135 | /* Make ctr hold count of how many times we should loop */ | |
136 | addi r8, r4, (CACHE_LINE_SIZE-1) | |
137 | srwi r8, r8, CACHE_LINE_POW2 | |
138 | mtctr r8 | |
139 | ||
140 | .L_flush_dcache_flush_loop: | |
141 | subic r4, r4, CACHE_LINE_SIZE | |
142 | dcbf r3, r4 | |
143 | bdnz .L_flush_dcache_flush_loop | |
144 | ||
145 | .L_flush_dcache_done: | |
146 | /* Sync restore msr if it was modified */ | |
147 | cmpwi r5, 0 | |
148 | sync /* make sure invalidates have completed */ | |
149 | beq+ 0f | |
150 | mtmsr r6 /* Restore original translations */ | |
151 | isync /* Ensure data translations are on */ | |
152 | 0: | |
153 | blr | |
154 | ||
155 | .L_flush_dcache_one_line: | |
156 | xor r4,r4,r4 | |
157 | dcbf 0,r3 | |
158 | b .L_flush_dcache_done | |
159 | ||
160 | ||
/*
 * extern void invalidate_dcache(vm_offset_t va, unsigned count, boolean phys);
 *
 * invalidate_dcache takes a virtual or physical address and a count of
 * bytes to invalidate (it can be called for ranges spanning multiple
 * virtual pages).
 *
 * It invalidates the data cache for the address range in question.
 */
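
/*
 * Illustrative usage sketch (hypothetical caller, assumed names): after a
 * device has DMA'd data into a buffer, a driver might invalidate the
 * buffer's data cache lines so the CPU reads the new contents from memory
 * rather than stale cached data.  Note that dcbi discards modified lines,
 * so the caller must not have pending writes in the range.
 *
 *      extern void invalidate_dcache(vm_offset_t va, unsigned count, boolean phys);
 *
 *      static void
 *      complete_dma_in(vm_offset_t buf_va, unsigned buf_len)
 *      {
 *              // buf_va is a virtual address, so pass phys == 0
 *              invalidate_dcache(buf_va, buf_len, 0);
 *      }
 */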
169 | ||
170 | ENTRY(invalidate_dcache, TAG_NO_FRAME_USED) | |
171 | ||
172 | /* optionally switch off data translations */ | |
173 | ||
174 | cmpwi r5, 0 | |
175 | mfmsr r6 | |
176 | beq+ 0f | |
177 | rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1 | |
178 | mtmsr r7 | |
179 | isync | |
180 | 0: | |
181 | ||
182 | /* Check to see if the address is aligned. */ | |
183 | add r8, r3,r4 | |
184 | andi. r8,r8,(CACHE_LINE_SIZE-1) | |
185 | beq- .L_invalidate_dcache_check | |
186 | addi r4,r4,CACHE_LINE_SIZE | |
187 | li r7,(CACHE_LINE_SIZE-1) /* Align buffer & count - avoid overflow problems */ | |
188 | andc r4,r4,r7 | |
189 | andc r3,r3,r7 | |
190 | ||
191 | .L_invalidate_dcache_check: | |
192 | cmpwi r4, CACHE_LINE_SIZE | |
193 | ble .L_invalidate_dcache_one_line | |
194 | ||
195 | /* Make ctr hold count of how many times we should loop */ | |
196 | addi r8, r4, (CACHE_LINE_SIZE-1) | |
197 | srwi r8, r8, CACHE_LINE_POW2 | |
198 | mtctr r8 | |
199 | ||
200 | .L_invalidate_dcache_invalidate_loop: | |
201 | subic r4, r4, CACHE_LINE_SIZE | |
202 | dcbi r3, r4 | |
1c79356b A |
203 | bdnz .L_invalidate_dcache_invalidate_loop |
204 | ||
205 | .L_invalidate_dcache_done: | |
206 | /* Sync restore msr if it was modified */ | |
207 | cmpwi r5, 0 | |
208 | sync /* make sure invalidates have completed */ | |
209 | beq+ 0f | |
210 | mtmsr r6 /* Restore original translations */ | |
211 | isync /* Ensure data translations are on */ | |
212 | 0: | |
213 | blr | |
214 | ||
215 | .L_invalidate_dcache_one_line: | |
216 | xor r4,r4,r4 | |
217 | dcbi 0,r3 | |
1c79356b A |
218 | b .L_invalidate_dcache_done |
219 | ||
/*
 * extern void invalidate_icache(vm_offset_t addr, unsigned cnt, boolean phys);
 *
 * invalidate_icache takes a virtual or physical address and a count of
 * bytes to invalidate (it can be called for ranges spanning multiple
 * virtual pages).
 *
 * It invalidates the instruction cache for the address range in question.
 */
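
/*
 * Illustrative usage sketch (hypothetical caller, assumed names): after
 * modifying instructions in place (for example when patching code), a
 * caller might first flush the data cache for the range and then discard
 * any stale instruction cache lines before the new code is executed.
 *
 *      extern void flush_dcache(vm_offset_t addr, unsigned count, boolean phys);
 *      extern void invalidate_icache(vm_offset_t addr, unsigned cnt, boolean phys);
 *
 *      static void
 *      finish_code_patch(vm_offset_t code_va, unsigned code_len)
 *      {
 *              flush_dcache(code_va, code_len, 0);     // push modified bytes to memory
 *              invalidate_icache(code_va, code_len, 0); // drop stale icache lines
 *      }
 */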
228 | ||
229 | ENTRY(invalidate_icache, TAG_NO_FRAME_USED) | |
230 | ||
231 | /* optionally switch off data translations */ | |
232 | cmpwi r5, 0 | |
233 | mfmsr r6 | |
234 | beq+ 0f | |
235 | rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1 | |
236 | mtmsr r7 | |
237 | isync | |
238 | 0: | |
239 | ||
240 | /* Check to see if the address is aligned. */ | |
241 | add r8, r3,r4 | |
242 | andi. r8,r8,(CACHE_LINE_SIZE-1) | |
243 | beq- .L_invalidate_icache_check | |
244 | addi r4,r4,CACHE_LINE_SIZE | |
245 | li r7,(CACHE_LINE_SIZE-1) /* Align buffer & count - avoid overflow problems */ | |
246 | andc r4,r4,r7 | |
247 | andc r3,r3,r7 | |
248 | ||
249 | .L_invalidate_icache_check: | |
250 | cmpwi r4, CACHE_LINE_SIZE | |
251 | ble .L_invalidate_icache_one_line | |
252 | ||
253 | /* Make ctr hold count of how many times we should loop */ | |
254 | addi r8, r4, (CACHE_LINE_SIZE-1) | |
255 | srwi r8, r8, CACHE_LINE_POW2 | |
256 | mtctr r8 | |
257 | ||
258 | .L_invalidate_icache_invalidate_loop: | |
259 | subic r4, r4, CACHE_LINE_SIZE | |
260 | icbi r3, r4 | |
1c79356b A |
261 | bdnz .L_invalidate_icache_invalidate_loop |
262 | ||
263 | .L_invalidate_icache_done: | |
1c79356b | 264 | sync /* make sure invalidates have completed */ |
1c79356b A |
265 | mtmsr r6 /* Restore original translations */ |
266 | isync /* Ensure data translations are on */ | |
1c79356b A |
267 | blr |
268 | ||
269 | .L_invalidate_icache_one_line: | |
270 | xor r4,r4,r4 | |
271 | icbi 0,r3 | |
1c79356b | 272 | b .L_invalidate_icache_done |