/*
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_assert.h>
#include <mach/vm_types.h>
#include <mach/mach_time.h>
#include <kern/timer.h>
#include <kern/clock.h>
#include <kern/machine.h>
#include <mach/machine.h>
#include <mach/machine/vm_param.h>
#include <mach_kdp.h>
#include <kdp/kdp_udp.h>
#include <arm/caches_internal.h>
#include <arm/cpuid.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <arm/misc_protos.h>

/*
 * dcache_incoherent_io_flush64() dcache_incoherent_io_store64() result info
 */
#define LWOpDone 1
#define BWOpDone 3

#ifndef __ARM_COHERENT_IO__

extern boolean_t up_style_idle_exit;

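/*
 * flush_dcache: clean and invalidate the data cache to the Point of
 * Coherency for the given range.  'addr' is interpreted as a physical
 * address when 'phys' is TRUE, otherwise as a kernel virtual address that
 * is translated page by page.  Regions with a valid physical address are
 * also passed to the registered platform cache dispatch hook (e.g. an
 * outer cache), when one is present.
 */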
void
flush_dcache(
	vm_offset_t addr,
	unsigned length,
	boolean_t phys)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	vm_offset_t vaddr;
	addr64_t paddr;
	vm_size_t count;

	while (length > 0) {
		if (phys) {
			count = length;
			paddr = CAST_DOWN(pmap_paddr_t, addr);
			vaddr = phystokv_range(paddr, &count);
		} else {
			paddr = kvtophys(addr);
			vaddr = addr;
			count = PAGE_SIZE - (addr & PAGE_MASK);
			if (count > length) {
				count = length;
			}
		}
		FlushPoC_DcacheRegion(vaddr, (unsigned)count);
		if (paddr && (cpu_data_ptr->cpu_cache_dispatch != NULL)) {
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
				cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) paddr, (unsigned)count);
		}
		addr += count;
		length -= count;
	}
	return;
}

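/*
 * clean_dcache: same walk as flush_dcache(), but only cleans (writes back)
 * the data cache to the Point of Coherency without invalidating the lines.
 */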
void
clean_dcache(
	vm_offset_t addr,
	unsigned length,
	boolean_t phys)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	vm_offset_t vaddr;
	addr64_t paddr;
	vm_size_t count;

	while (length > 0) {
		if (phys) {
			count = length;
			paddr = CAST_DOWN(pmap_paddr_t, addr);
			vaddr = phystokv_range(paddr, &count);
		} else {
			paddr = kvtophys(addr);
			vaddr = addr;
			count = PAGE_SIZE - (addr & PAGE_MASK);
			if (count > length) {
				count = length;
			}
		}
		CleanPoC_DcacheRegion(vaddr, (unsigned)count);
		if (paddr && (cpu_data_ptr->cpu_cache_dispatch != NULL)) {
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
				cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) paddr, (unsigned)count);
		}
		addr += count;
		length -= count;
	}
	return;
}

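/*
 * flush_dcache_syscall: flush a user-requested virtual range.  Ranges at
 * least as large as the bulk-operation threshold (c_bulksize_op) are
 * handled with a full D-cache flush (cross-called to other cores on ARMv7
 * SMP); smaller ranges are flushed by region.
 */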
void
flush_dcache_syscall(
	vm_offset_t va,
	unsigned length)
{
	if ((cache_info()->c_bulksize_op != 0) && (length >= (cache_info()->c_bulksize_op))) {
#if __ARM_SMP__ && defined(ARMA7)
		cache_xcall(LWFlush);
#else
		FlushPoC_Dcache();
		if (getCpuDatap()->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
			((cache_dispatch_t) getCpuDatap()->cpu_cache_dispatch)( getCpuDatap()->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
		}
#endif
	} else {
		FlushPoC_DcacheRegion((vm_offset_t) va, length);
	}
	return;
}

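/*
 * dcache_incoherent_io_flush64: clean and invalidate a physical range on
 * behalf of non-coherent I/O.  Large remaining amounts fall back to a bulk
 * flush and report BWOpDone; otherwise the range is walked page by page,
 * using a temporary per-CPU copy window for pages that are not part of the
 * managed physical aperture.
 */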
void
dcache_incoherent_io_flush64(
	addr64_t pa,
	unsigned int size,
	unsigned int remaining,
	unsigned int *res)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if ((cache_info()->c_bulksize_op != 0) && (remaining >= (cache_info()->c_bulksize_op))) {
#if __ARM_SMP__ && defined (ARMA7)
		cache_xcall(LWFlush);
#else
		FlushPoC_Dcache();
		if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( cpu_data_ptr->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
		}
#endif
		*res = BWOpDone;
	} else {
		vm_offset_t vaddr;
		pmap_paddr_t paddr = CAST_DOWN(pmap_paddr_t, pa);
		vm_size_t count;
		unsigned int wimg_bits, index;

		while (size > 0) {
			if (isphysmem(paddr)) {
				count = size;
				vaddr = phystokv_range(paddr, &count);
			} else {
				count = PAGE_SIZE - (paddr & PAGE_MASK);
				if (count > size) {
					count = size;
				}

				wimg_bits = pmap_cache_attributes((ppnum_t) (paddr >> PAGE_SHIFT));
				mp_disable_preemption();
				index = pmap_map_cpu_windows_copy((ppnum_t) (paddr >> PAGE_SHIFT), VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
				vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | (paddr & PAGE_MASK);
			}
			FlushPoC_DcacheRegion(vaddr, (unsigned)count);
			if (isphysmem(paddr)) {
				if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
					((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
						cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) paddr, (unsigned)count);
				}
			} else {
				pmap_unmap_cpu_windows_copy(index);
				mp_enable_preemption();
			}
			paddr += count;
			size -= count;
		}
	}

	return;
}

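/*
 * dcache_incoherent_io_store64: clean (write back) a physical range for
 * non-coherent I/O.  Ranges mapped as IO, write-combined or real-time are
 * left untouched; otherwise the structure mirrors
 * dcache_incoherent_io_flush64(), using clean operations instead of
 * clean-and-invalidate.
 */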
void
dcache_incoherent_io_store64(
	addr64_t pa,
	unsigned int size,
	unsigned int remaining,
	unsigned int *res)
{
	pmap_paddr_t paddr = CAST_DOWN(pmap_paddr_t, pa);
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (isphysmem(paddr)) {
		unsigned int wimg_bits = pmap_cache_attributes((ppnum_t) (paddr >> PAGE_SHIFT));
		if ((wimg_bits == VM_WIMG_IO) || (wimg_bits == VM_WIMG_WCOMB) || (wimg_bits == VM_WIMG_RT)) {
			return;
		}
	}

	if ((cache_info()->c_bulksize_op != 0) && (remaining >= (cache_info()->c_bulksize_op))) {
#if __ARM_SMP__ && defined (ARMA7)
		cache_xcall(LWClean);
		if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL);
		}
#else
		CleanPoC_Dcache();
		if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL);
		}
#endif
		*res = BWOpDone;
	} else {
		vm_offset_t vaddr;
		vm_size_t count;
		unsigned int wimg_bits, index;

		while (size > 0) {
			if (isphysmem(paddr)) {
				count = size;
				vaddr = phystokv_range(paddr, &count);
			} else {
				count = PAGE_SIZE - (paddr & PAGE_MASK);
				if (count > size) {
					count = size;
				}
				wimg_bits = pmap_cache_attributes((ppnum_t) (paddr >> PAGE_SHIFT));
				mp_disable_preemption();
				index = pmap_map_cpu_windows_copy((ppnum_t) (paddr >> PAGE_SHIFT), VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
				vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | (paddr & PAGE_MASK);
			}
			CleanPoC_DcacheRegion(vaddr, (unsigned)count);
			if (isphysmem(paddr)) {
				if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
					((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
						cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) paddr, (unsigned)count);
				}
			} else {
				pmap_unmap_cpu_windows_copy(index);
				mp_enable_preemption();
			}
			paddr += count;
			size -= count;
		}
	}

	return;
}

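/*
 * cache_sync_page: make the instruction cache coherent with recent data
 * writes to the given physical page (e.g. after code has been paged in or
 * modified).  Pages outside the managed physical range fall back to a full
 * D-cache flush plus I-cache invalidate.
 */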
void
cache_sync_page(
	ppnum_t pp
	)
{
	pmap_paddr_t paddr = ptoa(pp);

	if (isphysmem(paddr)) {
		vm_offset_t vaddr = phystokv(paddr);
		InvalidatePoU_IcacheRegion(vaddr, PAGE_SIZE);
	} else {
		FlushPoC_Dcache();
		InvalidatePoU_Icache();
	}
}

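/*
 * platform_cache_init: enable the platform cache through the registered
 * dispatch hook, if any, and when CPU identification did not already
 * report an L2 size, query it from the platform and record it in the
 * cache info block.
 */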
void
platform_cache_init(
	void)
{
	cache_info_t *cpuid_cache_info;
	unsigned int cache_size = 0x0UL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	cpuid_cache_info = cache_info();

	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
		((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
			cpu_data_ptr->cpu_id, CacheControl, CacheControlEnable, 0x0UL);

		if (cpuid_cache_info->c_l2size == 0x0) {
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
				cpu_data_ptr->cpu_id, CacheConfig, CacheConfigSize, (unsigned int)&cache_size);
			cpuid_cache_info->c_l2size = cache_size;
		}
	}
}

void
platform_cache_flush(
	void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	FlushPoC_Dcache();

	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
		((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
			cpu_data_ptr->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
	}
}

void
platform_cache_clean(
	void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	CleanPoC_Dcache();

	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
		((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
			cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL);
	}
}

void
platform_cache_shutdown(
	void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	CleanPoC_Dcache();

	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
		((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
			cpu_data_ptr->cpu_id, CacheShutdown, 0x0UL, 0x0UL);
	}
}

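/*
 * platform_cache_disable: on pre-ARMv8 cores, turn off D-cache allocation
 * by clearing the cache-enable bit in SCTLR.
 */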
void
platform_cache_disable(void)
{
#if (__ARM_ARCH__ < 8)
	uint32_t sctlr_value = 0;

	/* Disable dcache allocation. */
	sctlr_value = __builtin_arm_mrc(MRC_SCTLR);
	sctlr_value &= ~SCTLR_DCACHE;
	__builtin_arm_mcr(MCR_SCTLR(sctlr_value));
	__builtin_arm_isb(ISB_SY);
#endif /* (__ARM_ARCH__ < 8) */
}

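/*
 * platform_cache_idle_enter: prepare this CPU's caches for entering an
 * idle/power-down state: write back dirty lines, publish that the CPU no
 * longer participates in cache cross-calls (pre-ARMv8), and on ARMA7 SMP
 * configurations leave the coherency domain by clearing ACTLR bit 0x40.
 */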
void
platform_cache_idle_enter(
	void)
{
#if __ARM_SMP__
	platform_cache_disable();

	/*
	 * If we're only using a single CPU, just write back any
	 * dirty cachelines.  We can avoid doing housekeeping
	 * on CPU data that would normally be modified by other
	 * CPUs.
	 */
	if (up_style_idle_exit && (real_ncpus == 1)) {
		CleanPoU_Dcache();
	} else {
		FlushPoU_Dcache();

#if (__ARM_ARCH__ < 8)
		cpu_data_t *cpu_data_ptr = getCpuDatap();
		cpu_data_ptr->cpu_CLW_active = 0;
		__builtin_arm_dmb(DMB_ISH);
		cpu_data_ptr->cpu_CLWFlush_req = 0;
		cpu_data_ptr->cpu_CLWClean_req = 0;
		CleanPoC_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));
#endif /* (__ARM_ARCH__ < 8) */
	}
#else /* !__ARM_SMP__ */
	CleanPoU_Dcache();
#endif /* !__ARM_SMP__ */

#if defined(__ARM_SMP__) && defined(ARMA7)
	uint32_t actlr_value = 0;

	/* Leave the coherency domain */
	__builtin_arm_clrex();
	actlr_value = __builtin_arm_mrc(MRC_ACTLR);
	actlr_value &= ~0x40;

	__builtin_arm_mcr(MCR_ACTLR(actlr_value));
	/* Ensure any pending forwarded request is serviced and completes */
	__builtin_arm_dsb(DSB_SY);
	/* Force the processor to re-fetch, so any pending forwarded request reaches the core */
	__builtin_arm_isb(ISB_SY);
	/* Ensure a second possible pending forwarded request completes. */
	__builtin_arm_dsb(DSB_SY);
#endif /* defined(__ARM_SMP__) && defined(ARMA7) */
}

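/*
 * platform_cache_idle_exit: undo platform_cache_idle_enter() after the CPU
 * wakes up: flush what is needed to rejoin the coherency domain, set ACTLR
 * bit 0x40 again, re-enable D-cache allocation, and mark the CPU as
 * participating in cache cross-calls once more.
 */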
void
platform_cache_idle_exit(
	void)
{
#if defined(ARMA7)
	uint32_t actlr_value = 0;

	/* Flush L1 caches and TLB before rejoining the coherency domain */
	FlushPoU_Dcache();
	/*
	 * If we're only using a single CPU, we can avoid flushing the
	 * I-cache or the TLB, as neither program text nor pagetables
	 * should have been changed during the idle period.  We still
	 * want to flush the D-cache to PoU (above), as memory contents
	 * may have been changed by DMA.
	 */
	if (!up_style_idle_exit || (real_ncpus > 1)) {
		InvalidatePoU_Icache();
		flush_core_tlb();
	}

	/* Rejoin the coherency domain */
	actlr_value = __builtin_arm_mrc(MRC_ACTLR);
	actlr_value |= 0x40;
	__builtin_arm_mcr(MCR_ACTLR(actlr_value));
	__builtin_arm_isb(ISB_SY);

#if __ARM_SMP__
	uint32_t sctlr_value = 0;

	/* Enable dcache allocation. */
	sctlr_value = __builtin_arm_mrc(MRC_SCTLR);
	sctlr_value |= SCTLR_DCACHE;
	__builtin_arm_mcr(MCR_SCTLR(sctlr_value));
	__builtin_arm_isb(ISB_SY);
	getCpuDatap()->cpu_CLW_active = 1;
#endif /* __ARM_SMP__ */
#endif /* defined(ARMA7) */
}

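/*
 * platform_cache_batch_wimg: report whether a WIMG (cacheability) change of
 * 'size' bytes is large enough to be better served by a bulk cache
 * operation; platform_cache_flush_wimg() then performs that bulk flush.
 */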
boolean_t
platform_cache_batch_wimg(
	__unused unsigned int new_wimg,
	__unused unsigned int size
	)
{
	boolean_t do_cache_op = FALSE;

	if ((cache_info()->c_bulksize_op != 0) && (size >= (cache_info()->c_bulksize_op))) {
		do_cache_op = TRUE;
	}

	return do_cache_op;
}

void
platform_cache_flush_wimg(
	__unused unsigned int new_wimg
	)
{
#if __ARM_SMP__ && defined (ARMA7)
	cache_xcall(LWFlush);
#else
	FlushPoC_Dcache();
	if (getCpuDatap()->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
		((cache_dispatch_t) getCpuDatap()->cpu_cache_dispatch)( getCpuDatap()->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
	}
#endif
}

#if __ARM_SMP__ && defined(ARMA7)
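/*
 * Cache cross-call support for ARMv7 SMP: bulk clean/flush requests are
 * broadcast so that every active core maintains its own L1.
 * cache_xcall_handler() runs on each target CPU (and on the requester) and
 * services a pending LWFlush/LWClean request if it is newer than the last
 * one completed.
 */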
void
cache_xcall_handler(unsigned int op)
{
	cpu_data_t *cdp;
	uint64_t abstime;

	cdp = getCpuDatap();

	if ((op == LWFlush) && (cdp->cpu_CLWFlush_req > cdp->cpu_CLWFlush_last)) {
		FlushPoU_Dcache();
		abstime = ml_get_timebase();
		cdp->cpu_CLWFlush_last = abstime;
		cdp->cpu_CLWClean_last = abstime;
	} else if ((op == LWClean) && (cdp->cpu_CLWClean_req > cdp->cpu_CLWClean_last)) {
		CleanPoU_Dcache();
		abstime = ml_get_timebase();
		cdp->cpu_CLWClean_last = abstime;
	}
}


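/*
 * cache_xcall: post an LWFlush or LWClean request to every CPU currently
 * participating in cache maintenance (cpu_CLW_active), signal those CPUs,
 * service the request locally, then spin until the targets acknowledge by
 * updating their completion timestamps.  Finally, the requesting CPU
 * cleans or flushes its cache to the Point of Coherency.
 */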
void
cache_xcall(unsigned int op)
{
	boolean_t intr;
	cpu_data_t *cdp;
	cpu_data_t *target_cdp;
	unsigned int cpu;
	unsigned int signal;
	uint64_t abstime;

	intr = ml_set_interrupts_enabled(FALSE);
	cdp = getCpuDatap();
	abstime = ml_get_timebase();
	if (op == LWClean) {
		signal = SIGPLWClean;
	} else {
		signal = SIGPLWFlush;
	}

	for (cpu = 0; cpu < MAX_CPUS; cpu++) {
		target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
		if (target_cdp == (cpu_data_t *)NULL) {
			break;
		}

		if (target_cdp->cpu_CLW_active == 0) {
			continue;
		}

		if (op == LWFlush) {
			target_cdp->cpu_CLWFlush_req = abstime;
		} else if (op == LWClean) {
			target_cdp->cpu_CLWClean_req = abstime;
		}
		__builtin_arm_dmb(DMB_ISH);
		if (target_cdp->cpu_CLW_active == 0) {
			if (op == LWFlush) {
				target_cdp->cpu_CLWFlush_req = 0x0ULL;
			} else if (op == LWClean) {
				target_cdp->cpu_CLWClean_req = 0x0ULL;
			}
			continue;
		}

		if (target_cdp == cdp) {
			continue;
		}

		if (KERN_SUCCESS != cpu_signal(target_cdp, signal, (void *)NULL, NULL)) {
			if (op == LWFlush) {
				target_cdp->cpu_CLWFlush_req = 0x0ULL;
			} else if (op == LWClean) {
				target_cdp->cpu_CLWClean_req = 0x0ULL;
			}
		}
		if (cpu == real_ncpus) {
			break;
		}
	}

	cache_xcall_handler(op);

	(void) ml_set_interrupts_enabled(intr);

	for (cpu = 0; cpu < MAX_CPUS; cpu++) {
		target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
		if (target_cdp == (cpu_data_t *)NULL) {
			break;
		}

		if (target_cdp == cdp) {
			continue;
		}

		if (op == LWFlush) {
			while ((target_cdp->cpu_CLWFlush_req != 0x0ULL) && (target_cdp->cpu_CLWFlush_last < abstime)) {
				;
			}
		} else if (op == LWClean) {
			while ((target_cdp->cpu_CLWClean_req != 0x0ULL) && (target_cdp->cpu_CLWClean_last < abstime)) {
				;
			}
		}

		if (cpu == real_ncpus) {
			break;
		}
	}

	if (op == LWFlush) {
		FlushPoC_Dcache();
	} else if (op == LWClean) {
		CleanPoC_Dcache();
	}
}
#endif


#else /* __ARM_COHERENT_IO__ */

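/*
 * With coherent I/O (__ARM_COHERENT_IO__), hardware keeps DMA and the CPU
 * caches consistent, so the cache-maintenance entry points reduce to
 * barriers (or to I-cache invalidation where instruction fetch is
 * involved) and most platform cache hooks become no-ops.
 */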
void
flush_dcache(
	__unused vm_offset_t addr,
	__unused unsigned length,
	__unused boolean_t phys)
{
	__builtin_arm_dsb(DSB_SY);
}

void
clean_dcache(
	__unused vm_offset_t addr,
	__unused unsigned length,
	__unused boolean_t phys)
{
	__builtin_arm_dsb(DSB_SY);
}

void
flush_dcache_syscall(
	__unused vm_offset_t va,
	__unused unsigned length)
{
	__builtin_arm_dsb(DSB_SY);
}

void
dcache_incoherent_io_flush64(
	__unused addr64_t pa,
	__unused unsigned int size,
	__unused unsigned int remaining,
	__unused unsigned int *res)
{
	__builtin_arm_dsb(DSB_SY);
	*res = LWOpDone;
	return;
}

void
dcache_incoherent_io_store64(
	__unused addr64_t pa,
	__unused unsigned int size,
	__unused unsigned int remaining,
	__unused unsigned int *res)
{
	__builtin_arm_dsb(DSB_SY);
	*res = LWOpDone;
	return;
}

void
cache_sync_page(
	ppnum_t pp
	)
{
	pmap_paddr_t paddr = ptoa(pp);

	if (isphysmem(paddr)) {
		vm_offset_t vaddr = phystokv(paddr);
		InvalidatePoU_IcacheRegion(vaddr, PAGE_SIZE);
	}
}

void
platform_cache_init(
	void)
{
}

void
platform_cache_flush(
	void)
{
}

void
platform_cache_clean(
	void)
{
}

void
platform_cache_shutdown(
	void)
{
}

void
platform_cache_idle_enter(
	void)
{
}

void
platform_cache_idle_exit(
	void)
{
}

boolean_t
platform_cache_batch_wimg(
	__unused unsigned int new_wimg,
	__unused unsigned int size
	)
{
	return TRUE;
}

void
platform_cache_flush_wimg(
	__unused unsigned int new_wimg)
{
}

#endif /* __ARM_COHERENT_IO__ */