/* apple/xnu (xnu-4570.71.2): osfmk/arm/caches.c */
/*
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_assert.h>
#include <mach/vm_types.h>
#include <mach/mach_time.h>
#include <kern/timer.h>
#include <kern/clock.h>
#include <kern/machine.h>
#include <mach/machine.h>
#include <mach/machine/vm_param.h>
#include <mach_kdp.h>
#include <kdp/kdp_udp.h>
#include <arm/caches_internal.h>
#include <arm/cpuid.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <arm/misc_protos.h>

/*
 * dcache_incoherent_io_flush64() dcache_incoherent_io_store64() result info
 */
#define LWOpDone 1
#define BWOpDone 3

#ifndef __ARM_COHERENT_IO__

extern boolean_t up_style_idle_exit;

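/*
 * flush_dcache: clean and invalidate a region of the data cache to the
 * point of coherency.  'addr' is a physical address when 'phys' is TRUE,
 * otherwise a kernel virtual address.  If an outer cache controller is
 * registered via cpu_cache_dispatch, the operation is also forwarded to it,
 * page by page, using physical addresses.
 */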
void
flush_dcache(
	vm_offset_t addr,
	unsigned length,
	boolean_t phys)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (phys) {
		unsigned int paddr;
		unsigned int vaddr;

		paddr = CAST_DOWN(unsigned int, addr);
		if (!isphysmem(paddr))
			return;
		vaddr = (unsigned int)phystokv(paddr);
		FlushPoC_DcacheRegion((vm_offset_t) vaddr, length);

		if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL)
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
				cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) paddr, length);
		return;
	}
	if (cpu_data_ptr->cpu_cache_dispatch == (cache_dispatch_t) NULL) {
		FlushPoC_DcacheRegion((vm_offset_t) addr, length);
	} else {
		addr64_t paddr;
		uint32_t count;

		while (length > 0) {
			count = PAGE_SIZE - (addr & PAGE_MASK);
			if (count > length)
				count = length;
			FlushPoC_DcacheRegion((vm_offset_t) addr, count);
			paddr = kvtophys(addr);
			if (paddr)
				((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
					cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) paddr, count);
			addr += count;
			length -= count;
		}
	}
	return;
}

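/*
 * clean_dcache: write back (without invalidating) a region of the data
 * cache to the point of coherency, with the same addressing and outer-cache
 * dispatch behavior as flush_dcache().
 */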
void
clean_dcache(
	vm_offset_t addr,
	unsigned length,
	boolean_t phys)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (phys) {
		unsigned int paddr;
		unsigned int vaddr;

		paddr = CAST_DOWN(unsigned int, addr);
		if (!isphysmem(paddr))
			return;

		vaddr = (unsigned int)phystokv(paddr);
		CleanPoC_DcacheRegion((vm_offset_t) vaddr, length);

		if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL)
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
				cpu_data_ptr->cpu_id, CacheCleanRegion, paddr, length);
		return;
	}

	if (cpu_data_ptr->cpu_cache_dispatch == (cache_dispatch_t) NULL) {
		CleanPoC_DcacheRegion((vm_offset_t) addr, length);
	} else {
		addr64_t paddr;
		uint32_t count;

		while (length > 0) {
			count = PAGE_SIZE - (addr & PAGE_MASK);
			if (count > length)
				count = length;
			CleanPoC_DcacheRegion((vm_offset_t) addr, count);
			paddr = kvtophys(addr);
			if (paddr)
				((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
					cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) paddr, count);
			addr += count;
			length -= count;
		}
	}
	return;
}

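/*
 * flush_dcache_syscall: flush a caller-supplied virtual range.  Requests at
 * or above the bulk-operation threshold (c_bulksize_op) are promoted to a
 * whole-cache flush, cross-called to the other CPUs on ARMv7 SMP.
 */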
void
flush_dcache_syscall(
	vm_offset_t va,
	unsigned length)
{
	if ((cache_info()->c_bulksize_op != 0) && (length >= (cache_info()->c_bulksize_op))) {
#if __ARM_SMP__ && defined(ARMA7)
		cache_xcall(LWFlush);
#else
		FlushPoC_Dcache();
		if (getCpuDatap()->cpu_cache_dispatch != (cache_dispatch_t) NULL)
			((cache_dispatch_t) getCpuDatap()->cpu_cache_dispatch)(getCpuDatap()->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
#endif
	} else {
		FlushPoC_DcacheRegion((vm_offset_t) va, length);
	}
	return;
}

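/*
 * dcache_incoherent_io_flush64: flush a physical range for non-coherent DMA.
 * Large remaining lengths trigger a bulk whole-cache operation and report
 * BWOpDone through *res; physical pages outside the physmap are reached one
 * page at a time through the per-CPU copy windows.
 */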
void
dcache_incoherent_io_flush64(
	addr64_t pa,
	unsigned int size,
	unsigned int remaining,
	unsigned int *res)
{
	unsigned int vaddr;
	unsigned int paddr = CAST_DOWN(unsigned int, pa);
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if ((cache_info()->c_bulksize_op != 0) && (remaining >= (cache_info()->c_bulksize_op))) {
#if __ARM_SMP__ && defined(ARMA7)
		cache_xcall(LWFlush);
#else
		FlushPoC_Dcache();
		if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL)
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(cpu_data_ptr->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
#endif
		*res = BWOpDone;
	} else {
		if (isphysmem(paddr)) {
			vaddr = (unsigned int)phystokv(pa);

			FlushPoC_DcacheRegion((vm_offset_t) vaddr, size);

			if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL)
				((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) pa, size);
		} else {
			/* slow path - pa isn't in the vtop region. Flush one page at a time via cpu_copywindows */
			unsigned int wimg_bits, index;
			uint32_t count;

			mp_disable_preemption();

			while (size > 0) {
				count = PAGE_SIZE - (paddr & PAGE_MASK);
				if (count > size)
					count = size;

				wimg_bits = pmap_cache_attributes((paddr >> PAGE_SHIFT));
				index = pmap_map_cpu_windows_copy((paddr >> PAGE_SHIFT), VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
				vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | (paddr & PAGE_MASK);

				CleanPoC_DcacheRegion((vm_offset_t) vaddr, count);

				pmap_unmap_cpu_windows_copy(index);

				paddr += count;
				size -= count;
			}

			mp_enable_preemption();
		}
	}

	return;
}

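/*
 * dcache_incoherent_io_store64: clean (write back) a physical range ahead of
 * non-coherent DMA.  IO and write-combined mappings need no maintenance and
 * are skipped; large remaining lengths trigger a bulk clean and report
 * BWOpDone through *res.
 */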
void
dcache_incoherent_io_store64(
	addr64_t pa,
	unsigned int size,
	unsigned int remaining,
	unsigned int *res)
{
	unsigned int vaddr;
	unsigned int paddr = CAST_DOWN(unsigned int, pa);
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (isphysmem(paddr)) {
		unsigned int wimg_bits = pmap_cache_attributes(paddr >> PAGE_SHIFT);
		if ((wimg_bits == VM_WIMG_IO) || (wimg_bits == VM_WIMG_WCOMB)) {
			return;
		}
	}

	if ((cache_info()->c_bulksize_op != 0) && (remaining >= (cache_info()->c_bulksize_op))) {
#if __ARM_SMP__ && defined(ARMA7)
		cache_xcall(LWClean);
		if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL)
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL);
#else
		CleanPoC_Dcache();
		if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL)
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL);
#endif
		*res = BWOpDone;
	} else {
		if (isphysmem(paddr)) {
			vaddr = (unsigned int)phystokv(pa);

			CleanPoC_DcacheRegion((vm_offset_t) vaddr, size);

			if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL)
				((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) pa, size);
		} else {
			/* slow path - pa isn't in the vtop region. Clean one page at a time via cpu_copywindows. */
			unsigned int wimg_bits, index;
			uint32_t count;

			mp_disable_preemption();

			while (size > 0) {
				count = PAGE_SIZE - (paddr & PAGE_MASK);
				if (count > size)
					count = size;

				wimg_bits = pmap_cache_attributes((paddr >> PAGE_SHIFT));
				index = pmap_map_cpu_windows_copy((paddr >> PAGE_SHIFT), VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
				vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | (paddr & PAGE_MASK);

				CleanPoC_DcacheRegion((vm_offset_t) vaddr, count);

				pmap_unmap_cpu_windows_copy(index);

				paddr += count;
				size -= count;
			}

			mp_enable_preemption();
		}
	}

	return;
}

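/*
 * cache_sync_page: make the contents of a physical page visible to the
 * instruction stream, cleaning the data cache to the point of unification
 * and invalidating the instruction cache.
 */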
void
cache_sync_page(
	ppnum_t pp
)
{
	pmap_paddr_t paddr = ptoa(pp);

	if (isphysmem(paddr)) {
		vm_offset_t vaddr = phystokv(paddr);

		CleanPoU_DcacheRegion(vaddr, PAGE_SIZE);
#ifdef __ARM_IC_NOALIAS_ICACHE__
		InvalidatePoU_IcacheRegion(vaddr, PAGE_SIZE);
#else
		InvalidatePoU_Icache();
#endif
	} else {
		FlushPoC_Dcache();
		InvalidatePoU_Icache();
	}
}

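/*
 * platform_cache_init: enable the external cache controller, if one is
 * registered, and record its reported L2 size in the cpuid cache info.
 */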
void
platform_cache_init(
	void)
{
	cache_info_t *cpuid_cache_info;
	unsigned int cache_size = 0x0UL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	cpuid_cache_info = cache_info();

	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
		((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
			cpu_data_ptr->cpu_id, CacheControl, CacheControlEnable, 0x0UL);

		if (cpuid_cache_info->c_l2size == 0x0) {
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
				cpu_data_ptr->cpu_id, CacheConfig, CacheConfigSize, (unsigned int)&cache_size);
			cpuid_cache_info->c_l2size = cache_size;
		}
	}
}

void
platform_cache_flush(
	void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	FlushPoC_Dcache();

	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL)
		((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
			cpu_data_ptr->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
}

void
platform_cache_clean(
	void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	CleanPoC_Dcache();

	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL)
		((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
			cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL);
}

void
platform_cache_shutdown(
	void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	CleanPoC_Dcache();

	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL)
		((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
			cpu_data_ptr->cpu_id, CacheShutdown, 0x0UL, 0x0UL);
}

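/*
 * platform_cache_disable: disable data-cache allocation on this CPU by
 * clearing the dcache enable bit in SCTLR.
 */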
void
platform_cache_disable(void)
{
	uint32_t sctlr_value = 0;

	/* Disable dcache allocation. */
	__asm__ volatile("mrc p15, 0, %0, c1, c0, 0"
	    : "=r"(sctlr_value));

	sctlr_value &= ~SCTLR_DCACHE;

	__asm__ volatile("mcr p15, 0, %0, c1, c0, 0\n"
	    "isb"
	    :: "r"(sctlr_value));
}

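/*
 * platform_cache_idle_enter: prepare this CPU's caches for entering an idle
 * state.  On SMP, dcache allocation is disabled and dirty lines are written
 * back; on ARMv7 (ARMA7) parts the CPU then leaves the coherency domain by
 * clearing the SMP bit (0x40) in ACTLR.
 */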
void
platform_cache_idle_enter(
	void)
{
#if __ARM_SMP__
	platform_cache_disable();

	/*
	 * If we're only using a single CPU, just write back any
	 * dirty cachelines.  We can avoid doing housekeeping
	 * on CPU data that would normally be modified by other
	 * CPUs.
	 */
	if (up_style_idle_exit && (real_ncpus == 1))
		CleanPoU_Dcache();
	else {
		cpu_data_t *cpu_data_ptr = getCpuDatap();

		FlushPoU_Dcache();

		cpu_data_ptr->cpu_CLW_active = 0;
		__asm__ volatile("dmb ish");
		cpu_data_ptr->cpu_CLWFlush_req = 0;
		cpu_data_ptr->cpu_CLWClean_req = 0;
		CleanPoC_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));
	}
#else
	CleanPoU_Dcache();
#endif

#if defined(__ARM_SMP__) && defined(ARMA7)
	uint32_t actlr_value = 0;

	/* Leave the coherency domain */
	__asm__ volatile("clrex\n"
	    "mrc p15, 0, %0, c1, c0, 1\n"
	    : "=r"(actlr_value));

	actlr_value &= ~0x40;

	__asm__ volatile("mcr p15, 0, %0, c1, c0, 1\n"
	    /* Ensures any pending fwd request gets serviced and ends up */
	    "dsb\n"
	    /* Forces the processor to re-fetch, so any pending fwd request gets into the core */
	    "isb\n"
	    /* Ensures the second possible pending fwd request ends up. */
	    "dsb\n"
	    :: "r"(actlr_value));
#endif
}

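/*
 * platform_cache_idle_exit: undo platform_cache_idle_enter after wakeup:
 * flush the L1 data cache to the point of unification, invalidate the
 * I-cache and TLB if other CPUs may have changed text or pagetables, rejoin
 * the coherency domain, and re-enable dcache allocation.
 */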
void
platform_cache_idle_exit(
	void)
{
#if defined(ARMA7)
	uint32_t actlr_value = 0;

	/* Flush L1 caches and TLB before rejoining the coherency domain */
	FlushPoU_Dcache();
	/*
	 * If we're only using a single CPU, we can avoid flushing the
	 * I-cache or the TLB, as neither program text nor pagetables
	 * should have been changed during the idle period.  We still
	 * want to flush the D-cache to PoU (above), as memory contents
	 * may have been changed by DMA.
	 */
	if (!up_style_idle_exit || (real_ncpus > 1)) {
		InvalidatePoU_Icache();
		flush_core_tlb();
	}

	/* Rejoin the coherency domain */
	__asm__ volatile("mrc p15, 0, %0, c1, c0, 1\n"
	    : "=r"(actlr_value));

	actlr_value |= 0x40;

	__asm__ volatile("mcr p15, 0, %0, c1, c0, 1\n"
	    "isb\n"
	    :: "r"(actlr_value));

#if __ARM_SMP__
	uint32_t sctlr_value = 0;

	/* Enable dcache allocation. */
	__asm__ volatile("mrc p15, 0, %0, c1, c0, 0\n"
	    : "=r"(sctlr_value));

	sctlr_value |= SCTLR_DCACHE;

	__asm__ volatile("mcr p15, 0, %0, c1, c0, 0\n"
	    "isb"
	    :: "r"(sctlr_value));
	getCpuDatap()->cpu_CLW_active = 1;
#endif
#endif
}

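/*
 * platform_cache_batch_wimg: report whether a WIMG change covering 'size'
 * bytes is large enough (at least c_bulksize_op) to be handled with a single
 * bulk cache operation.
 */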
boolean_t
platform_cache_batch_wimg(
	__unused unsigned int new_wimg,
	__unused unsigned int size
)
{
	boolean_t do_cache_op = FALSE;

	if ((cache_info()->c_bulksize_op != 0) && (size >= (cache_info()->c_bulksize_op)))
		do_cache_op = TRUE;

	return do_cache_op;
}

void
platform_cache_flush_wimg(
	__unused unsigned int new_wimg
)
{
#if __ARM_SMP__ && defined(ARMA7)
	cache_xcall(LWFlush);
#else
	FlushPoC_Dcache();
	if (getCpuDatap()->cpu_cache_dispatch != (cache_dispatch_t) NULL)
		((cache_dispatch_t) getCpuDatap()->cpu_cache_dispatch)(getCpuDatap()->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
#endif
}

#if __ARM_SMP__ && defined(ARMA7)
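/*
 * cache_xcall_handler: per-CPU target of a cache cross-call.  If this CPU
 * has a clean or flush request newer than the last one it completed, perform
 * the operation to the point of unification and timestamp the completion.
 */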
void
cache_xcall_handler(unsigned int op)
{
	cpu_data_t *cdp;
	uint64_t abstime;

	cdp = getCpuDatap();

	if ((op == LWFlush) && (cdp->cpu_CLWFlush_req > cdp->cpu_CLWFlush_last)) {
		FlushPoU_Dcache();
		abstime = ml_get_timebase();
		cdp->cpu_CLWFlush_last = abstime;
		cdp->cpu_CLWClean_last = abstime;
	} else if ((op == LWClean) && (cdp->cpu_CLWClean_req > cdp->cpu_CLWClean_last)) {
		CleanPoU_Dcache();
		abstime = ml_get_timebase();
		cdp->cpu_CLWClean_last = abstime;
	}
}

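/*
 * cache_xcall: post a clean or flush request to every active CPU, signal
 * them, perform the operation locally, wait for the others to acknowledge by
 * advancing their completion timestamps, and finish with a whole-cache clean
 * or flush to the point of coherency.
 */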
void
cache_xcall(unsigned int op)
{
	boolean_t intr;
	cpu_data_t *cdp;
	cpu_data_t *target_cdp;
	unsigned int cpu;
	unsigned int signal;
	uint64_t abstime;

	intr = ml_set_interrupts_enabled(FALSE);
	cdp = getCpuDatap();
	abstime = ml_get_timebase();
	if (op == LWClean)
		signal = SIGPLWClean;
	else
		signal = SIGPLWFlush;

	for (cpu = 0; cpu < MAX_CPUS; cpu++) {
		target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
		if (target_cdp == (cpu_data_t *)NULL)
			break;

		if (target_cdp->cpu_CLW_active == 0)
			continue;

		if (op == LWFlush)
			target_cdp->cpu_CLWFlush_req = abstime;
		else if (op == LWClean)
			target_cdp->cpu_CLWClean_req = abstime;
		__asm__ volatile("dmb ish");
		if (target_cdp->cpu_CLW_active == 0) {
			if (op == LWFlush)
				target_cdp->cpu_CLWFlush_req = 0x0ULL;
			else if (op == LWClean)
				target_cdp->cpu_CLWClean_req = 0x0ULL;
			continue;
		}

		if (target_cdp == cdp)
			continue;

		if (KERN_SUCCESS != cpu_signal(target_cdp, signal, (void *)NULL, NULL)) {
			if (op == LWFlush)
				target_cdp->cpu_CLWFlush_req = 0x0ULL;
			else if (op == LWClean)
				target_cdp->cpu_CLWClean_req = 0x0ULL;
		}
		if (cpu == real_ncpus)
			break;
	}

	cache_xcall_handler(op);

	(void) ml_set_interrupts_enabled(intr);

	for (cpu = 0; cpu < MAX_CPUS; cpu++) {
		target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
		if (target_cdp == (cpu_data_t *)NULL)
			break;

		if (target_cdp == cdp)
			continue;

		if (op == LWFlush)
			while ((target_cdp->cpu_CLWFlush_req != 0x0ULL) && (target_cdp->cpu_CLWFlush_last < abstime))
				;
		else if (op == LWClean)
			while ((target_cdp->cpu_CLWClean_req != 0x0ULL) && (target_cdp->cpu_CLWClean_last < abstime))
				;

		if (cpu == real_ncpus)
			break;
	}

	if (op == LWFlush)
		FlushPoC_Dcache();
	else if (op == LWClean)
		CleanPoC_Dcache();
}
#endif


#else /* __ARM_COHERENT_IO__ */

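/*
 * With coherent IO, DMA does not require explicit cache maintenance; the
 * routines below reduce to barriers or no-ops.
 */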
void
flush_dcache(
	__unused vm_offset_t addr,
	__unused unsigned length,
	__unused boolean_t phys)
{
	__asm__ volatile ("dsb sy");
}

void
clean_dcache(
	__unused vm_offset_t addr,
	__unused unsigned length,
	__unused boolean_t phys)
{
	__asm__ volatile ("dsb sy");
}

void
flush_dcache_syscall(
	__unused vm_offset_t va,
	__unused unsigned length)
{
	__asm__ volatile ("dsb sy");
}

void
dcache_incoherent_io_flush64(
	__unused addr64_t pa,
	__unused unsigned int size,
	__unused unsigned int remaining,
	__unused unsigned int *res)
{
	__asm__ volatile ("dsb sy");
	*res = LWOpDone;
	return;
}

void
dcache_incoherent_io_store64(
	__unused addr64_t pa,
	__unused unsigned int size,
	__unused unsigned int remaining,
	__unused unsigned int *res)
{
	__asm__ volatile ("dsb sy");
	*res = LWOpDone;
	return;
}

void
cache_sync_page(
	ppnum_t pp
)
{
	pmap_paddr_t paddr = ptoa(pp);

	if (isphysmem(paddr)) {
		vm_offset_t vaddr = phystokv(paddr);

#ifdef __ARM_IC_NOALIAS_ICACHE__
		InvalidatePoU_IcacheRegion(vaddr, PAGE_SIZE);
#else
		InvalidatePoU_Icache();
#endif
	}
}

void
platform_cache_init(
	void)
{
}

void
platform_cache_flush(
	void)
{
}

void
platform_cache_clean(
	void)
{
}

void
platform_cache_shutdown(
	void)
{
}

void
platform_cache_idle_enter(
	void)
{
}

void
platform_cache_idle_exit(
	void)
{
}

boolean_t
platform_cache_batch_wimg(
	__unused unsigned int new_wimg,
	__unused unsigned int size
)
{
	return TRUE;
}

void
platform_cache_flush_wimg(
	__unused unsigned int new_wimg)
{
}

#endif /* __ARM_COHERENT_IO__ */