/*
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_assert.h>
#include <mach/vm_types.h>
#include <mach/mach_time.h>
#include <kern/timer.h>
#include <kern/clock.h>
#include <kern/machine.h>
#include <mach/machine.h>
#include <mach/machine/vm_param.h>
#include <mach_kdp.h>
#include <kdp/kdp_udp.h>
#include <arm/caches_internal.h>
#include <arm/cpuid.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <arm/misc_protos.h>
/*
 * Result codes reported through *res by dcache_incoherent_io_flush64()
 * and dcache_incoherent_io_store64().
 */
#define LWOpDone 1
#define BWOpDone 3

#ifndef __ARM_COHERENT_IO__

extern boolean_t up_style_idle_exit;

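/*
 * Clean and invalidate the data cache to the Point of Coherency (PoC) for
 * the given address range.  With phys == TRUE the range is physical and is
 * walked one physically-contiguous chunk at a time; otherwise it is a
 * kernel virtual range walked page by page.  If the platform registered an
 * outer-cache dispatch hook, the operation is forwarded to it as well.
 */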
void
flush_dcache(
	vm_offset_t addr,
	unsigned length,
	boolean_t phys)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	vm_offset_t vaddr;
	addr64_t paddr;
	vm_size_t count;

	while (length > 0) {
		if (phys) {
			count = length;
			paddr = CAST_DOWN(pmap_paddr_t, addr);
			vaddr = phystokv_range(paddr, &count);
		} else {
			paddr = kvtophys(addr);
			vaddr = addr;
			count = PAGE_SIZE - (addr & PAGE_MASK);
			if (count > length) {
				count = length;
			}
		}
		FlushPoC_DcacheRegion(vaddr, (unsigned)count);
		if (paddr && (cpu_data_ptr->cpu_cache_dispatch != NULL)) {
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
				cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) paddr, (unsigned)count);
		}
		addr += count;
		length -= count;
	}
	return;
}

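/*
 * Clean (write back, without invalidating) the data cache to the Point of
 * Coherency for the given address range.  Same walk as flush_dcache().
 */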
void
clean_dcache(
	vm_offset_t addr,
	unsigned length,
	boolean_t phys)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	vm_offset_t vaddr;
	addr64_t paddr;
	vm_size_t count;

	while (length > 0) {
		if (phys) {
			count = length;
			paddr = CAST_DOWN(pmap_paddr_t, addr);
			vaddr = phystokv_range(paddr, &count);
		} else {
			paddr = kvtophys(addr);
			vaddr = addr;
			count = PAGE_SIZE - (addr & PAGE_MASK);
			if (count > length) {
				count = length;
			}
		}
		CleanPoC_DcacheRegion(vaddr, (unsigned)count);
		if (paddr && (cpu_data_ptr->cpu_cache_dispatch != NULL)) {
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
				cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) paddr, (unsigned)count);
		}
		addr += count;
		length -= count;
	}
	return;
}

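/*
 * Data-cache flush entry point for the cache-control syscall.  Ranges at or
 * above the bulk-operation threshold are promoted to a full-cache flush
 * (cross-called to the other CPUs on SMP ARMA7 platforms); smaller ranges
 * are flushed by region.
 */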
void
flush_dcache_syscall(
	vm_offset_t va,
	unsigned length)
{
	if ((cache_info()->c_bulksize_op != 0) && (length >= (cache_info()->c_bulksize_op))) {
#if __ARM_SMP__ && defined(ARMA7)
		cache_xcall(LWFlush);
#else
		FlushPoC_Dcache();
		if (getCpuDatap()->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
			((cache_dispatch_t) getCpuDatap()->cpu_cache_dispatch)(getCpuDatap()->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
		}
#endif
	} else {
		FlushPoC_DcacheRegion((vm_offset_t) va, length);
	}
	return;
}

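/*
 * Clean and invalidate a physical range to the Point of Coherency on behalf
 * of incoherent (e.g. DMA) I/O.  If the remaining work is at or above the
 * bulk-operation threshold, the entire data cache is flushed instead and
 * *res is set to BWOpDone.  Otherwise the range is walked chunk by chunk;
 * pages outside the managed physmem region are temporarily mapped through a
 * per-CPU copy window.
 */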
void
dcache_incoherent_io_flush64(
	addr64_t pa,
	unsigned int size,
	unsigned int remaining,
	unsigned int *res)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if ((cache_info()->c_bulksize_op != 0) && (remaining >= (cache_info()->c_bulksize_op))) {
#if __ARM_SMP__ && defined(ARMA7)
		cache_xcall(LWFlush);
#else
		FlushPoC_Dcache();
		if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(cpu_data_ptr->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
		}
#endif
		*res = BWOpDone;
	} else {
		vm_offset_t vaddr;
		pmap_paddr_t paddr = CAST_DOWN(pmap_paddr_t, pa);
		vm_size_t count;
		unsigned int wimg_bits, index;

		while (size > 0) {
			if (isphysmem(paddr)) {
				count = size;
				vaddr = phystokv_range(paddr, &count);
			} else {
				count = PAGE_SIZE - (paddr & PAGE_MASK);
				if (count > size) {
					count = size;
				}

				wimg_bits = pmap_cache_attributes((ppnum_t) (paddr >> PAGE_SHIFT));
				mp_disable_preemption();
				index = pmap_map_cpu_windows_copy((ppnum_t) (paddr >> PAGE_SHIFT), VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
				vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | (paddr & PAGE_MASK);
			}
			FlushPoC_DcacheRegion(vaddr, (unsigned)count);
			if (isphysmem(paddr)) {
				if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
					((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
						cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) paddr, (unsigned)count);
				}
			} else {
				pmap_unmap_cpu_windows_copy(index);
				mp_enable_preemption();
			}
			paddr += count;
			size -= count;
		}
	}

	return;
}

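/*
 * Clean (write back) a physical range to the Point of Coherency on behalf
 * of incoherent I/O.  IO, write-combined, and RT mappings have nothing
 * cached to write back, so those return immediately.  Bulk requests clean
 * the entire data cache and report BWOpDone; smaller ones are walked chunk
 * by chunk, as in dcache_incoherent_io_flush64().
 */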
void
dcache_incoherent_io_store64(
	addr64_t pa,
	unsigned int size,
	unsigned int remaining,
	unsigned int *res)
{
	pmap_paddr_t paddr = CAST_DOWN(pmap_paddr_t, pa);
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (isphysmem(paddr)) {
		unsigned int wimg_bits = pmap_cache_attributes((ppnum_t) (paddr >> PAGE_SHIFT));
		if ((wimg_bits == VM_WIMG_IO) || (wimg_bits == VM_WIMG_WCOMB) || (wimg_bits == VM_WIMG_RT)) {
			return;
		}
	}

	if ((cache_info()->c_bulksize_op != 0) && (remaining >= (cache_info()->c_bulksize_op))) {
#if __ARM_SMP__ && defined(ARMA7)
		cache_xcall(LWClean);
		if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL);
		}
#else
		CleanPoC_Dcache();
		if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL);
		}
#endif
		*res = BWOpDone;
	} else {
		vm_offset_t vaddr;
		vm_size_t count;
		unsigned int wimg_bits, index;

		while (size > 0) {
			if (isphysmem(paddr)) {
				count = size;
				vaddr = phystokv_range(paddr, &count);
			} else {
				count = PAGE_SIZE - (paddr & PAGE_MASK);
				if (count > size) {
					count = size;
				}
				wimg_bits = pmap_cache_attributes((ppnum_t) (paddr >> PAGE_SHIFT));
				mp_disable_preemption();
				index = pmap_map_cpu_windows_copy((ppnum_t) (paddr >> PAGE_SHIFT), VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
				vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | (paddr & PAGE_MASK);
			}
			CleanPoC_DcacheRegion(vaddr, (unsigned)count);
			if (isphysmem(paddr)) {
				if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
					((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
						cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) paddr, (unsigned)count);
				}
			} else {
				pmap_unmap_cpu_windows_copy(index);
				mp_enable_preemption();
			}
			paddr += count;
			size -= count;
		}
	}

	return;
}

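/*
 * Synchronize the instruction cache with the data cache for a single page,
 * typically after code has been written into it.  Pages outside the managed
 * physmem range fall back to a full D-cache flush and I-cache invalidate.
 */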
void
cache_sync_page(
	ppnum_t pp
	)
{
	pmap_paddr_t paddr = ptoa(pp);

	if (isphysmem(paddr)) {
		vm_offset_t vaddr = phystokv(paddr);
		InvalidatePoU_IcacheRegion(vaddr, PAGE_SIZE);
	} else {
		FlushPoC_Dcache();
		InvalidatePoU_Icache();
	}
}

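/*
 * Enable the external (outer) cache controller, if one is registered, and
 * record its L2 size in the cpuid cache info the first time through.
 */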
void
platform_cache_init(
	void)
{
	cache_info_t *cpuid_cache_info;
	unsigned int cache_size = 0x0UL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	cpuid_cache_info = cache_info();

	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
		((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
			cpu_data_ptr->cpu_id, CacheControl, CacheControlEnable, 0x0UL);

		if (cpuid_cache_info->c_l2size == 0x0) {
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
				cpu_data_ptr->cpu_id, CacheConfig, CacheConfigSize, (unsigned int)&cache_size);
			cpuid_cache_info->c_l2size = cache_size;
		}
	}
}

void
platform_cache_flush(
	void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	FlushPoC_Dcache();

	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
		((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
			cpu_data_ptr->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
	}
}

void
platform_cache_clean(
	void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	CleanPoC_Dcache();

	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
		((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
			cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL);
	}
}

void
platform_cache_shutdown(
	void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	CleanPoC_Dcache();

	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
		((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
			cpu_data_ptr->cpu_id, CacheShutdown, 0x0UL, 0x0UL);
	}
}

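/* Disable D-cache allocation on pre-ARMv8 cores by clearing the SCTLR D-cache enable bit. */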
void
platform_cache_disable(void)
{
#if (__ARM_ARCH__ < 8)
	uint32_t sctlr_value = 0;

	/* Disable dcache allocation. */
	sctlr_value = __builtin_arm_mrc(MRC_SCTLR);
	sctlr_value &= ~SCTLR_DCACHE;
	__builtin_arm_mcr(MCR_SCTLR(sctlr_value));
	__builtin_arm_isb(ISB_SY);
#endif /* (__ARM_ARCH__ < 8) */
}

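/*
 * Prepare this CPU's caches for entry into an idle/low-power state: disable
 * D-cache allocation, write back dirty lines, and on SMP ARMA7 platforms
 * drop out of the coherency domain by clearing the coherency bit in ACTLR.
 */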
void
platform_cache_idle_enter(
	void)
{
#if __ARM_SMP__
	platform_cache_disable();

	/*
	 * If we're only using a single CPU, just write back any
	 * dirty cachelines.  We can avoid doing housekeeping
	 * on CPU data that would normally be modified by other
	 * CPUs.
	 */
	if (up_style_idle_exit && (real_ncpus == 1)) {
		CleanPoU_Dcache();
	} else {
		FlushPoU_Dcache();

#if (__ARM_ARCH__ < 8)
		cpu_data_t *cpu_data_ptr = getCpuDatap();
		cpu_data_ptr->cpu_CLW_active = 0;
		__builtin_arm_dmb(DMB_ISH);
		cpu_data_ptr->cpu_CLWFlush_req = 0;
		cpu_data_ptr->cpu_CLWClean_req = 0;
		CleanPoC_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));
#endif /* (__ARM_ARCH__ < 8) */
	}
#else /* !__ARM_SMP__ */
	CleanPoU_Dcache();
#endif /* !__ARM_SMP__ */

#if defined(__ARM_SMP__) && defined(ARMA7)
	uint32_t actlr_value = 0;

	/* Leave the coherency domain */
	__builtin_arm_clrex();
	actlr_value = __builtin_arm_mrc(MRC_ACTLR);
	actlr_value &= ~0x40;

	__builtin_arm_mcr(MCR_ACTLR(actlr_value));
	/* Ensure any pending forwarded request is serviced and completes */
	__builtin_arm_dsb(DSB_SY);
	/* Force the processor to re-fetch, so any pending forwarded request reaches the core */
	__builtin_arm_isb(ISB_SY);
	/* Ensure a second possible pending forwarded request completes as well */
	__builtin_arm_dsb(DSB_SY);
#endif /* defined(__ARM_SMP__) && defined(ARMA7) */
}

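/*
 * Restore this CPU's cache state on exit from idle: on ARMA7, flush the L1
 * D-cache, invalidate the I-cache and TLB when other CPUs may have changed
 * text or pagetables, rejoin the coherency domain by setting the coherency
 * bit in ACTLR, and re-enable D-cache allocation.
 */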
void
platform_cache_idle_exit(
	void)
{
#if defined(ARMA7)
	uint32_t actlr_value = 0;

	/* Flush L1 caches and TLB before rejoining the coherency domain */
	FlushPoU_Dcache();
	/*
	 * If we're only using a single CPU, we can avoid flushing the
	 * I-cache or the TLB, as neither program text nor pagetables
	 * should have been changed during the idle period.  We still
	 * want to flush the D-cache to PoU (above), as memory contents
	 * may have been changed by DMA.
	 */
	if (!up_style_idle_exit || (real_ncpus > 1)) {
		InvalidatePoU_Icache();
		flush_core_tlb();
	}

	/* Rejoin the coherency domain */
	actlr_value = __builtin_arm_mrc(MRC_ACTLR);
	actlr_value |= 0x40;
	__builtin_arm_mcr(MCR_ACTLR(actlr_value));
	__builtin_arm_isb(ISB_SY);

#if __ARM_SMP__
	uint32_t sctlr_value = 0;

	/* Enable dcache allocation. */
	sctlr_value = __builtin_arm_mrc(MRC_SCTLR);
	sctlr_value |= SCTLR_DCACHE;
	__builtin_arm_mcr(MCR_SCTLR(sctlr_value));
	__builtin_arm_isb(ISB_SY);
	getCpuDatap()->cpu_CLW_active = 1;
#endif /* __ARM_SMP__ */
#endif /* defined(ARMA7) */
}

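/*
 * Report whether a WIMG (cache-attribute) change covering `size` bytes is
 * large enough that a single bulk cache operation is cheaper than per-page
 * maintenance.
 */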
boolean_t
platform_cache_batch_wimg(
	__unused unsigned int new_wimg,
	__unused unsigned int size
	)
{
	boolean_t do_cache_op = FALSE;

	if ((cache_info()->c_bulksize_op != 0) && (size >= (cache_info()->c_bulksize_op))) {
		do_cache_op = TRUE;
	}

	return do_cache_op;
}

void
platform_cache_flush_wimg(
	__unused unsigned int new_wimg
	)
{
#if __ARM_SMP__ && defined(ARMA7)
	cache_xcall(LWFlush);
#else
	FlushPoC_Dcache();
	if (getCpuDatap()->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
		((cache_dispatch_t) getCpuDatap()->cpu_cache_dispatch)(getCpuDatap()->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
	}
#endif
}

#if __ARM_SMP__ && defined(ARMA7)
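/*
 * Per-CPU handler for a cache cross-call: perform the requested PoU clean
 * or flush if this CPU has a request newer than its last completion, then
 * stamp the completion time so the initiator can stop waiting on us.
 */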
void
cache_xcall_handler(unsigned int op)
{
	cpu_data_t *cdp;
	uint64_t abstime;

	cdp = getCpuDatap();

	if ((op == LWFlush) && (cdp->cpu_CLWFlush_req > cdp->cpu_CLWFlush_last)) {
		FlushPoU_Dcache();
		abstime = ml_get_timebase();
		cdp->cpu_CLWFlush_last = abstime;
		cdp->cpu_CLWClean_last = abstime;
	} else if ((op == LWClean) && (cdp->cpu_CLWClean_req > cdp->cpu_CLWClean_last)) {
		CleanPoU_Dcache();
		abstime = ml_get_timebase();
		cdp->cpu_CLWClean_last = abstime;
	}
}

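/*
 * Broadcast a PoU clean (LWClean) or flush (LWFlush) to every CPU with CLW
 * maintenance active: timestamp each target's request word, signal it,
 * perform the operation locally, spin until every target has caught up,
 * then finish with a full PoC clean/flush on the calling CPU.
 */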
void
cache_xcall(unsigned int op)
{
	boolean_t intr;
	cpu_data_t *cdp;
	cpu_data_t *target_cdp;
	unsigned int cpu;
	unsigned int signal;
	uint64_t abstime;

	intr = ml_set_interrupts_enabled(FALSE);
	cdp = getCpuDatap();
	abstime = ml_get_timebase();
	if (op == LWClean) {
		signal = SIGPLWClean;
	} else {
		signal = SIGPLWFlush;
	}

	for (cpu = 0; cpu < MAX_CPUS; cpu++) {
		target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
		if (target_cdp == (cpu_data_t *)NULL) {
			break;
		}

		if (target_cdp->cpu_CLW_active == 0) {
			continue;
		}

		if (op == LWFlush) {
			target_cdp->cpu_CLWFlush_req = abstime;
		} else if (op == LWClean) {
			target_cdp->cpu_CLWClean_req = abstime;
		}
		__builtin_arm_dmb(DMB_ISH);
		if (target_cdp->cpu_CLW_active == 0) {
			if (op == LWFlush) {
				target_cdp->cpu_CLWFlush_req = 0x0ULL;
			} else if (op == LWClean) {
				target_cdp->cpu_CLWClean_req = 0x0ULL;
			}
			continue;
		}

		if (target_cdp == cdp) {
			continue;
		}

		if (KERN_SUCCESS != cpu_signal(target_cdp, signal, (void *)NULL, NULL)) {
			if (op == LWFlush) {
				target_cdp->cpu_CLWFlush_req = 0x0ULL;
			} else if (op == LWClean) {
				target_cdp->cpu_CLWClean_req = 0x0ULL;
			}
		}
		if (cpu == real_ncpus) {
			break;
		}
	}

	cache_xcall_handler(op);

	(void) ml_set_interrupts_enabled(intr);

	for (cpu = 0; cpu < MAX_CPUS; cpu++) {
		target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
		if (target_cdp == (cpu_data_t *)NULL) {
			break;
		}

		if (target_cdp == cdp) {
			continue;
		}

		if (op == LWFlush) {
			while ((target_cdp->cpu_CLWFlush_req != 0x0ULL) && (target_cdp->cpu_CLWFlush_last < abstime)) {
				;
			}
		} else if (op == LWClean) {
			while ((target_cdp->cpu_CLWClean_req != 0x0ULL) && (target_cdp->cpu_CLWClean_last < abstime)) {
				;
			}
		}

		if (cpu == real_ncpus) {
			break;
		}
	}

	if (op == LWFlush) {
		FlushPoC_Dcache();
	} else if (op == LWClean) {
		CleanPoC_Dcache();
	}
}
#endif /* __ARM_SMP__ && defined(ARMA7) */

#else /* __ARM_COHERENT_IO__ */

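/*
 * On platforms with coherent I/O, hardware keeps DMA coherent with the CPU
 * caches, so these cache-maintenance entry points reduce to a DSB barrier
 * or to no-ops.
 */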
void
flush_dcache(
	__unused vm_offset_t addr,
	__unused unsigned length,
	__unused boolean_t phys)
{
	__builtin_arm_dsb(DSB_SY);
}

void
clean_dcache(
	__unused vm_offset_t addr,
	__unused unsigned length,
	__unused boolean_t phys)
{
	__builtin_arm_dsb(DSB_SY);
}

void
flush_dcache_syscall(
	__unused vm_offset_t va,
	__unused unsigned length)
{
	__builtin_arm_dsb(DSB_SY);
}

void
dcache_incoherent_io_flush64(
	__unused addr64_t pa,
	__unused unsigned int size,
	__unused unsigned int remaining,
	__unused unsigned int *res)
{
	__builtin_arm_dsb(DSB_SY);
	*res = LWOpDone;
	return;
}

void
dcache_incoherent_io_store64(
	__unused addr64_t pa,
	__unused unsigned int size,
	__unused unsigned int remaining,
	__unused unsigned int *res)
{
	__builtin_arm_dsb(DSB_SY);
	*res = LWOpDone;
	return;
}

void
cache_sync_page(
	ppnum_t pp
	)
{
	pmap_paddr_t paddr = ptoa(pp);

	if (isphysmem(paddr)) {
		vm_offset_t vaddr = phystokv(paddr);
		InvalidatePoU_IcacheRegion(vaddr, PAGE_SIZE);
	}
}

void
platform_cache_init(
	void)
{
}

void
platform_cache_flush(
	void)
{
}

void
platform_cache_clean(
	void)
{
}

void
platform_cache_shutdown(
	void)
{
}

void
platform_cache_idle_enter(
	void)
{
}

void
platform_cache_idle_exit(
	void)
{
}

boolean_t
platform_cache_batch_wimg(
	__unused unsigned int new_wimg,
	__unused unsigned int size
	)
{
	return TRUE;
}

void
platform_cache_flush_wimg(
	__unused unsigned int new_wimg)
{
}

#endif /* __ARM_COHERENT_IO__ */