/*
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_assert.h>
#include <mach/vm_types.h>
#include <mach/mach_time.h>
#include <kern/timer.h>
#include <kern/clock.h>
#include <kern/machine.h>
#include <mach/machine.h>
#include <mach/machine/vm_param.h>

#include <kdp/kdp_udp.h>
#include <arm/caches_internal.h>
#include <arm/cpuid.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <arm/misc_protos.h>
/*
 * dcache_incoherent_io_flush64() dcache_incoherent_io_store64() result info
 */
#ifndef	__ARM_COHERENT_IO__

extern boolean_t up_style_idle_exit;
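/*
 * Non-coherent-I/O configurations: cache maintenance for DMA must be done
 * explicitly.  The routines below operate on the CPU caches and, when the
 * platform has registered a cpu_cache_dispatch handler, also forward the
 * request to the outer/external cache controller.
 */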
	cpu_data_t	*cpu_data_ptr = getCpuDatap();

	paddr = CAST_DOWN(unsigned int, addr);
	if (!isphysmem(paddr))
		return;
	vaddr = (unsigned int)phystokv(paddr);
	FlushPoC_DcacheRegion( (vm_offset_t) vaddr, length);

	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL)
		((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) (
			cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) paddr, length);

	if (cpu_data_ptr->cpu_cache_dispatch == (cache_dispatch_t) NULL) {
		FlushPoC_DcacheRegion( (vm_offset_t) addr, length);

		count = PAGE_SIZE - (addr & PAGE_MASK);

		FlushPoC_DcacheRegion( (vm_offset_t) addr, count);
		paddr = kvtophys(addr);

		((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) (
			cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) paddr, count);
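/*
 * The clean-only variant below mirrors the flush path above, but uses
 * CleanPoC_DcacheRegion and the CacheCleanRegion dispatch code, so dirty
 * lines are written back to the point of coherency without being
 * invalidated.
 */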
	cpu_data_t	*cpu_data_ptr = getCpuDatap();

	paddr = CAST_DOWN(unsigned int, addr);
	if (!isphysmem(paddr))
		return;
	vaddr = (unsigned int)phystokv(paddr);
	CleanPoC_DcacheRegion( (vm_offset_t) vaddr, length);

	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL)
		((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) (
			cpu_data_ptr->cpu_id, CacheCleanRegion, paddr, length);

	if (cpu_data_ptr->cpu_cache_dispatch == (cache_dispatch_t) NULL) {
		CleanPoC_DcacheRegion( (vm_offset_t) addr, length);

		count = PAGE_SIZE - (addr & PAGE_MASK);

		CleanPoC_DcacheRegion( (vm_offset_t) addr, count);
		paddr = kvtophys(addr);

		((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) (
			cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) paddr, count);
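/*
 * Bulk requests (length >= cache_info()->c_bulksize_op) are cheaper to
 * satisfy with a whole-cache operation: a cross-CPU LWFlush on ARMA7 SMP
 * configurations, or a CacheCleanFlush request to the platform's cache
 * dispatch handler.  Smaller requests fall through to a region flush.
 */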
flush_dcache_syscall(

	if ((cache_info()->c_bulksize_op != 0) && (length >= (cache_info()->c_bulksize_op))) {
#if	__ARM_SMP__ && defined(ARMA7)
		cache_xcall(LWFlush);

		if (getCpuDatap()->cpu_cache_dispatch != (cache_dispatch_t) NULL)
			((cache_dispatch_t) getCpuDatap()->cpu_cache_dispatch) ( getCpuDatap()->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);

	FlushPoC_DcacheRegion( (vm_offset_t) va, length);
dcache_incoherent_io_flush64(

	unsigned int remaining,

	unsigned int paddr = CAST_DOWN(unsigned int, pa);
	cpu_data_t	*cpu_data_ptr = getCpuDatap();

	if ((cache_info()->c_bulksize_op != 0) && (remaining >= (cache_info()->c_bulksize_op))) {
#if	__ARM_SMP__ && defined (ARMA7)
		cache_xcall(LWFlush);

		if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL)
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) ( cpu_data_ptr->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);

	if (isphysmem(paddr)) {
		vaddr = (unsigned int)phystokv(pa);

		FlushPoC_DcacheRegion( (vm_offset_t) vaddr, size);

		if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL)
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) (cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) pa, size);

		/* slow path - pa isn't in the vtop region. Flush one page at a time via cpu_copywindows */
		unsigned int wimg_bits, index;

		mp_disable_preemption();

		count = PAGE_SIZE - (paddr & PAGE_MASK);

		wimg_bits = pmap_cache_attributes((paddr >> PAGE_SHIFT));
		index = pmap_map_cpu_windows_copy((paddr >> PAGE_SHIFT), VM_PROT_READ|VM_PROT_WRITE, wimg_bits);
		vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | (paddr & PAGE_MASK);

		CleanPoC_DcacheRegion( (vm_offset_t) vaddr, count);

		pmap_unmap_cpu_windows_copy(index);

		mp_enable_preemption();
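/*
 * The store variant below only cleans (writes back) the affected lines;
 * it does not invalidate them.  Pages mapped VM_WIMG_IO or VM_WIMG_WCOMB
 * are not allocated in the data cache, so no maintenance is needed for
 * those attributes.
 */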
dcache_incoherent_io_store64(

	unsigned int remaining,

	unsigned int paddr = CAST_DOWN(unsigned int, pa);
	cpu_data_t	*cpu_data_ptr = getCpuDatap();

	if (isphysmem(paddr)) {
		unsigned int wimg_bits = pmap_cache_attributes(paddr >> PAGE_SHIFT);
		if ((wimg_bits == VM_WIMG_IO) || (wimg_bits == VM_WIMG_WCOMB)) {

	if ((cache_info()->c_bulksize_op != 0) && (remaining >= (cache_info()->c_bulksize_op))) {
#if	__ARM_SMP__ && defined (ARMA7)
		cache_xcall(LWClean);
		if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL)
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) ( cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL);

		if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL)
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) ( cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL);

	if (isphysmem(paddr)) {
		vaddr = (unsigned int)phystokv(pa);

		CleanPoC_DcacheRegion( (vm_offset_t) vaddr, size);

		if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL)
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) (cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) pa, size);

		/* slow path - pa isn't in the vtop region. Flush one page at a time via cpu_copywindows */
		unsigned int wimg_bits, index;

		mp_disable_preemption();

		count = PAGE_SIZE - (paddr & PAGE_MASK);

		wimg_bits = pmap_cache_attributes((paddr >> PAGE_SHIFT));
		index = pmap_map_cpu_windows_copy((paddr >> PAGE_SHIFT), VM_PROT_READ|VM_PROT_WRITE, wimg_bits);
		vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | (paddr & PAGE_MASK);

		CleanPoC_DcacheRegion( (vm_offset_t) vaddr, count);

		pmap_unmap_cpu_windows_copy(index);

		mp_enable_preemption();
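/*
 * Page synchronization for instruction execution: clean the page's data
 * cache lines to the point of unification, then invalidate the I-cache
 * (by region when __ARM_IC_NOALIAS_ICACHE__ is defined, otherwise in its
 * entirety) so that newly written code is visible to instruction fetch.
 */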
	pmap_paddr_t	paddr = ptoa(pp);

	if (isphysmem(paddr)) {
		vm_offset_t	vaddr = phystokv(paddr);

		CleanPoU_DcacheRegion(vaddr, PAGE_SIZE);
#ifdef	__ARM_IC_NOALIAS_ICACHE__
		InvalidatePoU_IcacheRegion(vaddr, PAGE_SIZE);

		InvalidatePoU_Icache();

		InvalidatePoU_Icache();
	cache_info_t	*cpuid_cache_info;
	unsigned int	cache_size = 0x0UL;
	cpu_data_t	*cpu_data_ptr = getCpuDatap();

	cpuid_cache_info = cache_info();

	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
		((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) (
			cpu_data_ptr->cpu_id, CacheControl, CacheControlEnable, 0x0UL);

		if ( cpuid_cache_info->c_l2size == 0x0 ) {
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) (
				cpu_data_ptr->cpu_id, CacheConfig, CacheConfigSize, (unsigned int)&cache_size);
			cpuid_cache_info->c_l2size = cache_size;
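/*
 * The platform_cache_* entry points below hand whole-cache maintenance
 * (clean, clean+flush, shutdown) to the external cache controller through
 * the cpu_cache_dispatch hook when one is registered.
 */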
platform_cache_flush(

	cpu_data_t	*cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL)
		((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) (
			cpu_data_ptr->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
platform_cache_clean(

	cpu_data_t	*cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL)
		((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) (
			cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL);
platform_cache_shutdown(

	cpu_data_t	*cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL)
		((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) (
			cpu_data_ptr->cpu_id, CacheShutdown, 0x0UL, 0x0UL);
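/*
 * D-cache allocation is controlled through the cp15 SCTLR register: read
 * the current value with MRC, clear or set SCTLR_DCACHE, and write it
 * back with MCR.
 */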
platform_cache_disable(void)

	uint32_t sctlr_value = 0;

	/* Disable dcache allocation. */
	__asm__ volatile("mrc p15, 0, %0, c1, c0, 0"
			 : "=r"(sctlr_value));

	sctlr_value &= ~SCTLR_DCACHE;

	__asm__ volatile("mcr p15, 0, %0, c1, c0, 0\n"
			 :: "r"(sctlr_value));
platform_cache_idle_enter(

	platform_cache_disable();

	/*
	 * If we're only using a single CPU, just write back any
	 * dirty cachelines.  We can avoid doing housekeeping
	 * on CPU data that would normally be modified by other
	 * CPUs.
	 */
	if (up_style_idle_exit && (real_ncpus == 1))

	cpu_data_t	*cpu_data_ptr = getCpuDatap();

	cpu_data_ptr->cpu_CLW_active = 0;
	__asm__ volatile("dmb ish");
	cpu_data_ptr->cpu_CLWFlush_req = 0;
	cpu_data_ptr->cpu_CLWClean_req = 0;
	CleanPoC_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));

#if defined (__ARM_SMP__) && defined (ARMA7)
	uint32_t actlr_value = 0;

	/* Leave the coherency domain */
	__asm__ volatile("clrex\n"
			 "mrc p15, 0, %0, c1, c0, 1\n"
			 : "=r"(actlr_value));

	actlr_value &= ~0x40;

	__asm__ volatile("mcr p15, 0, %0, c1, c0, 1\n"
			 /* Ensures any pending fwd request gets serviced and ends up */
			 /* Forces the processor to re-fetch, so any pending fwd request gets into the core */
			 /* Ensures the second possible pending fwd request ends up. */
			 :: "r"(actlr_value));
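/*
 * Exiting idle reverses the sequence above: invalidate the I-cache and
 * TLB when other CPUs may have modified text or pagetables, restore the
 * ACTLR coherency (SMP) bit that was cleared on entry, re-enable D-cache
 * allocation in SCTLR, and mark this CPU's CLW state active again.
 */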
platform_cache_idle_exit(

	uint32_t actlr_value = 0;

	/* Flush L1 caches and TLB before rejoining the coherency domain */

	/*
	 * If we're only using a single CPU, we can avoid flushing the
	 * I-cache or the TLB, as neither program text nor pagetables
	 * should have been changed during the idle period.  We still
	 * want to flush the D-cache to PoU (above), as memory contents
	 * may have been changed by DMA.
	 */
	if (!up_style_idle_exit || (real_ncpus > 1)) {
		InvalidatePoU_Icache();

	/* Rejoin the coherency domain */
	__asm__ volatile("mrc p15, 0, %0, c1, c0, 1\n"
			 : "=r"(actlr_value));

	__asm__ volatile("mcr p15, 0, %0, c1, c0, 1\n"
			 :: "r"(actlr_value));

	uint32_t sctlr_value = 0;

	/* Enable dcache allocation. */
	__asm__ volatile("mrc p15, 0, %0, c1, c0, 0\n"
			 : "=r"(sctlr_value));

	sctlr_value |= SCTLR_DCACHE;

	__asm__ volatile("mcr p15, 0, %0, c1, c0, 0\n"
			 :: "r"(sctlr_value));
	getCpuDatap()->cpu_CLW_active = 1;
platform_cache_batch_wimg(
	__unused unsigned int new_wimg,
	__unused unsigned int size

	boolean_t	do_cache_op = FALSE;

	if ((cache_info()->c_bulksize_op != 0) && (size >= (cache_info()->c_bulksize_op))) do_cache_op = TRUE;
platform_cache_flush_wimg(
	__unused unsigned int new_wimg

#if	__ARM_SMP__ && defined (ARMA7)
	cache_xcall(LWFlush);

	if (getCpuDatap()->cpu_cache_dispatch != (cache_dispatch_t) NULL)
		((cache_dispatch_t) getCpuDatap()->cpu_cache_dispatch) ( getCpuDatap()->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
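/*
 * ARMA7 SMP cross-call machinery: the requesting CPU timestamps a flush
 * or clean request (cpu_CLWFlush_req / cpu_CLWClean_req) on every active
 * CPU, signals it with SIGPLWFlush / SIGPLWClean, performs the operation
 * locally via cache_xcall_handler(), and then spins until each target's
 * corresponding *_last timestamp has caught up with the request.
 */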
#if	__ARM_SMP__ && defined(ARMA7)

cache_xcall_handler(unsigned int op)

	if ((op == LWFlush) && (cdp->cpu_CLWFlush_req > cdp->cpu_CLWFlush_last)) {

		abstime = ml_get_timebase();
		cdp->cpu_CLWFlush_last = abstime;
		cdp->cpu_CLWClean_last = abstime;
	} else if ((op == LWClean) && (cdp->cpu_CLWClean_req > cdp->cpu_CLWClean_last)) {

		abstime = ml_get_timebase();
		cdp->cpu_CLWClean_last = abstime;
cache_xcall(unsigned int op)

	cpu_data_t	*target_cdp;

	intr = ml_set_interrupts_enabled(FALSE);

	abstime = ml_get_timebase();

	signal = SIGPLWClean;

	signal = SIGPLWFlush;

	for (cpu = 0; cpu < MAX_CPUS; cpu++) {

		target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
		if (target_cdp == (cpu_data_t *)NULL)

		if (target_cdp->cpu_CLW_active == 0)

			target_cdp->cpu_CLWFlush_req = abstime;
		else if (op == LWClean)
			target_cdp->cpu_CLWClean_req = abstime;
		__asm__ volatile("dmb ish");
		if (target_cdp->cpu_CLW_active == 0) {

				target_cdp->cpu_CLWFlush_req = 0x0ULL;
			else if (op == LWClean)
				target_cdp->cpu_CLWClean_req = 0x0ULL;

		if (target_cdp == cdp)

		if (KERN_SUCCESS != cpu_signal(target_cdp, signal, (void *)NULL, NULL)) {

				target_cdp->cpu_CLWFlush_req = 0x0ULL;
			else if (op == LWClean)
				target_cdp->cpu_CLWClean_req = 0x0ULL;

		if (cpu == real_ncpus)

	cache_xcall_handler(op);

	(void) ml_set_interrupts_enabled(intr);

	for (cpu = 0; cpu < MAX_CPUS; cpu++) {

		target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
		if (target_cdp == (cpu_data_t *)NULL)

		if (target_cdp == cdp)

			while ((target_cdp->cpu_CLWFlush_req != 0x0ULL) && (target_cdp->cpu_CLWFlush_last < abstime));
		else if (op == LWClean)
			while ((target_cdp->cpu_CLWClean_req != 0x0ULL) && (target_cdp->cpu_CLWClean_last < abstime));

		if (cpu == real_ncpus)

	else if (op == LWClean)
#else	/* __ARM_COHERENT_IO__ */
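/*
 * Coherent-I/O configurations: the hardware keeps the caches coherent
 * with DMA, so these entry points reduce to a data synchronization
 * barrier ("dsb sy") or to no-ops.
 */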
	__unused vm_offset_t addr,
	__unused unsigned length,
	__unused boolean_t phys)

	__asm__ volatile ("dsb sy");

	__unused vm_offset_t addr,
	__unused unsigned length,
	__unused boolean_t phys)

	__asm__ volatile ("dsb sy");

flush_dcache_syscall(
	__unused vm_offset_t va,
	__unused unsigned length)

	__asm__ volatile ("dsb sy");

dcache_incoherent_io_flush64(
	__unused addr64_t pa,
	__unused unsigned int size,
	__unused unsigned int remaining,
	__unused unsigned int *res)

	__asm__ volatile ("dsb sy");

dcache_incoherent_io_store64(
	__unused addr64_t pa,
	__unused unsigned int size,
	__unused unsigned int remaining,
	__unused unsigned int *res)

	__asm__ volatile ("dsb sy");

	pmap_paddr_t	paddr = ptoa(pp);

	if (isphysmem(paddr)) {
		vm_offset_t	vaddr = phystokv(paddr);

#ifdef	__ARM_IC_NOALIAS_ICACHE__
		InvalidatePoU_IcacheRegion(vaddr, PAGE_SIZE);

		InvalidatePoU_Icache();

platform_cache_flush(

platform_cache_clean(

platform_cache_shutdown(

platform_cache_idle_enter(

platform_cache_idle_exit(

platform_cache_batch_wimg(
	__unused unsigned int new_wimg,
	__unused unsigned int size

platform_cache_flush_wimg(
	__unused unsigned int new_wimg)

#endif	/* __ARM_COHERENT_IO__ */