/*
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_assert.h>
#include <mach/vm_types.h>
#include <mach/mach_time.h>
#include <kern/timer.h>
#include <kern/clock.h>
#include <kern/machine.h>
#include <mach/machine.h>
#include <mach/machine/vm_param.h>

#include <kdp/kdp_udp.h>

#include <arm/caches_internal.h>
#include <arm/cpuid.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <arm/misc_protos.h>
/*
 * dcache_incoherent_io_flush64() dcache_incoherent_io_store64() result info
 */
#define LWOpDone 1
#define BWOpDone 3
#ifndef __ARM_COHERENT_IO__

TUNABLE(bool, up_style_idle_exit, "up_style_idle_exit", false);
void
flush_dcache(
	vm_offset_t addr,
	unsigned length,
	boolean_t phys)
{
	cpu_data_t      *cpu_data_ptr = getCpuDatap();
	vm_offset_t     vaddr;
	addr64_t        paddr;
	vm_size_t       count;

	while (length > 0) {
		if (phys) {
			count = length;
			paddr = CAST_DOWN(pmap_paddr_t, addr);
			vaddr = phystokv_range(paddr, &count);
		} else {
			paddr = kvtophys(addr);
			vaddr = addr;
			count = PAGE_SIZE - (addr & PAGE_MASK);
			if (count > length) {
				count = length;
			}
		}
		FlushPoC_DcacheRegion(vaddr, (unsigned)count);
		if (paddr && (cpu_data_ptr->cpu_cache_dispatch != NULL)) {
			cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id,
			    CacheCleanFlushRegion, (unsigned int) paddr, (unsigned)count);
		}
		addr += count;
		length -= count;
	}
	return;
}
void
clean_dcache(
	vm_offset_t addr,
	unsigned length,
	boolean_t phys)
{
	cpu_data_t      *cpu_data_ptr = getCpuDatap();
	vm_offset_t     vaddr;
	addr64_t        paddr;
	vm_size_t       count;

	while (length > 0) {
		if (phys) {
			count = length;
			paddr = CAST_DOWN(pmap_paddr_t, addr);
			vaddr = phystokv_range(paddr, &count);
		} else {
			paddr = kvtophys(addr);
			vaddr = addr;
			count = PAGE_SIZE - (addr & PAGE_MASK);
			if (count > length) {
				count = length;
			}
		}
		CleanPoC_DcacheRegion(vaddr, (unsigned)count);
		if (paddr && (cpu_data_ptr->cpu_cache_dispatch != NULL)) {
			cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id,
			    CacheCleanRegion, (unsigned int) paddr, (unsigned)count);
		}
		addr += count;
		length -= count;
	}
	return;
}
void
flush_dcache_syscall(
	vm_offset_t va,
	unsigned length)
{
	if ((cache_info()->c_bulksize_op != 0) && (length >= (cache_info()->c_bulksize_op))) {
#if defined(ARMA7)
		cache_xcall(LWFlush);
#else
		FlushPoC_Dcache();
		if (getCpuDatap()->cpu_cache_dispatch != NULL) {
			getCpuDatap()->cpu_cache_dispatch(getCpuDatap()->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
		}
#endif
	} else {
		FlushPoC_DcacheRegion((vm_offset_t) va, length);
	}
	return;
}
void
dcache_incoherent_io_flush64(
	addr64_t pa,
	unsigned int size,
	unsigned int remaining,
	unsigned int *res)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if ((cache_info()->c_bulksize_op != 0) && (remaining >= (cache_info()->c_bulksize_op))) {
#if defined(ARMA7)
		cache_xcall(LWFlush);
#else
		FlushPoC_Dcache();
		if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
			cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
		}
#endif
		*res = BWOpDone;
	} else {
		vm_offset_t     vaddr;
		pmap_paddr_t    paddr = CAST_DOWN(pmap_paddr_t, pa);
		vm_size_t       count;
		unsigned int    wimg_bits, index;

		while (size > 0) {
			if (isphysmem(paddr)) {
				count = size;
				vaddr = phystokv_range(paddr, &count);
			} else {
				count = PAGE_SIZE - (paddr & PAGE_MASK);
				if (count > size) {
					count = size;
				}

				wimg_bits = pmap_cache_attributes((ppnum_t) (paddr >> PAGE_SHIFT));
				mp_disable_preemption();
				index = pmap_map_cpu_windows_copy((ppnum_t) (paddr >> PAGE_SHIFT), VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
				vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | (paddr & PAGE_MASK);
			}
			FlushPoC_DcacheRegion(vaddr, (unsigned)count);
			if (isphysmem(paddr)) {
				if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
					cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id,
					    CacheCleanFlushRegion, (unsigned int) paddr, (unsigned)count);
				}
			} else {
				pmap_unmap_cpu_windows_copy(index);
				mp_enable_preemption();
			}
			paddr += count;
			size -= count;
		}
	}

	return;
}
void
dcache_incoherent_io_store64(
	addr64_t pa,
	unsigned int size,
	unsigned int remaining,
	unsigned int *res)
{
	pmap_paddr_t paddr = CAST_DOWN(pmap_paddr_t, pa);
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (isphysmem(paddr)) {
		unsigned int wimg_bits = pmap_cache_attributes((ppnum_t) (paddr >> PAGE_SHIFT));
		if ((wimg_bits == VM_WIMG_IO) || (wimg_bits == VM_WIMG_WCOMB) || (wimg_bits == VM_WIMG_RT)) {
			return;
		}
	}

	if ((cache_info()->c_bulksize_op != 0) && (remaining >= (cache_info()->c_bulksize_op))) {
#if defined(ARMA7)
		cache_xcall(LWClean);
		if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
			cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL);
		}
#else
		CleanPoC_Dcache();
		if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
			cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL);
		}
#endif
		*res = BWOpDone;
	} else {
		vm_offset_t     vaddr;
		vm_size_t       count;
		unsigned int    wimg_bits, index;

		while (size > 0) {
			if (isphysmem(paddr)) {
				count = size;
				vaddr = phystokv_range(paddr, &count);
			} else {
				count = PAGE_SIZE - (paddr & PAGE_MASK);
				if (count > size) {
					count = size;
				}

				wimg_bits = pmap_cache_attributes((ppnum_t) (paddr >> PAGE_SHIFT));
				mp_disable_preemption();
				index = pmap_map_cpu_windows_copy((ppnum_t) (paddr >> PAGE_SHIFT), VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
				vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | (paddr & PAGE_MASK);
			}
			CleanPoC_DcacheRegion(vaddr, (unsigned)count);
			if (isphysmem(paddr)) {
				if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
					cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id,
					    CacheCleanRegion, (unsigned int) paddr, (unsigned)count);
				}
			} else {
				pmap_unmap_cpu_windows_copy(index);
				mp_enable_preemption();
			}
			paddr += count;
			size -= count;
		}
	}

	return;
}
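/*
 * cache_sync_page(): make instruction fetch coherent with prior data writes
 * to the given physical page (typically after code has been written or
 * modified).
 */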
void
cache_sync_page(
	ppnum_t pp
	)
{
	pmap_paddr_t    paddr = ptoa(pp);

	if (isphysmem(paddr)) {
		vm_offset_t     vaddr = phystokv(paddr);
		InvalidatePoU_IcacheRegion(vaddr, PAGE_SIZE);
	} else {
		FlushPoC_Dcache();
		InvalidatePoU_Icache();
	}
}
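/*
 * platform_cache_init(): enable any external platform cache through the
 * registered dispatch routine and, if cpuid did not report an L2 size,
 * query the platform cache controller for it.
 */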
void
platform_cache_init(
	void)
{
	cache_info_t    *cpuid_cache_info;
	unsigned int cache_size = 0x0UL;
	cpu_data_t      *cpu_data_ptr = getCpuDatap();

	cpuid_cache_info = cache_info();

	if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
		cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id,
		    CacheControl, CacheControlEnable, 0x0UL);

		if (cpuid_cache_info->c_l2size == 0x0) {
			cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id,
			    CacheConfig, CacheConfigSize, (unsigned int)&cache_size);
			cpuid_cache_info->c_l2size = cache_size;
		}
	}
}
void
platform_cache_flush(
	void)
{
	cpu_data_t      *cpu_data_ptr = getCpuDatap();

	FlushPoC_Dcache();

	if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
		cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id,
		    CacheCleanFlush, 0x0UL, 0x0UL);
	}
}
void
platform_cache_clean(
	void)
{
	cpu_data_t      *cpu_data_ptr = getCpuDatap();

	CleanPoC_Dcache();

	if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
		cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id,
		    CacheClean, 0x0UL, 0x0UL);
	}
}
void
platform_cache_shutdown(
	void)
{
	cpu_data_t      *cpu_data_ptr = getCpuDatap();

	CleanPoC_Dcache();

	if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
		cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id,
		    CacheShutdown, 0x0UL, 0x0UL);
	}
}
void
platform_cache_disable(void)
{
#if (__ARM_ARCH__ < 8)
	uint32_t sctlr_value = 0;

	/* Disable dcache allocation. */
	sctlr_value = __builtin_arm_mrc(MRC_SCTLR);
	sctlr_value &= ~SCTLR_DCACHE;
	__builtin_arm_mcr(MCR_SCTLR(sctlr_value));
	__builtin_arm_isb(ISB_SY);
#endif /* (__ARM_ARCH__ < 8) */
}
void
platform_cache_idle_enter(
	void)
{
	platform_cache_disable();

	/*
	 * If we're only using a single CPU, just write back any
	 * dirty cachelines.  We can avoid doing housekeeping
	 * on CPU data that would normally be modified by other
	 * CPUs.
	 */
	if (up_style_idle_exit && (real_ncpus == 1)) {
		CleanPoU_Dcache();
	} else {
		FlushPoU_Dcache();

#if (__ARM_ARCH__ < 8)
		cpu_data_t      *cpu_data_ptr = getCpuDatap();
		cpu_data_ptr->cpu_CLW_active = 0;
		__builtin_arm_dmb(DMB_ISH);
		cpu_data_ptr->cpu_CLWFlush_req = 0;
		cpu_data_ptr->cpu_CLWClean_req = 0;
		CleanPoC_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));
#endif /* (__ARM_ARCH__ < 8) */
	}

#if defined(ARMA7)
	uint32_t actlr_value = 0;

	/* Leave the coherency domain */
	__builtin_arm_clrex();
	actlr_value = __builtin_arm_mrc(MRC_ACTLR);
	actlr_value &= ~0x40;

	__builtin_arm_mcr(MCR_ACTLR(actlr_value));
	/* Ensures any pending fwd request gets serviced and ends up */
	__builtin_arm_dsb(DSB_SY);
	/* Forces the processor to re-fetch, so any pending fwd request gets into the core */
	__builtin_arm_isb(ISB_SY);
	/* Ensures the second possible pending fwd request ends up. */
	__builtin_arm_dsb(DSB_SY);
#endif /* defined(ARMA7) */
}
void
platform_cache_idle_exit(
	void)
{
#if defined(ARMA7)
	uint32_t actlr_value = 0;

	/* Flush L1 caches and TLB before rejoining the coherency domain */
	FlushPoU_Dcache();
	/*
	 * If we're only using a single CPU, we can avoid flushing the
	 * I-cache or the TLB, as neither program text nor pagetables
	 * should have been changed during the idle period.  We still
	 * want to flush the D-cache to PoU (above), as memory contents
	 * may have been changed by DMA.
	 */
	if (!up_style_idle_exit || (real_ncpus > 1)) {
		InvalidatePoU_Icache();
		flush_core_tlb();
	}

	/* Rejoin the coherency domain */
	actlr_value = __builtin_arm_mrc(MRC_ACTLR);
	actlr_value |= 0x40;
	__builtin_arm_mcr(MCR_ACTLR(actlr_value));
	__builtin_arm_isb(ISB_SY);

	uint32_t sctlr_value = 0;

	/* Enable dcache allocation. */
	sctlr_value = __builtin_arm_mrc(MRC_SCTLR);
	sctlr_value |= SCTLR_DCACHE;
	__builtin_arm_mcr(MCR_SCTLR(sctlr_value));
	__builtin_arm_isb(ISB_SY);
	getCpuDatap()->cpu_CLW_active = 1;
#endif /* defined(ARMA7) */
}
boolean_t
platform_cache_batch_wimg(
	__unused unsigned int new_wimg,
	__unused unsigned int size
	)
{
	boolean_t       do_cache_op = FALSE;

	if ((cache_info()->c_bulksize_op != 0) && (size >= (cache_info()->c_bulksize_op))) {
		do_cache_op = TRUE;
	}

	return do_cache_op;
}
void
platform_cache_flush_wimg(
	__unused unsigned int new_wimg)
{
#if defined(ARMA7)
	cache_xcall(LWFlush);
#else
	FlushPoC_Dcache();
	if (getCpuDatap()->cpu_cache_dispatch != NULL) {
		getCpuDatap()->cpu_cache_dispatch(getCpuDatap()->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
	}
#endif
}
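/*
 * cache_xcall_handler() runs on a CPU in response to a SIGPLWFlush or
 * SIGPLWClean signal: if this CPU has a pending request newer than the last
 * one it serviced, it cleans or flushes its local D-cache to the PoU and
 * records the completion timestamp.
 */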
void
cache_xcall_handler(unsigned int op)
{
	cpu_data_t      *cdp;
	uint64_t        abstime;

	cdp = getCpuDatap();

	if ((op == LWFlush) && (cdp->cpu_CLWFlush_req > cdp->cpu_CLWFlush_last)) {
		FlushPoU_Dcache();
		abstime = ml_get_timebase();
		cdp->cpu_CLWFlush_last = abstime;
		cdp->cpu_CLWClean_last = abstime;
	} else if ((op == LWClean) && (cdp->cpu_CLWClean_req > cdp->cpu_CLWClean_last)) {
		CleanPoU_Dcache();
		abstime = ml_get_timebase();
		cdp->cpu_CLWClean_last = abstime;
	}
}
void
cache_xcall(unsigned int op)
{
	boolean_t       intr;
	cpu_data_t      *cdp;
	cpu_data_t      *target_cdp;
	unsigned int    cpu;
	unsigned int    signal;
	uint64_t        abstime;

	intr = ml_set_interrupts_enabled(FALSE);
	cdp = getCpuDatap();
	abstime = ml_get_timebase();
	if (op == LWClean) {
		signal = SIGPLWClean;
	} else {
		signal = SIGPLWFlush;
	}

	const unsigned int max_cpu_id = ml_get_max_cpu_number();
	for (cpu = 0; cpu <= max_cpu_id; cpu++) {
		target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
		if (target_cdp == (cpu_data_t *)NULL) {
			break;
		}

		if (target_cdp->cpu_CLW_active == 0) {
			continue;
		}

		if (op == LWFlush) {
			target_cdp->cpu_CLWFlush_req = abstime;
		} else if (op == LWClean) {
			target_cdp->cpu_CLWClean_req = abstime;
		}
		__builtin_arm_dmb(DMB_ISH);
		if (target_cdp->cpu_CLW_active == 0) {
			if (op == LWFlush) {
				target_cdp->cpu_CLWFlush_req = 0x0ULL;
			} else if (op == LWClean) {
				target_cdp->cpu_CLWClean_req = 0x0ULL;
			}
			continue;
		}

		if (target_cdp == cdp) {
			continue;
		}

		if (KERN_SUCCESS != cpu_signal(target_cdp, signal, (void *)NULL, NULL)) {
			if (op == LWFlush) {
				target_cdp->cpu_CLWFlush_req = 0x0ULL;
			} else if (op == LWClean) {
				target_cdp->cpu_CLWClean_req = 0x0ULL;
			}
		}
		if (cpu == real_ncpus) {
			break;
		}
	}

	cache_xcall_handler(op);

	(void) ml_set_interrupts_enabled(intr);

	for (cpu = 0; cpu <= max_cpu_id; cpu++) {
		target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
		if (target_cdp == (cpu_data_t *)NULL) {
			break;
		}

		if (target_cdp == cdp) {
			continue;
		}

		if (op == LWFlush) {
			while ((target_cdp->cpu_CLWFlush_req != 0x0ULL) && (target_cdp->cpu_CLWFlush_last < abstime)) {
				;
			}
		} else if (op == LWClean) {
			while ((target_cdp->cpu_CLWClean_req != 0x0ULL) && (target_cdp->cpu_CLWClean_last < abstime)) {
				;
			}
		}

		if (cpu == real_ncpus) {
			break;
		}
	}

	if (op == LWFlush) {
		FlushPoC_Dcache();
	} else if (op == LWClean) {
		CleanPoC_Dcache();
	}
}
#else /* __ARM_COHERENT_IO__ */
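/*
 * With __ARM_COHERENT_IO__, DMA is cache coherent, so these entry points
 * reduce to barriers or no-ops.
 */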
void
flush_dcache(
	__unused vm_offset_t addr,
	__unused unsigned length,
	__unused boolean_t phys)
{
	__builtin_arm_dsb(DSB_SY);
}
void
clean_dcache(
	__unused vm_offset_t addr,
	__unused unsigned length,
	__unused boolean_t phys)
{
	__builtin_arm_dsb(DSB_SY);
}
void
flush_dcache_syscall(
	__unused vm_offset_t va,
	__unused unsigned length)
{
	__builtin_arm_dsb(DSB_SY);
}
void
dcache_incoherent_io_flush64(
	__unused addr64_t pa,
	__unused unsigned int size,
	__unused unsigned int remaining,
	__unused unsigned int *res)
{
	__builtin_arm_dsb(DSB_SY);
	return;
}
void
dcache_incoherent_io_store64(
	__unused addr64_t pa,
	__unused unsigned int size,
	__unused unsigned int remaining,
	__unused unsigned int *res)
{
	__builtin_arm_dsb(DSB_SY);
	return;
}
void
cache_sync_page(
	ppnum_t pp
	)
{
	pmap_paddr_t    paddr = ptoa(pp);

	if (isphysmem(paddr)) {
		vm_offset_t     vaddr = phystokv(paddr);
		InvalidatePoU_IcacheRegion(vaddr, PAGE_SIZE);
	}
}
void
platform_cache_flush(
	void)
{
}

void
platform_cache_clean(
	void)
{
}

void
platform_cache_shutdown(
	void)
{
}

void
platform_cache_idle_enter(
	void)
{
}

void
platform_cache_idle_exit(
	void)
{
}

boolean_t
platform_cache_batch_wimg(
	__unused unsigned int new_wimg,
	__unused unsigned int size
	)
{
	return TRUE;
}

void
platform_cache_flush_wimg(
	__unused unsigned int new_wimg)
{
}

#endif /* __ARM_COHERENT_IO__ */