/*
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_assert.h>
#include <mach/vm_types.h>
#include <mach/mach_time.h>
#include <kern/timer.h>
#include <kern/clock.h>
#include <kern/machine.h>
#include <mach/machine.h>
#include <mach/machine/vm_param.h>
#include <kdp/kdp_udp.h>
#include <arm/caches_internal.h>
#include <arm/cpuid.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <arm/misc_protos.h>
/*
 * dcache_incoherent_io_flush64() dcache_incoherent_io_store64() result info
 */
#define LWOpDone 1
#define BWOpDone 3
#ifndef __ARM_COHERENT_IO__

extern boolean_t up_style_idle_exit;
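
/*
 * Non-coherent I/O configuration: DMA masters do not snoop the CPU caches, so
 * the routines below explicitly clean and/or flush the data cache around I/O,
 * working either from a kernel virtual address or from a physical address, and
 * notify any registered external cache (cpu_cache_dispatch) as well.
 */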
void
flush_dcache(
	vm_offset_t addr,
	unsigned length,
	boolean_t phys)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	vm_offset_t vaddr;
	addr64_t paddr;
	vm_size_t count;

	while (length > 0) {
		if (phys) {
			count = length;
			paddr = CAST_DOWN(pmap_paddr_t, addr);
			vaddr = phystokv_range(paddr, &count);
		} else {
			paddr = kvtophys(addr);
			vaddr = addr;
			count = PAGE_SIZE - (addr & PAGE_MASK);
			if (count > length) {
				count = length;
			}
		}
		FlushPoC_DcacheRegion(vaddr, (unsigned)count);
		if (paddr && (cpu_data_ptr->cpu_cache_dispatch != NULL)) {
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
				cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) paddr, (unsigned)count);
		}
		addr += count;
		length -= count;
	}
	return;
}
void
clean_dcache(
	vm_offset_t addr,
	unsigned length,
	boolean_t phys)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	vm_offset_t vaddr;
	addr64_t paddr;
	vm_size_t count;

	while (length > 0) {
		if (phys) {
			count = length;
			paddr = CAST_DOWN(pmap_paddr_t, addr);
			vaddr = phystokv_range(paddr, &count);
		} else {
			paddr = kvtophys(addr);
			vaddr = addr;
			count = PAGE_SIZE - (addr & PAGE_MASK);
			if (count > length) {
				count = length;
			}
		}
		CleanPoC_DcacheRegion(vaddr, (unsigned)count);
		if (paddr && (cpu_data_ptr->cpu_cache_dispatch != NULL)) {
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
				cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) paddr, (unsigned)count);
		}
		addr += count;
		length -= count;
	}
	return;
}
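
/*
 * For large requests it is cheaper to clean or flush the entire data cache
 * than to walk the range line by line; cache_info()->c_bulksize_op is the
 * crossover point used by the routines below.
 */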
void
flush_dcache_syscall(
	vm_offset_t va,
	unsigned length)
{
	if ((cache_info()->c_bulksize_op != 0) && (length >= (cache_info()->c_bulksize_op))) {
#if __ARM_SMP__ && defined(ARMA7)
		cache_xcall(LWFlush);
#else
		FlushPoC_Dcache();
		if (getCpuDatap()->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
			((cache_dispatch_t) getCpuDatap()->cpu_cache_dispatch)( getCpuDatap()->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
		}
#endif
	} else {
		FlushPoC_DcacheRegion((vm_offset_t) va, length);
	}
	return;
}
void
dcache_incoherent_io_flush64(
	addr64_t pa,
	unsigned int size,
	unsigned int remaining,
	unsigned int *res)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if ((cache_info()->c_bulksize_op != 0) && (remaining >= (cache_info()->c_bulksize_op))) {
#if __ARM_SMP__ && defined (ARMA7)
		cache_xcall(LWFlush);
#else
		FlushPoC_Dcache();
		if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( cpu_data_ptr->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
		}
#endif
		*res = BWOpDone;
	} else {
		while (size > 0) {
			pmap_paddr_t paddr = CAST_DOWN(pmap_paddr_t, pa);
			vm_offset_t vaddr;
			unsigned int wimg_bits, index;
			vm_size_t count;

			if (isphysmem(paddr)) {
				count = size;
				vaddr = phystokv_range(paddr, &count);
			} else {
				count = PAGE_SIZE - (paddr & PAGE_MASK);
				if (count > size) {
					count = size;
				}

				wimg_bits = pmap_cache_attributes((ppnum_t) (paddr >> PAGE_SHIFT));
				mp_disable_preemption();
				index = pmap_map_cpu_windows_copy((ppnum_t) (paddr >> PAGE_SHIFT), VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
				vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | (paddr & PAGE_MASK);
			}
			FlushPoC_DcacheRegion(vaddr, (unsigned)count);
			if (isphysmem(paddr)) {
				if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
					((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
						cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) paddr, (unsigned)count);
				}
			} else {
				pmap_unmap_cpu_windows_copy(index);
				mp_enable_preemption();
			}
			pa += count;
			size -= count;
		}
		*res = LWOpDone;
	}
	return;
}
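
/*
 * Store (clean-only) variant: dirty lines are written back to memory but left
 * valid in the cache.  Pages mapped I/O, write-combined, or real-time are not
 * CPU-cacheable, so the routine returns immediately for those.
 */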
void
dcache_incoherent_io_store64(
	addr64_t pa,
	unsigned int size,
	unsigned int remaining,
	unsigned int *res)
{
	pmap_paddr_t paddr = CAST_DOWN(pmap_paddr_t, pa);
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (isphysmem(paddr)) {
		unsigned int wimg_bits = pmap_cache_attributes((ppnum_t) (paddr >> PAGE_SHIFT));
		if ((wimg_bits == VM_WIMG_IO) || (wimg_bits == VM_WIMG_WCOMB) || (wimg_bits == VM_WIMG_RT)) {
			return;
		}
	}

	if ((cache_info()->c_bulksize_op != 0) && (remaining >= (cache_info()->c_bulksize_op))) {
#if __ARM_SMP__ && defined (ARMA7)
		cache_xcall(LWClean);
		if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL);
		}
#else
		CleanPoC_Dcache();
		if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL);
		}
#endif
		*res = BWOpDone;
	} else {
		while (size > 0) {
			unsigned int wimg_bits, index;
			vm_size_t count;
			vm_offset_t vaddr;

			paddr = CAST_DOWN(pmap_paddr_t, pa);
			if (isphysmem(paddr)) {
				count = size;
				vaddr = phystokv_range(paddr, &count);
			} else {
				count = PAGE_SIZE - (paddr & PAGE_MASK);
				if (count > size) {
					count = size;
				}

				wimg_bits = pmap_cache_attributes((ppnum_t) (paddr >> PAGE_SHIFT));
				mp_disable_preemption();
				index = pmap_map_cpu_windows_copy((ppnum_t) (paddr >> PAGE_SHIFT), VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
				vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | (paddr & PAGE_MASK);
			}
			CleanPoC_DcacheRegion(vaddr, (unsigned)count);
			if (isphysmem(paddr)) {
				if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
					((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
						cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) paddr, (unsigned)count);
				}
			} else {
				pmap_unmap_cpu_windows_copy(index);
				mp_enable_preemption();
			}
			pa += count;
			size -= count;
		}
		*res = LWOpDone;
	}
	return;
}
void
cache_sync_page(
	ppnum_t pp
	)
{
	pmap_paddr_t paddr = ptoa(pp);

	if (isphysmem(paddr)) {
		vm_offset_t vaddr = phystokv(paddr);
		InvalidatePoU_IcacheRegion(vaddr, PAGE_SIZE);
	} else {
		InvalidatePoU_Icache();
	}
}
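
/*
 * Platform cache bring-up: when a board-specific cache dispatch routine is
 * registered, enable the external cache through it and, if cache_info() does
 * not yet know the L2 size, query the controller for it.
 */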
void
platform_cache_init(
	void)
{
	cache_info_t *cpuid_cache_info;
	unsigned int cache_size = 0x0UL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	cpuid_cache_info = cache_info();

	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
		((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
			cpu_data_ptr->cpu_id, CacheControl, CacheControlEnable, 0x0UL);

		if (cpuid_cache_info->c_l2size == 0x0) {
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
				cpu_data_ptr->cpu_id, CacheConfig, CacheConfigSize, (unsigned int)&cache_size);
			cpuid_cache_info->c_l2size = cache_size;
		}
	}
}
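
/*
 * The whole-cache entry points below perform the local maintenance operation
 * and then forward the request to the registered platform cache dispatch
 * routine, if any, so an external cache controller can mirror it.
 */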
void
platform_cache_flush(
	void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	FlushPoC_Dcache();

	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
		((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
			cpu_data_ptr->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
	}
}
void
platform_cache_clean(
	void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	CleanPoC_Dcache();

	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
		((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
			cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL);
	}
}
void
platform_cache_shutdown(
	void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	CleanPoC_Dcache();

	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
		((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(
			cpu_data_ptr->cpu_id, CacheShutdown, 0x0UL, 0x0UL);
	}
}
void
platform_cache_disable(void)
{
#if (__ARM_ARCH__ < 8)
	uint32_t sctlr_value = 0;

	/* Disable dcache allocation. */
	sctlr_value = __builtin_arm_mrc(MRC_SCTLR);
	sctlr_value &= ~SCTLR_DCACHE;
	__builtin_arm_mcr(MCR_SCTLR(sctlr_value));
	__builtin_arm_isb(ISB_SY);
#endif /* (__ARM_ARCH__ < 8) */
}
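
/*
 * Idle entry: before WFI the core stops allocating into the D-cache, writes
 * back anything dirty, and on ARMv7 SMP parts leaves the coherency domain via
 * ACTLR; platform_cache_idle_exit() reverses the sequence.
 */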
void
platform_cache_idle_enter(
	void)
{
#if __ARM_SMP__
	platform_cache_disable();

	/*
	 * If we're only using a single CPU, just write back any
	 * dirty cachelines.  We can avoid doing housekeeping
	 * on CPU data that would normally be modified by other
	 * CPUs.
	 */
	if (up_style_idle_exit && (real_ncpus == 1)) {
		CleanPoU_Dcache();
	} else {
		FlushPoU_Dcache();

#if (__ARM_ARCH__ < 8)
		cpu_data_t *cpu_data_ptr = getCpuDatap();
		cpu_data_ptr->cpu_CLW_active = 0;
		__builtin_arm_dmb(DMB_ISH);
		cpu_data_ptr->cpu_CLWFlush_req = 0;
		cpu_data_ptr->cpu_CLWClean_req = 0;
		CleanPoC_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));
#endif /* (__ARM_ARCH__ < 8) */
	}
#else /* !__ARM_SMP__ */
	CleanPoU_Dcache();
#endif /* !__ARM_SMP__ */
#if defined(__ARM_SMP__) && defined(ARMA7)
	uint32_t actlr_value = 0;

	/* Leave the coherency domain */
	__builtin_arm_clrex();
	actlr_value = __builtin_arm_mrc(MRC_ACTLR);
	actlr_value &= ~0x40;

	__builtin_arm_mcr(MCR_ACTLR(actlr_value));
	/* Ensures any pending fwd request gets serviced and ends up */
	__builtin_arm_dsb(DSB_SY);
	/* Forces the processor to re-fetch, so any pending fwd request gets into the core */
	__builtin_arm_isb(ISB_SY);
	/* Ensures the second possible pending fwd request ends up. */
	__builtin_arm_dsb(DSB_SY);
#endif /* defined(__ARM_SMP__) && defined(ARMA7) */
}
void
platform_cache_idle_exit(
	void)
{
#if defined(ARMA7)
	uint32_t actlr_value = 0;

	/* Flush L1 caches and TLB before rejoining the coherency domain */
	FlushPoU_Dcache();
	/*
	 * If we're only using a single CPU, we can avoid flushing the
	 * I-cache or the TLB, as neither program text nor pagetables
	 * should have been changed during the idle period.  We still
	 * want to flush the D-cache to PoU (above), as memory contents
	 * may have been changed by DMA.
	 */
	if (!up_style_idle_exit || (real_ncpus > 1)) {
		InvalidatePoU_Icache();
		flush_core_tlb();
	}

	/* Rejoin the coherency domain */
	actlr_value = __builtin_arm_mrc(MRC_ACTLR);
	actlr_value |= 0x40;
	__builtin_arm_mcr(MCR_ACTLR(actlr_value));
	__builtin_arm_isb(ISB_SY);

#if __ARM_SMP__
	uint32_t sctlr_value = 0;

	/* Enable dcache allocation. */
	sctlr_value = __builtin_arm_mrc(MRC_SCTLR);
	sctlr_value |= SCTLR_DCACHE;
	__builtin_arm_mcr(MCR_SCTLR(sctlr_value));
	__builtin_arm_isb(ISB_SY);
	getCpuDatap()->cpu_CLW_active = 1;
#endif /* __ARM_SMP__ */
#endif /* defined(ARMA7) */
}
boolean_t
platform_cache_batch_wimg(
	__unused unsigned int new_wimg,
	__unused unsigned int size)
{
	boolean_t do_cache_op = FALSE;

	if ((cache_info()->c_bulksize_op != 0) && (size >= (cache_info()->c_bulksize_op))) {
		do_cache_op = TRUE;
	}

	return do_cache_op;
}
void
platform_cache_flush_wimg(
	__unused unsigned int new_wimg)
{
#if __ARM_SMP__ && defined (ARMA7)
	cache_xcall(LWFlush);
#else
	FlushPoC_Dcache();
	if (getCpuDatap()->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
		((cache_dispatch_t) getCpuDatap()->cpu_cache_dispatch)( getCpuDatap()->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
	}
#endif
}
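
/*
 * ARMv7 SMP cross-call machinery: cache_xcall() timestamps a flush or clean
 * request for every active core, signals the remote cores, performs the local
 * operation through cache_xcall_handler(), and then spins until each target's
 * "last completed" timestamp catches up with the request.
 */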
#if __ARM_SMP__ && defined(ARMA7)
void
cache_xcall_handler(unsigned int op)
{
	cpu_data_t *cdp;
	uint64_t abstime;

	cdp = getCpuDatap();

	if ((op == LWFlush) && (cdp->cpu_CLWFlush_req > cdp->cpu_CLWFlush_last)) {
		FlushPoU_Dcache();
		abstime = ml_get_timebase();
		cdp->cpu_CLWFlush_last = abstime;
		cdp->cpu_CLWClean_last = abstime;
	} else if ((op == LWClean) && (cdp->cpu_CLWClean_req > cdp->cpu_CLWClean_last)) {
		CleanPoU_Dcache();
		abstime = ml_get_timebase();
		cdp->cpu_CLWClean_last = abstime;
	}
}
void
cache_xcall(unsigned int op)
{
	boolean_t intr;
	cpu_data_t *cdp;
	cpu_data_t *target_cdp;
	unsigned int cpu;
	unsigned int signal;
	uint64_t abstime;

	intr = ml_set_interrupts_enabled(FALSE);
	cdp = getCpuDatap();
	abstime = ml_get_timebase();
	if (op == LWClean) {
		signal = SIGPLWClean;
	} else {
		signal = SIGPLWFlush;
	}

	for (cpu = 0; cpu < MAX_CPUS; cpu++) {
		target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
		if (target_cdp == (cpu_data_t *)NULL) {
			break;
		}

		if (target_cdp->cpu_CLW_active == 0) {
			continue;
		}

		if (op == LWFlush) {
			target_cdp->cpu_CLWFlush_req = abstime;
		} else if (op == LWClean) {
			target_cdp->cpu_CLWClean_req = abstime;
		}
		__builtin_arm_dmb(DMB_ISH);
		if (target_cdp->cpu_CLW_active == 0) {
			if (op == LWFlush) {
				target_cdp->cpu_CLWFlush_req = 0x0ULL;
			} else if (op == LWClean) {
				target_cdp->cpu_CLWClean_req = 0x0ULL;
			}
			continue;
		}

		if (target_cdp == cdp) {
			continue;
		}

		if (KERN_SUCCESS != cpu_signal(target_cdp, signal, (void *)NULL, NULL)) {
			if (op == LWFlush) {
				target_cdp->cpu_CLWFlush_req = 0x0ULL;
			} else if (op == LWClean) {
				target_cdp->cpu_CLWClean_req = 0x0ULL;
			}
		}
		if (cpu == real_ncpus) {
			break;
		}
	}

	cache_xcall_handler(op);

	(void) ml_set_interrupts_enabled(intr);

	for (cpu = 0; cpu < MAX_CPUS; cpu++) {
		target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
		if (target_cdp == (cpu_data_t *)NULL) {
			break;
		}

		if (target_cdp == cdp) {
			continue;
		}

		if (op == LWFlush) {
			while ((target_cdp->cpu_CLWFlush_req != 0x0ULL) && (target_cdp->cpu_CLWFlush_last < abstime)) {
				;
			}
		} else if (op == LWClean) {
			while ((target_cdp->cpu_CLWClean_req != 0x0ULL) && (target_cdp->cpu_CLWClean_last < abstime)) {
				;
			}
		}

		if (cpu == real_ncpus) {
			break;
		}
	}

	if (op == LWFlush) {
		FlushPoC_Dcache();
	} else if (op == LWClean) {
		CleanPoC_Dcache();
	}
}
#endif /* __ARM_SMP__ && defined(ARMA7) */
#else /* __ARM_COHERENT_IO__ */
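
/*
 * Coherent I/O configuration: DMA snoops the CPU caches, so the flush and
 * clean entry points reduce to a data synchronization barrier and the
 * platform cache hooks become no-ops.
 */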
void
flush_dcache(
	__unused vm_offset_t addr,
	__unused unsigned length,
	__unused boolean_t phys)
{
	__builtin_arm_dsb(DSB_SY);
}

void
clean_dcache(
	__unused vm_offset_t addr,
	__unused unsigned length,
	__unused boolean_t phys)
{
	__builtin_arm_dsb(DSB_SY);
}

void
flush_dcache_syscall(
	__unused vm_offset_t va,
	__unused unsigned length)
{
	__builtin_arm_dsb(DSB_SY);
}

void
dcache_incoherent_io_flush64(
	__unused addr64_t pa,
	__unused unsigned int size,
	__unused unsigned int remaining,
	__unused unsigned int *res)
{
	__builtin_arm_dsb(DSB_SY);
	*res = LWOpDone;
	return;
}

void
dcache_incoherent_io_store64(
	__unused addr64_t pa,
	__unused unsigned int size,
	__unused unsigned int remaining,
	__unused unsigned int *res)
{
	__builtin_arm_dsb(DSB_SY);
	*res = LWOpDone;
	return;
}
void
cache_sync_page(
	ppnum_t pp
	)
{
	pmap_paddr_t paddr = ptoa(pp);

	if (isphysmem(paddr)) {
		vm_offset_t vaddr = phystokv(paddr);
		InvalidatePoU_IcacheRegion(vaddr, PAGE_SIZE);
	}
}
void platform_cache_flush(void)
{
}

void platform_cache_clean(void)
{
}

void platform_cache_shutdown(void)
{
}

void platform_cache_idle_enter(void)
{
}

void platform_cache_idle_exit(void)
{
}

boolean_t
platform_cache_batch_wimg(__unused unsigned int new_wimg, __unused unsigned int size)
{
	return TRUE;
}

void
platform_cache_flush_wimg(__unused unsigned int new_wimg)
{
}

#endif /* __ARM_COHERENT_IO__ */