/*
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_assert.h>
#include <mach/vm_types.h>
#include <mach/mach_time.h>
#include <kern/timer.h>
#include <kern/clock.h>
#include <kern/machine.h>
#include <mach/machine.h>
#include <mach/machine/vm_param.h>
#include <mach_kdp.h>
#include <kdp/kdp_udp.h>
#include <arm/caches_internal.h>
#include <arm/cpuid.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <arm/misc_protos.h>

/*
 * Result codes reported by dcache_incoherent_io_flush64() and
 * dcache_incoherent_io_store64() through their 'res' out-parameter.
 */
#define LWOpDone 1
#define BWOpDone 3

#ifndef __ARM_COHERENT_IO__

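/*
 * On platforms without coherent I/O, DMA does not snoop the CPU caches, so
 * the routines below perform explicit cache maintenance, one page (or
 * physical-aperture run) at a time, and forward each operation to any
 * external cache controller registered via cpu_cache_dispatch.
 *
 * up_style_idle_exit is a boot-arg-settable tunable; on a single-CPU
 * configuration it lets the idle enter/exit paths skip maintenance that is
 * only needed when other CPUs may have modified memory during the idle
 * period.
 */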
TUNABLE(bool, up_style_idle_exit, "up_style_idle_exit", false);

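/*
 * flush_dcache: clean and invalidate the data cache to the point of
 * coherency for a range of memory.  'addr' is interpreted as a physical
 * address when 'phys' is TRUE and as a kernel virtual address otherwise.
 * Each chunk is also pushed to the external cache controller, if present.
 */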
void
flush_dcache(
	vm_offset_t addr,
	unsigned length,
	boolean_t phys)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	vm_offset_t vaddr;
	addr64_t paddr;
	vm_size_t count;

	while (length > 0) {
		if (phys) {
			count = length;
			paddr = CAST_DOWN(pmap_paddr_t, addr);
			vaddr = phystokv_range(paddr, &count);
		} else {
			paddr = kvtophys(addr);
			vaddr = addr;
			count = PAGE_SIZE - (addr & PAGE_MASK);
			if (count > length) {
				count = length;
			}
		}
		FlushPoC_DcacheRegion(vaddr, (unsigned)count);
		if (paddr && (cpu_data_ptr->cpu_cache_dispatch != NULL)) {
			cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) paddr, (unsigned)count);
		}
		addr += count;
		length -= count;
	}
	return;
}

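/*
 * clean_dcache: identical traversal to flush_dcache, but only cleans
 * (writes back) dirty lines to the point of coherency without invalidating
 * them.
 */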
void
clean_dcache(
	vm_offset_t addr,
	unsigned length,
	boolean_t phys)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	vm_offset_t vaddr;
	addr64_t paddr;
	vm_size_t count;

	while (length > 0) {
		if (phys) {
			count = length;
			paddr = CAST_DOWN(pmap_paddr_t, addr);
			vaddr = phystokv_range(paddr, &count);
		} else {
			paddr = kvtophys(addr);
			vaddr = addr;
			count = PAGE_SIZE - (addr & PAGE_MASK);
			if (count > length) {
				count = length;
			}
		}
		CleanPoC_DcacheRegion(vaddr, (unsigned)count);
		if (paddr && (cpu_data_ptr->cpu_cache_dispatch != NULL)) {
			cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) paddr, (unsigned)count);
		}
		addr += count;
		length -= count;
	}
	return;
}

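/*
 * flush_dcache_syscall: flush entry point for the cache-control system call
 * path (as the name suggests).  For requests at or above the bulk-operation
 * threshold (c_bulksize_op) it is cheaper to flush the entire data cache
 * (cross-calling the other CPUs on ARMA7) than to walk the region; smaller
 * requests flush only the given range.
 */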
void
flush_dcache_syscall(
	vm_offset_t va,
	unsigned length)
{
	if ((cache_info()->c_bulksize_op != 0) && (length >= (cache_info()->c_bulksize_op))) {
#if defined(ARMA7)
		cache_xcall(LWFlush);
#else
		FlushPoC_Dcache();
		if (getCpuDatap()->cpu_cache_dispatch != NULL) {
			getCpuDatap()->cpu_cache_dispatch(getCpuDatap()->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
		}
#endif
	} else {
		FlushPoC_DcacheRegion((vm_offset_t) va, length);
	}
	return;
}

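/*
 * dcache_incoherent_io_flush64: clean and invalidate a physical range for
 * non-coherent DMA.  If the total work still remaining meets the bulk
 * threshold, the whole cache is flushed instead and *res is set to BWOpDone
 * so the caller can stop iterating.  Pages outside the managed physical
 * aperture are reached through a temporary per-CPU copy window, with
 * preemption disabled while the window is mapped.
 */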
void
dcache_incoherent_io_flush64(
	addr64_t pa,
	unsigned int size,
	unsigned int remaining,
	unsigned int *res)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if ((cache_info()->c_bulksize_op != 0) && (remaining >= (cache_info()->c_bulksize_op))) {
#if defined (ARMA7)
		cache_xcall(LWFlush);
#else
		FlushPoC_Dcache();
		if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
			cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
		}
#endif
		*res = BWOpDone;
	} else {
		vm_offset_t vaddr;
		pmap_paddr_t paddr = CAST_DOWN(pmap_paddr_t, pa);
		vm_size_t count;
		unsigned int wimg_bits, index;

		while (size > 0) {
			if (isphysmem(paddr)) {
				count = size;
				vaddr = phystokv_range(paddr, &count);
			} else {
				count = PAGE_SIZE - (paddr & PAGE_MASK);
				if (count > size) {
					count = size;
				}

				wimg_bits = pmap_cache_attributes((ppnum_t) (paddr >> PAGE_SHIFT));
				mp_disable_preemption();
				index = pmap_map_cpu_windows_copy((ppnum_t) (paddr >> PAGE_SHIFT), VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
				vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | (paddr & PAGE_MASK);
			}
			FlushPoC_DcacheRegion(vaddr, (unsigned)count);
			if (isphysmem(paddr)) {
				if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
					cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) paddr, (unsigned)count);
				}
			} else {
				pmap_unmap_cpu_windows_copy(index);
				mp_enable_preemption();
			}
			paddr += count;
			size -= count;
		}
	}

	return;
}

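/*
 * dcache_incoherent_io_store64: clean (write back) a physical range so that
 * memory is up to date before a device reads it.  Ranges whose WIMG mapping
 * is already non-cacheable (IO, write-combined, or real-time) need no
 * maintenance and return immediately.  Otherwise the logic mirrors
 * dcache_incoherent_io_flush64, using the clean-only primitives.
 */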
void
dcache_incoherent_io_store64(
	addr64_t pa,
	unsigned int size,
	unsigned int remaining,
	unsigned int *res)
{
	pmap_paddr_t paddr = CAST_DOWN(pmap_paddr_t, pa);
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (isphysmem(paddr)) {
		unsigned int wimg_bits = pmap_cache_attributes((ppnum_t) (paddr >> PAGE_SHIFT));
		if ((wimg_bits == VM_WIMG_IO) || (wimg_bits == VM_WIMG_WCOMB) || (wimg_bits == VM_WIMG_RT)) {
			return;
		}
	}

	if ((cache_info()->c_bulksize_op != 0) && (remaining >= (cache_info()->c_bulksize_op))) {
#if defined (ARMA7)
		cache_xcall(LWClean);
		if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
			cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL);
		}
#else
		CleanPoC_Dcache();
		if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
			cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL);
		}
#endif
		*res = BWOpDone;
	} else {
		vm_offset_t vaddr;
		vm_size_t count;
		unsigned int wimg_bits, index;

		while (size > 0) {
			if (isphysmem(paddr)) {
				count = size;
				vaddr = phystokv_range(paddr, &count);
			} else {
				count = PAGE_SIZE - (paddr & PAGE_MASK);
				if (count > size) {
					count = size;
				}
				wimg_bits = pmap_cache_attributes((ppnum_t) (paddr >> PAGE_SHIFT));
				mp_disable_preemption();
				index = pmap_map_cpu_windows_copy((ppnum_t) (paddr >> PAGE_SHIFT), VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
				vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | (paddr & PAGE_MASK);
			}
			CleanPoC_DcacheRegion(vaddr, (unsigned)count);
			if (isphysmem(paddr)) {
				if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
					cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) paddr, (unsigned)count);
				}
			} else {
				pmap_unmap_cpu_windows_copy(index);
				mp_enable_preemption();
			}
			paddr += count;
			size -= count;
		}
	}

	return;
}

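/*
 * cache_sync_page: make newly written page contents visible to the
 * instruction stream (e.g. after paging in executable text) by invalidating
 * the I-cache for that page.  Pages that cannot be reached through the
 * physical aperture fall back to a full D-cache flush and I-cache
 * invalidate.
 */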
void
cache_sync_page(
	ppnum_t pp
	)
{
	pmap_paddr_t paddr = ptoa(pp);

	if (isphysmem(paddr)) {
		vm_offset_t vaddr = phystokv(paddr);
		InvalidatePoU_IcacheRegion(vaddr, PAGE_SIZE);
	} else {
		FlushPoC_Dcache();
		InvalidatePoU_Icache();
	}
}

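/*
 * platform_cache_init: enable any external (outer-level) cache controller
 * through the cpu_cache_dispatch hook and, if the CPUID-derived cache info
 * does not already report an L2 size, query the controller for it.
 */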
void
platform_cache_init(
	void)
{
	cache_info_t *cpuid_cache_info;
	unsigned int cache_size = 0x0UL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	cpuid_cache_info = cache_info();

	if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
		cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheControl, CacheControlEnable, 0x0UL);

		if (cpuid_cache_info->c_l2size == 0x0) {
			cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheConfig, CacheConfigSize, (unsigned int)&cache_size);
			cpuid_cache_info->c_l2size = cache_size;
		}
	}
}

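/*
 * platform_cache_flush / platform_cache_clean / platform_cache_shutdown:
 * whole-cache maintenance entry points.  Each operates on the local data
 * cache to the point of coherency and then forwards the corresponding
 * request to the external cache controller, if one is registered.
 */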
void
platform_cache_flush(
	void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	FlushPoC_Dcache();

	if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
		cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
	}
}

void
platform_cache_clean(
	void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	CleanPoC_Dcache();

	if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
		cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL);
	}
}

void
platform_cache_shutdown(
	void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	CleanPoC_Dcache();

	if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
		cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheShutdown, 0x0UL, 0x0UL);
	}
}

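/*
 * platform_cache_disable: stop new data-cache allocations by clearing the
 * SCTLR cache-enable bit.  Only needed on pre-ARMv8 cores; on ARMv8 and
 * later this is a no-op.
 */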
void
platform_cache_disable(void)
{
#if (__ARM_ARCH__ < 8)
	uint32_t sctlr_value = 0;

	/* Disable dcache allocation. */
	sctlr_value = __builtin_arm_mrc(MRC_SCTLR);
	sctlr_value &= ~SCTLR_DCACHE;
	__builtin_arm_mcr(MCR_SCTLR(sctlr_value));
	__builtin_arm_isb(ISB_SY);
#endif /* (__ARM_ARCH__ < 8) */
}

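/*
 * platform_cache_idle_enter: prepare this CPU's caches for entering an idle
 * (low-power) state.  D-cache allocation is disabled and dirty lines are
 * written back to the point of unification; on ARMA7 the CPU also clears
 * its cache-cross-call bookkeeping and leaves the coherency domain by
 * clearing ACTLR bit 0x40 (the SMP bit on Cortex-A-class cores).
 */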
void
platform_cache_idle_enter(
	void)
{
	platform_cache_disable();

	/*
	 * If we're only using a single CPU, just write back any
	 * dirty cachelines. We can avoid doing housekeeping
	 * on CPU data that would normally be modified by other
	 * CPUs.
	 */
	if (up_style_idle_exit && (real_ncpus == 1)) {
		CleanPoU_Dcache();
	} else {
		FlushPoU_Dcache();

#if (__ARM_ARCH__ < 8)
		cpu_data_t *cpu_data_ptr = getCpuDatap();
		cpu_data_ptr->cpu_CLW_active = 0;
		__builtin_arm_dmb(DMB_ISH);
		cpu_data_ptr->cpu_CLWFlush_req = 0;
		cpu_data_ptr->cpu_CLWClean_req = 0;
		CleanPoC_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));
#endif /* (__ARM_ARCH__ < 8) */
	}

#if defined(ARMA7)
	uint32_t actlr_value = 0;

	/* Leave the coherency domain */
	__builtin_arm_clrex();
	actlr_value = __builtin_arm_mrc(MRC_ACTLR);
	actlr_value &= ~0x40;

	__builtin_arm_mcr(MCR_ACTLR(actlr_value));
	/* Ensure any pending coherency forward request is serviced and completes. */
	__builtin_arm_dsb(DSB_SY);
	/* Force the processor to re-fetch, so any pending forward request reaches the core. */
	__builtin_arm_isb(ISB_SY);
	/* Ensure a second possible pending forward request also completes. */
	__builtin_arm_dsb(DSB_SY);
#endif /* defined(ARMA7) */
}

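/*
 * platform_cache_idle_exit: undo the idle-entry state on ARMA7.  The L1
 * D-cache is written back to the point of unification, the I-cache and TLB
 * are invalidated when another CPU or DMA could have changed text or page
 * tables, and the CPU rejoins the coherency domain (ACTLR bit 0x40) before
 * re-enabling D-cache allocation.
 */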
void
platform_cache_idle_exit(
	void)
{
#if defined(ARMA7)
	uint32_t actlr_value = 0;

	/* Flush L1 caches and TLB before rejoining the coherency domain */
	FlushPoU_Dcache();
	/*
	 * If we're only using a single CPU, we can avoid flushing the
	 * I-cache or the TLB, as neither program text nor pagetables
	 * should have been changed during the idle period. We still
	 * want to flush the D-cache to PoU (above), as memory contents
	 * may have been changed by DMA.
	 */
	if (!up_style_idle_exit || (real_ncpus > 1)) {
		InvalidatePoU_Icache();
		flush_core_tlb();
	}

	/* Rejoin the coherency domain */
	actlr_value = __builtin_arm_mrc(MRC_ACTLR);
	actlr_value |= 0x40;
	__builtin_arm_mcr(MCR_ACTLR(actlr_value));
	__builtin_arm_isb(ISB_SY);

	uint32_t sctlr_value = 0;

	/* Enable dcache allocation. */
	sctlr_value = __builtin_arm_mrc(MRC_SCTLR);
	sctlr_value |= SCTLR_DCACHE;
	__builtin_arm_mcr(MCR_SCTLR(sctlr_value));
	__builtin_arm_isb(ISB_SY);
	getCpuDatap()->cpu_CLW_active = 1;
#endif /* defined(ARMA7) */
}

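/*
 * platform_cache_batch_wimg reports whether a pending WIMG (cacheability)
 * change over 'size' bytes is large enough that a single bulk flush is
 * preferable to per-page maintenance; platform_cache_flush_wimg performs
 * that bulk flush.
 */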
boolean_t
platform_cache_batch_wimg(
	__unused unsigned int new_wimg,
	__unused unsigned int size
	)
{
	boolean_t do_cache_op = FALSE;

	if ((cache_info()->c_bulksize_op != 0) && (size >= (cache_info()->c_bulksize_op))) {
		do_cache_op = TRUE;
	}

	return do_cache_op;
}

void
platform_cache_flush_wimg(
	__unused unsigned int new_wimg
	)
{
#if defined (ARMA7)
	cache_xcall(LWFlush);
#else
	FlushPoC_Dcache();
	if (getCpuDatap()->cpu_cache_dispatch != NULL) {
		getCpuDatap()->cpu_cache_dispatch(getCpuDatap()->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
	}
#endif
}

#if defined(ARMA7)
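/*
 * cache_xcall_handler: per-CPU handler for the LWFlush/LWClean cross-call
 * signals.  If the requesting timestamp is newer than the last operation
 * this CPU completed, the local D-cache is flushed or cleaned to the point
 * of unification and the completion time is recorded so the initiator can
 * stop waiting.
 */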
void
cache_xcall_handler(unsigned int op)
{
	cpu_data_t *cdp;
	uint64_t abstime;

	cdp = getCpuDatap();

	if ((op == LWFlush) && (cdp->cpu_CLWFlush_req > cdp->cpu_CLWFlush_last)) {
		FlushPoU_Dcache();
		abstime = ml_get_timebase();
		cdp->cpu_CLWFlush_last = abstime;
		cdp->cpu_CLWClean_last = abstime;
	} else if ((op == LWClean) && (cdp->cpu_CLWClean_req > cdp->cpu_CLWClean_last)) {
		CleanPoU_Dcache();
		abstime = ml_get_timebase();
		cdp->cpu_CLWClean_last = abstime;
	}
}

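/*
 * cache_xcall: broadcast a point-of-unification clean or flush to every CPU
 * currently participating in the coherency domain.  The initiator stamps
 * each target's request field with the current timebase, signals the
 * target, performs the operation locally, and then spins until every target
 * has either completed the request or dropped out of the coherency domain.
 * Finally the initiator performs the corresponding point-of-coherency
 * operation on its own cache.
 */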
void
cache_xcall(unsigned int op)
{
	boolean_t intr;
	cpu_data_t *cdp;
	cpu_data_t *target_cdp;
	unsigned int cpu;
	unsigned int signal;
	uint64_t abstime;

	intr = ml_set_interrupts_enabled(FALSE);
	cdp = getCpuDatap();
	abstime = ml_get_timebase();
	if (op == LWClean) {
		signal = SIGPLWClean;
	} else {
		signal = SIGPLWFlush;
	}

	const unsigned int max_cpu_id = ml_get_max_cpu_number();
	for (cpu = 0; cpu <= max_cpu_id; cpu++) {
		target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
		if (target_cdp == (cpu_data_t *)NULL) {
			break;
		}

		if (target_cdp->cpu_CLW_active == 0) {
			continue;
		}

		if (op == LWFlush) {
			target_cdp->cpu_CLWFlush_req = abstime;
		} else if (op == LWClean) {
			target_cdp->cpu_CLWClean_req = abstime;
		}
		__builtin_arm_dmb(DMB_ISH);
		if (target_cdp->cpu_CLW_active == 0) {
			if (op == LWFlush) {
				target_cdp->cpu_CLWFlush_req = 0x0ULL;
			} else if (op == LWClean) {
				target_cdp->cpu_CLWClean_req = 0x0ULL;
			}
			continue;
		}

		if (target_cdp == cdp) {
			continue;
		}

		if (KERN_SUCCESS != cpu_signal(target_cdp, signal, (void *)NULL, NULL)) {
			if (op == LWFlush) {
				target_cdp->cpu_CLWFlush_req = 0x0ULL;
			} else if (op == LWClean) {
				target_cdp->cpu_CLWClean_req = 0x0ULL;
			}
		}
		if (cpu == real_ncpus) {
			break;
		}
	}

	cache_xcall_handler(op);

	(void) ml_set_interrupts_enabled(intr);

	for (cpu = 0; cpu <= max_cpu_id; cpu++) {
		target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
		if (target_cdp == (cpu_data_t *)NULL) {
			break;
		}

		if (target_cdp == cdp) {
			continue;
		}

		if (op == LWFlush) {
			while ((target_cdp->cpu_CLWFlush_req != 0x0ULL) && (target_cdp->cpu_CLWFlush_last < abstime)) {
				;
			}
		} else if (op == LWClean) {
			while ((target_cdp->cpu_CLWClean_req != 0x0ULL) && (target_cdp->cpu_CLWClean_last < abstime)) {
				;
			}
		}

		if (cpu == real_ncpus) {
			break;
		}
	}

	if (op == LWFlush) {
		FlushPoC_Dcache();
	} else if (op == LWClean) {
		CleanPoC_Dcache();
	}
}
#endif

#else /* __ARM_COHERENT_IO__ */

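/*
 * On platforms with coherent I/O, DMA engines snoop the data caches, so the
 * flush/clean entry points reduce to a data synchronization barrier (or to
 * nothing at all) and the bulk-operation machinery above is not needed.
 */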
void
flush_dcache(
	__unused vm_offset_t addr,
	__unused unsigned length,
	__unused boolean_t phys)
{
	__builtin_arm_dsb(DSB_SY);
}

void
clean_dcache(
	__unused vm_offset_t addr,
	__unused unsigned length,
	__unused boolean_t phys)
{
	__builtin_arm_dsb(DSB_SY);
}

void
flush_dcache_syscall(
	__unused vm_offset_t va,
	__unused unsigned length)
{
	__builtin_arm_dsb(DSB_SY);
}

void
dcache_incoherent_io_flush64(
	__unused addr64_t pa,
	__unused unsigned int size,
	__unused unsigned int remaining,
	__unused unsigned int *res)
{
	__builtin_arm_dsb(DSB_SY);
	*res = LWOpDone;
	return;
}

void
dcache_incoherent_io_store64(
	__unused addr64_t pa,
	__unused unsigned int size,
	__unused unsigned int remaining,
	__unused unsigned int *res)
{
	__builtin_arm_dsb(DSB_SY);
	*res = LWOpDone;
	return;
}

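/*
 * Even with coherent DMA, new instructions are not guaranteed to be visible
 * to the instruction stream without explicit maintenance, so
 * cache_sync_page still invalidates the I-cache for pages reachable through
 * the physical aperture.
 */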
void
cache_sync_page(
	ppnum_t pp
	)
{
	pmap_paddr_t paddr = ptoa(pp);

	if (isphysmem(paddr)) {
		vm_offset_t vaddr = phystokv(paddr);
		InvalidatePoU_IcacheRegion(vaddr, PAGE_SIZE);
	}
}

void
platform_cache_init(
	void)
{
}

void
platform_cache_flush(
	void)
{
}

void
platform_cache_clean(
	void)
{
}

void
platform_cache_shutdown(
	void)
{
}

void
platform_cache_idle_enter(
	void)
{
}

void
platform_cache_idle_exit(
	void)
{
}

boolean_t
platform_cache_batch_wimg(
	__unused unsigned int new_wimg,
	__unused unsigned int size
	)
{
	return TRUE;
}

void
platform_cache_flush_wimg(
	__unused unsigned int new_wimg)
{
}

#endif /* __ARM_COHERENT_IO__ */