/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach_assert.h>
#include <mach/vm_types.h>
#include <mach/mach_time.h>
#include <kern/timer.h>
#include <kern/clock.h>
#include <kern/machine.h>
#include <mach/machine.h>
#include <mach/machine/vm_param.h>

#include <kdp/kdp_udp.h>
#if !MACH_KDP
#include <kdp/kdp_callout.h>
#endif /* !MACH_KDP */
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/caches_internal.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <arm/misc_protos.h>

#include <sys/errno.h>

#define INT_SIZE        (BYTE_SIZE * sizeof(int))

#define BCOPY_PHYS_SRC_IS_PHYS(flags)   (((flags) & cppvPsrc) != 0)
#define BCOPY_PHYS_DST_IS_PHYS(flags)   (((flags) & cppvPsnk) != 0)
#define BCOPY_PHYS_SRC_IS_USER(flags)   (((flags) & (cppvPsrc | cppvKmap)) == 0)
#define BCOPY_PHYS_DST_IS_USER(flags)   (((flags) & (cppvPsnk | cppvKmap)) == 0)
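
/*
 * Example (illustrative): bcopy_phys() below passes cppvPsrc | cppvPsnk, so
 * both BCOPY_PHYS_SRC_IS_PHYS() and BCOPY_PHYS_DST_IS_PHYS() are true and
 * both addresses are treated as physical. A caller passing only cppvPsnk has
 * a physical destination and, absent cppvKmap, a user-virtual source, so
 * BCOPY_PHYS_SRC_IS_USER() is true and the copy goes through copyin().
 */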

static kern_return_t
bcopy_phys_internal(addr64_t src, addr64_t dst, vm_size_t bytes, int flags)
{
	unsigned int    src_index;
	unsigned int    dst_index;
	vm_offset_t     src_offset;
	vm_offset_t     dst_offset;
	unsigned int    wimg_bits_src, wimg_bits_dst;
	unsigned int    cpu_num = 0;
	ppnum_t         pn_src;
	ppnum_t         pn_dst;
	addr64_t        end __assert_only;
	kern_return_t   res = KERN_SUCCESS;

	assert(!__improbable(os_add_overflow(src, bytes, &end)));
	assert(!__improbable(os_add_overflow(dst, bytes, &end)));

	while ((bytes > 0) && (res == KERN_SUCCESS)) {
		src_offset = src & PAGE_MASK;
		dst_offset = dst & PAGE_MASK;
		boolean_t use_copy_window_src = FALSE;
		boolean_t use_copy_window_dst = FALSE;
		vm_size_t count = bytes;
		vm_size_t count2 = bytes;
		if (BCOPY_PHYS_SRC_IS_PHYS(flags)) {
			use_copy_window_src = !pmap_valid_address(src);
			pn_src = (ppnum_t)(src >> PAGE_SHIFT);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
			count = PAGE_SIZE - src_offset;
			wimg_bits_src = pmap_cache_attributes(pn_src);
			if ((wimg_bits_src & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
				use_copy_window_src = TRUE;
			}
#else
			if (use_copy_window_src) {
				wimg_bits_src = pmap_cache_attributes(pn_src);
				count = PAGE_SIZE - src_offset;
			}
#endif
		}
		if (BCOPY_PHYS_DST_IS_PHYS(flags)) {
			// write preflighting needed for things like dtrace which may write static read-only mappings
			use_copy_window_dst = (!pmap_valid_address(dst) || !mmu_kvtop_wpreflight(phystokv((pmap_paddr_t)dst)));
			pn_dst = (ppnum_t)(dst >> PAGE_SHIFT);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
			count2 = PAGE_SIZE - dst_offset;
			wimg_bits_dst = pmap_cache_attributes(pn_dst);
			if ((wimg_bits_dst & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
				use_copy_window_dst = TRUE;
			}
#else
			if (use_copy_window_dst) {
				wimg_bits_dst = pmap_cache_attributes(pn_dst);
				count2 = PAGE_SIZE - dst_offset;
			}
#endif
		}

		char *tmp_src;
		char *tmp_dst;

		if (use_copy_window_src || use_copy_window_dst) {
			mp_disable_preemption();
			cpu_num = cpu_number();
		}

		if (use_copy_window_src) {
			src_index = pmap_map_cpu_windows_copy(pn_src, VM_PROT_READ, wimg_bits_src);
			tmp_src = (char*)(pmap_cpu_windows_copy_addr(cpu_num, src_index) + src_offset);
		} else if (BCOPY_PHYS_SRC_IS_PHYS(flags)) {
			tmp_src = (char*)phystokv_range((pmap_paddr_t)src, &count);
		} else {
			tmp_src = (char*)src;
		}
		if (use_copy_window_dst) {
			dst_index = pmap_map_cpu_windows_copy(pn_dst, VM_PROT_READ | VM_PROT_WRITE, wimg_bits_dst);
			tmp_dst = (char*)(pmap_cpu_windows_copy_addr(cpu_num, dst_index) + dst_offset);
		} else if (BCOPY_PHYS_DST_IS_PHYS(flags)) {
			tmp_dst = (char*)phystokv_range((pmap_paddr_t)dst, &count2);
		} else {
			tmp_dst = (char*)dst;
		}

		if (count > count2) {
			count = count2;
		}
		if (count > bytes) {
			count = bytes;
		}

		if (BCOPY_PHYS_SRC_IS_USER(flags)) {
			res = copyin((user_addr_t)src, tmp_dst, count);
		} else if (BCOPY_PHYS_DST_IS_USER(flags)) {
			res = copyout(tmp_src, (user_addr_t)dst, count);
		} else {
			bcopy(tmp_src, tmp_dst, count);
		}

		if (use_copy_window_src) {
			pmap_unmap_cpu_windows_copy(src_index);
		}
		if (use_copy_window_dst) {
			pmap_unmap_cpu_windows_copy(dst_index);
		}
		if (use_copy_window_src || use_copy_window_dst) {
			mp_enable_preemption();
		}

		src += count;
		dst += count;
		bytes -= count;
	}

	return res;
}

void
bcopy_phys(addr64_t src, addr64_t dst, vm_size_t bytes)
{
	bcopy_phys_internal(src, dst, bytes, cppvPsrc | cppvPsnk);
}

void
bzero_phys_nc(addr64_t src64, vm_size_t bytes)
{
	bzero_phys(src64, bytes);
}

extern void *secure_memset(void *, int, size_t);

/* Zero bytes starting at a physical address */
void
bzero_phys(addr64_t src, vm_size_t bytes)
{
	unsigned int    wimg_bits;
	unsigned int    cpu_num = cpu_number();
	ppnum_t         pn;
	addr64_t        end __assert_only;

	assert(!__improbable(os_add_overflow(src, bytes, &end)));

	vm_offset_t offset = src & PAGE_MASK;
	while (bytes > 0) {
		vm_size_t count = bytes;

		boolean_t use_copy_window = !pmap_valid_address(src);
		pn = (ppnum_t)(src >> PAGE_SHIFT);
		wimg_bits = pmap_cache_attributes(pn);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
		count = PAGE_SIZE - offset;
		if ((wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
			use_copy_window = TRUE;
		}
#else
		if (use_copy_window) {
			count = PAGE_SIZE - offset;
		}
#endif
		unsigned int index = 0;
		char *buf;

		if (use_copy_window) {
			mp_disable_preemption();
			cpu_num = cpu_number();
			index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
			buf = (char *)(pmap_cpu_windows_copy_addr(cpu_num, index) + offset);
		} else {
			buf = (char *)phystokv_range((pmap_paddr_t)src, &count);
		}

		if (count > bytes) {
			count = bytes;
		}

		switch (wimg_bits & VM_WIMG_MASK) {
		case VM_WIMG_DEFAULT:
		case VM_WIMG_WCOMB:
		case VM_WIMG_INNERWBACK:
		case VM_WIMG_WTHRU:
			bzero(buf, count);
			break;
		default:
			/* 'dc zva' performed by bzero is not safe for device memory */
			secure_memset((void*)buf, 0, count);
		}

		if (use_copy_window) {
			pmap_unmap_cpu_windows_copy(index);
			mp_enable_preemption();
		}

		src += count;
		bytes -= count;
		offset = 0;
	}
}
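
/*
 * Example (illustrative): zeroing one physical page, e.g.
 * bzero_phys(ptoa(pn), PAGE_SIZE), takes the bzero() fast path for
 * normal cacheable (VM_WIMG_DEFAULT) memory and falls back to
 * secure_memset() for device memory, where 'dc zva' is unsafe.
 */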

/*
 * Read data from a physical address.
 */
static unsigned long long
ml_phys_read_data(pmap_paddr_t paddr, int size)
{
	unsigned int    index;
	unsigned int    wimg_bits;
	ppnum_t         pn = (ppnum_t)(paddr >> PAGE_SHIFT);
	ppnum_t         pn_end = (ppnum_t)((paddr + size - 1) >> PAGE_SHIFT);
	unsigned long long result = 0;
	vm_offset_t     copywindow_vaddr = 0;
	unsigned char   s1;
	unsigned short  s2;
	unsigned int    s4;

	if (__improbable(pn_end != pn)) {
		panic("%s: paddr 0x%llx spans a page boundary", __func__, (uint64_t)paddr);
	}

#if defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__
	if (pmap_valid_address(paddr)) {
		switch (size) {
		case 1:
			s1 = *(volatile unsigned char *)phystokv(paddr);
			result = s1;
			break;
		case 2:
			s2 = *(volatile unsigned short *)phystokv(paddr);
			result = s2;
			break;
		case 4:
			s4 = *(volatile unsigned int *)phystokv(paddr);
			result = s4;
			break;
		case 8:
			result = *(volatile unsigned long long *)phystokv(paddr);
			break;
		default:
			panic("Invalid size %d for ml_phys_read_data\n", size);
			break;
		}
		return result;
	}
#endif

	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits);
	copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);

	switch (size) {
	case 1:
		s1 = *(volatile unsigned char *)copywindow_vaddr;
		result = s1;
		break;
	case 2:
		s2 = *(volatile unsigned short *)copywindow_vaddr;
		result = s2;
		break;
	case 4:
		s4 = *(volatile unsigned int *)copywindow_vaddr;
		result = s4;
		break;
	case 8:
		result = *(volatile unsigned long long*)copywindow_vaddr;
		break;
	default:
		panic("Invalid size %d for ml_phys_read_data\n", size);
		break;
	}

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();

	return result;
}

unsigned int
ml_phys_read(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int
ml_phys_read_word(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int
ml_phys_read_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int
ml_phys_read_word_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int
ml_phys_read_half(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 2);
}

unsigned int
ml_phys_read_half_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 2);
}

unsigned int
ml_phys_read_byte(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 1);
}

unsigned int
ml_phys_read_byte_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 1);
}

unsigned long long
ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 8);
}

unsigned long long
ml_phys_read_double_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 8);
}
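
/*
 * Example (illustrative): each accessor forwards to ml_phys_read_data() with
 * an explicit width, so ml_phys_read_byte(paddr) reads 1 byte and
 * ml_phys_read_double_64(paddr64) reads a full 64-bit value. Note that the
 * *_64 variants only widen the address type, not the access size:
 * ml_phys_read_64() still performs a 4-byte read.
 */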

/*
 * Write data to a physical address.
 */
static void
ml_phys_write_data(pmap_paddr_t paddr, unsigned long long data, int size)
{
	unsigned int    index;
	unsigned int    wimg_bits;
	ppnum_t         pn = (ppnum_t)(paddr >> PAGE_SHIFT);
	ppnum_t         pn_end = (ppnum_t)((paddr + size - 1) >> PAGE_SHIFT);
	vm_offset_t     copywindow_vaddr = 0;

	if (__improbable(pn_end != pn)) {
		panic("%s: paddr 0x%llx spans a page boundary", __func__, (uint64_t)paddr);
	}

#if defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__
	if (pmap_valid_address(paddr)) {
		switch (size) {
		case 1:
			*(volatile unsigned char *)phystokv(paddr) = (unsigned char)data;
			break;
		case 2:
			*(volatile unsigned short *)phystokv(paddr) = (unsigned short)data;
			break;
		case 4:
			*(volatile unsigned int *)phystokv(paddr) = (unsigned int)data;
			break;
		case 8:
			*(volatile unsigned long long *)phystokv(paddr) = data;
			break;
		default:
			panic("Invalid size %d for ml_phys_write_data\n", size);
			break;
		}
		return;
	}
#endif

	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
	copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);

	switch (size) {
	case 1:
		*(volatile unsigned char *)(copywindow_vaddr) =
		    (unsigned char)data;
		break;
	case 2:
		*(volatile unsigned short *)(copywindow_vaddr) =
		    (unsigned short)data;
		break;
	case 4:
		*(volatile unsigned int *)(copywindow_vaddr) =
		    (unsigned int)data;
		break;
	case 8:
		*(volatile unsigned long long *)(copywindow_vaddr) =
		    (unsigned long long)data;
		break;
	default:
		panic("Invalid size %d for ml_phys_write_data\n", size);
		break;
	}

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();
}

void
ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
}

void
ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
}

void
ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
}

void
ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
}

void
ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void
ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void
ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void
ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void
ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 8);
}

void
ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 8);
}
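
/*
 * Example (illustrative): a read-modify-write of a 32-bit word can combine
 * the two helper families:
 *
 *	unsigned int v = ml_phys_read_word(paddr);
 *	ml_phys_write_word(paddr, v | 0x1);
 *
 * Both calls go through a per-CPU copy window when the physical address has
 * no usable physmap mapping.
 */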

/*
 * Set indicated bit in bit string.
 */
void
setbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] |= 1U << (bitno % INT_SIZE);
}

/*
 * Clear indicated bit in bit string.
 */
void
clrbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] &= ~(1U << (bitno % INT_SIZE));
}

/*
 * Test if indicated bit is set in bit string.
 */
int
testbit(int bitno, int *s)
{
	return s[bitno / INT_SIZE] & (1U << (bitno % INT_SIZE));
}
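
/*
 * Example (illustrative, assuming 32-bit int so INT_SIZE == 32):
 *
 *	int s[2] = { 0, 0 };
 *	setbit(35, s);            // sets bit 3 of s[1], i.e. 1U << (35 % 32)
 *	assert(testbit(35, s));
 *	clrbit(35, s);            // s[1] is 0 again
 */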

/*
 * Find first bit set in bit string.
 */
int
ffsbit(int *s)
{
	int offset;

	for (offset = 0; !*s; offset += INT_SIZE, ++s) {
		;
	}
	return offset + __builtin_ctz(*s);
}

int
ffs(unsigned int mask)
{
	if (mask == 0) {
		return 0;
	}

	/*
	 * NOTE: cannot use __builtin_ffs because it generates a call to
	 * 'ffs'
	 */
	return 1 + __builtin_ctz(mask);
}

int
ffsll(unsigned long long mask)
{
	if (mask == 0) {
		return 0;
	}

	/*
	 * NOTE: cannot use __builtin_ffsll because it generates a call to
	 * 'ffsll'
	 */
	return 1 + __builtin_ctzll(mask);
}

/*
 * Find last bit set in bit string.
 */
int
fls(unsigned int mask)
{
	if (mask == 0) {
		return 0;
	}

	return (sizeof(mask) << 3) - __builtin_clz(mask);
}

int
flsll(unsigned long long mask)
{
	if (mask == 0) {
		return 0;
	}

	return (sizeof(mask) << 3) - __builtin_clzll(mask);
}
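
/*
 * Example (illustrative): for mask == 0x10, ffs(mask) == 5 (lowest set bit,
 * 1-indexed) and fls(mask) == 5 (highest set bit, 1-indexed); both families
 * return 0 for a zero mask.
 */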

int
bcmp(const void *pa, const void *pb, size_t len)
{
	const char *a = (const char *) pa;
	const char *b = (const char *) pb;

	if (len == 0) {
		return 0;
	}

	do {
		if (*a++ != *b++) {
			break;
		}
	} while (--len);

	/*
	 * Check for the overflow case but continue to handle the non-overflow
	 * case the same way just in case someone is using the return value
	 * as more than zero/non-zero
	 */
	if ((len & 0xFFFFFFFF00000000ULL) && !(len & 0x00000000FFFFFFFFULL)) {
		return 0xFFFFFFFFL;
	} else {
		return (int)len;
	}
}
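
/*
 * Example (illustrative): if the remaining len after a mismatch had only
 * high bits set, say len == 0x100000000ULL, a plain (int)len cast would
 * truncate to 0 and falsely signal equality; the overflow check above
 * returns a non-zero value instead.
 */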

int
memcmp(const void *s1, const void *s2, size_t n)
{
	if (n != 0) {
		const unsigned char *p1 = s1, *p2 = s2;

		do {
			if (*p1++ != *p2++) {
				return *--p1 - *--p2;
			}
		} while (--n != 0);
	}
	return 0;
}

kern_return_t
copypv(addr64_t source, addr64_t sink, unsigned int size, int which)
{
	if ((which & (cppvPsrc | cppvPsnk)) == 0) { /* Make sure that only one is virtual */
		panic("%s: no more than 1 parameter may be virtual", __func__);
	}

	kern_return_t res = bcopy_phys_internal(source, sink, size, which);

#ifndef __ARM_COHERENT_IO__
	if (which & cppvFsrc) {
		flush_dcache64(source, size, ((which & cppvPsrc) == cppvPsrc));
	}

	if (which & cppvFsnk) {
		flush_dcache64(sink, size, ((which & cppvPsnk) == cppvPsnk));
	}
#endif

	return res;
}
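
/*
 * Example (illustrative): copying size bytes from a physical frame into a
 * kernel virtual buffer could use
 * copypv(paddr, (addr64_t)kvaddr, size, cppvPsrc | cppvKmap), optionally
 * adding cppvFsnk to force a destination cache flush on non-coherent
 * configurations.
 */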

#if MACH_ASSERT

extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit);

/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 */
void
machine_callstack(uintptr_t * buf, vm_size_t callstack_max)
{
	/* Captures the USER call stack */
	uint32_t i = 0;

	struct arm_saved_state *state = find_user_regs(current_thread());

	if (!state) {
		while (i < callstack_max) {
			buf[i++] = 0;
		}
	} else {
		if (is_saved_state64(state)) {
			uint64_t frame[2];
			buf[i++] = (uintptr_t)get_saved_state_pc(state);
			frame[0] = get_saved_state_fp(state);
			while (i < callstack_max && frame[0] != 0) {
				if (copyinframe(frame[0], (void*) frame, TRUE)) {
					break;
				}
				buf[i++] = (uintptr_t)frame[1];
			}
		} else {
			uint32_t frame[2];
			buf[i++] = (uintptr_t)get_saved_state_pc(state);
			frame[0] = (uint32_t)get_saved_state_fp(state);
			while (i < callstack_max && frame[0] != 0) {
				if (copyinframe(frame[0], (void*) frame, FALSE)) {
					break;
				}
				buf[i++] = (uintptr_t)frame[1];
			}
		}

		while (i < callstack_max) {
			buf[i++] = 0;
		}
	}
}

#endif /* MACH_ASSERT */

boolean_t
ml_probe_read(
	__unused vm_offset_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read() unimplemented");
	return 1;
}

boolean_t
ml_probe_read_64(
	__unused addr64_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read_64() unimplemented");
	return 1;
}

void
ml_thread_policy(
	__unused thread_t thread,
	__unused unsigned policy_id,
	__unused unsigned policy_info)
{
	// <rdar://problem/7141284>: Reduce print noise
	// kprintf("ml_thread_policy() unimplemented\n");
}

void
panic_unimplemented(void)
{
	panic("Not yet implemented.");
}

/* ARM64_TODO <rdar://problem/9198953> */
void abort(void) __dead2;

#if !MACH_KDP
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
#pragma unused(fn,arg)
}
#endif