/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_assert.h>
#include <mach/vm_types.h>
#include <mach/mach_time.h>
#include <kern/timer.h>
#include <kern/clock.h>
#include <kern/machine.h>
#include <mach/machine.h>
#include <mach/machine/vm_param.h>
#include <mach_kdp.h>
#include <kdp/kdp_udp.h>
#if !MACH_KDP
#include <kdp/kdp_callout.h>
#endif /* !MACH_KDP */
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/caches_internal.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <arm/misc_protos.h>

#include <sys/errno.h>

#include <libkern/section_keywords.h>
#define INT_SIZE        (BYTE_SIZE * sizeof(int))

#define BCOPY_PHYS_SRC_IS_PHYS(flags) (((flags) & cppvPsrc) != 0)
#define BCOPY_PHYS_DST_IS_PHYS(flags) (((flags) & cppvPsnk) != 0)
#define BCOPY_PHYS_SRC_IS_USER(flags) (((flags) & (cppvPsrc | cppvKmap)) == 0)
#define BCOPY_PHYS_DST_IS_USER(flags) (((flags) & (cppvPsnk | cppvKmap)) == 0)
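
/*
 * Flag semantics, as encoded by the macros above: cppvPsrc / cppvPsnk mark
 * the source / sink address as physical, and an address that is neither
 * physical nor explicitly kernel-mapped (cppvKmap) is treated as a user
 * virtual address, to be reached via copyin()/copyout().
 */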

static kern_return_t
bcopy_phys_internal(addr64_t src, addr64_t dst, vm_size_t bytes, int flags)
{
	unsigned int    src_index;
	unsigned int    dst_index;
	vm_offset_t     src_offset;
	vm_offset_t     dst_offset;
	unsigned int    wimg_bits_src, wimg_bits_dst;
	unsigned int    cpu_num = 0;
	ppnum_t         pn_src;
	ppnum_t         pn_dst;
	addr64_t        end __assert_only;
	kern_return_t   res = KERN_SUCCESS;

	if (!BCOPY_PHYS_SRC_IS_USER(flags)) {
		assert(!__improbable(os_add_overflow(src, bytes, &end)));
	}
	if (!BCOPY_PHYS_DST_IS_USER(flags)) {
		assert(!__improbable(os_add_overflow(dst, bytes, &end)));
	}

	while ((bytes > 0) && (res == KERN_SUCCESS)) {
		src_offset = src & PAGE_MASK;
		dst_offset = dst & PAGE_MASK;
		boolean_t use_copy_window_src = FALSE;
		boolean_t use_copy_window_dst = FALSE;
		vm_size_t count = bytes;
		vm_size_t count2 = bytes;

		if (BCOPY_PHYS_SRC_IS_PHYS(flags)) {
			use_copy_window_src = !pmap_valid_address(src);
			pn_src = (ppnum_t)(src >> PAGE_SHIFT);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
			count = PAGE_SIZE - src_offset;
			wimg_bits_src = pmap_cache_attributes(pn_src);
			if ((wimg_bits_src & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
				use_copy_window_src = TRUE;
			}
#else
			if (use_copy_window_src) {
				wimg_bits_src = pmap_cache_attributes(pn_src);
				count = PAGE_SIZE - src_offset;
			}
#endif
		}
		if (BCOPY_PHYS_DST_IS_PHYS(flags)) {
			// write preflighting needed for things like dtrace which may write static read-only mappings
			use_copy_window_dst = (!pmap_valid_address(dst) || !mmu_kvtop_wpreflight(phystokv((pmap_paddr_t)dst)));
			pn_dst = (ppnum_t)(dst >> PAGE_SHIFT);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
			count2 = PAGE_SIZE - dst_offset;
			wimg_bits_dst = pmap_cache_attributes(pn_dst);
			if ((wimg_bits_dst & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
				use_copy_window_dst = TRUE;
			}
#else
			if (use_copy_window_dst) {
				wimg_bits_dst = pmap_cache_attributes(pn_dst);
				count2 = PAGE_SIZE - dst_offset;
			}
#endif
		}

		char *tmp_src;
		char *tmp_dst;

		if (use_copy_window_src || use_copy_window_dst) {
			mp_disable_preemption();
			cpu_num = cpu_number();
		}

		if (use_copy_window_src) {
			src_index = pmap_map_cpu_windows_copy(pn_src, VM_PROT_READ, wimg_bits_src);
			tmp_src = (char*)(pmap_cpu_windows_copy_addr(cpu_num, src_index) + src_offset);
		} else if (BCOPY_PHYS_SRC_IS_PHYS(flags)) {
			tmp_src = (char*)phystokv_range((pmap_paddr_t)src, &count);
		} else {
			tmp_src = (char*)src;
		}
		if (use_copy_window_dst) {
			dst_index = pmap_map_cpu_windows_copy(pn_dst, VM_PROT_READ | VM_PROT_WRITE, wimg_bits_dst);
			tmp_dst = (char*)(pmap_cpu_windows_copy_addr(cpu_num, dst_index) + dst_offset);
		} else if (BCOPY_PHYS_DST_IS_PHYS(flags)) {
			tmp_dst = (char*)phystokv_range((pmap_paddr_t)dst, &count2);
		} else {
			tmp_dst = (char*)dst;
		}

		/* Copy only up to the end of the shorter of the two mappings. */
		if (count > count2) {
			count = count2;
		}
		if (count > bytes) {
			count = bytes;
		}

		if (BCOPY_PHYS_SRC_IS_USER(flags)) {
			res = copyin((user_addr_t)src, tmp_dst, count);
		} else if (BCOPY_PHYS_DST_IS_USER(flags)) {
			res = copyout(tmp_src, (user_addr_t)dst, count);
		} else {
			bcopy(tmp_src, tmp_dst, count);
		}

		if (use_copy_window_src) {
			pmap_unmap_cpu_windows_copy(src_index);
		}
		if (use_copy_window_dst) {
			pmap_unmap_cpu_windows_copy(dst_index);
		}
		if (use_copy_window_src || use_copy_window_dst) {
			mp_enable_preemption();
		}

		src += count;
		dst += count;
		bytes -= count;
	}

	return res;
}

void
bcopy_phys(addr64_t src, addr64_t dst, vm_size_t bytes)
{
	bcopy_phys_internal(src, dst, bytes, cppvPsrc | cppvPsnk);
}
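
/*
 * Illustrative use (hypothetical addresses): copying one page between two
 * physical frames. Both arguments are treated as physical because
 * bcopy_phys passes cppvPsrc | cppvPsnk:
 *
 *     bcopy_phys(src_pa, dst_pa, PAGE_SIZE);
 */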

void
bzero_phys_nc(addr64_t src64, vm_size_t bytes)
{
	bzero_phys(src64, bytes);
}

extern void *secure_memset(void *, int, size_t);

/* Zero bytes starting at a physical address */
void
bzero_phys(addr64_t src, vm_size_t bytes)
{
	unsigned int    wimg_bits;
	unsigned int    cpu_num = cpu_number();
	ppnum_t         pn;
	addr64_t        end __assert_only;

	assert(!__improbable(os_add_overflow(src, bytes, &end)));

	vm_offset_t offset = src & PAGE_MASK;
	while (bytes > 0) {
		vm_size_t count = bytes;

		boolean_t use_copy_window = !pmap_valid_address(src);
		pn = (ppnum_t)(src >> PAGE_SHIFT);
		wimg_bits = pmap_cache_attributes(pn);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
		count = PAGE_SIZE - offset;
		if ((wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
			use_copy_window = TRUE;
		}
#else
		if (use_copy_window) {
			count = PAGE_SIZE - offset;
		}
#endif
		char *buf;
		unsigned int index;
		if (use_copy_window) {
			mp_disable_preemption();
			cpu_num = cpu_number();
			index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
			buf = (char *)(pmap_cpu_windows_copy_addr(cpu_num, index) + offset);
		} else {
			buf = (char *)phystokv_range((pmap_paddr_t)src, &count);
		}

		if (count > bytes) {
			count = bytes;
		}

		switch (wimg_bits & VM_WIMG_MASK) {
		case VM_WIMG_DEFAULT:
		case VM_WIMG_WCOMB:
		case VM_WIMG_INNERWBACK:
		case VM_WIMG_WTHRU:
			bzero(buf, count);
			break;
		default:
			/* 'dc zva' performed by bzero is not safe for device memory */
			secure_memset((void*)buf, 0, count);
		}

		if (use_copy_window) {
			pmap_unmap_cpu_windows_copy(index);
			mp_enable_preemption();
		}

		src += count;
		bytes -= count;
		offset = 0;
	}
}
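
/*
 * Illustrative use (hypothetical page number pn): zero a newly allocated
 * physical frame before exposing it:
 *
 *     bzero_phys(ptoa((pmap_paddr_t)pn), PAGE_SIZE);
 */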

/*
 * Read data from a physical address.
 */

static unsigned long long
ml_phys_read_data(pmap_paddr_t paddr, int size)
{
	unsigned int    index;
	unsigned int    wimg_bits;
	ppnum_t         pn = (ppnum_t)(paddr >> PAGE_SHIFT);
	ppnum_t         pn_end = (ppnum_t)((paddr + size - 1) >> PAGE_SHIFT);
	unsigned long long result = 0;
	vm_offset_t     copywindow_vaddr = 0;
	unsigned char   s1;
	unsigned short  s2;
	unsigned int    s4;

	if (__improbable(pn_end != pn)) {
		panic("%s: paddr 0x%llx spans a page boundary", __func__, (uint64_t)paddr);
	}

#if defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__
	if (pmap_valid_address(paddr)) {
		switch (size) {
		case 1:
			s1 = *(volatile unsigned char *)phystokv(paddr);
			result = s1;
			break;
		case 2:
			s2 = *(volatile unsigned short *)phystokv(paddr);
			result = s2;
			break;
		case 4:
			s4 = *(volatile unsigned int *)phystokv(paddr);
			result = s4;
			break;
		case 8:
			result = *(volatile unsigned long long *)phystokv(paddr);
			break;
		default:
			panic("Invalid size %d for ml_phys_read_data\n", size);
			break;
		}
		return result;
	}
#endif

	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits);
	copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);

	switch (size) {
	case 1:
		s1 = *(volatile unsigned char *)copywindow_vaddr;
		result = s1;
		break;
	case 2:
		s2 = *(volatile unsigned short *)copywindow_vaddr;
		result = s2;
		break;
	case 4:
		s4 = *(volatile unsigned int *)copywindow_vaddr;
		result = s4;
		break;
	case 8:
		result = *(volatile unsigned long long*)copywindow_vaddr;
		break;
	default:
		panic("Invalid size %d for ml_phys_read_data\n", size);
		break;
	}

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();

	return result;
}
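
/*
 * Width-specific read wrappers. Note that the *_64 suffix refers to the
 * width of the physical address argument (addr64_t), not to the access
 * width: ml_phys_read_64 still reads 4 bytes, and only the *_double
 * variants perform 8-byte reads.
 */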

unsigned int
ml_phys_read(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int
ml_phys_read_word(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int
ml_phys_read_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int
ml_phys_read_word_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int
ml_phys_read_half(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 2);
}

unsigned int
ml_phys_read_half_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 2);
}

unsigned int
ml_phys_read_byte(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 1);
}

unsigned int
ml_phys_read_byte_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 1);
}

unsigned long long
ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 8);
}

unsigned long long
ml_phys_read_double_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 8);
}

/*
 * Write data to a physical address.
 */

static void
ml_phys_write_data(pmap_paddr_t paddr, unsigned long long data, int size)
{
	unsigned int    index;
	unsigned int    wimg_bits;
	ppnum_t         pn = (ppnum_t)(paddr >> PAGE_SHIFT);
	ppnum_t         pn_end = (ppnum_t)((paddr + size - 1) >> PAGE_SHIFT);
	vm_offset_t     copywindow_vaddr = 0;

	if (__improbable(pn_end != pn)) {
		panic("%s: paddr 0x%llx spans a page boundary", __func__, (uint64_t)paddr);
	}

#if defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__
	if (pmap_valid_address(paddr)) {
		switch (size) {
		case 1:
			*(volatile unsigned char *)phystokv(paddr) = (unsigned char)data;
			return;
		case 2:
			*(volatile unsigned short *)phystokv(paddr) = (unsigned short)data;
			return;
		case 4:
			*(volatile unsigned int *)phystokv(paddr) = (unsigned int)data;
			return;
		case 8:
			*(volatile unsigned long long *)phystokv(paddr) = data;
			return;
		default:
			panic("Invalid size %d for ml_phys_write_data\n", size);
		}
	}
#endif

	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
	copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);

	switch (size) {
	case 1:
		*(volatile unsigned char *)(copywindow_vaddr) =
		    (unsigned char)data;
		break;
	case 2:
		*(volatile unsigned short *)(copywindow_vaddr) =
		    (unsigned short)data;
		break;
	case 4:
		*(volatile unsigned int *)(copywindow_vaddr) =
		    (unsigned int)data;
		break;
	case 8:
		*(volatile unsigned long long *)(copywindow_vaddr) =
		    (unsigned long long)data;
		break;
	default:
		panic("Invalid size %d for ml_phys_write_data\n", size);
		break;
	}

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();
}

void
ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
}

void
ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
}

void
ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
}

void
ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
}

void
ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void
ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void
ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void
ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void
ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 8);
}

void
ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 8);
}
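
/*
 * Illustrative use (hypothetical register address): a read-modify-write of
 * a 32-bit value by physical address:
 *
 *     unsigned int v = ml_phys_read(reg_pa);
 *     ml_phys_write(reg_pa, v | 0x1);
 *
 * Each access maps at most one page at a time, so these helpers suit
 * one-off probes rather than bulk transfers (use bcopy_phys for those).
 */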

/*
 * Set indicated bit in bit string.
 */
void
setbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] |= 1U << (bitno % INT_SIZE);
}

/*
 * Clear indicated bit in bit string.
 */
void
clrbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] &= ~(1U << (bitno % INT_SIZE));
}

/*
 * Test if indicated bit is set in bit string.
 */
int
testbit(int bitno, int *s)
{
	return s[bitno / INT_SIZE] & (1U << (bitno % INT_SIZE));
}
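
/*
 * Example of the index arithmetic: with 32-bit ints, INT_SIZE is 32, so
 * bit 37 lives in word 37 / 32 = 1 at position 37 % 32 = 5, and
 * setbit(37, s) ORs s[1] with (1U << 5).
 */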

/*
 * Find first bit set in bit string.
 */
int
ffsbit(int *s)
{
	int             offset;

	for (offset = 0; !*s; offset += INT_SIZE, ++s) {
		;
	}
	return offset + __builtin_ctz(*s);
}

int
ffs(unsigned int mask)
{
	if (mask == 0) {
		return 0;
	}

	/*
	 * NOTE: cannot use __builtin_ffs because it generates a call to
	 * 'ffs'
	 */
	return 1 + __builtin_ctz(mask);
}

int
ffsll(unsigned long long mask)
{
	if (mask == 0) {
		return 0;
	}

	/*
	 * NOTE: cannot use __builtin_ffsll because it generates a call to
	 * 'ffsll'
	 */
	return 1 + __builtin_ctzll(mask);
}

/*
 * Find last bit set in bit string.
 */
int
fls(unsigned int mask)
{
	if (mask == 0) {
		return 0;
	}

	return (sizeof(mask) << 3) - __builtin_clz(mask);
}

int
flsll(unsigned long long mask)
{
	if (mask == 0) {
		return 0;
	}

	return (sizeof(mask) << 3) - __builtin_clzll(mask);
}
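
/*
 * Examples: ffs(0x10) == 5 (1 + ctz(0x10) = 1 + 4) and fls(0x10) == 5
 * (32 - clz(0x10) = 32 - 27). All four functions return 0 for a zero
 * argument, matching the traditional 1-based libc contract.
 */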

int
bcmp(
	const void      *pa,
	const void      *pb,
	size_t          len)
{
	const char *a = (const char *) pa;
	const char *b = (const char *) pb;

	if (len == 0) {
		return 0;
	}

	do {
		if (*a++ != *b++) {
			break;
		}
	} while (--len);

	/*
	 * Check for the overflow case but continue to handle the non-overflow
	 * case the same way just in case someone is using the return value
	 * as more than zero/non-zero
	 */
	if ((len & 0xFFFFFFFF00000000ULL) && !(len & 0x00000000FFFFFFFFULL)) {
		return 0xFFFFFFFFL;
	} else {
		return (int)len;
	}
}

MARK_AS_HIBERNATE_TEXT
int
memcmp(const void *s1, const void *s2, size_t n)
{
	if (n != 0) {
		const unsigned char *p1 = s1, *p2 = s2;

		do {
			if (*p1++ != *p2++) {
				return *--p1 - *--p2;
			}
		} while (--n != 0);
	}
	return 0;
}
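
/*
 * Example: memcmp("\x01", "\x02", 1) returns -1, the difference of the
 * first mismatching bytes compared as unsigned chars, whereas bcmp above
 * only guarantees a meaningful zero/non-zero result.
 */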

kern_return_t
copypv(addr64_t source, addr64_t sink, unsigned int size, int which)
{
	if ((which & (cppvPsrc | cppvPsnk)) == 0) { /* Make sure that only one is virtual */
		panic("%s: no more than 1 parameter may be virtual", __func__);
	}

	kern_return_t res = bcopy_phys_internal(source, sink, size, which);

#ifndef __ARM_COHERENT_IO__
	if (which & cppvFsrc) {
		flush_dcache64(source, size, ((which & cppvPsrc) == cppvPsrc));
	}

	if (which & cppvFsnk) {
		flush_dcache64(sink, size, ((which & cppvPsnk) == cppvPsnk));
	}
#endif

	return res;
}
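
/*
 * Illustrative use (hypothetical addresses): copy from a physical source
 * into a kernel-virtual buffer, flushing the sink's cache lines on
 * non-coherent configurations:
 *
 *     copypv(src_pa, (addr64_t)(uintptr_t)dst_kva, n,
 *         cppvPsrc | cppvKmap | cppvFsnk);
 */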

#if MACH_ASSERT

extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit);

/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 */
void
machine_callstack(
	uintptr_t * buf,
	vm_size_t callstack_max)
{
	/* Captures the USER call stack */
	uint32_t i = 0;

	struct arm_saved_state *state = find_user_regs(current_thread());

	if (!state) {
		while (i < callstack_max) {
			buf[i++] = 0;
		}
	} else {
		if (is_saved_state64(state)) {
			uint64_t frame[2];
			buf[i++] = (uintptr_t)get_saved_state_pc(state);
			frame[0] = get_saved_state_fp(state);
			/* frame[0] holds the caller's saved fp, frame[1] its saved return pc. */
			while (i < callstack_max && frame[0] != 0) {
				if (copyinframe(frame[0], (void*) frame, TRUE)) {
					break;
				}
				buf[i++] = (uintptr_t)frame[1];
			}
		} else {
			uint32_t frame[2];
			buf[i++] = (uintptr_t)get_saved_state_pc(state);
			frame[0] = (uint32_t)get_saved_state_fp(state);
			while (i < callstack_max && frame[0] != 0) {
				if (copyinframe(frame[0], (void*) frame, FALSE)) {
					break;
				}
				buf[i++] = (uintptr_t)frame[1];
			}
		}

		while (i < callstack_max) {
			buf[i++] = 0;
		}
	}
}

#endif /* MACH_ASSERT */

int
ml_probe_read(
	__unused vm_offset_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read() unimplemented");
	return 1;
}

int
ml_probe_read_64(
	__unused addr64_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read_64() unimplemented");
	return 1;
}

void
ml_thread_policy(
	__unused thread_t thread,
	__unused unsigned policy_id,
	__unused unsigned policy_info)
{
	// <rdar://problem/7141284>: Reduce print noise
	// kprintf("ml_thread_policy() unimplemented\n");
}

void
panic_unimplemented(void)
{
	panic("Not yet implemented.");
}

/* ARM64_TODO <rdar://problem/9198953> */
void abort(void) __dead2;

void
abort(void)
{
	panic("Abort.");
}

#if !MACH_KDP
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
#pragma unused(fn,arg)
}
#endif

/**
 * Get a quick virtual mapping of a physical page and run a callback on that
 * page's virtual address.
 *
 * @param dst64 Physical address to access (doesn't need to be page-aligned).
 * @param bytes Number of bytes to be accessed. This cannot cross page boundaries.
 * @param func Callback function to call with the page's virtual address.
 * @param arg Argument passed directly to `func`.
 *
 * @return The return value from `func`.
 */
int
apply_func_phys(
	addr64_t dst64,
	vm_size_t bytes,
	int (*func)(void * buffer, vm_size_t bytes, void * arg),
	void * arg)
{
	/* The physical aperture is only guaranteed to work with kernel-managed addresses. */
	if (!pmap_valid_address(dst64)) {
		panic("%s address error: passed in address (%#llx) not a kernel managed address",
		    __FUNCTION__, dst64);
	}

	/* Ensure we stay within a single page */
	if (((((uint32_t)dst64 & (ARM_PGBYTES - 1)) + bytes) > ARM_PGBYTES)) {
		panic("%s alignment error: tried accessing addresses spanning more than one page %#llx %#lx",
		    __FUNCTION__, dst64, bytes);
	}

	return func((void*)phystokv(dst64), bytes, arg);
}
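
/*
 * Illustrative use (hypothetical callback): checksum a few bytes of a
 * physical page through its kernel-virtual alias:
 *
 *     static int
 *     sum_bytes(void *buffer, vm_size_t bytes, void *arg)
 *     {
 *             uint32_t *sum = arg;
 *             for (vm_size_t i = 0; i < bytes; i++) {
 *                     *sum += ((uint8_t *)buffer)[i];
 *             }
 *             return 0;
 *     }
 *
 *     uint32_t sum = 0;
 *     apply_func_phys(pa, 64, sum_bytes, &sum);
 */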