/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_assert.h>
#include <mach/vm_types.h>
#include <mach/mach_time.h>
#include <kern/timer.h>
#include <kern/clock.h>
#include <kern/machine.h>
#include <mach/machine.h>
#include <mach/machine/vm_param.h>
#include <mach_kdp.h>
#include <kdp/kdp_udp.h>
#if !MACH_KDP
#include <kdp/kdp_callout.h>
#endif /* !MACH_KDP */
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/caches_internal.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <arm/misc_protos.h>

#include <sys/errno.h>
/* Number of bits in an int (BYTE_SIZE is defined elsewhere in the kernel headers). */
#define INT_SIZE (BYTE_SIZE * sizeof (int))

/* Decode the cppv* flag bits used by bcopy_phys_internal()/copypv():
 * an address is physical when its cppvP* bit is set; it is a user virtual
 * address when neither its cppvP* bit nor cppvKmap (kernel map) is set. */
#define BCOPY_PHYS_SRC_IS_PHYS(flags) (((flags) & cppvPsrc) != 0)
#define BCOPY_PHYS_DST_IS_PHYS(flags) (((flags) & cppvPsnk) != 0)
#define BCOPY_PHYS_SRC_IS_USER(flags) (((flags) & (cppvPsrc | cppvKmap)) == 0)
#define BCOPY_PHYS_DST_IS_USER(flags) (((flags) & (cppvPsnk | cppvKmap)) == 0)
62 bcopy_phys_internal(addr64_t src
, addr64_t dst
, vm_size_t bytes
, int flags
)
64 unsigned int src_index
;
65 unsigned int dst_index
;
66 vm_offset_t src_offset
;
67 vm_offset_t dst_offset
;
68 unsigned int wimg_bits_src
, wimg_bits_dst
;
69 unsigned int cpu_num
= 0;
72 addr64_t end __assert_only
;
73 kern_return_t res
= KERN_SUCCESS
;
75 assert(!__improbable(os_add_overflow(src
, bytes
, &end
)));
76 assert(!__improbable(os_add_overflow(dst
, bytes
, &end
)));
78 while ((bytes
> 0) && (res
== KERN_SUCCESS
)) {
79 src_offset
= src
& PAGE_MASK
;
80 dst_offset
= dst
& PAGE_MASK
;
81 boolean_t use_copy_window_src
= FALSE
;
82 boolean_t use_copy_window_dst
= FALSE
;
83 vm_size_t count
= bytes
;
84 vm_size_t count2
= bytes
;
85 if (BCOPY_PHYS_SRC_IS_PHYS(flags
)) {
86 use_copy_window_src
= !pmap_valid_address(src
);
87 pn_src
= (ppnum_t
)(src
>> PAGE_SHIFT
);
88 #if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
89 count
= PAGE_SIZE
- src_offset
;
90 wimg_bits_src
= pmap_cache_attributes(pn_src
);
91 if ((wimg_bits_src
& VM_WIMG_MASK
) != VM_WIMG_DEFAULT
) {
92 use_copy_window_src
= TRUE
;
95 if (use_copy_window_src
) {
96 wimg_bits_src
= pmap_cache_attributes(pn_src
);
97 count
= PAGE_SIZE
- src_offset
;
101 if (BCOPY_PHYS_DST_IS_PHYS(flags
)) {
102 // write preflighting needed for things like dtrace which may write static read-only mappings
103 use_copy_window_dst
= (!pmap_valid_address(dst
) || !mmu_kvtop_wpreflight(phystokv((pmap_paddr_t
)dst
)));
104 pn_dst
= (ppnum_t
)(dst
>> PAGE_SHIFT
);
105 #if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
106 count2
= PAGE_SIZE
- dst_offset
;
107 wimg_bits_dst
= pmap_cache_attributes(pn_dst
);
108 if ((wimg_bits_dst
& VM_WIMG_MASK
) != VM_WIMG_DEFAULT
) {
109 use_copy_window_dst
= TRUE
;
112 if (use_copy_window_dst
) {
113 wimg_bits_dst
= pmap_cache_attributes(pn_dst
);
114 count2
= PAGE_SIZE
- dst_offset
;
122 if (use_copy_window_src
|| use_copy_window_dst
) {
123 mp_disable_preemption();
124 cpu_num
= cpu_number();
127 if (use_copy_window_src
) {
128 src_index
= pmap_map_cpu_windows_copy(pn_src
, VM_PROT_READ
, wimg_bits_src
);
129 tmp_src
= (char*)(pmap_cpu_windows_copy_addr(cpu_num
, src_index
) + src_offset
);
130 } else if (BCOPY_PHYS_SRC_IS_PHYS(flags
)) {
131 tmp_src
= (char*)phystokv_range((pmap_paddr_t
)src
, &count
);
133 tmp_src
= (char*)src
;
135 if (use_copy_window_dst
) {
136 dst_index
= pmap_map_cpu_windows_copy(pn_dst
, VM_PROT_READ
| VM_PROT_WRITE
, wimg_bits_dst
);
137 tmp_dst
= (char*)(pmap_cpu_windows_copy_addr(cpu_num
, dst_index
) + dst_offset
);
138 } else if (BCOPY_PHYS_DST_IS_PHYS(flags
)) {
139 tmp_dst
= (char*)phystokv_range((pmap_paddr_t
)dst
, &count2
);
141 tmp_dst
= (char*)dst
;
144 if (count
> count2
) {
151 if (BCOPY_PHYS_SRC_IS_USER(flags
)) {
152 res
= copyin((user_addr_t
)src
, tmp_dst
, count
);
153 } else if (BCOPY_PHYS_DST_IS_USER(flags
)) {
154 res
= copyout(tmp_src
, (user_addr_t
)dst
, count
);
156 bcopy(tmp_src
, tmp_dst
, count
);
159 if (use_copy_window_src
) {
160 pmap_unmap_cpu_windows_copy(src_index
);
162 if (use_copy_window_dst
) {
163 pmap_unmap_cpu_windows_copy(dst_index
);
165 if (use_copy_window_src
|| use_copy_window_dst
) {
166 mp_enable_preemption();
177 bcopy_phys(addr64_t src
, addr64_t dst
, vm_size_t bytes
)
179 bcopy_phys_internal(src
, dst
, bytes
, cppvPsrc
| cppvPsnk
);
183 bzero_phys_nc(addr64_t src64
, vm_size_t bytes
)
185 bzero_phys(src64
, bytes
);
188 /* Zero bytes starting at a physical address */
190 bzero_phys(addr64_t src
, vm_size_t bytes
)
192 unsigned int wimg_bits
;
193 unsigned int cpu_num
= cpu_number();
195 addr64_t end __assert_only
;
197 assert(!__improbable(os_add_overflow(src
, bytes
, &end
)));
199 vm_offset_t offset
= src
& PAGE_MASK
;
201 vm_size_t count
= bytes
;
203 boolean_t use_copy_window
= !pmap_valid_address(src
);
204 pn
= (ppnum_t
)(src
>> PAGE_SHIFT
);
205 #if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
206 count
= PAGE_SIZE
- offset
;
207 wimg_bits
= pmap_cache_attributes(pn
);
208 if ((wimg_bits
& VM_WIMG_MASK
) != VM_WIMG_DEFAULT
) {
209 use_copy_window
= TRUE
;
212 if (use_copy_window
) {
213 wimg_bits
= pmap_cache_attributes(pn
);
214 count
= PAGE_SIZE
- offset
;
219 if (use_copy_window
) {
220 mp_disable_preemption();
221 cpu_num
= cpu_number();
222 index
= pmap_map_cpu_windows_copy(pn
, VM_PROT_READ
| VM_PROT_WRITE
, wimg_bits
);
223 buf
= (char *)(pmap_cpu_windows_copy_addr(cpu_num
, index
) + offset
);
225 buf
= (char *)phystokv_range((pmap_paddr_t
)src
, &count
);
234 if (use_copy_window
) {
235 pmap_unmap_cpu_windows_copy(index
);
236 mp_enable_preemption();
246 * Read data from a physical address.
250 static unsigned long long
251 ml_phys_read_data(pmap_paddr_t paddr
, int size
)
254 unsigned int wimg_bits
;
255 ppnum_t pn
= (ppnum_t
)(paddr
>> PAGE_SHIFT
);
256 ppnum_t pn_end
= (ppnum_t
)((paddr
+ size
- 1) >> PAGE_SHIFT
);
257 unsigned long long result
= 0;
258 vm_offset_t copywindow_vaddr
= 0;
263 if (__improbable(pn_end
!= pn
)) {
264 panic("%s: paddr 0x%llx spans a page boundary", __func__
, (uint64_t)paddr
);
267 #if defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__
268 if (pmap_valid_address(paddr
)) {
271 s1
= *(volatile unsigned char *)phystokv(paddr
);
275 s2
= *(volatile unsigned short *)phystokv(paddr
);
279 s4
= *(volatile unsigned int *)phystokv(paddr
);
283 result
= *(volatile unsigned long long *)phystokv(paddr
);
286 panic("Invalid size %d for ml_phys_read_data\n", size
);
293 mp_disable_preemption();
294 wimg_bits
= pmap_cache_attributes(pn
);
295 index
= pmap_map_cpu_windows_copy(pn
, VM_PROT_READ
, wimg_bits
);
296 copywindow_vaddr
= pmap_cpu_windows_copy_addr(cpu_number(), index
) | ((uint32_t)paddr
& PAGE_MASK
);
300 s1
= *(volatile unsigned char *)copywindow_vaddr
;
304 s2
= *(volatile unsigned short *)copywindow_vaddr
;
308 s4
= *(volatile unsigned int *)copywindow_vaddr
;
312 result
= *(volatile unsigned long long*)copywindow_vaddr
;
315 panic("Invalid size %d for ml_phys_read_data\n", size
);
319 pmap_unmap_cpu_windows_copy(index
);
320 mp_enable_preemption();
326 ml_phys_read( vm_offset_t paddr
)
328 return (unsigned int)ml_phys_read_data((pmap_paddr_t
)paddr
, 4);
332 ml_phys_read_word(vm_offset_t paddr
)
334 return (unsigned int)ml_phys_read_data((pmap_paddr_t
)paddr
, 4);
338 ml_phys_read_64(addr64_t paddr64
)
340 return (unsigned int)ml_phys_read_data((pmap_paddr_t
)paddr64
, 4);
344 ml_phys_read_word_64(addr64_t paddr64
)
346 return (unsigned int)ml_phys_read_data((pmap_paddr_t
)paddr64
, 4);
350 ml_phys_read_half(vm_offset_t paddr
)
352 return (unsigned int)ml_phys_read_data((pmap_paddr_t
)paddr
, 2);
356 ml_phys_read_half_64(addr64_t paddr64
)
358 return (unsigned int)ml_phys_read_data((pmap_paddr_t
)paddr64
, 2);
362 ml_phys_read_byte(vm_offset_t paddr
)
364 return (unsigned int)ml_phys_read_data((pmap_paddr_t
)paddr
, 1);
368 ml_phys_read_byte_64(addr64_t paddr64
)
370 return (unsigned int)ml_phys_read_data((pmap_paddr_t
)paddr64
, 1);
374 ml_phys_read_double(vm_offset_t paddr
)
376 return ml_phys_read_data((pmap_paddr_t
)paddr
, 8);
380 ml_phys_read_double_64(addr64_t paddr64
)
382 return ml_phys_read_data((pmap_paddr_t
)paddr64
, 8);
388 * Write data to a physical address.
392 ml_phys_write_data(pmap_paddr_t paddr
, unsigned long long data
, int size
)
395 unsigned int wimg_bits
;
396 ppnum_t pn
= (ppnum_t
)(paddr
>> PAGE_SHIFT
);
397 ppnum_t pn_end
= (ppnum_t
)((paddr
+ size
- 1) >> PAGE_SHIFT
);
398 vm_offset_t copywindow_vaddr
= 0;
400 if (__improbable(pn_end
!= pn
)) {
401 panic("%s: paddr 0x%llx spans a page boundary", __func__
, (uint64_t)paddr
);
404 #if defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__
405 if (pmap_valid_address(paddr
)) {
408 *(volatile unsigned char *)phystokv(paddr
) = (unsigned char)data
;
411 *(volatile unsigned short *)phystokv(paddr
) = (unsigned short)data
;
414 *(volatile unsigned int *)phystokv(paddr
) = (unsigned int)data
;
417 *(volatile unsigned long long *)phystokv(paddr
) = data
;
420 panic("Invalid size %d for ml_phys_write_data\n", size
);
425 mp_disable_preemption();
426 wimg_bits
= pmap_cache_attributes(pn
);
427 index
= pmap_map_cpu_windows_copy(pn
, VM_PROT_READ
| VM_PROT_WRITE
, wimg_bits
);
428 copywindow_vaddr
= pmap_cpu_windows_copy_addr(cpu_number(), index
) | ((uint32_t)paddr
& PAGE_MASK
);
432 *(volatile unsigned char *)(copywindow_vaddr
) =
436 *(volatile unsigned short *)(copywindow_vaddr
) =
437 (unsigned short)data
;
440 *(volatile unsigned int *)(copywindow_vaddr
) =
444 *(volatile unsigned long long *)(copywindow_vaddr
) =
445 (unsigned long long)data
;
448 panic("Invalid size %d for ml_phys_write_data\n", size
);
452 pmap_unmap_cpu_windows_copy(index
);
453 mp_enable_preemption();
457 ml_phys_write_byte(vm_offset_t paddr
, unsigned int data
)
459 ml_phys_write_data((pmap_paddr_t
)paddr
, data
, 1);
463 ml_phys_write_byte_64(addr64_t paddr64
, unsigned int data
)
465 ml_phys_write_data((pmap_paddr_t
)paddr64
, data
, 1);
469 ml_phys_write_half(vm_offset_t paddr
, unsigned int data
)
471 ml_phys_write_data((pmap_paddr_t
)paddr
, data
, 2);
475 ml_phys_write_half_64(addr64_t paddr64
, unsigned int data
)
477 ml_phys_write_data((pmap_paddr_t
)paddr64
, data
, 2);
481 ml_phys_write(vm_offset_t paddr
, unsigned int data
)
483 ml_phys_write_data((pmap_paddr_t
)paddr
, data
, 4);
487 ml_phys_write_64(addr64_t paddr64
, unsigned int data
)
489 ml_phys_write_data((pmap_paddr_t
)paddr64
, data
, 4);
493 ml_phys_write_word(vm_offset_t paddr
, unsigned int data
)
495 ml_phys_write_data((pmap_paddr_t
)paddr
, data
, 4);
499 ml_phys_write_word_64(addr64_t paddr64
, unsigned int data
)
501 ml_phys_write_data((pmap_paddr_t
)paddr64
, data
, 4);
505 ml_phys_write_double(vm_offset_t paddr
, unsigned long long data
)
507 ml_phys_write_data((pmap_paddr_t
)paddr
, data
, 8);
511 ml_phys_write_double_64(addr64_t paddr64
, unsigned long long data
)
513 ml_phys_write_data((pmap_paddr_t
)paddr64
, data
, 8);
518 * Set indicated bit in bit string.
521 setbit(int bitno
, int *s
)
523 s
[bitno
/ INT_SIZE
] |= 1U << (bitno
% INT_SIZE
);
527 * Clear indicated bit in bit string.
530 clrbit(int bitno
, int *s
)
532 s
[bitno
/ INT_SIZE
] &= ~(1U << (bitno
% INT_SIZE
));
536 * Test if indicated bit is set in bit string.
539 testbit(int bitno
, int *s
)
541 return s
[bitno
/ INT_SIZE
] & (1U << (bitno
% INT_SIZE
));
545 * Find first bit set in bit string.
552 for (offset
= 0; !*s
; offset
+= INT_SIZE
, ++s
) {
555 return offset
+ __builtin_ctz(*s
);
/*
 * Find first (least-significant) bit set; returns 1-based index, 0 when
 * no bit is set.
 */
int
ffs(unsigned int mask)
{
	if (mask == 0) {
		return 0;
	}

	/*
	 * NOTE: cannot use __builtin_ffs because it generates a call to
	 * 'ffs'
	 */
	return 1 + __builtin_ctz(mask);
}
/*
 * 64-bit variant of ffs(): 1-based index of the least-significant set
 * bit, 0 when no bit is set.
 */
int
ffsll(unsigned long long mask)
{
	if (mask == 0) {
		return 0;
	}

	/*
	 * NOTE: cannot use __builtin_ffsll because it generates a call to
	 * 'ffsll'
	 */
	return 1 + __builtin_ctzll(mask);
}
/*
 * Find last (most-significant) bit set; returns 1-based index, 0 when no
 * bit is set.
 */
int
fls(unsigned int mask)
{
	if (mask == 0) {
		return 0;
	}

	/*
	 * NOTE: cannot use __builtin_fls because it generates a call to
	 * 'fls'
	 */
	return (sizeof(mask) << 3) - __builtin_clz(mask);
}
/*
 * 64-bit variant of fls(): 1-based index of the most-significant set
 * bit, 0 when no bit is set.
 */
int
flsll(unsigned long long mask)
{
	if (mask == 0) {
		return 0;
	}

	/*
	 * NOTE: cannot use __builtin_flsll because it generates a call to
	 * 'flsll'
	 */
	return (sizeof(mask) << 3) - __builtin_clzll(mask);
}
/*
 * Compare two byte ranges; returns 0 when equal, non-zero otherwise
 * (the value is the number of bytes left uncompared, clamped to fit an
 * int — callers should treat it only as zero/non-zero).
 */
int
bcmp(
	const void    *pa,
	const void    *pb,
	size_t         len)
{
	const char     *a = (const char *) pa;
	const char     *b = (const char *) pb;

	if (len == 0) {
		return 0;
	}

	do {
		if (*a++ != *b++) {
			break;
		}
	} while (--len);

	/*
	 * Check for the overflow case but continue to handle the non-overflow
	 * case the same way just in case someone is using the return value
	 * as more than zero/non-zero
	 */
	if ((len & 0xFFFFFFFF00000000ULL) && !(len & 0x00000000FFFFFFFFULL)) {
		return 0xFFFFFFFFL;
	} else {
		return (int)len;
	}
}
/*
 * Standard memcmp: lexicographic byte comparison of s1 and s2 over n
 * bytes; <0, 0, >0 for s1 less than, equal to, greater than s2.
 */
int
memcmp(const void *s1, const void *s2, size_t n)
{
	if (n != 0) {
		const unsigned char *p1 = s1, *p2 = s2;

		do {
			if (*p1++ != *p2++) {
				/* Back up to the differing byte pair. */
				return *--p1 - *--p2;
			}
		} while (--n != 0);
	}
	return 0;
}
658 copypv(addr64_t source
, addr64_t sink
, unsigned int size
, int which
)
660 if ((which
& (cppvPsrc
| cppvPsnk
)) == 0) { /* Make sure that only one is virtual */
661 panic("%s: no more than 1 parameter may be virtual", __func__
);
664 kern_return_t res
= bcopy_phys_internal(source
, sink
, size
, which
);
666 #ifndef __ARM_COHERENT_IO__
667 if (which
& cppvFsrc
) {
668 flush_dcache64(source
, size
, ((which
& cppvPsrc
) == cppvPsrc
));
671 if (which
& cppvFsnk
) {
672 flush_dcache64(sink
, size
, ((which
& cppvPsnk
) == cppvPsnk
));
681 extern int copyinframe(vm_address_t fp
, char *frame
, boolean_t is64bit
);
684 * Machine-dependent routine to fill in an array with up to callstack_max
685 * levels of return pc information.
690 vm_size_t callstack_max
)
692 /* Captures the USER call stack */
695 struct arm_saved_state
*state
= find_user_regs(current_thread());
698 while (i
< callstack_max
) {
702 if (is_saved_state64(state
)) {
704 buf
[i
++] = (uintptr_t)get_saved_state_pc(state
);
705 frame
[0] = get_saved_state_fp(state
);
706 while (i
< callstack_max
&& frame
[0] != 0) {
707 if (copyinframe(frame
[0], (void*) frame
, TRUE
)) {
710 buf
[i
++] = (uintptr_t)frame
[1];
714 buf
[i
++] = (uintptr_t)get_saved_state_pc(state
);
715 frame
[0] = (uint32_t)get_saved_state_fp(state
);
716 while (i
< callstack_max
&& frame
[0] != 0) {
717 if (copyinframe(frame
[0], (void*) frame
, FALSE
)) {
720 buf
[i
++] = (uintptr_t)frame
[1];
724 while (i
< callstack_max
) {
730 #endif /* MACH_ASSERT */
741 __unused vm_offset_t paddr
,
742 __unused
unsigned int *val
)
744 panic("ml_probe_read() unimplemented");
750 __unused addr64_t paddr
,
751 __unused
unsigned int *val
)
753 panic("ml_probe_read_64() unimplemented");
760 __unused thread_t thread
,
761 __unused
unsigned policy_id
,
762 __unused
unsigned policy_info
)
764 // <rdar://problem/7141284>: Reduce print noise
765 // kprintf("ml_thread_policy() unimplemented\n");
770 panic_unimplemented(void)
772 panic("Not yet implemented.");
775 /* ARM64_TODO <rdar://problem/9198953> */
776 void abort(void) __dead2
;
787 kdp_register_callout(kdp_callout_fn_t fn
, void *arg
)
789 #pragma unused(fn,arg)