/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_assert.h>
#include <mach/vm_types.h>
#include <mach/mach_time.h>
#include <kern/timer.h>
#include <kern/clock.h>
#include <kern/machine.h>
#include <mach/machine.h>
#include <mach/machine/vm_param.h>
#include <kdp/kdp_udp.h>
#if !MACH_KDP
#include <kdp/kdp_callout.h>
#endif /* !MACH_KDP */
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/caches_internal.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <arm/misc_protos.h>

#include <sys/errno.h>

#define INT_SIZE	(BYTE_SIZE * sizeof (int))
void
bcopy_phys(addr64_t src, addr64_t dst, vm_size_t bytes)
{
	unsigned int    src_index;
	unsigned int    dst_index;
	vm_offset_t     src_offset;
	vm_offset_t     dst_offset;
	unsigned int    wimg_bits_src, wimg_bits_dst;
	unsigned int    cpu_num = 0;
	ppnum_t         pn_src = (ppnum_t)(src >> PAGE_SHIFT);
	ppnum_t         pn_dst = (ppnum_t)(dst >> PAGE_SHIFT);

#ifdef __ARM_COHERENT_IO__
	if (pmap_valid_address(src) &&
	    pmap_valid_address(dst) &&
	    (mmu_kvtop_wpreflight(phystokv((pmap_paddr_t) dst)))) {
		bcopy((char *)phystokv((pmap_paddr_t) src), (char *)phystokv((pmap_paddr_t) dst), bytes);
		return;
	}
#endif

	wimg_bits_src = pmap_cache_attributes(pn_src);
	wimg_bits_dst = pmap_cache_attributes(pn_dst);

#ifndef __ARM_COHERENT_IO__
	if (((wimg_bits_src & VM_WIMG_MASK) == VM_WIMG_DEFAULT) &&
	    ((wimg_bits_dst & VM_WIMG_MASK) == VM_WIMG_DEFAULT) &&
	    (mmu_kvtop_wpreflight(phystokv((pmap_paddr_t) dst)))) {
		/* Fast path - dst is writable and both source and destination have default attributes */
		bcopy((char *)phystokv((pmap_paddr_t) src), (char *)phystokv((pmap_paddr_t) dst), bytes);
		return;
	}
#endif

	src_offset = src & PAGE_MASK;
	dst_offset = dst & PAGE_MASK;

	if ((src_offset + bytes) > PAGE_SIZE || (dst_offset + bytes) > PAGE_SIZE)
		panic("bcopy extends beyond copy windows");

	mp_disable_preemption();
	cpu_num = cpu_number();
	src_index = pmap_map_cpu_windows_copy(pn_src, VM_PROT_READ, wimg_bits_src);
	dst_index = pmap_map_cpu_windows_copy(pn_dst, VM_PROT_READ | VM_PROT_WRITE, wimg_bits_dst);

	bcopy((char *)(pmap_cpu_windows_copy_addr(cpu_num, src_index) + src_offset),
	      (char *)(pmap_cpu_windows_copy_addr(cpu_num, dst_index) + dst_offset),
	      bytes);

	pmap_unmap_cpu_windows_copy(src_index);
	pmap_unmap_cpu_windows_copy(dst_index);
	mp_enable_preemption();
}
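
/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * of bcopy_phys(). On the copy-window path neither the source nor the
 * destination range may cross a page boundary, or the routine panics with
 * "bcopy extends beyond copy windows".
 */
#if 0
static void
bcopy_phys_example(addr64_t src_pa, addr64_t dst_pa)
{
	vm_size_t len = 64;

	/* Keep both ranges within a single page so the window path is legal. */
	if (((src_pa & PAGE_MASK) + len) > PAGE_SIZE ||
	    ((dst_pa & PAGE_MASK) + len) > PAGE_SIZE)
		return;

	bcopy_phys(src_pa, dst_pa, len);
}
#endif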
void
bzero_phys_nc(addr64_t src64, vm_size_t bytes)
{
	bzero_phys(src64, bytes);
}
/* Zero bytes starting at a physical address */
void
bzero_phys(addr64_t src, vm_size_t bytes)
{
	unsigned int    wimg_bits;
	unsigned int    cpu_num = cpu_number();
	ppnum_t         pn = (ppnum_t)(src >> PAGE_SHIFT);

#ifdef __ARM_COHERENT_IO__
	if (pmap_valid_address(src)) {
		bzero((char *)phystokv((pmap_paddr_t) src), bytes);
		return;
	}
#endif

	wimg_bits = pmap_cache_attributes(pn);

#ifndef __ARM_COHERENT_IO__
	if ((wimg_bits & VM_WIMG_MASK) == VM_WIMG_DEFAULT) {
		/* Fast path - default attributes */
		bzero((char *)phystokv((pmap_paddr_t) src), bytes);
		return;
	}
#endif

	mp_disable_preemption();
	cpu_num = cpu_number();

	while (bytes > 0) {
		vm_offset_t offset = src & PAGE_MASK;
		uint64_t count = PAGE_SIZE - offset;

		if (count > bytes)
			count = bytes;

		pn = (ppnum_t)(src >> PAGE_SHIFT);

		unsigned int index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);

		bzero((char *)(pmap_cpu_windows_copy_addr(cpu_num, index) + offset), count);

		pmap_unmap_cpu_windows_copy(index);

		src += count;
		bytes -= count;
	}

	mp_enable_preemption();
}
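
/*
 * Illustrative sketch, not part of the original file: unlike bcopy_phys(),
 * bzero_phys() accepts ranges that span pages; the copy-window path above
 * simply zeroes one page-sized chunk per iteration.
 */
#if 0
static void
bzero_phys_example(addr64_t pa)
{
	/* Zero two pages' worth of physical memory starting at pa. */
	bzero_phys(pa, 2 * PAGE_SIZE);
}
#endif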
/*
 * Read data from a physical address.
 */

static unsigned long long
ml_phys_read_data(pmap_paddr_t paddr, int size)
{
	unsigned int    index;
	unsigned int    wimg_bits;
	ppnum_t         pn = (ppnum_t)(paddr >> PAGE_SHIFT);
	unsigned long long result = 0;
	vm_offset_t     copywindow_vaddr = 0;
	unsigned char   s1;
	unsigned short  s2;
	unsigned int    s4;

#ifdef __ARM_COHERENT_IO__
	if (pmap_valid_address(paddr)) {
		switch (size) {
		case 1:
			s1 = *(volatile unsigned char *)phystokv(paddr);
			result = s1;
			break;
		case 2:
			s2 = *(volatile unsigned short *)phystokv(paddr);
			result = s2;
			break;
		case 4:
			s4 = *(volatile unsigned int *)phystokv(paddr);
			result = s4;
			break;
		case 8:
			result = *(volatile unsigned long long *)phystokv(paddr);
			break;
		default:
			panic("Invalid size %d for ml_phys_read_data\n", size);
			break;
		}
		return result;
	}
#endif

	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits);
	copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);

	switch (size) {
	case 1:
		s1 = *(volatile unsigned char *)copywindow_vaddr;
		result = s1;
		break;
	case 2:
		s2 = *(volatile unsigned short *)copywindow_vaddr;
		result = s2;
		break;
	case 4:
		s4 = *(volatile unsigned int *)copywindow_vaddr;
		result = s4;
		break;
	case 8:
		result = *(volatile unsigned long long *)copywindow_vaddr;
		break;
	default:
		panic("Invalid size %d for ml_phys_read_data\n", size);
		break;
	}

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();

	return result;
}
unsigned int ml_phys_read(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int ml_phys_read_word(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int ml_phys_read_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int ml_phys_read_word_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int ml_phys_read_half(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 2);
}

unsigned int ml_phys_read_half_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 2);
}

unsigned int ml_phys_read_byte(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 1);
}

unsigned int ml_phys_read_byte_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 1);
}

unsigned long long ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 8);
}

unsigned long long ml_phys_read_double_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 8);
}
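
/*
 * Illustrative sketch, not part of the original file: the accessors above
 * differ only in access width and address type; e.g. a 4-byte read of a
 * hypothetical device register by physical address.
 */
#if 0
static uint32_t
phys_read_example(addr64_t reg_pa)
{
	/* Routed through ml_phys_read_data() with size 4. */
	return ml_phys_read_word_64(reg_pa);
}
#endif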
/*
 * Write data to a physical address.
 */

static void
ml_phys_write_data(pmap_paddr_t paddr, unsigned long long data, int size)
{
	unsigned int    index;
	unsigned int    wimg_bits;
	ppnum_t         pn = (ppnum_t)(paddr >> PAGE_SHIFT);
	vm_offset_t     copywindow_vaddr = 0;

#ifdef __ARM_COHERENT_IO__
	if (pmap_valid_address(paddr)) {
		switch (size) {
		case 1:
			*(volatile unsigned char *)phystokv(paddr) = (unsigned char)data;
			break;
		case 2:
			*(volatile unsigned short *)phystokv(paddr) = (unsigned short)data;
			break;
		case 4:
			*(volatile unsigned int *)phystokv(paddr) = (unsigned int)data;
			break;
		case 8:
			*(volatile unsigned long long *)phystokv(paddr) = data;
			break;
		default:
			panic("Invalid size %d for ml_phys_write_data\n", size);
		}
		return;
	}
#endif

	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
	copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);

	switch (size) {
	case 1:
		*(volatile unsigned char *)(copywindow_vaddr) =
		    (unsigned char)data;
		break;
	case 2:
		*(volatile unsigned short *)(copywindow_vaddr) =
		    (unsigned short)data;
		break;
	case 4:
		*(volatile unsigned int *)(copywindow_vaddr) =
		    (unsigned int)data;
		break;
	case 8:
		*(volatile unsigned long long *)(copywindow_vaddr) =
		    (unsigned long long)data;
		break;
	default:
		panic("Invalid size %d for ml_phys_write_data\n", size);
		break;
	}

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();
}
void ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
}

void ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
}

void ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
}

void ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
}

void ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 8);
}

void ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 8);
}
/*
 * Set indicated bit in bit string.
 */
void
setbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] |= 1 << (bitno % INT_SIZE);
}

/*
 * Clear indicated bit in bit string.
 */
void
clrbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] &= ~(1 << (bitno % INT_SIZE));
}

/*
 * Test if indicated bit is set in bit string.
 */
int
testbit(int bitno, int *s)
{
	return s[bitno / INT_SIZE] & (1 << (bitno % INT_SIZE));
}
/*
 * Find first bit set in bit string.
 */
int
ffsbit(int *s)
{
	int offset;

	for (offset = 0; !*s; offset += INT_SIZE, ++s);
	return offset + __builtin_ctz(*s);
}
int
ffs(unsigned int mask)
{
	if (mask == 0)
		return 0;

	/* NOTE: cannot use __builtin_ffs because it generates a call to 'ffs' */
	return 1 + __builtin_ctz(mask);
}

int
ffsll(unsigned long long mask)
{
	if (mask == 0)
		return 0;

	/* NOTE: cannot use __builtin_ffsll because it generates a call to 'ffsll' */
	return 1 + __builtin_ctzll(mask);
}
/*
 * Find last bit set in bit string.
 */
int
fls(unsigned int mask)
{
	if (mask == 0)
		return 0;

	return (sizeof (mask) << 3) - __builtin_clz(mask);
}

int
flsll(unsigned long long mask)
{
	if (mask == 0)
		return 0;

	return (sizeof (mask) << 3) - __builtin_clzll(mask);
}
int
bcmp(
	const void *pa,
	const void *pb,
	size_t len)
{
	const char *a = (const char *) pa;
	const char *b = (const char *) pb;

	if (len == 0)
		return 0;

	do
		if (*a++ != *b++)
			break;
	while (--len);

	/*
	 * Check for the overflow case but continue to handle the non-overflow
	 * case the same way just in case someone is using the return value
	 * as more than zero/non-zero
	 */
	if ((len & 0xFFFFFFFF00000000ULL) && !(len & 0x00000000FFFFFFFFULL))
		return 0xFFFFFFFFL;
	else
		return (int)len;
}
int
memcmp(const void *s1, const void *s2, size_t n)
{
	if (n) {
		const unsigned char *p1 = s1, *p2 = s2;

		do {
			if (*p1++ != *p2++)
				return (*--p1 - *--p2);
		} while (--n);
	}
	return 0;
}
kern_return_t
copypv(addr64_t source, addr64_t sink, unsigned int size, int which)
{
	kern_return_t   retval = KERN_SUCCESS;
	void           *from, *to;
#ifndef __ARM_COHERENT_IO__
	unsigned int    from_wimg_bits, to_wimg_bits;
#endif

	from = CAST_DOWN(void *, source);
	to = CAST_DOWN(void *, sink);

	if ((which & (cppvPsrc | cppvPsnk)) == 0)	/* Make sure that only
							 * one is virtual */
		panic("copypv: no more than 1 parameter may be virtual\n");	/* Not allowed */

	if (which & cppvPsrc)
		from = (void *)phystokv(from);
	if (which & cppvPsnk)
		to = (void *)phystokv(to);

	if ((which & (cppvPsrc | cppvKmap)) == 0)	/* Source is virtual in
							 * current map */
		retval = copyin((user_addr_t) from, to, size);
	else if ((which & (cppvPsnk | cppvKmap)) == 0)	/* Sink is virtual in
							 * current map */
		retval = copyout(from, (user_addr_t) to, size);
	else			/* both addresses are physical or kernel map */
		bcopy(from, to, size);

#ifndef __ARM_COHERENT_IO__
	if (which & cppvFsrc) {
		flush_dcache64(source, size, ((which & cppvPsrc) == cppvPsrc));
	} else if (which & cppvPsrc) {
		from_wimg_bits = pmap_cache_attributes(source >> PAGE_SHIFT);
		if ((from_wimg_bits != VM_WIMG_COPYBACK) && (from_wimg_bits != VM_WIMG_WTHRU))
			flush_dcache64(source, size, TRUE);
	}

	if (which & cppvFsnk) {
		flush_dcache64(sink, size, ((which & cppvPsnk) == cppvPsnk));
	} else if (which & cppvPsnk) {
		to_wimg_bits = pmap_cache_attributes(sink >> PAGE_SHIFT);
		if (to_wimg_bits != VM_WIMG_COPYBACK)
			flush_dcache64(sink, size, TRUE);
	}
#endif
	return retval;
}
#if MACH_ASSERT

extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit);
/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 */
void
machine_callstack(
	uintptr_t *buf,
	vm_size_t callstack_max)
{
	/* Captures the USER call stack */
	uint32_t i = 0;

	struct arm_saved_state *state = find_user_regs(current_thread());

	if (!state) {
		while (i < callstack_max)
			buf[i++] = 0;
	} else {
		if (is_saved_state64(state)) {
			uint64_t frame[2];
			buf[i++] = (uintptr_t)get_saved_state_pc(state);
			frame[0] = get_saved_state_fp(state);
			while (i < callstack_max && frame[0] != 0) {
				if (copyinframe(frame[0], (void*) frame, TRUE))
					break;
				buf[i++] = (uintptr_t)frame[1];
			}
		} else {
			uint32_t frame[2];
			buf[i++] = (uintptr_t)get_saved_state_pc(state);
			frame[0] = (uint32_t)get_saved_state_fp(state);
			while (i < callstack_max && frame[0] != 0) {
				if (copyinframe(frame[0], (void*) frame, FALSE))
					break;
				buf[i++] = (uintptr_t)frame[1];
			}
		}

		while (i < callstack_max)
			buf[i++] = 0;
	}
}

#endif /* MACH_ASSERT */
boolean_t
ml_probe_read(
	__unused vm_offset_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read() unimplemented");
	return 1;
}

boolean_t
ml_probe_read_64(
	__unused addr64_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read_64() unimplemented");
	return 1;
}
void
ml_thread_policy(
	__unused thread_t thread,
	__unused unsigned policy_id,
	__unused unsigned policy_info)
{
	//    <rdar://problem/7141284>: Reduce print noise
	//    kprintf("ml_thread_policy() unimplemented\n");
}
void
panic_unimplemented()
{
	panic("Not yet implemented.");
}
/* ARM64_TODO <rdar://problem/9198953> */
void
abort(void)
{
	panic_unimplemented();
}
#if !MACH_KDP
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
#pragma unused(fn,arg)
}
#endif