/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_assert.h>
#include <mach/vm_types.h>
#include <mach/mach_time.h>
#include <kern/timer.h>
#include <kern/clock.h>
#include <kern/machine.h>
#include <mach/machine.h>
#include <mach/machine/vm_param.h>
#include <mach_kdp.h>
#include <kdp/kdp_udp.h>
#if !MACH_KDP
#include <kdp/kdp_callout.h>
#endif /* !MACH_KDP */
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/caches_internal.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <arm/misc_protos.h>

#include <sys/errno.h>

#define INT_SIZE        (BYTE_SIZE * sizeof(int))
/* machine_routines_asm.s calls these */
extern int copyin_validate(const user_addr_t, uintptr_t, vm_size_t);
extern int copyin_user_validate(const user_addr_t, uintptr_t, vm_size_t);
extern int copyout_validate(uintptr_t, const user_addr_t, vm_size_t);
extern int copyio_user_validate(int, int, user_addr_t, vm_size_t);
extern int copyoutstr_prevalidate(const void *, user_addr_t, size_t);
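
/*
 * Illustrative sketch (not part of this file): a C rendering of how the
 * assembly copyio path is assumed to use the validators above before it
 * touches user memory. The wrapper name is hypothetical; the real checks
 * live in machine_routines_asm.s.
 *
 *	int
 *	copyin_sketch(user_addr_t uaddr, void *kaddr, vm_size_t nbytes)
 *	{
 *		int err = copyin_validate(uaddr, (uintptr_t)kaddr, nbytes);
 *		if (err != 0) {
 *			return err;     // EFAULT; bad kernel addresses panic
 *		}
 *		// ... perform the actual user-to-kernel copy ...
 *		return 0;
 *	}
 */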
void
bcopy_phys(addr64_t src, addr64_t dst, vm_size_t bytes)
{
	unsigned int    src_index;
	unsigned int    dst_index;
	vm_offset_t     src_offset;
	vm_offset_t     dst_offset;
	unsigned int    cpu_num = 0;
	unsigned int    wimg_bits_src, wimg_bits_dst;
	ppnum_t         pn_src = (src >> PAGE_SHIFT);
	ppnum_t         pn_dst = (dst >> PAGE_SHIFT);

	wimg_bits_src = pmap_cache_attributes(pn_src);
	wimg_bits_dst = pmap_cache_attributes(pn_dst);

	if (mmu_kvtop_wpreflight(phystokv((pmap_paddr_t) dst)) &&
	    ((wimg_bits_src & VM_WIMG_MASK) == VM_WIMG_DEFAULT) &&
	    ((wimg_bits_dst & VM_WIMG_MASK) == VM_WIMG_DEFAULT)) {
		/* Fast path - dst is writable and both source and destination have default attributes */
		bcopy((char *)phystokv((pmap_paddr_t) src), (char *)phystokv((pmap_paddr_t) dst), bytes);
		return;
	}

	src_offset = src & PAGE_MASK;
	dst_offset = dst & PAGE_MASK;

	if ((src_offset + bytes) > PAGE_SIZE || (dst_offset + bytes) > PAGE_SIZE) {
		panic("bcopy extends beyond copy windows");
	}

	mp_disable_preemption();
	cpu_num = cpu_number();
	src_index = pmap_map_cpu_windows_copy(pn_src, VM_PROT_READ, wimg_bits_src);
	dst_index = pmap_map_cpu_windows_copy(pn_dst, VM_PROT_READ | VM_PROT_WRITE, wimg_bits_dst);

	bcopy((char *)(pmap_cpu_windows_copy_addr(cpu_num, src_index) + src_offset),
	    (char *)(pmap_cpu_windows_copy_addr(cpu_num, dst_index) + dst_offset),
	    bytes);

	pmap_unmap_cpu_windows_copy(src_index);
	pmap_unmap_cpu_windows_copy(dst_index);
	mp_enable_preemption();
}
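
/*
 * Usage note (illustrative): a single bcopy_phys() call must stay within
 * one page on each side, per the panic check above, e.g.
 *
 *	assert((src & PAGE_MASK) + bytes <= PAGE_SIZE);
 *	assert((dst & PAGE_MASK) + bytes <= PAGE_SIZE);
 *	bcopy_phys(src, dst, bytes);
 *
 * Larger ranges must be split per page by the caller; the per-CPU copy
 * windows map exactly one source and one destination page at a time.
 */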
void
bzero_phys_nc(addr64_t src64, vm_size_t bytes)
{
	bzero_phys(src64, bytes);
}
/* Zero bytes starting at a physical address */
void
bzero_phys(addr64_t src, vm_size_t bytes)
{
	unsigned int    wimg_bits;
	ppnum_t         pn = (src >> PAGE_SHIFT);

	wimg_bits = pmap_cache_attributes(pn);
	if (__probable((wimg_bits & VM_WIMG_MASK) == VM_WIMG_DEFAULT)) {
		/* Fast path - default attributes */
		bzero((char *)phystokv((pmap_paddr_t) src), bytes);
	} else {
		mp_disable_preemption();

		unsigned int cpu_num = cpu_number();

		while (bytes > 0) {
			vm_offset_t offset = src & PAGE_MASK;
			uint32_t count = PAGE_SIZE - offset;

			if (count > bytes) {
				count = bytes;
			}

			unsigned int index = pmap_map_cpu_windows_copy(src >> PAGE_SHIFT, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);

			bzero((char *)(pmap_cpu_windows_copy_addr(cpu_num, index) + offset), count);

			pmap_unmap_cpu_windows_copy(index);

			src += count;
			bytes -= count;
		}

		mp_enable_preemption();
	}
}
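
/*
 * Note (illustrative): unlike bcopy_phys(), bzero_phys() handles ranges
 * spanning multiple pages. The slow path zeroes at most
 * PAGE_SIZE - (src & PAGE_MASK) bytes per copy-window mapping, then
 * advances src and shrinks bytes until the range is exhausted, e.g.
 *
 *	bzero_phys(pa, 3 * PAGE_SIZE);  // three window map/unmap rounds,
 *	                                // or one bzero on the fast path
 */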
/*
 *  Read data from a physical address.
 */

static unsigned int
ml_phys_read_data(pmap_paddr_t paddr, int size)
{
	unsigned int    index;
	unsigned int    result;
	unsigned int    wimg_bits;
	ppnum_t         pn = (paddr >> PAGE_SHIFT);
	unsigned char   s1;
	unsigned short  s2;
	vm_offset_t     copywindow_vaddr = 0;

	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits);
	copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);

	switch (size) {
	case 1:
		s1 = *(volatile unsigned char *)(copywindow_vaddr);
		result = s1;
		break;
	case 2:
		s2 = *(volatile unsigned short *)(copywindow_vaddr);
		result = s2;
		break;
	case 4:
	default:
		result = *(volatile unsigned int *)(copywindow_vaddr);
		break;
	}

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();

	return result;
}
static unsigned long long
ml_phys_read_long_long(pmap_paddr_t paddr)
{
	unsigned int        index;
	unsigned long long  result;
	unsigned int        wimg_bits;
	ppnum_t             pn = (paddr >> PAGE_SHIFT);

	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits);

	result = *(volatile unsigned long long *)(pmap_cpu_windows_copy_addr(cpu_number(), index)
	    | ((uint32_t)paddr & PAGE_MASK));

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();

	return result;
}
unsigned int
ml_phys_read( vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int
ml_phys_read_word(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int
ml_phys_read_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int
ml_phys_read_word_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int
ml_phys_read_half(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 2);
}

unsigned int
ml_phys_read_half_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 2);
}

unsigned int
ml_phys_read_byte(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 1);
}

unsigned int
ml_phys_read_byte_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 1);
}

unsigned long long
ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_long_long((pmap_paddr_t)paddr);
}

unsigned long long
ml_phys_read_double_64(addr64_t paddr64)
{
	return ml_phys_read_long_long((pmap_paddr_t)paddr64);
}
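
/*
 * Naming note: the _64 suffix on the accessors above refers to the width
 * of the physical address argument (addr64_t), not the access size.
 * ml_phys_read_64() and ml_phys_read_word_64() still read 4 bytes; the
 * 8-byte accesses are the _double variants.
 */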
/*
 *  Write data to a physical address.
 */

static void
ml_phys_write_data(pmap_paddr_t paddr, unsigned long data, int size)
{
	unsigned int    index;
	unsigned int    wimg_bits;
	ppnum_t         pn = (paddr >> PAGE_SHIFT);
	vm_offset_t     copywindow_vaddr = 0;

	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
	copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t) paddr & PAGE_MASK);

	switch (size) {
	case 1:
		*(volatile unsigned char *)(copywindow_vaddr) = (unsigned char)data;
		break;
	case 2:
		*(volatile unsigned short *)(copywindow_vaddr) = (unsigned short)data;
		break;
	case 4:
	default:
		*(volatile unsigned int *)(copywindow_vaddr) = (uint32_t)data;
		break;
	}

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();
}
static void
ml_phys_write_long_long(pmap_paddr_t paddr, unsigned long long data)
{
	unsigned int    index;
	unsigned int    wimg_bits;
	ppnum_t         pn = (paddr >> PAGE_SHIFT);

	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);

	*(volatile unsigned long long *)(pmap_cpu_windows_copy_addr(cpu_number(), index)
	| ((uint32_t)paddr & PAGE_MASK)) = data;

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();
}
void
ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
}

void
ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
}

void
ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
}

void
ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
}

void
ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void
ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void
ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void
ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void
ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_long_long((pmap_paddr_t)paddr, data);
}

void
ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
	ml_phys_write_long_long((pmap_paddr_t)paddr64, data);
}
/*
 * Set indicated bit in bit string.
 */
void
setbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] |= 1 << (bitno % INT_SIZE);
}

/*
 * Clear indicated bit in bit string.
 */
void
clrbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] &= ~(1 << (bitno % INT_SIZE));
}

/*
 * Test if indicated bit is set in bit string.
 */
int
testbit(int bitno, int *s)
{
	return s[bitno / INT_SIZE] & (1 << (bitno % INT_SIZE));
}
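
/*
 * Worked example (illustrative): with 32-bit ints, INT_SIZE is 32, so
 * bit 37 of a bit string lives in word 1, bit position 5:
 *
 *	int map[2] = { 0, 0 };
 *	setbit(37, map);                // map[1] == 0x20
 *	assert(testbit(37, map));
 *	clrbit(37, map);                // map[1] == 0
 */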
/*
 * Find first bit set in bit string.
 */
int
ffsbit(int *s)
{
	int             offset;

	for (offset = 0; !*s; offset += INT_SIZE, ++s) {
		;
	}
	return offset + __builtin_ctz(*s);
}

int
ffs(unsigned int mask)
{
	if (mask == 0) {
		return 0;
	}

	/*
	 * NOTE: cannot use __builtin_ffs because it generates a call to
	 * 'ffs'
	 */
	return 1 + __builtin_ctz(mask);
}

int
ffsll(unsigned long long mask)
{
	if (mask == 0) {
		return 0;
	}

	/*
	 * NOTE: cannot use __builtin_ffsll because it generates a call to
	 * 'ffsll'
	 */
	return 1 + __builtin_ctzll(mask);
}
/*
 * Find last bit set in bit string.
 */
int
fls(unsigned int mask)
{
	if (mask == 0) {
		return 0;
	}

	return (sizeof(mask) << 3) - __builtin_clz(mask);
}

int
flsll(unsigned long long mask)
{
	if (mask == 0) {
		return 0;
	}

	return (sizeof(mask) << 3) - __builtin_clzll(mask);
}
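
/*
 * Convention check (illustrative): these use 1-based bit positions, with
 * 0 reserved for "no bits set", matching the traditional ffs()/fls():
 *
 *	ffs(0)    == 0;    fls(0)    == 0;
 *	ffs(0x10) == 5;    fls(0x10) == 5;    // only bit 4 set
 *	ffs(0x12) == 2;    fls(0x12) == 5;    // bits 1 and 4 set
 */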
int
bcmp(
	const void *pa,
	const void *pb,
	size_t len)
{
	const char     *a = (const char *) pa;
	const char     *b = (const char *) pb;

	if (len == 0) {
		return 0;
	}

	do {
		if (*a++ != *b++) {
			break;
		}
	} while (--len);

	return (int)len;
}
int
memcmp(const void *s1, const void *s2, size_t n)
{
	if (n != 0) {
		const unsigned char *p1 = s1, *p2 = s2;

		do {
			if (*p1++ != *p2++) {
				return *--p1 - *--p2;
			}
		} while (--n != 0);
	}
	return 0;
}
unsigned long
memcmp_zero_ptr_aligned(const void *s, size_t n)
{
	uintptr_t p = (uintptr_t)s;
	uintptr_t end = (uintptr_t)s + n;
	uint32_t a, b;

	static_assert(sizeof(unsigned long) == sizeof(uint32_t));

	a = *(const uint32_t *)p;
	b = *(const uint32_t *)(end - sizeof(uint32_t));

	/*
	 * align p to the next 64bit boundary
	 * align end to the previous 64bit boundary
	 *
	 * and do a nice ldrd loop.
	 */
	p = (p + sizeof(uint64_t) - 1) & -sizeof(uint64_t);
	end &= -sizeof(uint64_t);

	for (; p < end; p += sizeof(uint64_t)) {
		uint64_t v = *(const uint64_t *)p;
		a |= (uint32_t)v;
		b |= (uint32_t)(v >> 32);
	}

	return a | b;
}
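
/*
 * Why the head/tail loads are safe (illustrative): with s pointer-aligned
 * and n a nonzero multiple of 4 (an assumption consistent with the name
 * and the static_assert above), the first load into 'a' and the last load
 * into 'b' overlap whatever the 8-byte-aligned loop skips at either end,
 * so every byte of [s, s + n) feeds into a or b. For example, with
 * s % 8 == 4 and n == 12:
 *
 *	a covers bytes 0-3, the loop covers bytes 4-11 (one ldrd),
 *	b covers bytes 8-11; nothing is missed, and some bytes counting
 *	twice is harmless for an "is it all zero?" test.
 */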
kern_return_t
copypv(addr64_t source, addr64_t sink, unsigned int size, int which)
{
	kern_return_t   retval = KERN_SUCCESS;
	void           *from, *to;
	unsigned int    from_wimg_bits, to_wimg_bits;

	from = CAST_DOWN(void *, source);
	to = CAST_DOWN(void *, sink);

	if ((which & (cppvPsrc | cppvPsnk)) == 0) {     /* Make sure that only
	                                                 * one is virtual */
		panic("copypv: no more than 1 parameter may be virtual\n");     /* Not allowed */
	}
	if (which & cppvPsrc) {
		from = (void *)phystokv((pmap_paddr_t)from);
	}
	if (which & cppvPsnk) {
		to = (void *)phystokv((pmap_paddr_t)to);
	}

	if ((which & (cppvPsrc | cppvKmap)) == 0) {     /* Source is virtual in
	                                                 * current map */
		retval = copyin((user_addr_t) from, to, size);
	} else if ((which & (cppvPsnk | cppvKmap)) == 0) {      /* Sink is virtual in
		                                                 * current map */
		retval = copyout(from, (user_addr_t) to, size);
	} else {                /* both addresses are physical or kernel map */
		bcopy(from, to, size);
	}

	if (which & cppvFsrc) {
		flush_dcache64(source, size, ((which & cppvPsrc) == cppvPsrc));
	} else if (which & cppvPsrc) {
		from_wimg_bits = pmap_cache_attributes(source >> PAGE_SHIFT);
		if ((from_wimg_bits != VM_WIMG_COPYBACK) && (from_wimg_bits != VM_WIMG_WTHRU)) {
			flush_dcache64(source, size, TRUE);
		}
	}

	if (which & cppvFsnk) {
		flush_dcache64(sink, size, ((which & cppvPsnk) == cppvPsnk));
	} else if (which & cppvPsnk) {
		to_wimg_bits = pmap_cache_attributes(sink >> PAGE_SHIFT);
		if (to_wimg_bits != VM_WIMG_COPYBACK) {
			flush_dcache64(sink, size, TRUE);
		}
	}

	return retval;
}
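
/*
 * Usage sketch (illustrative): the 'which' flags select how each address
 * is interpreted. For example, to copy a wired kernel-virtual buffer into
 * a physical page and force the sink's data cache to be flushed:
 *
 *	copypv((addr64_t)(uintptr_t)kbuf,   // source: kernel virtual
 *	    dst_pa,                         // sink: physical
 *	    size,
 *	    cppvPsnk | cppvKmap | cppvFsnk);
 *
 * Because cppvPsnk marks the sink physical and cppvKmap marks the virtual
 * side as kernel-mapped, this takes the plain bcopy() path above.
 */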
/*
 * Copy sizes bigger than this value will cause a kernel panic.
 *
 * Yes, this is an arbitrary fixed limit, but it's almost certainly
 * a programming error to be copying more than this amount between
 * user and wired kernel memory in a single invocation on this
 * platform.
 */
const int copysize_limit_panic = (64 * 1024 * 1024);
static inline bool
is_kernel_to_kernel_copy()
{
	return current_thread()->map->pmap == kernel_pmap;
}
static int
copy_validate_user(const user_addr_t user_addr, vm_size_t nbytes, bool kern_to_kern_allowed)
{
	user_addr_t user_addr_last = user_addr + nbytes;
	thread_t self = current_thread();

	if (__improbable(!kern_to_kern_allowed && is_kernel_to_kernel_copy())) {
		return EFAULT;
	}

	if (__improbable((user_addr_last < user_addr) ||
	    ((user_addr + nbytes) > vm_map_max(self->map)) ||
	    (user_addr < vm_map_min(self->map)))) {
		return EFAULT;
	}

	if (__improbable(nbytes > copysize_limit_panic)) {
		panic("%s(%p, ..., %u) - transfer too large", __func__,
		    (void *)user_addr, nbytes);
	}

	return 0;
}
/*
 * Validate the arguments to copy{in,out} on this platform.
 *
 * Called when nbytes is "large", e.g. more than a page. Such sizes are
 * infrequent, and very large sizes are likely indications of attempts
 * to exploit kernel programming errors (bugs).
 */
static int
copy_validate(const user_addr_t user_addr,
    uintptr_t kernel_addr, vm_size_t nbytes, bool kern_to_kern_allowed)
{
	uintptr_t kernel_addr_last = kernel_addr + nbytes;

	if (__improbable(kernel_addr < VM_MIN_KERNEL_ADDRESS ||
	    kernel_addr > VM_MAX_KERNEL_ADDRESS ||
	    kernel_addr_last < kernel_addr ||
	    kernel_addr_last > VM_MAX_KERNEL_ADDRESS)) {
		panic("%s(%p, %p, %u) - kaddr not in kernel", __func__,
		    (void *)user_addr, (void *)kernel_addr, nbytes);
	}

	return copy_validate_user(user_addr, nbytes, kern_to_kern_allowed);
}
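
/*
 * Summary of the validation policy (illustrative): a bogus kernel address
 * panics (it is a kernel bug), while a bogus user address merely fails
 * with EFAULT so the copyio path can return an error to its caller:
 *
 *	copy_validate(ua, 0xdeadbeef, 16, true);      // panics: kaddr not in kernel
 *	copy_validate(ua_out_of_range, ka, 16, true); // returns EFAULT
 *	copy_validate(ua, ka, 128 << 20, true);       // panics: transfer too
 *	                                              // large (addresses in range)
 */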
int
copyin_validate(const user_addr_t ua, uintptr_t ka, vm_size_t nbytes)
{
	return copy_validate(ua, ka, nbytes, true);
}

int
copyin_user_validate(const user_addr_t ua, uintptr_t ka, vm_size_t nbytes)
{
	return copy_validate(ua, ka, nbytes, false);
}

int
copyout_validate(uintptr_t ka, const user_addr_t ua, vm_size_t nbytes)
{
	return copy_validate(ua, ka, nbytes, true);
}

int
copyio_user_validate(int a __unused, int b __unused,
    user_addr_t ua, vm_size_t nbytes)
{
	return copy_validate_user(ua, nbytes, false);
}
int
copyoutstr_prevalidate(const void *__unused kaddr, user_addr_t __unused uaddr, size_t __unused len)
{
	if (__improbable(is_kernel_to_kernel_copy())) {
		return EFAULT;
	}

	return 0;
}
#if MACH_ASSERT

extern int copyinframe(vm_address_t fp, char *frame);

/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 */
void
machine_callstack(
	uintptr_t * buf,
	vm_size_t callstack_max)
{
	/* Captures the USER call stack */
	uint32_t i = 0;
	uint32_t frame[2];

	struct arm_saved_state * state = find_user_regs(current_thread());

	if (!state) {
		while (i < callstack_max) {
			buf[i++] = 0;
		}
	} else {
		buf[i++] = (uintptr_t)state->pc;
		frame[0] = state->r[7];

		while (i < callstack_max && frame[0] != 0) {
			if (copyinframe(frame[0], (void*) frame)) {
				break;
			}
			buf[i++] = (uintptr_t)frame[1];
		}

		while (i < callstack_max) {
			buf[i++] = 0;
		}
	}
}

#endif /* MACH_ASSERT */
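
/*
 * Frame-walk note (illustrative): machine_callstack() assumes the 32-bit
 * ARM convention where r7 is the frame pointer and each frame starts with
 * two words:
 *
 *	frame[0] = caller's saved frame pointer (next link in the chain)
 *	frame[1] = saved return address
 *
 * copyinframe() copies those two words in from user space; a zero frame
 * pointer or a faulting copy terminates the walk, and any remaining slots
 * in buf are zero-filled.
 */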
boolean_t
ml_probe_read(
	__unused vm_offset_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read() unimplemented");
	return 1;
}

boolean_t
ml_probe_read_64(
	__unused addr64_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read_64() unimplemented");
	return 1;
}

void
ml_thread_policy(
	__unused thread_t thread,
	__unused unsigned policy_id,
	__unused unsigned policy_info)
{
	// <rdar://problem/7141284>: Reduce print noise
	// kprintf("ml_thread_policy() unimplemented\n");
}

#if !MACH_KDP
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
#pragma unused(fn,arg)
}
#endif