#define INT_SIZE (BYTE_SIZE * sizeof (int))
+/* machine_routines_asm.s calls these */
+extern int copyin_validate(const user_addr_t, uintptr_t, vm_size_t);
+extern int copyin_user_validate(const user_addr_t, uintptr_t, vm_size_t);
+extern int copyout_validate(uintptr_t, const user_addr_t, vm_size_t);
+extern int copyio_user_validate(int, int, user_addr_t, vm_size_t);
+extern int copyoutstr_prevalidate(const void *, user_addr_t, size_t);
void
bcopy_phys(addr64_t src, addr64_t dst, vm_size_t bytes)
ppnum_t pn = (src >> PAGE_SHIFT);
wimg_bits = pmap_cache_attributes(pn);
- if ((wimg_bits & VM_WIMG_MASK) == VM_WIMG_DEFAULT) {
+ if (__probable((wimg_bits & VM_WIMG_MASK) == VM_WIMG_DEFAULT)) {
/* Fast path - default attributes */
bzero((char *)phystokv((pmap_paddr_t) src), bytes);
} else {
return 0;
}
+/*
+ * memcmp_zero_ptr_aligned() returns 0 iff the n bytes at s are all zero,
+ * and a non-zero value otherwise.
+ *
+ * NOTE(review): the unconditional head/tail 32-bit loads assume s is
+ * word-aligned and n is at least sizeof(uint32_t) -- confirm callers
+ * guarantee this.
+ */
+unsigned long
+memcmp_zero_ptr_aligned(const void *s, size_t n)
+{
+ uintptr_t p = (uintptr_t)s;
+ uintptr_t end = (uintptr_t)s + n;
+ uint32_t a, b;
+
+ /* this build targets ILP32: unsigned long must be 32 bits wide */
+ static_assert(sizeof(unsigned long) == sizeof(uint32_t));
+
+ /* first and last 32-bit words; they cover any bytes the aligned loop below skips */
+ a = *(const uint32_t *)p;
+ b = *(const uint32_t *)(end - sizeof(uint32_t));
+
+ /*
+ * align p to the next 64bit boundary
+ * align end to the previous 64bit boundary
+ *
+ * and do a nice ldrd loop.
+ */
+ p = (p + sizeof(uint64_t) - 1) & -sizeof(uint64_t);
+ end &= -sizeof(uint64_t);
+
+ /* OR-accumulate: a | b stays 0 only while every word loaded is 0 */
+ for (; p < end; p += sizeof(uint64_t)) {
+ uint64_t v = *(const uint64_t *)p;
+ a |= (uint32_t)v;
+ b |= (uint32_t)(v >> 32);
+ }
+
+ return a | b;
+}
+
kern_return_t
copypv(addr64_t source, addr64_t sink, unsigned int size, int which)
{
*/
const int copysize_limit_panic = (64 * 1024 * 1024);
+/*
+ * Returns true when the current thread is running against the kernel
+ * pmap, i.e. the "user" side of a copyio would actually be kernel memory.
+ */
+static inline bool
+is_kernel_to_kernel_copy()
+{
+ return current_thread()->map->pmap == kernel_pmap;
+}
+
+/*
+ * Validate the user-space range of a copy{in,out}:
+ *  - optionally reject kernel-to-kernel copies,
+ *  - reject ranges that wrap or fall outside the current thread's map,
+ *  - panic on transfers larger than copysize_limit_panic.
+ * Returns 0 on success, EFAULT on a rejected range.
+ */
+static int
+copy_validate_user(const user_addr_t user_addr, vm_size_t nbytes, bool kern_to_kern_allowed)
+{
+ user_addr_t user_addr_last = user_addr + nbytes;
+ thread_t self = current_thread();
+
+ if (__improbable(!kern_to_kern_allowed && is_kernel_to_kernel_copy())) {
+ return EFAULT;
+ }
+
+ /* wrap-around check first, then both ends against the map bounds */
+ if (__improbable((user_addr_last < user_addr) ||
+ ((user_addr + nbytes) > vm_map_max(self->map)) ||
+ (user_addr < vm_map_min(self->map)))) {
+ return EFAULT;
+ }
+
+ if (__improbable(nbytes > copysize_limit_panic)) {
+ /* NOTE(review): %u assumes a 32-bit vm_size_t -- confirm for this target */
+ panic("%s(%p, ..., %u) - transfer too large", __func__,
+ (void *)user_addr, nbytes);
+ }
+
+ return 0;
+}
+
/*
 * Validate the arguments to copy{in,out} on this platform.
 *
 */
static int
copy_validate(const user_addr_t user_addr,
- uintptr_t kernel_addr, vm_size_t nbytes)
+ uintptr_t kernel_addr, vm_size_t nbytes, bool kern_to_kern_allowed)
{
uintptr_t kernel_addr_last = kernel_addr + nbytes;
(void *)user_addr, (void *)kernel_addr, nbytes);
}
- user_addr_t user_addr_last = user_addr + nbytes;
+ /* user-side validation is shared with copyio_user_validate() */
+ return copy_validate_user(user_addr, nbytes, kern_to_kern_allowed);
+}
- if (__improbable((user_addr_last < user_addr) || ((user_addr + nbytes) > vm_map_max(current_thread()->map)) ||
- (user_addr < vm_map_min(current_thread()->map)))) {
- return EFAULT;
- }
+/* copyin: user -> kernel buffer; kernel-to-kernel copies are allowed */
+int
+copyin_validate(const user_addr_t ua, uintptr_t ka, vm_size_t nbytes)
+{
+ return copy_validate(ua, ka, nbytes, true);
+}
- if (__improbable(nbytes > copysize_limit_panic)) {
- panic("%s(%p, %p, %u) - transfer too large", __func__,
- (void *)user_addr, (void *)kernel_addr, nbytes);
- }
+/* copyin from a strictly-user address: kernel-to-kernel copies are rejected */
+int
+copyin_user_validate(const user_addr_t ua, uintptr_t ka, vm_size_t nbytes)
+{
+ return copy_validate(ua, ka, nbytes, false);
+}
- return 0;
+/* copyout: kernel buffer -> user; kernel-to-kernel copies are allowed */
+int
+copyout_validate(uintptr_t ka, const user_addr_t ua, vm_size_t nbytes)
+{
+ return copy_validate(ua, ka, nbytes, true);
}
+/* user-range-only validation (no kernel buffer); never kernel-to-kernel */
int
-copyin_validate(const user_addr_t ua, uintptr_t ka, vm_size_t nbytes)
+copyio_user_validate(int a __unused, int b __unused,
+ user_addr_t ua, vm_size_t nbytes)
{
- return copy_validate(ua, ka, nbytes);
+ return copy_validate_user(ua, nbytes, false);
}
+/*
+ * Pre-check for copyoutstr: only rejects kernel-to-kernel copies.
+ * No range validation happens here; the copy itself bounds the string.
+ */
int
-copyout_validate(uintptr_t ka, const user_addr_t ua, vm_size_t nbytes)
+copyoutstr_prevalidate(const void *__unused kaddr, user_addr_t __unused uaddr, size_t __unused len)
{
- return copy_validate(ua, ka, nbytes);
+ if (__improbable(is_kernel_to_kernel_copy())) {
+ return EFAULT;
+ }
+
+ return 0;
}
#if MACH_ASSERT