/*
 * Copyright (c) 2012-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <kern/thread.h>
#include <sys/errno.h>
#include <vm/vm_map.h>
#include <san/kasan.h>

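/*
 * Low-level copy primitives implemented outside this file. They perform the
 * actual accesses against the user address and return 0 on success or an
 * errno-style value (e.g. EFAULT) if the access faults.
 */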
extern int _bcopyin(const char *src, char *dst, vm_size_t len);
extern int _bcopyinstr(const char *src, char *dst, vm_size_t max, vm_size_t *actual);
extern int _bcopyout(const char *src, char *dst, vm_size_t len);
extern int _copyin_word(const char *src, uint64_t *dst, vm_size_t len);

extern pmap_t kernel_pmap;

typedef enum copyio_type {
	COPYIO_IN,
	COPYIO_IN_WORD,
	COPYIO_INSTR,
	COPYIO_OUT,
} copyio_type_t;

static int
copyio_check_user_addr(user_addr_t user_addr, vm_size_t nbytes)
{
	/* Reject ranges that wrap around the address space */
	if (nbytes && (user_addr + nbytes <= user_addr))
		return EFAULT;

	/* Reject ranges extending beyond the end of the user map */
	if ((user_addr + nbytes) > vm_map_max(current_thread()->map))
		return EFAULT;

	return 0;
}

static inline void
user_access_enable(void)
{
#if __ARM_PAN_AVAILABLE__
	__builtin_arm_wsr("pan", 0);
#endif /* __ARM_PAN_AVAILABLE__ */
}

static inline void
user_access_disable(void)
{
#if __ARM_PAN_AVAILABLE__
	__builtin_arm_wsr("pan", 1);
#endif /* __ARM_PAN_AVAILABLE__ */
}

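/*
 * On CPUs with PAN (Privileged Access Never), kernel-mode accesses to
 * user-mapped memory fault unless the PAN bit is cleared. copyio() below
 * brackets every user copy with user_access_enable()/user_access_disable()
 * so user memory is reachable only for the duration of the copy.
 */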
static int
copyio(copyio_type_t copytype, const char *src, char *dst,
    vm_size_t nbytes, vm_size_t *lencopied)
{
	int result = 0;
	vm_size_t bytes_copied = 0;

	/* Reject TBI addresses (pointers carrying a non-zero top-byte tag) */
	if (copytype == COPYIO_OUT) {
		if ((uintptr_t)dst & TBI_MASK)
			return EINVAL;
	} else {
		if ((uintptr_t)src & TBI_MASK)
			return EINVAL;
	}

#if KASAN
	/* For user copies, asan-check the kernel-side buffer */
	if (copytype == COPYIO_IN || copytype == COPYIO_INSTR || copytype == COPYIO_IN_WORD) {
		__asan_storeN((uintptr_t)dst, nbytes);
	} else if (copytype == COPYIO_OUT) {
		__asan_loadN((uintptr_t)src, nbytes);
	}
#endif

	user_access_enable();

	/* Select copy routines based on direction:
	 *   COPYIO_IN  - Use unprivileged loads to read from user address
	 *   COPYIO_OUT - Use unprivileged stores to write to user address
	 */
	switch (copytype) {
	case COPYIO_IN:
		result = _bcopyin(src, dst, nbytes);
		break;
	case COPYIO_INSTR:
		result = _bcopyinstr(src, dst, nbytes, &bytes_copied);
		if (result != EFAULT) {
			*lencopied = bytes_copied;
		}
		break;
	case COPYIO_IN_WORD:
		result = _copyin_word(src, (uint64_t *)(uintptr_t)dst, nbytes);
		break;
	case COPYIO_OUT:
		result = _bcopyout(src, dst, nbytes);
		break;
	}

	user_access_disable();

	return result;
}

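/*
 * Kernel-to-kernel fast paths: when the calling thread's map uses the kernel
 * pmap (see the checks in copyin()/copyout() below), the "user" address is in
 * fact a kernel address and a plain bcopy() is sufficient.
 */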
int
copyin_kern(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
{
	bcopy((const char *)(uintptr_t)user_addr, kernel_addr, nbytes);

	return 0;
}

int
copyout_kern(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
	bcopy(kernel_addr, (char *)(uintptr_t)user_addr, nbytes);

	return 0;
}

int
copyin(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
{
	int result;

	if (user_addr >= VM_MIN_KERNEL_ADDRESS || user_addr + nbytes >= VM_MIN_KERNEL_ADDRESS) {
		if (current_thread()->map->pmap == kernel_pmap)
			return copyin_kern(user_addr, kernel_addr, nbytes);
		else
			return EFAULT;
	}

	if (nbytes >= 4096) {
		result = copyin_validate(user_addr, (uintptr_t)kernel_addr, nbytes);
		if (result) return result;
	}

	result = copyio_check_user_addr(user_addr, nbytes);

	if (result) return result;

	return copyio(COPYIO_IN, (const char *)(uintptr_t)user_addr, kernel_addr, nbytes, NULL);
}

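/*
 * Illustrative use of copyin() (hypothetical caller; the struct, field and
 * variable names are examples, not part of this file):
 *
 *	struct my_args args;
 *	int err = copyin(uap->args_ptr, (char *)&args, sizeof(args));
 *	if (err)
 *		return err;	(EFAULT, or EINVAL for a tagged pointer)
 */
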
/*
 * Read an aligned value from userspace as a single memory transaction.
 * This function supports userspace synchronization features.
 */
int
copyin_word(const user_addr_t user_addr, uint64_t *kernel_addr, vm_size_t nbytes)
{
	int result;

	/* Only 4- and 8-byte reads are supported */
	if ((nbytes != 4) && (nbytes != 8))
		return EINVAL;

	/* The access must be naturally aligned */
	if (user_addr & (nbytes - 1))
		return EINVAL;

	/* Address must be user */
	if (user_addr >= VM_MIN_KERNEL_ADDRESS || user_addr + nbytes >= VM_MIN_KERNEL_ADDRESS)
		return EFAULT;

	result = copyio_check_user_addr(user_addr, nbytes);
	if (result)
		return result;

	return copyio(COPYIO_IN_WORD, (const char *)user_addr, (char *)(uintptr_t)kernel_addr, nbytes, NULL);
}

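/*
 * Illustrative use of copyin_word() (hypothetical; variable names are
 * examples): sampling an 8-byte, naturally aligned user word in a single
 * transaction, e.g. for a userspace synchronization object:
 *
 *	uint64_t value;
 *	int err = copyin_word(user_ptr, &value, sizeof(value));
 */
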
int
copyinstr(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes, vm_size_t *lencopied)
{
	int result;

	if (user_addr >= VM_MIN_KERNEL_ADDRESS || user_addr + nbytes >= VM_MIN_KERNEL_ADDRESS) {
		return EFAULT;
	}

	result = copyio_check_user_addr(user_addr, nbytes);

	if (result) return result;

	return copyio(COPYIO_INSTR, (const char *)(uintptr_t)user_addr, kernel_addr, nbytes, lencopied);
}

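/*
 * Illustrative use of copyinstr() (hypothetical; buffer and argument names
 * are examples): copying a NUL-terminated path from userspace, with the
 * number of bytes copied reported through the last argument:
 *
 *	char path[MAXPATHLEN];
 *	vm_size_t copied;
 *	int err = copyinstr(user_path, path, sizeof(path), &copied);
 */
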
int
copyout(const void *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
	int result;

	if (user_addr >= VM_MIN_KERNEL_ADDRESS || user_addr + nbytes >= VM_MIN_KERNEL_ADDRESS) {
		if (current_thread()->map->pmap == kernel_pmap)
			return copyout_kern(kernel_addr, user_addr, nbytes);
		else
			return EFAULT;
	}

	if (nbytes >= 4096) {
		result = copyout_validate((uintptr_t)kernel_addr, user_addr, nbytes);
		if (result) return result;
	}

	result = copyio_check_user_addr(user_addr, nbytes);

	if (result) return result;

	return copyio(COPYIO_OUT, kernel_addr, (char *)(uintptr_t)user_addr, nbytes, NULL);
}

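/*
 * Illustrative use of copyout() (hypothetical; names are examples):
 * returning a result structure to the caller's buffer:
 *
 *	struct my_result res = { 0 };
 *	int err = copyout(&res, uap->result_ptr, sizeof(res));
 */
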
/*
 * Copy sizes bigger than this value will cause a kernel panic.
 *
 * Yes, this is an arbitrary fixed limit, but it's almost certainly
 * a programming error to be copying more than this amount between
 * user and wired kernel memory in a single invocation on this
 * platform.
 */
const int copysize_limit_panic = (64 * 1024 * 1024);	/* 64 MB */

/*
 * Validate the arguments to copy{in,out} on this platform.
 *
 * Called when nbytes is "large" e.g. more than a page. Such sizes are
 * infrequent, and very large sizes are likely indications of attempts
 * to exploit kernel programming errors (bugs).
 */
static int
copy_validate(const user_addr_t user_addr,
    uintptr_t kernel_addr, vm_size_t nbytes)
{
	uintptr_t kernel_addr_last = kernel_addr + nbytes;

	if (kernel_addr < VM_MIN_KERNEL_ADDRESS ||
	    kernel_addr > VM_MAX_KERNEL_ADDRESS ||
	    kernel_addr_last < kernel_addr ||
	    kernel_addr_last > VM_MAX_KERNEL_ADDRESS)
		panic("%s(%p, %p, %lu) - kaddr not in kernel", __func__,
		    (void *)user_addr, (void *)kernel_addr, nbytes);

	user_addr_t user_addr_last = user_addr + nbytes;

	if (user_addr_last < user_addr || user_addr_last > VM_MIN_KERNEL_ADDRESS)
		return (EFAULT);

	if (__improbable(nbytes > copysize_limit_panic))
		panic("%s(%p, %p, %lu) - transfer too large", __func__,
		    (void *)user_addr, (void *)kernel_addr, nbytes);

	return (0);
}

int
copyin_validate(const user_addr_t ua, uintptr_t ka, vm_size_t nbytes)
{
	return (copy_validate(ua, ka, nbytes));
}

int
copyout_validate(uintptr_t ka, const user_addr_t ua, vm_size_t nbytes)
{
	return (copy_validate(ua, ka, nbytes));
}