/*
 * Copyright (c) 2012-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <kern/thread.h>
#include <kern/zalloc_internal.h>
#include <sys/errno.h>

#include <vm/vm_map.h>
#include <san/kasan.h>
extern int _bcopyin(const char *src, char *dst, vm_size_t len);
extern int _bcopyinstr(const char *src, char *dst, vm_size_t max, vm_size_t *actual);
extern int _bcopyout(const char *src, char *dst, vm_size_t len);
extern int _copyin_atomic32(const char *src, uint32_t *dst);
extern int _copyin_atomic32_wait_if_equals(const char *src, uint32_t dst);
extern int _copyin_atomic64(const char *src, uint64_t *dst);
extern int _copyout_atomic32(uint32_t u32, const char *dst);
extern int _copyout_atomic64(uint64_t u64, const char *dst);

extern int copyoutstr_prevalidate(const void *kaddr, user_addr_t uaddr, size_t len);
extern pmap_t kernel_pmap;

extern const vm_map_address_t physmap_base;
extern const vm_map_address_t physmap_end;
/*!
 * @typedef copyio_flags_t
 *
 * @const COPYIO_IN
 * The copy is user -> kernel.
 * One of COPYIO_IN or COPYIO_OUT should always be specified.
 *
 * @const COPYIO_OUT
 * The copy is kernel -> user.
 * One of COPYIO_IN or COPYIO_OUT should always be specified.
 *
 * @const COPYIO_ALLOW_KERNEL_TO_KERNEL
 * The "user_address" is allowed to be in the VA space of the kernel.
 *
 * @const COPYIO_VALIDATE_USER_ONLY
 * There isn't really a kernel address used, and only the user address
 * needs to be validated.
 *
 * @const COPYIO_ATOMIC
 * The copyio operation is atomic; ensure that it is properly aligned.
 */
__options_decl(copyio_flags_t, uint32_t, {
	COPYIO_IN                       = 0x0001,
	COPYIO_OUT                      = 0x0002,
	COPYIO_ALLOW_KERNEL_TO_KERNEL   = 0x0004,
	COPYIO_VALIDATE_USER_ONLY       = 0x0008,
	COPYIO_ATOMIC                   = 0x0010,
});
static inline void
user_access_enable(void)
{
#if __ARM_PAN_AVAILABLE__
	assert(__builtin_arm_rsr("pan") != 0);
	__builtin_arm_wsr("pan", 0);
#endif  /* __ARM_PAN_AVAILABLE__ */
}

static inline void
user_access_disable(void)
{
#if __ARM_PAN_AVAILABLE__
	__builtin_arm_wsr("pan", 1);
#endif  /* __ARM_PAN_AVAILABLE__ */
}
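
/*
 * Illustrative sketch (not from the original file): the canonical PAN
 * bracket used by every copyio routine below.  Validation happens first,
 * PAN is dropped for exactly one user access, and is re-armed before the
 * result is even inspected, keeping the window in which the kernel may
 * touch user mappings as narrow as possible.
 */
#if 0   /* example only */
static int
example_pan_bracket(const user_addr_t user_addr, char *kernel_addr,
    vm_size_t nbytes)
{
	int result;

	user_access_enable();   /* PAN off: kernel loads from user VA allowed */
	result = _bcopyin((const char *)user_addr, kernel_addr, nbytes);
	user_access_disable();  /* PAN back on before anything else runs */
	return result;
}
#endif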
/*
 * Copy sizes bigger than this value will cause a kernel panic.
 *
 * Yes, this is an arbitrary fixed limit, but it's almost certainly
 * a programming error to be copying more than this amount between
 * user and wired kernel memory in a single invocation on this
 * platform.
 */
const int copysize_limit_panic = (64 * 1024 * 1024);
static inline bool
is_kernel_to_kernel_copy()
{
	return current_thread()->map->pmap == kernel_pmap;
}
/*
 * Validate the arguments to copy{in,out} on this platform.
 *
 * Returns EXDEV when the current thread's pmap is the kernel's,
 * which is non-fatal for certain routines.
 */
static int
copy_validate(const user_addr_t user_addr, uintptr_t kernel_addr,
    vm_size_t nbytes, copyio_flags_t flags)
{
	thread_t self = current_thread();

	user_addr_t user_addr_last;
	uintptr_t kernel_addr_last;

	if (__improbable(nbytes > copysize_limit_panic)) {
		panic("%s(%p, %p, %lu) - transfer too large", __func__,
		    (void *)user_addr, (void *)kernel_addr, nbytes);
	}

	if (__improbable((user_addr < vm_map_min(self->map)) ||
	    os_add_overflow(user_addr, nbytes, &user_addr_last) ||
	    (user_addr_last > vm_map_max(self->map)))) {
		return EFAULT;
	}

	if (flags & COPYIO_ATOMIC) {
		if (__improbable(user_addr & (nbytes - 1))) {
			return EINVAL;
		}
	}

	if ((flags & COPYIO_VALIDATE_USER_ONLY) == 0) {
		if (__improbable(os_add_overflow(kernel_addr, nbytes, &kernel_addr_last))) {
			panic("%s(%p, %p, %lu) - kaddr not in kernel", __func__,
			    (void *)user_addr, (void *)kernel_addr, nbytes);
		}

		bool in_kva = (kernel_addr >= VM_MIN_KERNEL_ADDRESS) &&
		    (kernel_addr_last <= VM_MAX_KERNEL_ADDRESS);
		bool in_physmap = (kernel_addr >= physmap_base) &&
		    (kernel_addr_last <= physmap_end);

		if (__improbable(!(in_kva || in_physmap))) {
			panic("%s(%p, %p, %lu) - kaddr not in kernel", __func__,
			    (void *)user_addr, (void *)kernel_addr, nbytes);
		}
	}

	if (is_kernel_to_kernel_copy()) {
		if (__improbable((flags & COPYIO_ALLOW_KERNEL_TO_KERNEL) == 0)) {
			return EFAULT;
		}
		return EXDEV;
	}

	if (__improbable(user_addr & TBI_MASK)) {
		return EINVAL;
	}

	if ((flags & COPYIO_VALIDATE_USER_ONLY) == 0) {
		if (__probable(!zalloc_disable_copyio_check)) {
			zone_t src_zone = NULL;
			vm_size_t kernel_buf_size = zone_element_size((void *)kernel_addr, &src_zone);
			/*
			 * Size of elements in the permanent zone is not saved as a part of the
			 * zone's info
			 */
			if (__improbable(src_zone && !src_zone->permanent &&
			    kernel_buf_size < nbytes)) {
				panic("copyio_preflight: kernel buffer 0x%lx has size %lu < nbytes %lu",
				    kernel_addr, kernel_buf_size, nbytes);
			}
		}

#if KASAN
		/* For user copies, asan-check the kernel-side buffer */
		if (flags & COPYIO_IN) {
			__asan_storeN(kernel_addr, nbytes);
		} else {
			__asan_loadN(kernel_addr, nbytes);
			kasan_check_uninitialized((vm_address_t)kernel_addr, nbytes);
		}
#endif
	}

	return 0;
}
int
copyin_kern(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
{
	bcopy((const char *)(uintptr_t)user_addr, kernel_addr, nbytes);

	return 0;
}
int
copyout_kern(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
	bcopy(kernel_addr, (char *)(uintptr_t)user_addr, nbytes);

	return 0;
}
int
copyin(const user_addr_t user_addr, void *kernel_addr, vm_size_t nbytes)
{
	int result;

	if (__improbable(nbytes == 0)) {
		return 0;
	}

	result = copy_validate(user_addr, (uintptr_t)kernel_addr, nbytes,
	    COPYIO_IN | COPYIO_ALLOW_KERNEL_TO_KERNEL);
	if (result == EXDEV) {
		return copyin_kern(user_addr, kernel_addr, nbytes);
	}
	if (__improbable(result)) {
		return result;
	}

	user_access_enable();
	result = _bcopyin((const char *)user_addr, kernel_addr, nbytes);
	user_access_disable();
	return result;
}
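
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): the usual caller pattern is to copyin() a fixed-size argument
 * block into a kernel-side snapshot and operate only on that snapshot,
 * never on the user mapping directly.
 */
#if 0   /* example only */
struct example_args {
	uint64_t flags;
	uint64_t buf_ptr;
};

static int
example_fetch_args(user_addr_t uap, struct example_args *out)
{
	/* EXDEV is absorbed inside copyin(); callers see 0 or an errno. */
	return copyin(uap, out, sizeof(*out));
}
#endif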
/*
 * copy{in,out}_atomic{32,64}
 * Read or store an aligned value from userspace as a single memory transaction.
 * These functions support userspace synchronization features.
 */
int
copyin_atomic32(const user_addr_t user_addr, uint32_t *kernel_addr)
{
	int result = copy_validate(user_addr, (uintptr_t)kernel_addr, 4,
	    COPYIO_IN | COPYIO_ATOMIC);
	if (__improbable(result)) {
		return result;
	}
	user_access_enable();
	result = _copyin_atomic32((const char *)user_addr, kernel_addr);
	user_access_disable();
	return result;
}
int
copyin_atomic32_wait_if_equals(const user_addr_t user_addr, uint32_t value)
{
	int result = copy_validate(user_addr, 0, 4,
	    COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
	if (__improbable(result)) {
		return result;
	}
	user_access_enable();
	result = _copyin_atomic32_wait_if_equals((const char *)user_addr, value);
	user_access_disable();
	return result;
}
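
/*
 * Illustrative sketch (hypothetical, not part of the original file): a
 * futex-style wait loop built on the two atomic primitives above.  It
 * assumes _copyin_atomic32_wait_if_equals fails with ESTALE once the
 * user word no longer holds the expected value, so the loop re-reads
 * and either returns or parks again.
 */
#if 0   /* example only */
static int
example_user_wait(user_addr_t uaddr, uint32_t locked_value)
{
	uint32_t cur;
	int err;

	for (;;) {
		/* Single-copy atomic read of the 4-byte user word. */
		err = copyin_atomic32(uaddr, &cur);
		if (err != 0) {
			return err;
		}
		if (cur != locked_value) {
			return 0;       /* word changed: stop waiting */
		}
		/* Park (wfe) only while *uaddr still equals locked_value. */
		err = copyin_atomic32_wait_if_equals(uaddr, locked_value);
		if (err != 0 && err != ESTALE) {
			return err;     /* EFAULT, EINVAL (misaligned), ... */
		}
	}
}
#endif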
int
copyin_atomic64(const user_addr_t user_addr, uint64_t *kernel_addr)
{
	int result = copy_validate(user_addr, (uintptr_t)kernel_addr, 8,
	    COPYIO_IN | COPYIO_ATOMIC);
	if (__improbable(result)) {
		return result;
	}
	user_access_enable();
	result = _copyin_atomic64((const char *)user_addr, kernel_addr);
	user_access_disable();
	return result;
}

int
copyout_atomic32(uint32_t value, user_addr_t user_addr)
{
	int result = copy_validate(user_addr, 0, 4,
	    COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
	if (__improbable(result)) {
		return result;
	}
	user_access_enable();
	result = _copyout_atomic32(value, (const char *)user_addr);
	user_access_disable();
	return result;
}

int
copyout_atomic64(uint64_t value, user_addr_t user_addr)
{
	int result = copy_validate(user_addr, 0, 8,
	    COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
	if (__improbable(result)) {
		return result;
	}
	user_access_enable();
	result = _copyout_atomic64(value, (const char *)user_addr);
	user_access_disable();
	return result;
}
int
copyinstr(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes, vm_size_t *lencopied)
{
	int result;
	vm_size_t bytes_copied = 0;

	*lencopied = 0;
	if (__improbable(nbytes == 0)) {
		return ENAMETOOLONG;
	}

	result = copy_validate(user_addr, (uintptr_t)kernel_addr, nbytes, COPYIO_IN);
	if (__improbable(result)) {
		return result;
	}
	user_access_enable();
	result = _bcopyinstr((const char *)user_addr, kernel_addr, nbytes,
	    &bytes_copied);
	user_access_disable();
	if (result != EFAULT) {
		*lencopied = bytes_copied;
	}
	return result;
}
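
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): copying a NUL-terminated path from user space.  On success the
 * returned length includes the terminating NUL; a string that does not
 * fit in the buffer is assumed to come back as ENAMETOOLONG from
 * _bcopyinstr.
 */
#if 0   /* example only */
static int
example_fetch_path(user_addr_t upath, char *buf, vm_size_t buflen)
{
	vm_size_t len = 0;
	int err = copyinstr(upath, buf, buflen, &len);

	if (err != 0) {
		return err;     /* EFAULT or ENAMETOOLONG */
	}
	/* buf[0 .. len-1] now holds the string, NUL included. */
	return 0;
}
#endif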
int
copyout(const void *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
	int result;

	if (nbytes == 0) {
		return 0;
	}

	result = copy_validate(user_addr, (uintptr_t)kernel_addr, nbytes,
	    COPYIO_OUT | COPYIO_ALLOW_KERNEL_TO_KERNEL);
	if (result == EXDEV) {
		return copyout_kern(kernel_addr, user_addr, nbytes);
	}
	if (__improbable(result)) {
		return result;
	}
	user_access_enable();
	result = _bcopyout(kernel_addr, (char *)user_addr, nbytes);
	user_access_disable();
	return result;
}
int
copyoutstr_prevalidate(const void *__unused kaddr, user_addr_t __unused uaddr, size_t __unused len)
{
	if (__improbable(is_kernel_to_kernel_copy())) {
		return EFAULT;
	}

	return 0;
}