/*
 * Copyright (c) 2012-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <kern/thread.h>
#include <sys/errno.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <san/kasan.h>

#undef copyin
#undef copyout

extern int _bcopyin(const char *src, char *dst, vm_size_t len);
extern int _bcopyinstr(const char *src, char *dst, vm_size_t max, vm_size_t *actual);
extern int _bcopyout(const char *src, char *dst, vm_size_t len);
extern int _copyin_atomic32(const char *src, uint32_t *dst);
extern int _copyin_atomic32_wait_if_equals(const char *src, uint32_t dst);
extern int _copyin_atomic64(const char *src, uint64_t *dst);
extern int _copyout_atomic32(uint32_t u32, const char *dst);
extern int _copyout_atomic64(uint64_t u64, const char *dst);

extern int copyoutstr_prevalidate(const void *kaddr, user_addr_t uaddr, size_t len);

extern pmap_t kernel_pmap;

/* On by default, optionally disabled by boot-arg */
extern boolean_t copyio_zalloc_check;
/*!
 * @typedef copyio_flags_t
 *
 * @const COPYIO_IN
 * The copy is user -> kernel.
 * One of COPYIO_IN or COPYIO_OUT should always be specified.
 *
 * @const COPYIO_OUT
 * The copy is kernel -> user.
 * One of COPYIO_IN or COPYIO_OUT should always be specified.
 *
 * @const COPYIO_ALLOW_KERNEL_TO_KERNEL
 * The "user_address" is allowed to be in the VA space of the kernel.
 *
 * @const COPYIO_VALIDATE_USER_ONLY
 * There isn't really a kernel address used, and only the user address
 * needs to be validated.
 *
 * @const COPYIO_ATOMIC
 * The copyio operation is atomic; ensure that it is properly aligned.
 */
__options_decl(copyio_flags_t, uint32_t, {
        COPYIO_IN                     = 0x0001,
        COPYIO_OUT                    = 0x0002,
        COPYIO_ALLOW_KERNEL_TO_KERNEL = 0x0004,
        COPYIO_VALIDATE_USER_ONLY     = 0x0008,
        COPYIO_ATOMIC                 = 0x0010,
});

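/*
 * user_access_enable()/user_access_disable() bracket every user copy.
 * On CPUs with ARMv8.1 PAN (Privileged Access Never), the PAN bit traps
 * any privileged load/store to user-mapped addresses; clearing it opens
 * a short window in which the copy routines below may touch user memory,
 * and setting it again closes that window.
 */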
static inline void
user_access_enable(void)
{
#if __ARM_PAN_AVAILABLE__
        assert(__builtin_arm_rsr("pan") != 0);
        __builtin_arm_wsr("pan", 0);
#endif /* __ARM_PAN_AVAILABLE__ */
}

static inline void
user_access_disable(void)
{
#if __ARM_PAN_AVAILABLE__
        __builtin_arm_wsr("pan", 1);
#endif /* __ARM_PAN_AVAILABLE__ */
}

/*
 * Copy sizes bigger than this value will cause a kernel panic.
 *
 * Yes, this is an arbitrary fixed limit, but it's almost certainly
 * a programming error to be copying more than this amount between
 * user and wired kernel memory in a single invocation on this
 * platform.
 */
const int copysize_limit_panic = (64 * 1024 * 1024);

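/*
 * A thread whose map uses the kernel pmap has no user address space, so a
 * copyio issued by such a thread is really a kernel-to-kernel copy.
 */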
static inline bool
is_kernel_to_kernel_copy(void)
{
        return current_thread()->map->pmap == kernel_pmap;
}

/*
 * Validate the arguments to copy{in,out} on this platform.
 *
 * Returns EXDEV when the current thread's pmap is the kernel pmap,
 * which is non-fatal for certain routines.
 */
static int
copy_validate(const user_addr_t user_addr, uintptr_t kernel_addr,
    vm_size_t nbytes, copyio_flags_t flags)
{
        thread_t self = current_thread();

        user_addr_t user_addr_last;
        uintptr_t kernel_addr_last;

        if (__improbable(nbytes > copysize_limit_panic)) {
                panic("%s(%p, %p, %lu) - transfer too large", __func__,
                    (void *)user_addr, (void *)kernel_addr, nbytes);
        }

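        /*
         * The user range must lie inside the current thread's map and
         * must not wrap around the top of the address space.
         */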
        if (__improbable((user_addr < vm_map_min(self->map)) ||
            os_add_overflow(user_addr, nbytes, &user_addr_last) ||
            (user_addr_last > vm_map_max(self->map)))) {
                return EFAULT;
        }

        if (flags & COPYIO_ATOMIC) {
                if (__improbable(user_addr & (nbytes - 1))) {
                        return EINVAL;
                }
        }

        if ((flags & COPYIO_VALIDATE_USER_ONLY) == 0) {
                if (__improbable((kernel_addr < VM_MIN_KERNEL_ADDRESS) ||
                    os_add_overflow(kernel_addr, nbytes, &kernel_addr_last) ||
                    (kernel_addr_last > VM_MAX_KERNEL_ADDRESS))) {
                        panic("%s(%p, %p, %lu) - kaddr not in kernel", __func__,
                            (void *)user_addr, (void *)kernel_addr, nbytes);
                }
        }

        if (is_kernel_to_kernel_copy()) {
                if (__improbable((flags & COPYIO_ALLOW_KERNEL_TO_KERNEL) == 0)) {
                        return EFAULT;
                }
                return EXDEV;
        }

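        /*
         * Reject user pointers that carry a non-zero top byte: with ARM64
         * Top Byte Ignore (TBI) the hardware would silently strip the tag,
         * so a tagged address reaching this point is treated as a caller bug.
         */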
        if (__improbable(user_addr & TBI_MASK)) {
                return EINVAL;
        }

        if ((flags & COPYIO_VALIDATE_USER_ONLY) == 0) {
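                /*
                 * When the kernel buffer came from a zalloc zone, make sure
                 * the requested copy cannot run past the end of the zone
                 * element (a size of 0 means the address is not zone-backed).
                 */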
                if (__probable(copyio_zalloc_check)) {
                        vm_size_t kernel_buf_size = zone_element_size((void *)kernel_addr, NULL);
                        if (__improbable(kernel_buf_size && kernel_buf_size < nbytes)) {
                                panic("copyio_preflight: kernel buffer 0x%lx has size %lu < nbytes %lu",
                                    kernel_addr, kernel_buf_size, nbytes);
                        }
                }

#if KASAN
                /* For user copies, asan-check the kernel-side buffer */
                if (flags & COPYIO_IN) {
                        __asan_storeN(kernel_addr, nbytes);
                } else {
                        __asan_loadN(kernel_addr, nbytes);
                        kasan_check_uninitialized((vm_address_t)kernel_addr, nbytes);
                }
#endif
        }
        return 0;
}

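/*
 * copyin_kern()/copyout_kern() handle the EXDEV case from copy_validate():
 * when the current thread runs against the kernel pmap, the "user" address
 * is really a kernel address, so a plain bcopy() is both safe and sufficient.
 */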
int
copyin_kern(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
{
        bcopy((const char *)(uintptr_t)user_addr, kernel_addr, nbytes);

        return 0;
}

int
copyout_kern(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
        bcopy(kernel_addr, (char *)(uintptr_t)user_addr, nbytes);

        return 0;
}

int
copyin(const user_addr_t user_addr, void *kernel_addr, vm_size_t nbytes)
{
        int result;

        if (__improbable(nbytes == 0)) {
                return 0;
        }

        result = copy_validate(user_addr, (uintptr_t)kernel_addr, nbytes,
            COPYIO_IN | COPYIO_ALLOW_KERNEL_TO_KERNEL);
        if (result == EXDEV) {
                return copyin_kern(user_addr, kernel_addr, nbytes);
        }
        if (__improbable(result)) {
                return result;
        }

        user_access_enable();
        result = _bcopyin((const char *)user_addr, kernel_addr, nbytes);
        user_access_disable();
        return result;
}

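/*
 * Illustrative sketch (not part of this file): a typical syscall-style
 * caller copies a user buffer into a wired kernel buffer with copyin()
 * and checks the errno-style result. The struct and handler names below
 * are hypothetical.
 *
 *      struct example_args {
 *              user_addr_t buf;
 *              user_size_t len;
 *      };
 *
 *      static int
 *      example_handler(struct example_args *uap)
 *      {
 *              char kbuf[128];
 *
 *              if (uap->len > sizeof(kbuf)) {
 *                      return EINVAL;
 *              }
 *              // 0 on success, EFAULT on a bad user range
 *              return copyin(uap->buf, kbuf, uap->len);
 *      }
 */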
/*
 * copy{in,out}_atomic{32,64}
 * Read or store an aligned value from userspace as a single memory transaction.
 * These functions support userspace synchronization features.
 */
int
copyin_atomic32(const user_addr_t user_addr, uint32_t *kernel_addr)
{
        int result = copy_validate(user_addr, (uintptr_t)kernel_addr, 4,
            COPYIO_IN | COPYIO_ATOMIC);
        if (__improbable(result)) {
                return result;
        }
        user_access_enable();
        result = _copyin_atomic32((const char *)user_addr, kernel_addr);
        user_access_disable();
        return result;
}

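/*
 * copyin_atomic32_wait_if_equals() atomically loads a user 32-bit word and,
 * if it still holds the expected value, briefly waits for the location to
 * be written before returning; a mismatch returns a nonzero error instead.
 * (A summary of the assembly helper's contract, as used by the userspace
 * synchronization code mentioned above.)
 */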
int
copyin_atomic32_wait_if_equals(const user_addr_t user_addr, uint32_t value)
{
        int result = copy_validate(user_addr, 0, 4,
            COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
        if (__improbable(result)) {
                return result;
        }
        user_access_enable();
        result = _copyin_atomic32_wait_if_equals((const char *)user_addr, value);
        user_access_disable();
        return result;
}

int
copyin_atomic64(const user_addr_t user_addr, uint64_t *kernel_addr)
{
        int result = copy_validate(user_addr, (uintptr_t)kernel_addr, 8,
            COPYIO_IN | COPYIO_ATOMIC);
        if (__improbable(result)) {
                return result;
        }
        user_access_enable();
        result = _copyin_atomic64((const char *)user_addr, kernel_addr);
        user_access_disable();
        return result;
}

int
copyout_atomic32(uint32_t value, user_addr_t user_addr)
{
        int result = copy_validate(user_addr, 0, 4,
            COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
        if (__improbable(result)) {
                return result;
        }
        user_access_enable();
        result = _copyout_atomic32(value, (const char *)user_addr);
        user_access_disable();
        return result;
}

int
copyout_atomic64(uint64_t value, user_addr_t user_addr)
{
        int result = copy_validate(user_addr, 0, 8,
            COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
        if (__improbable(result)) {
                return result;
        }
        user_access_enable();
        result = _copyout_atomic64(value, (const char *)user_addr);
        user_access_disable();
        return result;
}

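/*
 * copyinstr() copies a NUL-terminated string from user space, limited to
 * nbytes bytes including the terminator. *lencopied reports the bytes
 * actually copied whenever the user range was readable, even when the
 * result is ENAMETOOLONG because no terminator was found within nbytes.
 */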
int
copyinstr(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes, vm_size_t *lencopied)
{
        int result;
        vm_size_t bytes_copied = 0;

        *lencopied = 0;
        if (__improbable(nbytes == 0)) {
                return ENAMETOOLONG;
        }

        result = copy_validate(user_addr, (uintptr_t)kernel_addr, nbytes, COPYIO_IN);
        if (__improbable(result)) {
                return result;
        }
        user_access_enable();
        result = _bcopyinstr((const char *)user_addr, kernel_addr, nbytes,
            &bytes_copied);
        user_access_disable();
        if (result != EFAULT) {
                *lencopied = bytes_copied;
        }
        return result;
}

int
copyout(const void *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
        int result;

        if (nbytes == 0) {
                return 0;
        }

        result = copy_validate(user_addr, (uintptr_t)kernel_addr, nbytes,
            COPYIO_OUT | COPYIO_ALLOW_KERNEL_TO_KERNEL);
        if (result == EXDEV) {
                return copyout_kern(kernel_addr, user_addr, nbytes);
        }
        if (__improbable(result)) {
                return result;
        }
        user_access_enable();
        result = _bcopyout(kernel_addr, (char *)user_addr, nbytes);
        user_access_disable();
        return result;
}

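/*
 * copyoutstr_prevalidate() rejects kernel-to-kernel "copy out a string"
 * attempts up front; unlike copyin()/copyout(), there is no EXDEV bcopy()
 * fallback for this operation.
 */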
int
copyoutstr_prevalidate(const void *__unused kaddr, user_addr_t __unused uaddr, size_t __unused len)
{
        if (__improbable(is_kernel_to_kernel_copy())) {
                return EFAULT;
        }

        return 0;
}