/*
 * Copyright (c) 2012-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <kern/thread.h>
#include <kern/zalloc_internal.h>
#include <sys/errno.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <san/kasan.h>

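/*
 * copyin/copyout may be shadowed by macros (e.g. the KASAN wrappers
 * pulled in via <san/kasan.h>); undefine them so this file defines
 * the real implementations.
 */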
#undef copyin
#undef copyout

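/*
 * The _bcopyin/_bcopyout/_bcopyinstr and _copy{in,out}_atomic{32,64}
 * primitives below are the low-level copy routines, implemented in
 * assembly elsewhere in xnu; they are expected to recover from faults
 * on the user address and return an error rather than panic.
 */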
extern int _bcopyin(const char *src, char *dst, vm_size_t len);
extern int _bcopyinstr(const char *src, char *dst, vm_size_t max, vm_size_t *actual);
extern int _bcopyout(const char *src, char *dst, vm_size_t len);
extern int _copyin_atomic32(const char *src, uint32_t *dst);
extern int _copyin_atomic32_wait_if_equals(const char *src, uint32_t dst);
extern int _copyin_atomic64(const char *src, uint64_t *dst);
extern int _copyout_atomic32(uint32_t u32, const char *dst);
extern int _copyout_atomic64(uint64_t u64, const char *dst);

extern int copyoutstr_prevalidate(const void *kaddr, user_addr_t uaddr, size_t len);

extern const vm_map_address_t physmap_base;
extern const vm_map_address_t physmap_end;

/*!
 * @typedef copyio_flags_t
 *
 * @const COPYIO_IN
 * The copy is user -> kernel.
 * One of COPYIO_IN or COPYIO_OUT should always be specified.
 *
 * @const COPYIO_OUT
 * The copy is kernel -> user.
 * One of COPYIO_IN or COPYIO_OUT should always be specified.
 *
 * @const COPYIO_ALLOW_KERNEL_TO_KERNEL
 * The "user_address" is allowed to be in the VA space of the kernel.
 *
 * @const COPYIO_VALIDATE_USER_ONLY
 * No kernel address is actually used; only the user address
 * needs to be validated.
 *
 * @const COPYIO_ATOMIC
 * The copyio operation is atomic; ensure that it is properly aligned.
 */
__options_decl(copyio_flags_t, uint32_t, {
	COPYIO_IN = 0x0001,
	COPYIO_OUT = 0x0002,
	COPYIO_ALLOW_KERNEL_TO_KERNEL = 0x0004,
	COPYIO_VALIDATE_USER_ONLY = 0x0008,
	COPYIO_ATOMIC = 0x0010,
});

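/*
 * With PAN (Privileged Access Never) available, user mappings are not
 * readable or writable from kernel mode by default; the PSTATE.PAN bit
 * is cleared for the duration of the copy and set again afterwards.
 */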
static inline void
user_access_enable(void)
{
#if __ARM_PAN_AVAILABLE__
	assert(__builtin_arm_rsr("pan") != 0);
	__builtin_arm_wsr("pan", 0);
#endif /* __ARM_PAN_AVAILABLE__ */
}

static inline void
user_access_disable(void)
{
#if __ARM_PAN_AVAILABLE__
	__builtin_arm_wsr("pan", 1);
#endif /* __ARM_PAN_AVAILABLE__ */
}

/*
 * Copy sizes bigger than this value will cause a kernel panic.
 *
 * Yes, this is an arbitrary fixed limit, but it's almost certainly
 * a programming error to be copying more than this amount between
 * user and wired kernel memory in a single invocation on this
 * platform.
 */
const int copysize_limit_panic = (64 * 1024 * 1024);

static inline bool
is_kernel_to_kernel_copy(void)
{
	return current_thread()->map->pmap == kernel_pmap;
}

/*
 * Validate the arguments to copy{in,out} on this platform.
 *
 * Returns EXDEV when the current thread's pmap is the kernel's,
 * which is non-fatal for certain routines.
 */
static int
copy_validate(const user_addr_t user_addr, uintptr_t kernel_addr,
    vm_size_t nbytes, copyio_flags_t flags)
{
	thread_t self = current_thread();

	user_addr_t user_addr_last;
	uintptr_t kernel_addr_last;

	if (__improbable(nbytes > copysize_limit_panic)) {
		panic("%s(%p, %p, %lu) - transfer too large", __func__,
		    (void *)user_addr, (void *)kernel_addr, nbytes);
	}

	if (__improbable((user_addr < vm_map_min(self->map)) ||
	    os_add_overflow(user_addr, nbytes, &user_addr_last) ||
	    (user_addr_last > vm_map_max(self->map)))) {
		return EFAULT;
	}

	if (flags & COPYIO_ATOMIC) {
		if (__improbable(user_addr & (nbytes - 1))) {
			return EINVAL;
		}
	}

	if ((flags & COPYIO_VALIDATE_USER_ONLY) == 0) {
		if (__improbable(os_add_overflow(kernel_addr, nbytes, &kernel_addr_last))) {
			panic("%s(%p, %p, %lu) - kaddr not in kernel", __func__,
			    (void *)user_addr, (void *)kernel_addr, nbytes);
		}

		bool in_kva = (kernel_addr >= VM_MIN_KERNEL_ADDRESS) && (kernel_addr_last <= VM_MAX_KERNEL_ADDRESS);
		bool in_physmap = (kernel_addr >= physmap_base) && (kernel_addr_last <= physmap_end);

		if (__improbable(!(in_kva || in_physmap))) {
			panic("%s(%p, %p, %lu) - kaddr not in kernel", __func__,
			    (void *)user_addr, (void *)kernel_addr, nbytes);
		}
	}

	if (is_kernel_to_kernel_copy()) {
		if (__improbable((flags & COPYIO_ALLOW_KERNEL_TO_KERNEL) == 0)) {
			return EFAULT;
		}
		return EXDEV;
	}

	if (__improbable(user_addr & TBI_MASK)) {
		return EINVAL;
	}

	if ((flags & COPYIO_VALIDATE_USER_ONLY) == 0) {
		if (__probable(!zalloc_disable_copyio_check)) {
			zone_t src_zone = NULL;
			vm_size_t kernel_buf_size = zone_element_size((void *)kernel_addr, &src_zone);
			/*
			 * Size of elements in the permanent zone is not saved as a part of the
			 * zone's info
			 */
			if (__improbable(src_zone && !src_zone->z_permanent &&
			    kernel_buf_size < nbytes)) {
				panic("copyio_preflight: kernel buffer 0x%lx has size %lu < nbytes %lu",
				    kernel_addr, kernel_buf_size, nbytes);
			}
		}

#if KASAN
		/* For user copies, asan-check the kernel-side buffer */
		if (flags & COPYIO_IN) {
			__asan_storeN(kernel_addr, nbytes);
		} else {
			__asan_loadN(kernel_addr, nbytes);
			kasan_check_uninitialized((vm_address_t)kernel_addr, nbytes);
		}
#endif
	}
	return 0;
}

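/*
 * copyin_kern/copyout_kern handle the kernel-to-kernel case (the
 * current thread's map is the kernel map), where a plain bcopy()
 * suffices; copy_validate() reports this case with EXDEV.
 */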
int
copyin_kern(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
{
	bcopy((const char *)(uintptr_t)user_addr, kernel_addr, nbytes);

	return 0;
}

int
copyout_kern(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
	bcopy(kernel_addr, (char *)(uintptr_t)user_addr, nbytes);

	return 0;
}

int
copyin(const user_addr_t user_addr, void *kernel_addr, vm_size_t nbytes)
{
	int result;

	if (__improbable(nbytes == 0)) {
		return 0;
	}

	result = copy_validate(user_addr, (uintptr_t)kernel_addr, nbytes,
	    COPYIO_IN | COPYIO_ALLOW_KERNEL_TO_KERNEL);
	if (result == EXDEV) {
		return copyin_kern(user_addr, kernel_addr, nbytes);
	}
	if (__improbable(result)) {
		return result;
	}

	user_access_enable();
	result = _bcopyin((const char *)user_addr, kernel_addr, nbytes);
	user_access_disable();
	return result;
}

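/*
 * Illustrative sketch (not part of this file): a typical caller copies
 * a user buffer into a bounded kernel buffer and propagates the error;
 * "uaddr" and "error" are hypothetical locals.
 *
 *	char kbuf[128];
 *	int error = copyin(uaddr, kbuf, sizeof(kbuf));
 *	if (error != 0) {
 *		return error;
 *	}
 */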
/*
 * copy{in,out}_atomic{32,64}
 * Read or store an aligned value from userspace as a single memory transaction.
 * These functions support userspace synchronization features.
 */
int
copyin_atomic32(const user_addr_t user_addr, uint32_t *kernel_addr)
{
	int result = copy_validate(user_addr, (uintptr_t)kernel_addr, 4,
	    COPYIO_IN | COPYIO_ATOMIC);
	if (__improbable(result)) {
		return result;
	}
	user_access_enable();
	result = _copyin_atomic32((const char *)user_addr, kernel_addr);
	user_access_disable();
	return result;
}

int
copyin_atomic32_wait_if_equals(const user_addr_t user_addr, uint32_t value)
{
	int result = copy_validate(user_addr, 0, 4,
	    COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
	if (__improbable(result)) {
		return result;
	}
	user_access_enable();
	result = _copyin_atomic32_wait_if_equals((const char *)user_addr, value);
	user_access_disable();
	return result;
}

int
copyin_atomic64(const user_addr_t user_addr, uint64_t *kernel_addr)
{
	int result = copy_validate(user_addr, (uintptr_t)kernel_addr, 8,
	    COPYIO_IN | COPYIO_ATOMIC);
	if (__improbable(result)) {
		return result;
	}
	user_access_enable();
	result = _copyin_atomic64((const char *)user_addr, kernel_addr);
	user_access_disable();
	return result;
}

int
copyout_atomic32(uint32_t value, user_addr_t user_addr)
{
	int result = copy_validate(user_addr, 0, 4,
	    COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
	if (__improbable(result)) {
		return result;
	}
	user_access_enable();
	result = _copyout_atomic32(value, (const char *)user_addr);
	user_access_disable();
	return result;
}

int
copyout_atomic64(uint64_t value, user_addr_t user_addr)
{
	int result = copy_validate(user_addr, 0, 8,
	    COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
	if (__improbable(result)) {
		return result;
	}
	user_access_enable();
	result = _copyout_atomic64(value, (const char *)user_addr);
	user_access_disable();
	return result;
}

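/*
 * Illustrative sketch (not part of this file): a futex-style wait
 * might read the user word atomically and only block while it still
 * holds the expected value; "uaddr" and "expected" are hypothetical.
 *
 *	uint32_t cur;
 *	if (copyin_atomic32(uaddr, &cur) == 0 && cur == expected) {
 *		(void)copyin_atomic32_wait_if_equals(uaddr, expected);
 *	}
 */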
/*
 * Copy a NUL-terminated string from user space into kernel_addr,
 * copying at most nbytes bytes (including the terminating NUL);
 * on success *lencopied holds the number of bytes copied.
 */
int
copyinstr(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes, vm_size_t *lencopied)
{
	int result;
	vm_size_t bytes_copied = 0;

	*lencopied = 0;
	if (__improbable(nbytes == 0)) {
		return ENAMETOOLONG;
	}

	result = copy_validate(user_addr, (uintptr_t)kernel_addr, nbytes, COPYIO_IN);
	if (__improbable(result)) {
		return result;
	}
	user_access_enable();
	result = _bcopyinstr((const char *)user_addr, kernel_addr, nbytes,
	    &bytes_copied);
	user_access_disable();
	if (result != EFAULT) {
		*lencopied = bytes_copied;
	}
	return result;
}

int
copyout(const void *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
	int result;

	if (nbytes == 0) {
		return 0;
	}

	result = copy_validate(user_addr, (uintptr_t)kernel_addr, nbytes,
	    COPYIO_OUT | COPYIO_ALLOW_KERNEL_TO_KERNEL);
	if (result == EXDEV) {
		return copyout_kern(kernel_addr, user_addr, nbytes);
	}
	if (__improbable(result)) {
		return result;
	}
	user_access_enable();
	result = _bcopyout(kernel_addr, (char *)user_addr, nbytes);
	user_access_disable();
	return result;
}

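/*
 * Presumably invoked by the machine-independent copyoutstr() path
 * before the actual copy; on this platform only the kernel-to-kernel
 * case needs to be rejected up front.
 */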
int
copyoutstr_prevalidate(const void *__unused kaddr, user_addr_t __unused uaddr, size_t __unused len)
{
	if (__improbable(is_kernel_to_kernel_copy())) {
		return EFAULT;
	}

	return 0;
}