/*
 * Copyright (c) 2009-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_assert.h>

#include <sys/errno.h>
#include <i386/param.h>
#include <i386/misc_protos.h>
#include <i386/cpu_data.h>
#include <i386/machine_routines.h>
#include <i386/cpuid.h>
#include <i386/vmx.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>
#include <san/kasan.h>

#include <sys/kdebug.h>

#include <kern/copyout_shim.h>



static int copyio(int, user_addr_t, char *, vm_size_t, vm_size_t *, int);
static int copyio_phys(addr64_t, addr64_t, vm_size_t, int);

/*
 * Copy sizes bigger than this value will cause a kernel panic.
 *
 * Yes, this is an arbitrary fixed limit, but it's almost certainly
 * a programming error to be copying more than this amount between
 * user and wired kernel memory in a single invocation on this
 * platform.
 */
const int copysize_limit_panic = (64 * MB);

/*
 * The copy engine has the following characteristics
 *   - copyio() handles copies to/from user or kernel space
 *   - copypv() deals with physical or virtual addresses
 *
 * Readers familiar with the 32-bit kernel will expect Joe's thesis at this
 * point describing the full glory of the copy window implementation. In K64,
 * however, there is no need for windowing. Thanks to the vast shared address
 * space, the kernel has direct access to userspace and to physical memory.
 *
 * User virtual addresses are accessible provided the user's cr3 is loaded.
 * Physical addresses are accessible via the direct map and the PHYSMAP_PTOV()
 * translation.
 *
 * Copyin/out variants all boil down to these routines in locore.s, which
 * provide fault-recoverable copying:
 */
extern int _bcopy(const void *, void *, vm_size_t);
extern int _bcopystr(const void *, void *, vm_size_t, vm_size_t *);
extern int _copyin_word(const char *src, uint64_t *dst, vm_size_t len);

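/*
 * Illustrative sketch (editorial addition, not original source): the
 * fault-recovery contract these routines provide. `uaddr` and
 * `kernel_buf` are hypothetical names; the nonzero error value mirrors
 * how copyio() below propagates the result.
 *
 *	int err = _bcopy((const void *) uaddr, kernel_buf, nbytes);
 *	if (err) {
 *		// a fault on the user address was caught by the recovery
 *		// handler and surfaced as an error (typically EFAULT)
 *		// instead of panicking the kernel
 *	}
 */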

/*
 * Types of copies:
 */
#define COPYIN		0	/* from user virtual to kernel virtual */
#define COPYOUT		1	/* from kernel virtual to user virtual */
#define COPYINSTR	2	/* string variant of copyin */
#define COPYINPHYS	3	/* from user virtual to kernel physical */
#define COPYOUTPHYS	4	/* from kernel physical to user virtual */
#define COPYINWORD	5	/* single word from user virtual to kernel virtual */

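/*
 * Editorial note (not original source): each copy type above is reached
 * through one of the entry points defined later in this file, e.g.:
 *
 *	copyin()/copyinmsg()	-> COPYIN
 *	copyout()/copyoutmsg()	-> COPYOUT
 *	copyinstr()		-> COPYINSTR
 *	copyin_word()		-> COPYINWORD
 *	copyio_phys()/copypv()	-> COPYINPHYS or COPYOUTPHYS
 */
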
#if ENABLE_SMAPLOG
typedef struct {
	uint64_t	timestamp;
	thread_t	thread;
	uintptr_t	cr4;
	uint8_t		cpuid;
	uint8_t		smap_state;
	uint8_t		copyio_active;
} smaplog_entry_t;

#define SMAPLOG_BUFFER_SIZE	(50)
static smaplog_entry_t	smaplog_cbuf[SMAPLOG_BUFFER_SIZE];
static uint32_t		smaplog_head = 0;

static void
smaplog_add_entry(boolean_t enabling)
{
	uint32_t index = 0;
	thread_t thread = current_thread();

	do {
		index = smaplog_head;
	} while (!OSCompareAndSwap(index, (index + 1) % SMAPLOG_BUFFER_SIZE, &smaplog_head));

	assert(index < SMAPLOG_BUFFER_SIZE);
	assert(smaplog_head < SMAPLOG_BUFFER_SIZE);
	assert(thread);

	smaplog_cbuf[index].timestamp = mach_absolute_time();
	smaplog_cbuf[index].thread = thread;
	smaplog_cbuf[index].cpuid = cpu_number();
	smaplog_cbuf[index].cr4 = get_cr4();
	smaplog_cbuf[index].smap_state = enabling;
	smaplog_cbuf[index].copyio_active = (thread->machine.specFlags & CopyIOActive) ? 1 : 0;
}
#endif /* ENABLE_SMAPLOG */

extern boolean_t pmap_smap_enabled;
static inline void user_access_enable(void) {
	if (pmap_smap_enabled) {
		stac();
#if ENABLE_SMAPLOG
		smaplog_add_entry(TRUE);
#endif
	}
}
static inline void user_access_disable(void) {
	if (pmap_smap_enabled) {
		clac();
#if ENABLE_SMAPLOG
		smaplog_add_entry(FALSE);
#endif
	}
}
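
/*
 * Illustrative sketch (editorial addition, not original source): with SMAP
 * enabled, direct access to user memory must be bracketed by these helpers,
 * exactly as copyio() does below. `uaddr` and `kernel_buf` are hypothetical
 * names for an already-validated user address and a kernel buffer.
 *
 *	user_access_enable();		// STAC: permit supervisor access
 *	error = _bcopy((const void *) uaddr, kernel_buf, nbytes);
 *	user_access_disable();		// CLAC: re-arm SMAP protection
 */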

#if COPYIO_TRACE_ENABLED
#define COPYIO_TRACE(x, a, b, c, d, e) KERNEL_DEBUG_CONSTANT(x, a, b, c, d, e)
#else
#define COPYIO_TRACE(x, a, b, c, d, e) do { } while(0)
#endif

static int
copyio(int copy_type, user_addr_t user_addr, char *kernel_addr,
       vm_size_t nbytes, vm_size_t *lencopied, int use_kernel_map)
{
	thread_t	thread = current_thread();
	pmap_t		pmap;
	vm_size_t	bytes_copied;
	int		error = 0;
	boolean_t	istate = FALSE;
	boolean_t	recursive_CopyIOActive;
#if COPYIO_TRACE_ENABLED
	int		debug_type = 0xeff70010;
	debug_type += (copy_type << 2);
#endif

	if (__improbable(nbytes > copysize_limit_panic))
		panic("%s(%p, %p, %lu) - transfer too large", __func__,
		    (void *)user_addr, (void *)kernel_addr, nbytes);

	COPYIO_TRACE(debug_type | DBG_FUNC_START,
	    user_addr, kernel_addr, nbytes, use_kernel_map, 0);

	if (__improbable(nbytes == 0))
		goto out;

	pmap = thread->map->pmap;
	boolean_t nopagezero = pmap->pagezero_accessible;

	if (__improbable((copy_type != COPYINPHYS) && (copy_type != COPYOUTPHYS) && ((vm_offset_t)kernel_addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS))) {
		panic("Invalid copy parameter, copy type: %d, kernel address: %p", copy_type, kernel_addr);
	}

	/* Sanity and security check for addresses to/from a user */

	if (__improbable(((pmap != kernel_pmap) && (use_kernel_map == 0)) &&
	    ((nbytes && (user_addr + nbytes <= user_addr)) || ((user_addr + nbytes) > vm_map_max(thread->map))))) {
		error = EFAULT;
		goto out;
	}

#if KASAN
	if (copy_type == COPYIN || copy_type == COPYINSTR || copy_type == COPYINWORD) {
		__asan_storeN((uptr)kernel_addr, nbytes);
	} else if (copy_type == COPYOUT) {
		__asan_loadN((uptr)kernel_addr, nbytes);
	}
#endif

	/*
	 * If the no_shared_cr3 boot-arg is set (true), the kernel runs on
	 * its own pmap and cr3 rather than the user's -- so that wild accesses
	 * from kernel or kexts can be trapped. So, during copyin and copyout,
	 * we need to switch back to the user's map/cr3. The thread is flagged
	 * "CopyIOActive" at this time so that if the thread is pre-empted,
	 * we will later restore the correct cr3.
	 */
	recursive_CopyIOActive = thread->machine.specFlags & CopyIOActive;

	boolean_t pdswitch = no_shared_cr3 || nopagezero;

	if (__improbable(pdswitch)) {
		istate = ml_set_interrupts_enabled(FALSE);
		if (nopagezero && pmap_pcid_ncpus) {
			pmap_pcid_activate(pmap, cpu_number(), TRUE, TRUE);
		} else if (get_cr3_base() != pmap->pm_cr3) {
			set_cr3_raw(pmap->pm_cr3);
		}
		thread->machine.specFlags |= CopyIOActive;
	} else {
		thread->machine.specFlags |= CopyIOActive;
	}

	user_access_enable();

#if DEVELOPMENT || DEBUG
	/*
	 * Ensure that we're running on the target thread's cr3.
	 */
	if ((pmap != kernel_pmap) && !use_kernel_map &&
	    (get_cr3_base() != pmap->pm_cr3)) {
		panic("copyio(%d,%p,%p,%ld,%p,%d) cr3 is %p expects %p",
		    copy_type, (void *)user_addr, kernel_addr, nbytes, lencopied, use_kernel_map,
		    (void *) get_cr3_raw(), (void *) pmap->pm_cr3);
	}
#endif

	if (__improbable(pdswitch)) {
		(void) ml_set_interrupts_enabled(istate);
	}

	COPYIO_TRACE(0xeff70044 | DBG_FUNC_NONE, user_addr,
	    kernel_addr, nbytes, 0, 0);

	switch (copy_type) {

	case COPYIN:
		error = _bcopy((const void *) user_addr,
		    kernel_addr,
		    nbytes);
		break;

	case COPYOUT:
		error = _bcopy(kernel_addr,
		    (void *) user_addr,
		    nbytes);
		break;

	case COPYINPHYS:
		error = _bcopy((const void *) user_addr,
		    PHYSMAP_PTOV(kernel_addr),
		    nbytes);
		break;

	case COPYOUTPHYS:
		error = _bcopy((const void *) PHYSMAP_PTOV(kernel_addr),
		    (void *) user_addr,
		    nbytes);
		break;

	case COPYINWORD:
		error = _copyin_word((const void *) user_addr,
		    (void *) kernel_addr,
		    nbytes);
		break;

	case COPYINSTR:
		error = _bcopystr((const void *) user_addr,
		    kernel_addr,
		    (int) nbytes,
		    &bytes_copied);

		/*
		 * lencopied should be updated on success
		 * or ENAMETOOLONG... but not EFAULT
		 */
		if (error != EFAULT)
			*lencopied = bytes_copied;

		if (error) {
#if KDEBUG
			nbytes = *lencopied;
#endif
			break;
		}
		if (*(kernel_addr + bytes_copied - 1) == 0) {
			/*
			 * we found a NULL terminator... we're done
			 */
#if KDEBUG
			nbytes = *lencopied;
#endif
			break;
		} else {
			/*
			 * no more room in the buffer and we haven't
			 * yet come across a NULL terminator
			 */
#if KDEBUG
			nbytes = *lencopied;
#endif
			error = ENAMETOOLONG;
			break;
		}
	}

	user_access_disable();

	if (__improbable(pdswitch)) {
		istate = ml_set_interrupts_enabled(FALSE);
		if (!recursive_CopyIOActive && (get_cr3_raw() != kernel_pmap->pm_cr3)) {
			if (nopagezero && pmap_pcid_ncpus) {
				pmap_pcid_activate(pmap, cpu_number(), TRUE, FALSE);
			} else {
				set_cr3_raw(kernel_pmap->pm_cr3);
			}
		}

		if (!recursive_CopyIOActive) {
			thread->machine.specFlags &= ~CopyIOActive;
		}
		(void) ml_set_interrupts_enabled(istate);
	} else if (!recursive_CopyIOActive) {
		thread->machine.specFlags &= ~CopyIOActive;
	}

out:
	COPYIO_TRACE(debug_type | DBG_FUNC_END, user_addr, kernel_addr, nbytes, error, 0);

	return (error);
}


static int
copyio_phys(addr64_t source, addr64_t sink, vm_size_t csize, int which)
{
	char		*paddr;
	user_addr_t	vaddr;
	int		ctype;

	if (which & cppvPsnk) {
		paddr = (char *)sink;
		vaddr = (user_addr_t)source;
		ctype = COPYINPHYS;
	} else {
		paddr = (char *)source;
		vaddr = (user_addr_t)sink;
		ctype = COPYOUTPHYS;
		CALL_COPYOUT_SHIM_PHYS((void *)PHYSMAP_PTOV(source), sink, csize)
	}
	return copyio(ctype, vaddr, paddr, csize, NULL, which & cppvKmap);
}

int
copyinmsg(const user_addr_t user_addr, char *kernel_addr, mach_msg_size_t nbytes)
{
	return copyio(COPYIN, user_addr, kernel_addr, nbytes, NULL, 0);
}

int
copyin(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
{
	return copyio(COPYIN, user_addr, kernel_addr, nbytes, NULL, 0);
}
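
/*
 * Illustrative sketch (editorial addition, not original source): a typical
 * copyin()/copyout() round trip. The struct, `uaddr`, and `args` are
 * hypothetical; the EFAULT handling reflects the error copyio() returns
 * for an unreadable or out-of-range user address.
 *
 *	struct my_args args;
 *	if (copyin(uaddr, (char *)&args, sizeof(args)) != 0)
 *		return EFAULT;		// bad user address
 *	args.result = 1;
 *	if (copyout(&args, uaddr, sizeof(args)) != 0)
 *		return EFAULT;
 */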

/*
 * copyin_word
 * Read an aligned value from userspace as a single memory transaction.
 * This function supports userspace synchronization features.
 */
int
copyin_word(const user_addr_t user_addr, uint64_t *kernel_addr, vm_size_t nbytes)
{
	/* Verify sizes */
	if ((nbytes != 4) && (nbytes != 8))
		return EINVAL;

	/* Test alignment */
	if (user_addr & (nbytes - 1))
		return EINVAL;
	return copyio(COPYINWORD, user_addr, (char *)(uintptr_t)kernel_addr, nbytes, NULL, 0);
}
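
/*
 * Illustrative sketch (editorial addition, not original source): reading a
 * user-supplied 64-bit synchronization word in one transaction. `uaddr` is
 * a hypothetical user address; it must be 8-byte aligned, otherwise the
 * call returns EINVAL per the checks above.
 *
 *	uint64_t value;
 *	int err = copyin_word(uaddr, &value, sizeof(value));
 *	if (err == EINVAL) {
 *		// size other than 4/8 bytes, or misaligned user address
 *	} else if (err == EFAULT) {
 *		// unreadable or out-of-range user address
 *	}
 */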

int
copyinstr(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes, vm_size_t *lencopied)
{
	*lencopied = 0;

	return copyio(COPYINSTR, user_addr, kernel_addr, nbytes, lencopied, 0);
}
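
/*
 * Illustrative sketch (editorial addition, not original source): copying a
 * NUL-terminated pathname from userspace. `upath` is hypothetical; the
 * ENAMETOOLONG case matches copyio()'s COPYINSTR handling when no
 * terminator is found within the supplied buffer.
 *
 *	char path[MAXPATHLEN];
 *	vm_size_t copied;
 *	int err = copyinstr(upath, path, sizeof(path), &copied);
 *	if (err == ENAMETOOLONG) {
 *		// string did not fit; `copied` holds the bytes transferred
 *	} else if (err == 0) {
 *		// success; `copied` includes the terminating NUL
 *	}
 */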

int
copyoutmsg(const char *kernel_addr, user_addr_t user_addr, mach_msg_size_t nbytes)
{
	CALL_COPYOUT_SHIM_MSG(kernel_addr, user_addr, (vm_size_t)nbytes)
	return copyio(COPYOUT, user_addr, (char *)(uintptr_t)kernel_addr, nbytes, NULL, 0);
}

int
copyout(const void *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
	CALL_COPYOUT_SHIM_NRML(kernel_addr, user_addr, nbytes)
	return copyio(COPYOUT, user_addr, (char *)(uintptr_t)kernel_addr, nbytes, NULL, 0);
}


kern_return_t
copypv(addr64_t src64, addr64_t snk64, unsigned int size, int which)
{
	unsigned int lop, csize;
	int bothphys = 0;

	KERNEL_DEBUG(0xeff7004c | DBG_FUNC_START, (unsigned)src64,
	    (unsigned)snk64, size, which, 0);

	if ((which & (cppvPsrc | cppvPsnk)) == 0)	/* Make sure that only one is virtual */
		panic("copypv: no more than 1 parameter may be virtual\n");	/* Not allowed */

	if ((which & (cppvPsrc | cppvPsnk)) == (cppvPsrc | cppvPsnk))
		bothphys = 1;				/* both are physical */

	while (size) {

		if (bothphys) {
			lop = (unsigned int)(PAGE_SIZE - (snk64 & (PAGE_SIZE - 1)));		/* Assume sink smallest */

			if (lop > (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1))))
				lop = (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1)));	/* No, source is smaller */
		} else {
			/*
			 * only need to compute the resid for the physical page
			 * address... we don't care about where we start/finish in
			 * the virtual since we just call the normal copyin/copyout
			 */
			if (which & cppvPsrc)
				lop = (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1)));
			else
				lop = (unsigned int)(PAGE_SIZE - (snk64 & (PAGE_SIZE - 1)));
		}
		csize = size;			/* Assume we can copy it all */
		if (lop < size)
			csize = lop;		/* Nope, we can't do it all */
#if 0
		/*
		 * flush_dcache64 is currently a nop on the i386...
		 * it's used when copying to non-system memory such
		 * as video capture cards... on PPC there was a need
		 * to flush due to how we mapped this memory... not
		 * sure if it's needed on i386.
		 */
		if (which & cppvFsrc)
			flush_dcache64(src64, csize, 1);	/* If requested, flush source before move */
		if (which & cppvFsnk)
			flush_dcache64(snk64, csize, 1);	/* If requested, flush sink before move */
#endif
		if (bothphys)
			bcopy_phys(src64, snk64, csize);	/* Do a physical copy, virtually */
		else {
			if (copyio_phys(src64, snk64, csize, which))
				return (KERN_FAILURE);
		}
#if 0
		if (which & cppvFsrc)
			flush_dcache64(src64, csize, 1);	/* If requested, flush source after move */
		if (which & cppvFsnk)
			flush_dcache64(snk64, csize, 1);	/* If requested, flush sink after move */
#endif
		size -= csize;			/* Calculate what is left */
		snk64 += csize;			/* Bump sink to next physical address */
		src64 += csize;			/* Bump source to next physical address */
	}
	KERNEL_DEBUG(0xeff7004c | DBG_FUNC_END, (unsigned)src64,
	    (unsigned)snk64, size, which, 0);

	return KERN_SUCCESS;
}
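
/*
 * Illustrative sketch (editorial addition, not original source; parameter
 * semantics inferred from copyio_phys() above): copying user data into a
 * known physical page. `user_src` and `dst_phys` are hypothetical, and
 * cppvPsnk marks the sink as the physical side of the transfer.
 *
 *	if (copypv((addr64_t)user_src, dst_phys, nbytes, cppvPsnk) != KERN_SUCCESS) {
 *		// the underlying copyio() reported a fault
 *	}
 */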