/*
 * Source: osfmk/x86_64/copyio.c from Apple XNU (xnu-3247.1.106),
 * as mirrored in apple/xnu.git at git.saurik.com.
 */
1 /*
2 * Copyright (c) 2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <mach_assert.h>
29
30 #include <sys/errno.h>
31 #include <i386/param.h>
32 #include <i386/misc_protos.h>
33 #include <i386/cpu_data.h>
34 #include <i386/machine_routines.h>
35 #include <i386/cpuid.h>
36 #include <i386/vmx.h>
37 #include <vm/pmap.h>
38 #include <vm/vm_map.h>
39 #include <vm/vm_kern.h>
40 #include <vm/vm_fault.h>
41
42 #include <sys/kdebug.h>
43
44 static int copyio(int, user_addr_t, char *, vm_size_t, vm_size_t *, int);
45 static int copyio_phys(addr64_t, addr64_t, vm_size_t, int);
46
47 /*
48 * Copy sizes bigger than this value will cause a kernel panic.
49 *
50 * Yes, this is an arbitrary fixed limit, but it's almost certainly
51 * a programming error to be copying more than this amount between
52 * user and wired kernel memory in a single invocation on this
53 * platform.
54 */
55 #define COPYSIZELIMIT_PANIC (64*MB)
56
57 /*
58 * The copy engine has the following characteristics
59 * - copyio() handles copies to/from user or kernel space
60 * - copypv() deals with physical or virtual addresses
61 *
62 * Readers familiar with the 32-bit kernel will expect Joe's thesis at this
63 * point describing the full glory of the copy window implementation. In K64,
64 * however, there is no need for windowing. Thanks to the vast shared address
65 * space, the kernel has direct access to userspace and to physical memory.
66 *
67 * User virtual addresses are accessible provided the user's cr3 is loaded.
68 * Physical addresses are accessible via the direct map and the PHYSMAP_PTOV()
69 * translation.
70 *
71 * Copyin/out variants all boil done to just these 2 routines in locore.s which
72 * provide fault-recoverable copying:
73 */
74 extern int _bcopy(const void *, void *, vm_size_t);
75 extern int _bcopystr(const void *, void *, vm_size_t, vm_size_t *);
76
77
78 /*
79 * Types of copies:
80 */
81 #define COPYIN 0 /* from user virtual to kernel virtual */
82 #define COPYOUT 1 /* from kernel virtual to user virtual */
83 #define COPYINSTR 2 /* string variant of copyout */
84 #define COPYINPHYS 3 /* from user virtual to kernel physical */
85 #define COPYOUTPHYS 4 /* from kernel physical to user virtual */
86
87 #if DEVELOPMENT
88 typedef struct {
89 uint64_t timestamp;
90 thread_t thread;
91 uintptr_t cr4;
92 uint8_t cpuid;
93 uint8_t smap_state;
94 uint8_t copyio_active;
95 } smaplog_entry_t;
96
97 #define SMAPLOG_BUFFER_SIZE (50)
98 static smaplog_entry_t smaplog_cbuf[SMAPLOG_BUFFER_SIZE];
99 static uint32_t smaplog_head = 0;
100
101 static void
102 smaplog_add_entry(boolean_t enabling)
103 {
104 uint32_t index = 0;
105 thread_t thread = current_thread();
106
107 do {
108 index = smaplog_head;
109 } while (!OSCompareAndSwap(index, (index + 1) % SMAPLOG_BUFFER_SIZE, &smaplog_head));
110
111 assert(index < SMAPLOG_BUFFER_SIZE);
112 assert(smaplog_head < SMAPLOG_BUFFER_SIZE);
113 assert(thread);
114
115 smaplog_cbuf[index].timestamp = mach_absolute_time();
116 smaplog_cbuf[index].thread = thread;
117 smaplog_cbuf[index].cpuid = cpu_number();
118 smaplog_cbuf[index].cr4 = get_cr4();
119 smaplog_cbuf[index].smap_state = enabling;
120 smaplog_cbuf[index].copyio_active = (thread->machine.specFlags & CopyIOActive) ? 1 : 0;
121 }
122 #endif /* DEVELOPMENT */
123
124 extern boolean_t pmap_smap_enabled;
125 static inline void user_access_enable(void) {
126 if (pmap_smap_enabled) {
127 stac();
128 #if DEVELOPMENT
129 smaplog_add_entry(TRUE);
130 #endif
131 }
132 }
133 static inline void user_access_disable(void) {
134 if (pmap_smap_enabled) {
135 clac();
136 #if DEVELOPMENT
137 smaplog_add_entry(FALSE);
138 #endif
139 }
140 }
141
/*
 * copyio: common back end for every copyin/copyout variant.
 *
 *	copy_type	one of COPYIN/COPYOUT/COPYINSTR/COPYINPHYS/COPYOUTPHYS
 *	user_addr	user-space virtual address (source or destination)
 *	kernel_addr	kernel virtual address, or a *physical* address for
 *			the COPYINPHYS/COPYOUTPHYS types (translated below
 *			through PHYSMAP_PTOV)
 *	nbytes		byte count; must be below COPYSIZELIMIT_PANIC
 *	lencopied	out-param, COPYINSTR only: bytes actually copied
 *	use_kernel_map	nonzero => treat user_addr as a kernel-map address
 *			and skip the user range checks
 *
 * Returns 0 on success, EFAULT on a fault in either address, or
 * ENAMETOOLONG when COPYINSTR fills the buffer without finding a NUL.
 */
static int
copyio(int copy_type, user_addr_t user_addr, char *kernel_addr,
	   vm_size_t nbytes, vm_size_t *lencopied, int use_kernel_map)
{
	thread_t thread;
	pmap_t pmap;
	vm_size_t bytes_copied;
	int error = 0;
	boolean_t istate = FALSE;
	boolean_t recursive_CopyIOActive;
#if KDEBUG
	/* Trace code base 0xeff70010; low bits encode the copy type. */
	int debug_type = 0xeff70010;
	debug_type += (copy_type << 2);
#endif

	assert(nbytes < COPYSIZELIMIT_PANIC);

	thread = current_thread();

	/* NOTE(review): debug_type only exists under KDEBUG; presumably
	 * KERNEL_DEBUG expands to nothing otherwise — confirm in kdebug.h. */
	KERNEL_DEBUG(debug_type | DBG_FUNC_START,
		(unsigned)(user_addr >> 32), (unsigned)user_addr,
		nbytes, thread->machine.copyio_state, 0);

	if (nbytes == 0)
		goto out;

	pmap = thread->map->pmap;

	/* For virtual-to-virtual copies, the kernel-side buffer must lie in
	 * kernel/kext space; anything lower is a corrupted argument. */
	if ((copy_type != COPYINPHYS) && (copy_type != COPYOUTPHYS) && ((vm_offset_t)kernel_addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS)) {
		panic("Invalid copy parameter, copy type: %d, kernel address: %p", copy_type, kernel_addr);
	}

	/* Sanity and security check for addresses to/from a user */

	/* Reject a user range that wraps (user_addr + nbytes overflows) or
	 * extends past the top of the user's map. */
	if (((pmap != kernel_pmap) && (use_kernel_map == 0)) &&
	    ((nbytes && (user_addr+nbytes <= user_addr)) || ((user_addr + nbytes) > vm_map_max(thread->map)))) {
		error = EFAULT;
		goto out;
	}

	/*
	 * If the no_shared_cr3 boot-arg is set (true), the kernel runs on
	 * its own pmap and cr3 rather than the user's -- so that wild accesses
	 * from kernel or kexts can be trapped. So, during copyin and copyout,
	 * we need to switch back to the user's map/cr3. The thread is flagged
	 * "CopyIOActive" at this time so that if the thread is pre-empted,
	 * we will later restore the correct cr3.
	 */
	recursive_CopyIOActive = thread->machine.specFlags & CopyIOActive;
	thread->machine.specFlags |= CopyIOActive;
	user_access_enable();		/* lift SMAP for the duration of the copy */
	if (no_shared_cr3) {
		istate = ml_set_interrupts_enabled(FALSE);
		if (get_cr3_base() != pmap->pm_cr3)
			set_cr3_raw(pmap->pm_cr3);
	}

	/*
	 * Ensure that we're running on the target thread's cr3.
	 */
	if ((pmap != kernel_pmap) && !use_kernel_map &&
	    (get_cr3_base() != pmap->pm_cr3)) {
		panic("copyio(%d,%p,%p,%ld,%p,%d) cr3 is %p expects %p",
			copy_type, (void *)user_addr, kernel_addr, nbytes, lencopied, use_kernel_map,
			(void *) get_cr3_raw(), (void *) pmap->pm_cr3);
	}
	if (no_shared_cr3)
		(void) ml_set_interrupts_enabled(istate);

	KERNEL_DEBUG(0xeff70044 | DBG_FUNC_NONE, (unsigned)user_addr,
		(unsigned)kernel_addr, nbytes, 0, 0);

	/* _bcopy/_bcopystr (locore.s) are fault-recoverable: they return
	 * nonzero if a recoverable fault occurs mid-copy. */
	switch (copy_type) {

	case COPYIN:
		error = _bcopy((const void *) user_addr,
				kernel_addr,
				nbytes);
		break;

	case COPYOUT:
		error = _bcopy(kernel_addr,
				(void *) user_addr,
				nbytes);
		break;

	case COPYINPHYS:
		/* kernel_addr is physical here: map it through the physmap */
		error = _bcopy((const void *) user_addr,
				PHYSMAP_PTOV(kernel_addr),
				nbytes);
		break;

	case COPYOUTPHYS:
		error = _bcopy((const void *) PHYSMAP_PTOV(kernel_addr),
				(void *) user_addr,
				nbytes);
		break;

	case COPYINSTR:
		error = _bcopystr((const void *) user_addr,
				kernel_addr,
				(int) nbytes,
				&bytes_copied);

		/*
		 * lencopied should be updated on success
		 * or ENAMETOOLONG... but not EFAULT
		 */
		if (error != EFAULT)
			*lencopied = bytes_copied;

		if (error) {
#if KDEBUG
			nbytes = *lencopied;
#endif
			break;
		}
		if (*(kernel_addr + bytes_copied - 1) == 0) {
			/*
			 * we found a NULL terminator... we're done
			 */
#if KDEBUG
			nbytes = *lencopied;
#endif
			break;
		} else {
			/*
			 * no more room in the buffer and we haven't
			 * yet come across a NULL terminator
			 */
#if KDEBUG
			nbytes = *lencopied;
#endif
			error = ENAMETOOLONG;
			break;
		}
		break;		/* unreachable: every branch above breaks */
	}

	user_access_disable();	/* re-arm SMAP */
	/* Only clear CopyIOActive if this invocation set it; a nested
	 * copyio() must leave the outer caller's flag intact. */
	if (!recursive_CopyIOActive) {
		thread->machine.specFlags &= ~CopyIOActive;
	}
	if (no_shared_cr3) {
		istate = ml_set_interrupts_enabled(FALSE);
		if (get_cr3_raw() != kernel_pmap->pm_cr3)
			set_cr3_raw(kernel_pmap->pm_cr3);
		(void) ml_set_interrupts_enabled(istate);
	}

out:
	KERNEL_DEBUG(debug_type | DBG_FUNC_END, (unsigned)user_addr,
		(unsigned)kernel_addr, (unsigned)nbytes, error, 0);

	return (error);
}
298
299
300 static int
301 copyio_phys(addr64_t source, addr64_t sink, vm_size_t csize, int which)
302 {
303 char *paddr;
304 user_addr_t vaddr;
305 int ctype;
306
307 if (which & cppvPsnk) {
308 paddr = (char *)sink;
309 vaddr = (user_addr_t)source;
310 ctype = COPYINPHYS;
311 } else {
312 paddr = (char *)source;
313 vaddr = (user_addr_t)sink;
314 ctype = COPYOUTPHYS;
315 }
316 return copyio(ctype, vaddr, paddr, csize, NULL, which & cppvKmap);
317 }
318
319 int
320 copyinmsg(const user_addr_t user_addr, char *kernel_addr, mach_msg_size_t nbytes)
321 {
322 return copyio(COPYIN, user_addr, kernel_addr, nbytes, NULL, 0);
323 }
324
325 int
326 copyin(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
327 {
328 return copyio(COPYIN, user_addr, kernel_addr, nbytes, NULL, 0);
329 }
330
331 int
332 copyinstr(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes, vm_size_t *lencopied)
333 {
334 *lencopied = 0;
335
336 return copyio(COPYINSTR, user_addr, kernel_addr, nbytes, lencopied, 0);
337 }
338
339 int
340 copyoutmsg(const char *kernel_addr, user_addr_t user_addr, mach_msg_size_t nbytes)
341 {
342 return copyio(COPYOUT, user_addr, (char *)(uintptr_t)kernel_addr, nbytes, NULL, 0);
343 }
344
345 int
346 copyout(const void *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
347 {
348 return copyio(COPYOUT, user_addr, (char *)(uintptr_t)kernel_addr, nbytes, NULL, 0);
349 }
350
351
/*
 * copypv: copy between any mix of physical and virtual addresses.
 *
 *	src64/snk64	source/sink; physical iff the matching
 *			cppvPsrc/cppvPsnk bit is set in 'which'
 *	size		total bytes to copy
 *	which		cppvPsrc/cppvPsnk mark physical operands,
 *			cppvKmap selects the kernel map for the virtual
 *			side, cppvFsrc/cppvFsnk request cache flushes
 *			(compiled out below — no-ops on this platform)
 *
 * At least one side must be physical (panics otherwise). The copy runs
 * page by page so that no physical operand crosses a page boundary.
 *
 * Returns KERN_SUCCESS, or KERN_FAILURE if the virtual side faults.
 */
kern_return_t
copypv(addr64_t src64, addr64_t snk64, unsigned int size, int which)
{
	unsigned int lop, csize;
	int bothphys = 0;

	KERNEL_DEBUG(0xeff7004c | DBG_FUNC_START, (unsigned)src64,
		(unsigned)snk64, size, which, 0);

	if ((which & (cppvPsrc | cppvPsnk)) == 0 )	/* Make sure that only one is virtual */
		panic("copypv: no more than 1 parameter may be virtual\n");	/* Not allowed */

	if ((which & (cppvPsrc | cppvPsnk)) == (cppvPsrc | cppvPsnk))
		bothphys = 1;	/* both are physical */

	while (size) {

		/* lop = bytes remaining in the current physical page(s);
		 * each chunk is capped so no physical side crosses a page. */
		if (bothphys) {
			lop = (unsigned int)(PAGE_SIZE - (snk64 & (PAGE_SIZE - 1)));	/* Assume sink smallest */

			if (lop > (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1))))
				lop = (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1)));	/* No, source is smaller */
		} else {
			/*
			 * only need to compute the resid for the physical page
			 * address... we don't care about where we start/finish in
			 * the virtual since we just call the normal copyin/copyout
			 */
			if (which & cppvPsrc)
				lop = (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1)));
			else
				lop = (unsigned int)(PAGE_SIZE - (snk64 & (PAGE_SIZE - 1)));
		}
		csize = size;		/* Assume we can copy it all */
		if (lop < size)
			csize = lop;	/* Nope, we can't do it all */
#if 0
		/*
		 * flush_dcache64 is currently a nop on the i386...
		 * it's used when copying to non-system memory such
		 * as video capture cards... on PPC there was a need
		 * to flush due to how we mapped this memory... not
		 * sure if it's needed on i386.
		 */
		if (which & cppvFsrc)
			flush_dcache64(src64, csize, 1);	/* If requested, flush source before move */
		if (which & cppvFsnk)
			flush_dcache64(snk64, csize, 1);	/* If requested, flush sink before move */
#endif
		if (bothphys)
			bcopy_phys(src64, snk64, csize);	/* Do a physical copy, virtually */
		else {
			/* One side is virtual: route through the fault-
			 * recoverable copy engine. */
			if (copyio_phys(src64, snk64, csize, which))
				return (KERN_FAILURE);
		}
#if 0
		if (which & cppvFsrc)
			flush_dcache64(src64, csize, 1);	/* If requested, flush source after move */
		if (which & cppvFsnk)
			flush_dcache64(snk64, csize, 1);	/* If requested, flush sink after move */
#endif
		size -= csize;		/* Calculate what is left */
		snk64 += csize;		/* Bump sink to next physical address */
		src64 += csize;		/* Bump source to next physical address */
	}
	/* NOTE: size is 0 here, so the end trace logs the advanced
	 * src/snk addresses rather than the original arguments. */
	KERNEL_DEBUG(0xeff7004c | DBG_FUNC_END, (unsigned)src64,
		(unsigned)snk64, size, which, 0);

	return KERN_SUCCESS;
}