/*
 * Copyright (c) 2009-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_assert.h>

#include <sys/errno.h>
#include <i386/param.h>
#include <i386/misc_protos.h>
#include <i386/cpu_data.h>
#include <i386/machine_cpu.h>
#include <i386/machine_routines.h>
#include <i386/cpuid.h>
#include <i386/vmx.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>
#include <san/kasan.h>

#include <sys/kdebug.h>

#include <kern/copyout_shim.h>
#include <kern/zalloc_internal.h>

#undef copyin
#undef copyout

static int copyio(int, user_addr_t, char *, vm_size_t, vm_size_t *, int);
static int copyio_phys(addr64_t, addr64_t, vm_size_t, int);

/*
 * Copy sizes bigger than this value will cause a kernel panic.
 *
 * Yes, this is an arbitrary fixed limit, but it's almost certainly
 * a programming error to be copying more than this amount between
 * user and wired kernel memory in a single invocation on this
 * platform.
 */
const int copysize_limit_panic = (64 * MB);

/*
 * The copy engine has the following characteristics
 * - copyio() handles copies to/from user or kernel space
 * - copypv() deals with physical or virtual addresses
 *
 * Readers familiar with the 32-bit kernel will expect Joe's thesis at this
 * point describing the full glory of the copy window implementation. In K64,
 * however, there is no need for windowing. Thanks to the vast shared address
 * space, the kernel has direct access to userspace and to physical memory.
 *
 * User virtual addresses are accessible provided the user's cr3 is loaded.
 * Physical addresses are accessible via the direct map and the PHYSMAP_PTOV()
 * translation.
 *
 * Copyin/out variants all boil down to just these 2 routines in locore.s which
 * provide fault-recoverable copying:
 */
extern int _bcopy(const void *, void *, vm_size_t);
extern int _bcopystr(const void *, void *, vm_size_t, vm_size_t *);
extern int _copyin_atomic32(const char *src, uint32_t *dst);
extern int _copyin_atomic64(const char *src, uint64_t *dst);
extern int _copyout_atomic32(const uint32_t *u32, char *dst);
extern int _copyout_atomic64(const uint64_t *u64, char *dst);

/*
 * Types of copies:
 */
#define COPYIN          0       /* from user virtual to kernel virtual */
#define COPYOUT         1       /* from kernel virtual to user virtual */
#define COPYINSTR       2       /* string variant of copyin */
#define COPYINPHYS      3       /* from user virtual to kernel physical */
#define COPYOUTPHYS     4       /* from kernel physical to user virtual */
#define COPYINATOMIC32  5       /* from user virtual to kernel virtual */
#define COPYINATOMIC64  6       /* from user virtual to kernel virtual */
#define COPYOUTATOMIC32 7       /* from kernel virtual to user virtual */
#define COPYOUTATOMIC64 8       /* from kernel virtual to user virtual */

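/*
 * For orientation: the exported entry points later in this file map onto
 * these copy types roughly as follows (illustrative summary derived from
 * the functions below, not an authoritative table):
 *
 *	copyin() / copyinmsg()           -> COPYIN
 *	copyout() / copyoutmsg()         -> COPYOUT
 *	copyinstr()                      -> COPYINSTR
 *	copyio_phys() / copypv()         -> COPYINPHYS or COPYOUTPHYS
 *	copyin_atomic32() / _atomic64()  -> COPYINATOMIC32 / COPYINATOMIC64
 *	copyout_atomic32() / _atomic64() -> COPYOUTATOMIC32 / COPYOUTATOMIC64
 */
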
#if ENABLE_SMAPLOG
typedef struct {
	uint64_t  timestamp;
	thread_t  thread;
	uintptr_t cr4;
	uint8_t   cpuid;
	uint8_t   smap_state;
	uint8_t   copyio_active;
} smaplog_entry_t;

#define SMAPLOG_BUFFER_SIZE     (50)
static smaplog_entry_t smaplog_cbuf[SMAPLOG_BUFFER_SIZE];
static uint32_t smaplog_head = 0;

static void
smaplog_add_entry(boolean_t enabling)
{
	uint32_t index = 0;
	thread_t thread = current_thread();

	do {
		index = smaplog_head;
	} while (!OSCompareAndSwap(index, (index + 1) % SMAPLOG_BUFFER_SIZE, &smaplog_head));

	assert(index < SMAPLOG_BUFFER_SIZE);
	assert(smaplog_head < SMAPLOG_BUFFER_SIZE);
	assert(thread);

	smaplog_cbuf[index].timestamp = mach_absolute_time();
	smaplog_cbuf[index].thread = thread;
	smaplog_cbuf[index].cpuid = cpu_number();
	smaplog_cbuf[index].cr4 = get_cr4();
	smaplog_cbuf[index].smap_state = enabling;
	smaplog_cbuf[index].copyio_active = (thread->machine.specFlags & CopyIOActive) ? 1 : 0;
}
#endif /* ENABLE_SMAPLOG */

extern boolean_t pmap_smap_enabled;
static inline void
user_access_enable(void)
{
	if (pmap_smap_enabled) {
		stac();
#if ENABLE_SMAPLOG
		smaplog_add_entry(TRUE);
#endif
	}
}
static inline void
user_access_disable(void)
{
	if (pmap_smap_enabled) {
		clac();
#if ENABLE_SMAPLOG
		smaplog_add_entry(FALSE);
#endif
	}
}
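
/*
 * Usage sketch (this mirrors the pattern in copyio() below; it is not an
 * additional code path). With SMAP present, stac()/clac() toggle RFLAGS.AC
 * so that supervisor-mode accesses to user pages are legal only inside the
 * enable/disable window:
 *
 *	user_access_enable();		// stac(): permit user accesses
 *	error = _bcopy(user_src, kernel_dst, nbytes);
 *	user_access_disable();		// clac(): forbid them again
 */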

#if COPYIO_TRACE_ENABLED
#define COPYIO_TRACE(x, a, b, c, d, e) KERNEL_DEBUG_CONSTANT(x, a, b, c, d, e)
#else
#define COPYIO_TRACE(x, a, b, c, d, e) do { } while (0)
#endif

static int
copyio(int copy_type, user_addr_t user_addr, char *kernel_addr,
    vm_size_t nbytes, vm_size_t *lencopied, int use_kernel_map)
{
	thread_t  thread = current_thread();
	pmap_t    pmap;
	vm_size_t bytes_copied;
	int       error = 0;
	boolean_t istate = FALSE;
	boolean_t recursive_CopyIOActive;
#if COPYIO_TRACE_ENABLED
	int debug_type = 0xeff70010;
	debug_type += (copy_type << 2);
#endif
	vm_size_t kernel_buf_size = 0;

	if (__improbable(nbytes > copysize_limit_panic)) {
		panic("%s(%p, %p, %lu) - transfer too large", __func__,
		    (void *)user_addr, (void *)kernel_addr, nbytes);
	}

	COPYIO_TRACE(debug_type | DBG_FUNC_START,
	    user_addr, kernel_addr, nbytes, use_kernel_map, 0);

	if (__improbable(nbytes == 0)) {
		goto out;
	}

	pmap = thread->map->pmap;
	boolean_t nopagezero = thread->map->pmap->pagezero_accessible;

	if ((copy_type != COPYINPHYS) && (copy_type != COPYOUTPHYS)) {
		if (__improbable((vm_offset_t)kernel_addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS)) {
			panic("Invalid copy parameter, copy type: %d, kernel address: %p", copy_type, kernel_addr);
		}
		if (__probable(!zalloc_disable_copyio_check)) {
			zone_t src_zone = NULL;
			kernel_buf_size = zone_element_size(kernel_addr, &src_zone);
			/*
			 * Size of elements in the permanent zone is not saved as a part of the
			 * zone's info
			 */
			if (__improbable(src_zone && !src_zone->z_permanent &&
			    kernel_buf_size < nbytes)) {
				panic("copyio: kernel buffer %p has size %lu < nbytes %lu", kernel_addr, kernel_buf_size, nbytes);
			}
		}
	}

	/* Sanity and security check for addresses to/from a user */

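	/*
	 * Note: "user_addr + nbytes <= user_addr" rejects ranges that wrap
	 * around the top of the address space (e.g. user_addr == ~0ULL with
	 * nbytes == 2), which could otherwise slip past the vm_map_max() test.
	 */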
	if (__improbable(((pmap != kernel_pmap) && (use_kernel_map == 0)) &&
	    ((nbytes && (user_addr + nbytes <= user_addr)) || ((user_addr + nbytes) > vm_map_max(thread->map))))) {
		error = EFAULT;
		goto out;
	}

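	/*
	 * The atomic copies operate only on user addresses; a thread currently
	 * running on the kernel pmap has no user address space to access.
	 */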
	if (copy_type >= COPYINATOMIC32 && copy_type <= COPYOUTATOMIC64) {
		if (__improbable(pmap == kernel_pmap)) {
			error = EFAULT;
			goto out;
		}
	}

#if KASAN
	switch (copy_type) {
	case COPYIN:
	case COPYINSTR:
	case COPYINATOMIC32:
	case COPYINATOMIC64:
		__asan_storeN((uptr)kernel_addr, nbytes);
		break;
	case COPYOUT:
	case COPYOUTATOMIC32:
	case COPYOUTATOMIC64:
		__asan_loadN((uptr)kernel_addr, nbytes);
		kasan_check_uninitialized((vm_address_t)kernel_addr, nbytes);
		break;
	}
#endif

	/*
	 * If the no_shared_cr3 boot-arg is set (true), the kernel runs on
	 * its own pmap and cr3 rather than the user's -- so that wild accesses
	 * from kernel or kexts can be trapped. So, during copyin and copyout,
	 * we need to switch back to the user's map/cr3. The thread is flagged
	 * "CopyIOActive" at this time so that if the thread is pre-empted,
	 * we will later restore the correct cr3.
	 */
	recursive_CopyIOActive = thread->machine.specFlags & CopyIOActive;

	boolean_t pdswitch = no_shared_cr3 || nopagezero;

	if (__improbable(pdswitch)) {
		istate = ml_set_interrupts_enabled(FALSE);
		if (nopagezero && pmap_pcid_ncpus) {
			pmap_pcid_activate(pmap, cpu_number(), TRUE, TRUE);
		} else if (get_cr3_base() != pmap->pm_cr3) {
			set_cr3_raw(pmap->pm_cr3);
		}
		thread->machine.specFlags |= CopyIOActive;
	} else {
		thread->machine.specFlags |= CopyIOActive;
	}

	user_access_enable();

#if DEVELOPMENT || DEBUG
	/*
	 * Ensure that we're running on the target thread's cr3.
	 */
	if ((pmap != kernel_pmap) && !use_kernel_map &&
	    (get_cr3_base() != pmap->pm_cr3)) {
		panic("copyio(%d,%p,%p,%ld,%p,%d) cr3 is %p expects %p",
		    copy_type, (void *)user_addr, kernel_addr, nbytes, lencopied, use_kernel_map,
		    (void *) get_cr3_raw(), (void *) pmap->pm_cr3);
	}
#endif

	if (__improbable(pdswitch)) {
		(void) ml_set_interrupts_enabled(istate);
	}

	COPYIO_TRACE(0xeff70044 | DBG_FUNC_NONE, user_addr,
	    kernel_addr, nbytes, 0, 0);

	switch (copy_type) {
	case COPYIN:
		error = _bcopy((const void *) user_addr,
		    kernel_addr,
		    nbytes);
		break;

	case COPYOUT:
		error = _bcopy(kernel_addr,
		    (void *) user_addr,
		    nbytes);
		break;

	case COPYINPHYS:
		error = _bcopy((const void *) user_addr,
		    PHYSMAP_PTOV(kernel_addr),
		    nbytes);
		break;

	case COPYOUTPHYS:
		error = _bcopy((const void *) PHYSMAP_PTOV(kernel_addr),
		    (void *) user_addr,
		    nbytes);
		break;

	case COPYINATOMIC32:
		error = _copyin_atomic32((const void *) user_addr,
		    (void *) kernel_addr);
		break;

	case COPYINATOMIC64:
		error = _copyin_atomic64((const void *) user_addr,
		    (void *) kernel_addr);
		break;

	case COPYOUTATOMIC32:
		error = _copyout_atomic32((const void *) kernel_addr,
		    (void *) user_addr);
		break;

	case COPYOUTATOMIC64:
		error = _copyout_atomic64((const void *) kernel_addr,
		    (void *) user_addr);
		break;

	case COPYINSTR:
		error = _bcopystr((const void *) user_addr,
		    kernel_addr,
		    (int) nbytes,
		    &bytes_copied);

		/*
		 * lencopied should be updated on success
		 * or ENAMETOOLONG... but not EFAULT
		 */
		if (error != EFAULT) {
			*lencopied = bytes_copied;
		}

		if (error) {
#if KDEBUG
			nbytes = *lencopied;
#endif
			break;
		}
		if (*(kernel_addr + bytes_copied - 1) == 0) {
			/*
			 * we found a NULL terminator... we're done
			 */
#if KDEBUG
			nbytes = *lencopied;
#endif
			break;
		} else {
			/*
			 * no more room in the buffer and we haven't
			 * yet come across a NULL terminator
			 */
#if KDEBUG
			nbytes = *lencopied;
#endif
			error = ENAMETOOLONG;
			break;
		}
	}

	user_access_disable();

	if (__improbable(pdswitch)) {
		istate = ml_set_interrupts_enabled(FALSE);
		if (!recursive_CopyIOActive && (get_cr3_raw() != kernel_pmap->pm_cr3)) {
			if (nopagezero && pmap_pcid_ncpus) {
				pmap_pcid_activate(pmap, cpu_number(), TRUE, FALSE);
			} else {
				set_cr3_raw(kernel_pmap->pm_cr3);
			}
		}

		if (!recursive_CopyIOActive) {
			thread->machine.specFlags &= ~CopyIOActive;
		}
		(void) ml_set_interrupts_enabled(istate);
	} else if (!recursive_CopyIOActive) {
		thread->machine.specFlags &= ~CopyIOActive;
	}

out:
	COPYIO_TRACE(debug_type | DBG_FUNC_END, user_addr, kernel_addr, nbytes, error, 0);

	return error;
}

static int
copyio_phys(addr64_t source, addr64_t sink, vm_size_t csize, int which)
{
	char *paddr;
	user_addr_t vaddr;
	int ctype;

	if (which & cppvPsnk) {
		paddr = (char *)sink;
		vaddr = (user_addr_t)source;
		ctype = COPYINPHYS;
	} else {
		paddr = (char *)source;
		vaddr = (user_addr_t)sink;
		ctype = COPYOUTPHYS;
		CALL_COPYOUT_SHIM_PHYS((void *)PHYSMAP_PTOV(source), sink, csize)
	}
	return copyio(ctype, vaddr, paddr, csize, NULL, which & cppvKmap);
}

int
copyinmsg(const user_addr_t user_addr, char *kernel_addr, mach_msg_size_t nbytes)
{
	return copyio(COPYIN, user_addr, kernel_addr, nbytes, NULL, 0);
}

int
copyin(const user_addr_t user_addr, void *kernel_addr, vm_size_t nbytes)
{
	return copyio(COPYIN, user_addr, kernel_addr, nbytes, NULL, 0);
}

/*
 * copy{in,out}_atomic{32,64}
 * Read or store an aligned value from userspace as a single memory transaction.
 * These functions support userspace synchronization features.
 */
int
copyin_atomic32(const user_addr_t user_addr, uint32_t *kernel_addr)
{
	/* Test alignment */
	if (user_addr & 3) {
		return EINVAL;
	}
	return copyio(COPYINATOMIC32, user_addr, (char *)(uintptr_t)kernel_addr, 4, NULL, 0);
}

int
copyin_atomic32_wait_if_equals(const user_addr_t user_addr, uint32_t value)
{
	uint32_t u32;
	int result = copyin_atomic32(user_addr, &u32);
	if (__improbable(result)) {
		return result;
	}
	if (u32 != value) {
		return ESTALE;
	}
	cpu_pause();
	return 0;
}

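/*
 * Hypothetical usage sketch (assumed names, not code from this file):
 * copyin_atomic32_wait_if_equals() returns 0 (after a cpu_pause()) while the
 * user word still holds the expected value, ESTALE once it changes, and a
 * copyin error on fault -- so a polling wait loop can be built as:
 *
 *	int rc;
 *	while ((rc = copyin_atomic32_wait_if_equals(uaddr, expected)) == 0) {
 *		continue;	// value unchanged; spin politely
 *	}
 *	// rc == ESTALE: value changed; anything else: EINVAL/EFAULT
 */
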
int
copyin_atomic64(const user_addr_t user_addr, uint64_t *kernel_addr)
{
	/* Test alignment */
	if (user_addr & 7) {
		return EINVAL;
	}
	return copyio(COPYINATOMIC64, user_addr, (char *)(uintptr_t)kernel_addr, 8, NULL, 0);
}

int
copyout_atomic32(uint32_t value, user_addr_t user_addr)
{
	/* Test alignment */
	if (user_addr & 3) {
		return EINVAL;
	}
	return copyio(COPYOUTATOMIC32, user_addr, (char *)&value, 4, NULL, 0);
}

int
copyout_atomic64(uint64_t value, user_addr_t user_addr)
{
	/* Test alignment */
	if (user_addr & 7) {
		return EINVAL;
	}
	return copyio(COPYOUTATOMIC64, user_addr, (char *)&value, 8, NULL, 0);
}

int
copyinstr(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes, vm_size_t *lencopied)
{
	*lencopied = 0;

	return copyio(COPYINSTR, user_addr, kernel_addr, nbytes, lencopied, 0);
}

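/*
 * Hypothetical caller sketch (assumed names, not code from this file):
 * copying in a user-supplied path, where ENAMETOOLONG means the buffer
 * filled before a NUL terminator was found:
 *
 *	char name[MAXPATHLEN];
 *	vm_size_t len;
 *	int err = copyinstr(user_path, name, sizeof(name), &len);
 *	if (err == 0) {
 *		// name[] is NUL-terminated; len counts the NUL
 *	}
 */
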
int
copyoutmsg(const char *kernel_addr, user_addr_t user_addr, mach_msg_size_t nbytes)
{
	CALL_COPYOUT_SHIM_MSG(kernel_addr, user_addr, (vm_size_t)nbytes)
	return copyio(COPYOUT, user_addr, (char *)(uintptr_t)kernel_addr, nbytes, NULL, 0);
}

int
copyout(const void *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
	CALL_COPYOUT_SHIM_NRML(kernel_addr, user_addr, nbytes)
	return copyio(COPYOUT, user_addr, (char *)(uintptr_t)kernel_addr, nbytes, NULL, 0);
}

kern_return_t
copypv(addr64_t src64, addr64_t snk64, unsigned int size, int which)
{
	unsigned int lop, csize;
	int bothphys = 0;

	KERNEL_DEBUG(0xeff7004c | DBG_FUNC_START, (unsigned)src64,
	    (unsigned)snk64, size, which, 0);

	if ((which & (cppvPsrc | cppvPsnk)) == 0) {     /* Make sure that only one is virtual */
		panic("copypv: no more than 1 parameter may be virtual\n"); /* Not allowed */
	}
	if ((which & (cppvPsrc | cppvPsnk)) == (cppvPsrc | cppvPsnk)) {
		bothphys = 1;   /* both are physical */
	}
	while (size) {
		if (bothphys) {
			lop = (unsigned int)(PAGE_SIZE - (snk64 & (PAGE_SIZE - 1)));    /* Assume sink smallest */

			if (lop > (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1)))) {
				lop = (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1)));    /* No, source is smaller */
			}
		} else {
			/*
			 * only need to compute the resid for the physical page
			 * address... we don't care about where we start/finish in
			 * the virtual since we just call the normal copyin/copyout
			 */
			if (which & cppvPsrc) {
				lop = (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1)));
			} else {
				lop = (unsigned int)(PAGE_SIZE - (snk64 & (PAGE_SIZE - 1)));
			}
		}
		csize = size;   /* Assume we can copy it all */
		if (lop < size) {
			csize = lop;    /* Nope, we can't do it all */
		}
#if 0
		/*
		 * flush_dcache64 is currently a nop on the i386...
		 * it's used when copying to non-system memory such
		 * as video capture cards... on PPC there was a need
		 * to flush due to how we mapped this memory... not
		 * sure if it's needed on i386.
		 */
		if (which & cppvFsrc) {
			flush_dcache64(src64, csize, 1);        /* If requested, flush source before move */
		}
		if (which & cppvFsnk) {
			flush_dcache64(snk64, csize, 1);        /* If requested, flush sink before move */
		}
#endif
		if (bothphys) {
			bcopy_phys(src64, snk64, csize);        /* Do a physical copy, virtually */
		} else {
			if (copyio_phys(src64, snk64, csize, which)) {
				return KERN_FAILURE;
			}
		}
#if 0
		if (which & cppvFsrc) {
			flush_dcache64(src64, csize, 1);        /* If requested, flush source after move */
		}
		if (which & cppvFsnk) {
			flush_dcache64(snk64, csize, 1);        /* If requested, flush sink after move */
		}
#endif
		size -= csize;          /* Calculate what is left */
		snk64 += csize;         /* Bump sink to next physical address */
		src64 += csize;         /* Bump source to next physical address */
	}
	KERNEL_DEBUG(0xeff7004c | DBG_FUNC_END, (unsigned)src64,
	    (unsigned)snk64, size, which, 0);

	return KERN_SUCCESS;
}
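
/*
 * Hypothetical usage sketch (assumed values, not code from this file):
 * copying one page from a physical source to a user virtual sink. cppvPsrc
 * marks src64 as physical, so the data is pushed out through the normal
 * COPYOUTPHYS path above:
 *
 *	kern_return_t kr = copypv(src_phys, user_va, PAGE_SIZE, cppvPsrc);
 *	if (kr != KERN_SUCCESS) {
 *		// the copy faulted on the user address
 *	}
 */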