/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_assert.h>

#include <sys/errno.h>
#include <i386/param.h>
#include <i386/misc_protos.h>
#include <i386/cpu_data.h>
#include <i386/machine_routines.h>
#include <i386/cpuid.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <sys/kdebug.h>
static int copyio(int, user_addr_t, char *, vm_size_t, vm_size_t *, int);
static int copyio_phys(addr64_t, addr64_t, vm_size_t, int);
/*
 * The copy engine has the following characteristics
 *   - copyio() handles copies to/from user or kernel space
 *   - copypv() deals with physical or virtual addresses
 *
 * Readers familiar with the 32-bit kernel will expect Joe's thesis at this
 * point describing the full glory of the copy window implementation. In K64,
 * however, there is no need for windowing. Thanks to the vast shared address
 * space, the kernel has direct access to userspace and to physical memory.
 *
 * User virtual addresses are accessible provided the user's cr3 is loaded.
 * Physical addresses are accessible via the direct map and the PHYSMAP_PTOV()
 * macro.
 *
 * Copyin/out variants all boil down to just these 2 routines in locore.s which
 * provide fault-recoverable copying:
 */
extern int _bcopy(const void *, void *, vm_size_t);
extern int _bcopystr(const void *, void *, vm_size_t, vm_size_t *);
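
/*
 * Illustrative usage (a sketch for exposition, not part of the original
 * source): a typical caller is a syscall handler moving buffers across the
 * user/kernel boundary. "uaddr" and "upath" are hypothetical user-space
 * addresses taken from syscall arguments.
 *
 *	char		kbuf[128];
 *	vm_size_t	done;
 *	int		err;
 *
 *	err = copyin(uaddr, kbuf, sizeof (kbuf));	    // EFAULT on a bad user address
 *	err = copyinstr(upath, kbuf, sizeof (kbuf), &done); // NUL-terminated; ENAMETOOLONG if no NUL fits
 *	err = copyout(kbuf, uaddr, done);		    // kernel virtual -> user virtual
 */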
#define COPYIN		0	/* from user virtual to kernel virtual */
#define COPYOUT		1	/* from kernel virtual to user virtual */
#define COPYINSTR	2	/* string variant of copyin */
#define COPYINPHYS	3	/* from user virtual to kernel physical */
#define COPYOUTPHYS	4	/* from kernel physical to user virtual */
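
/*
 * copyio() derives its kdebug trace code as 0xeff70010 + (copy_type << 2),
 * giving 0xeff70010 (COPYIN), 0xeff70014 (COPYOUT), 0xeff70018 (COPYINSTR),
 * 0xeff7001c (COPYINPHYS) and 0xeff70020 (COPYOUTPHYS).
 */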
static int
copyio(int copy_type, user_addr_t user_addr, char *kernel_addr,
       vm_size_t nbytes, vm_size_t *lencopied, int use_kernel_map)
{
	thread_t	thread;
	pmap_t		pmap;
	vm_size_t	bytes_copied;
	int		error = 0;
	boolean_t	istate = FALSE;
	boolean_t	recursive_CopyIOActive;
#if KDEBUG
	int		debug_type = 0xeff70010;
	debug_type += (copy_type << 2);
#endif

	thread = current_thread();

	KERNEL_DEBUG(debug_type | DBG_FUNC_START,
		     (unsigned)(user_addr >> 32), (unsigned)user_addr,
		     nbytes, thread->machine.copyio_state, 0);

	if (nbytes == 0)
		goto out;

	pmap = thread->map->pmap;
	if ((copy_type != COPYINPHYS) && (copy_type != COPYOUTPHYS) &&
	    ((vm_offset_t)kernel_addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS)) {
		panic("Invalid copy parameter, copy type: %d, kernel address: %p",
		      copy_type, kernel_addr);
	}
	/* Sanity and security check for addresses to/from a user */

	if (((pmap != kernel_pmap) && (use_kernel_map == 0)) &&
	    ((nbytes && (user_addr + nbytes <= user_addr)) ||
	     ((user_addr + nbytes) > vm_map_max(thread->map)))) {
		error = EFAULT;
		goto out;
	}
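
	/*
	 * The wraparound clause above (user_addr + nbytes <= user_addr)
	 * rejects lengths that wrap the 64-bit address space: e.g.
	 * user_addr = 0xffffffffffff0000 with nbytes = 0x20000 sums to
	 * 0x10000, below user_addr, so the request fails with EFAULT
	 * instead of silently touching low addresses.
	 */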
	/*
	 * If the no_shared_cr3 boot-arg is set (true), the kernel runs on
	 * its own pmap and cr3 rather than the user's -- so that wild accesses
	 * from kernel or kexts can be trapped. So, during copyin and copyout,
	 * we need to switch back to the user's map/cr3. The thread is flagged
	 * "CopyIOActive" at this time so that if the thread is pre-empted,
	 * we will later restore the correct cr3.
	 */
	recursive_CopyIOActive = thread->machine.specFlags & CopyIOActive;
	thread->machine.specFlags |= CopyIOActive;
	if (no_shared_cr3) {
		istate = ml_set_interrupts_enabled(FALSE);
		if (get_cr3_base() != pmap->pm_cr3)
			set_cr3_raw(pmap->pm_cr3);
	}
	/*
	 * Ensure that we're running on the target thread's cr3.
	 */
	if ((pmap != kernel_pmap) && !use_kernel_map &&
	    (get_cr3_base() != pmap->pm_cr3)) {
		panic("copyio(%d,%p,%p,%ld,%p,%d) cr3 is %p expects %p",
		      copy_type, (void *)user_addr, kernel_addr, nbytes,
		      lencopied, use_kernel_map,
		      (void *) get_cr3_raw(), (void *) pmap->pm_cr3);
	}
	if (no_shared_cr3)
		(void) ml_set_interrupts_enabled(istate);
	KERNEL_DEBUG(0xeff70044 | DBG_FUNC_NONE, (unsigned)user_addr,
		     (unsigned)kernel_addr, nbytes, 0, 0);
	switch (copy_type) {

	case COPYIN:
		error = _bcopy((const void *) user_addr, kernel_addr, nbytes);
		break;

	case COPYOUT:
		error = _bcopy(kernel_addr, (void *) user_addr, nbytes);
		break;

	case COPYINPHYS:
		error = _bcopy((const void *) user_addr,
			       PHYSMAP_PTOV(kernel_addr), nbytes);
		break;

	case COPYOUTPHYS:
		error = _bcopy((const void *) PHYSMAP_PTOV(kernel_addr),
			       (void *) user_addr, nbytes);
		break;

	case COPYINSTR:
		error = _bcopystr((const void *) user_addr, kernel_addr,
				  (vm_size_t) nbytes, &bytes_copied);

		/*
		 * lencopied should be updated on success
		 * or ENAMETOOLONG... but not EFAULT
		 */
		if (error != EFAULT)
			*lencopied = bytes_copied;

		if (error)
			break;

		if (*(kernel_addr + bytes_copied - 1) == 0) {
			/*
			 * we found a NULL terminator... we're done
			 */
			break;
		} else {
			/*
			 * no more room in the buffer and we haven't
			 * yet come across a NULL terminator
			 */
			error = ENAMETOOLONG;
			break;
		}
	}
	if (!recursive_CopyIOActive)
		thread->machine.specFlags &= ~CopyIOActive;
	if (no_shared_cr3) {
		istate = ml_set_interrupts_enabled(FALSE);
		if (get_cr3_raw() != kernel_pmap->pm_cr3)
			set_cr3_raw(kernel_pmap->pm_cr3);
		(void) ml_set_interrupts_enabled(istate);
	}
out:
	KERNEL_DEBUG(debug_type | DBG_FUNC_END, (unsigned)user_addr,
		     (unsigned)kernel_addr, (unsigned)nbytes, error, 0);

	return (error);
}
static int
copyio_phys(addr64_t source, addr64_t sink, vm_size_t csize, int which)
{
	char		*paddr;
	user_addr_t	vaddr;
	int		ctype;

	if (which & cppvPsnk) {
		paddr = (char *)sink;
		vaddr = (user_addr_t)source;
		ctype = COPYINPHYS;
	} else {
		paddr = (char *)source;
		vaddr = (user_addr_t)sink;
		ctype = COPYOUTPHYS;
	}
	return copyio(ctype, vaddr, paddr, csize, NULL, which & cppvKmap);
}
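
/*
 * Flag semantics: with cppvPsnk set, the sink is the physical side, so
 * bytes flow user virtual -> kernel physical (COPYINPHYS); otherwise the
 * source is physical and bytes flow kernel physical -> user virtual
 * (COPYOUTPHYS). The cppvKmap bit is passed through as copyio()'s
 * use_kernel_map argument, marking the virtual address as a kernel
 * rather than user address.
 */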
int
copyinmsg(const user_addr_t user_addr, char *kernel_addr, mach_msg_size_t nbytes)
{
	return copyio(COPYIN, user_addr, kernel_addr, nbytes, NULL, 0);
}
int
copyin(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
{
	return copyio(COPYIN, user_addr, kernel_addr, nbytes, NULL, 0);
}
int
copyinstr(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes, vm_size_t *lencopied)
{
	*lencopied = 0;

	return copyio(COPYINSTR, user_addr, kernel_addr, nbytes, lencopied, 0);
}
int
copyoutmsg(const char *kernel_addr, user_addr_t user_addr, mach_msg_size_t nbytes)
{
	return copyio(COPYOUT, user_addr, (char *)(uintptr_t)kernel_addr, nbytes, NULL, 0);
}
int
copyout(const void *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
	return copyio(COPYOUT, user_addr, (char *)(uintptr_t)kernel_addr, nbytes, NULL, 0);
}
kern_return_t
copypv(addr64_t src64, addr64_t snk64, unsigned int size, int which)
{
	unsigned int	lop, csize;
	int		bothphys = 0;

	KERNEL_DEBUG(0xeff7004c | DBG_FUNC_START, (unsigned)src64,
		     (unsigned)snk64, size, which, 0);
	if ((which & (cppvPsrc | cppvPsnk)) == 0)				/* Make sure that only one is virtual */
		panic("copypv: no more than 1 parameter may be virtual\n");	/* Not allowed */

	if ((which & (cppvPsrc | cppvPsnk)) == (cppvPsrc | cppvPsnk))
		bothphys = 1;							/* both are physical */
	while (size) {

		if (bothphys) {
			lop = (unsigned int)(PAGE_SIZE - (snk64 & (PAGE_SIZE - 1)));		/* Assume sink smallest */

			if (lop > (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1))))
				lop = (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1)));	/* No, source is smaller */
		} else {
			/*
			 * only need to compute the resid for the physical page
			 * address... we don't care about where we start/finish in
			 * the virtual since we just call the normal copyin/copyout
			 */
			if (which & cppvPsrc)
				lop = (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1)));
			else
				lop = (unsigned int)(PAGE_SIZE - (snk64 & (PAGE_SIZE - 1)));
		}
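
		/*
		 * Worked example (illustrative numbers): with PAGE_SIZE = 4096
		 * and a physical source src64 = 0x1000f80, the in-page offset
		 * is 0xf80 (3968), so lop = 4096 - 3968 = 128; only 128 bytes
		 * remain on that page, and the chunk below is clamped so a
		 * single copy never crosses a physical page boundary.
		 */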
		if (size < lop)
			csize = size;		/* Assume we can copy it all */
		else
			csize = lop;		/* Nope, we can't do it all */
		/*
		 * flush_dcache64 is currently a nop on the i386...
		 * it's used when copying to non-system memory such
		 * as video capture cards... on PPC there was a need
		 * to flush due to how we mapped this memory... not
		 * sure if it's needed on i386.
		 */
		if (which & cppvFsrc)
			flush_dcache64(src64, csize, 1);	/* If requested, flush source before move */
		if (which & cppvFsnk)
			flush_dcache64(snk64, csize, 1);	/* If requested, flush sink before move */
		if (bothphys)
			bcopy_phys(src64, snk64, csize);	/* Do a physical copy, virtually */
		else {
			if (copyio_phys(src64, snk64, csize, which))
				return (KERN_FAILURE);
		}
		if (which & cppvFsrc)
			flush_dcache64(src64, csize, 1);	/* If requested, flush source after move */
		if (which & cppvFsnk)
			flush_dcache64(snk64, csize, 1);	/* If requested, flush sink after move */
		size  -= csize;			/* Calculate what is left */
		snk64 += csize;			/* Bump sink to next physical address */
		src64 += csize;			/* Bump source to next physical address */
	}
	KERNEL_DEBUG(0xeff7004c | DBG_FUNC_END, (unsigned)src64,
		     (unsigned)snk64, size, which, 0);

	return (KERN_SUCCESS);
}