/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_assert.h>

#include <sys/errno.h>
#include <i386/param.h>
#include <i386/misc_protos.h>
#include <i386/cpu_data.h>
#include <i386/machine_routines.h>
#include <i386/cpuid.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <sys/kdebug.h>
static int copyio(int, user_addr_t, char *, vm_size_t, vm_size_t *, int);
static int copyio_phys(addr64_t, addr64_t, vm_size_t, int);
/*
 * The copy engine has the following characteristics
 *   - copyio() handles copies to/from user or kernel space
 *   - copypv() deals with physical or virtual addresses
 *
 * Readers familiar with the 32-bit kernel will expect Joe's thesis at this
 * point describing the full glory of the copy window implementation. In K64,
 * however, there is no need for windowing. Thanks to the vast shared address
 * space, the kernel has direct access to userspace and to physical memory.
 *
 * User virtual addresses are accessible provided the user's cr3 is loaded.
 * Physical addresses are accessible via the direct map and the PHYSMAP_PTOV()
 * macro.
 *
 * Copyin/out variants all boil down to just these two routines in locore.s,
 * which provide fault-recoverable copying:
 */
extern int _bcopy(const void *, void *, vm_size_t);
extern int _bcopystr(const void *, void *, vm_size_t, vm_size_t *);
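
/*
 * A usage sketch (illustrative only; the conventions are inferred from
 * copyio() below): both routines return 0 on success, or EFAULT if a
 * recoverable fault is taken mid-copy. _bcopystr() additionally reports
 * the number of bytes it moved, including any NUL terminator:
 *
 *	vm_size_t moved;
 *	if (_bcopystr((const void *)usrc, kbuf, max, &moved) == 0)
 *		process(kbuf, moved);	// 'process' is a hypothetical consumer
 */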

/*
 * Types of copies:
 */
#define COPYIN		0	/* from user virtual to kernel virtual */
#define COPYOUT		1	/* from kernel virtual to user virtual */
#define COPYINSTR	2	/* string variant of copyin */
#define COPYINPHYS	3	/* from user virtual to kernel physical */
#define COPYOUTPHYS	4	/* from kernel physical to user virtual */
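
/*
 * For reference (derived from the wrappers below): copyin()/copyinmsg() use
 * COPYIN, copyout()/copyoutmsg() use COPYOUT, copyinstr() uses COPYINSTR,
 * and copyio_phys() selects COPYINPHYS or COPYOUTPHYS depending on which
 * side of the copy is the physical address.
 */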

#if DEVELOPMENT
typedef struct {
	uint64_t	timestamp;
	thread_t	thread;
	uintptr_t	cr4;
	uint8_t		cpuid;
	uint8_t		smap_state;
	uint8_t		copyio_active;
} smaplog_entry_t;

#define SMAPLOG_BUFFER_SIZE	(50)
static smaplog_entry_t	smaplog_cbuf[SMAPLOG_BUFFER_SIZE];
static uint32_t		smaplog_head = 0;

/*
 * Log a SMAP enable/disable transition into a small lock-free ring buffer
 * (DEVELOPMENT kernels only).
 */
static void
smaplog_add_entry(boolean_t enabling)
{
	uint32_t index = 0;
	thread_t thread = current_thread();

	/* Claim the next slot in the ring with a compare-and-swap. */
	do {
		index = smaplog_head;
	} while (!OSCompareAndSwap(index, (index + 1) % SMAPLOG_BUFFER_SIZE, &smaplog_head));

	assert(index < SMAPLOG_BUFFER_SIZE);
	assert(smaplog_head < SMAPLOG_BUFFER_SIZE);

	smaplog_cbuf[index].timestamp = mach_absolute_time();
	smaplog_cbuf[index].thread = thread;
	smaplog_cbuf[index].cpuid = cpu_number();
	smaplog_cbuf[index].cr4 = get_cr4();
	smaplog_cbuf[index].smap_state = enabling;
	smaplog_cbuf[index].copyio_active = (thread->machine.specFlags & CopyIOActive) ? 1 : 0;
}
#endif /* DEVELOPMENT */
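
/*
 * The newest log record lives at (smaplog_head - 1) % SMAPLOG_BUFFER_SIZE,
 * so a DEVELOPMENT kernel debugger can walk the ring backwards from there.
 * A sketch (not an exported interface):
 *
 *	uint32_t i = (smaplog_head + SMAPLOG_BUFFER_SIZE - 1) % SMAPLOG_BUFFER_SIZE;
 *	smaplog_entry_t *newest = &smaplog_cbuf[i];
 */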

extern boolean_t pmap_smap_enabled;

static inline void user_access_enable(void) {
	if (pmap_smap_enabled) {
		stac();		/* set RFLAGS.AC: permit supervisor access to user pages */
#if DEVELOPMENT
		smaplog_add_entry(TRUE);
#endif
	}
}
static inline void user_access_disable(void) {
	if (pmap_smap_enabled) {
		clac();		/* clear RFLAGS.AC: re-enable SMAP enforcement */
#if DEVELOPMENT
		smaplog_add_entry(FALSE);
#endif
	}
}
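
/*
 * The required bracketing pattern, as copyio() uses it below (a sketch;
 * user memory must never be touched outside such a window):
 *
 *	user_access_enable();
 *	error = _bcopy((const void *)user_addr, kernel_addr, nbytes);
 *	user_access_disable();
 */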

static int
copyio(int copy_type, user_addr_t user_addr, char *kernel_addr,
       vm_size_t nbytes, vm_size_t *lencopied, int use_kernel_map)
{
	thread_t	thread;
	pmap_t		pmap;
	vm_size_t	bytes_copied;
	int		error = 0;
	boolean_t	istate = FALSE;
	boolean_t	recursive_CopyIOActive;
	int		debug_type = 0xeff70010;

	debug_type += (copy_type << 2);

	thread = current_thread();

	KERNEL_DEBUG(debug_type | DBG_FUNC_START,
		     (unsigned)(user_addr >> 32), (unsigned)user_addr,
		     nbytes, thread->machine.copyio_state, 0);

	if (nbytes == 0)
		goto out;

	pmap = thread->map->pmap;

	/*
	 * For virtual-virtual copies, the kernel-side buffer must lie in
	 * kernel (or kext) address space; the PHYS variants carry a
	 * physical address in kernel_addr instead.
	 */
	if ((copy_type != COPYINPHYS) && (copy_type != COPYOUTPHYS) &&
	    ((vm_offset_t)kernel_addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS)) {
		panic("Invalid copy parameter, copy type: %d, kernel address: %p",
		      copy_type, kernel_addr);
	}

	/* Sanity and security check for addresses to/from a user */

	if (((pmap != kernel_pmap) && (use_kernel_map == 0)) &&
	    ((nbytes && (user_addr + nbytes <= user_addr)) ||
	     ((user_addr + nbytes) > vm_map_max(thread->map)))) {
		error = EFAULT;
		goto out;
	}

	/*
	 * If the no_shared_cr3 boot-arg is set (true), the kernel runs on
	 * its own pmap and cr3 rather than the user's -- so that wild accesses
	 * from kernel or kexts can be trapped. So, during copyin and copyout,
	 * we need to switch back to the user's map/cr3. The thread is flagged
	 * "CopyIOActive" at this time so that if the thread is pre-empted,
	 * we will later restore the correct cr3.
	 */
	recursive_CopyIOActive = thread->machine.specFlags & CopyIOActive;
	thread->machine.specFlags |= CopyIOActive;
	user_access_enable();
	if (no_shared_cr3) {
		istate = ml_set_interrupts_enabled(FALSE);
		if (get_cr3_base() != pmap->pm_cr3)
			set_cr3_raw(pmap->pm_cr3);
	}

	/*
	 * Ensure that we're running on the target thread's cr3.
	 */
	if ((pmap != kernel_pmap) && !use_kernel_map &&
	    (get_cr3_base() != pmap->pm_cr3)) {
		panic("copyio(%d,%p,%p,%ld,%p,%d) cr3 is %p expects %p",
		      copy_type, (void *)user_addr, kernel_addr, nbytes, lencopied, use_kernel_map,
		      (void *) get_cr3_raw(), (void *) pmap->pm_cr3);
	}
	if (no_shared_cr3)
		(void) ml_set_interrupts_enabled(istate);

	KERNEL_DEBUG(0xeff70044 | DBG_FUNC_NONE, (unsigned)user_addr,
		     (unsigned)kernel_addr, nbytes, 0, 0);

	switch (copy_type) {

	case COPYIN:
		error = _bcopy((const void *) user_addr,
			       kernel_addr,
			       nbytes);
		break;

	case COPYOUT:
		error = _bcopy(kernel_addr,
			       (void *) user_addr,
			       nbytes);
		break;

	case COPYINPHYS:
		error = _bcopy((const void *) user_addr,
			       PHYSMAP_PTOV(kernel_addr),
			       nbytes);
		break;

	case COPYOUTPHYS:
		error = _bcopy((const void *) PHYSMAP_PTOV(kernel_addr),
			       (void *) user_addr,
			       nbytes);
		break;

	case COPYINSTR:
		error = _bcopystr((const void *) user_addr,
				  kernel_addr,
				  nbytes,
				  &bytes_copied);

		/*
		 * lencopied should be updated on success
		 * or ENAMETOOLONG... but not EFAULT
		 */
		if (error != EFAULT)
			*lencopied = bytes_copied;

		if (error) {
			nbytes = *lencopied;
			break;
		}
		if (*(kernel_addr + bytes_copied - 1) == 0) {
			/*
			 * we found a NULL terminator... we're done
			 */
			nbytes = bytes_copied;
			break;
		} else {
			/*
			 * no more room in the buffer and we haven't
			 * yet come across a NULL terminator
			 */
			nbytes = bytes_copied;
			error = ENAMETOOLONG;
			break;
		}
	}

	user_access_disable();
	if (!recursive_CopyIOActive) {
		thread->machine.specFlags &= ~CopyIOActive;
	}
	if (no_shared_cr3) {
		istate = ml_set_interrupts_enabled(FALSE);
		if (get_cr3_raw() != kernel_pmap->pm_cr3)
			set_cr3_raw(kernel_pmap->pm_cr3);
		(void) ml_set_interrupts_enabled(istate);
	}

out:
	KERNEL_DEBUG(debug_type | DBG_FUNC_END, (unsigned)user_addr,
		     (unsigned)kernel_addr, (unsigned)nbytes, error, 0);

	return (error);
}
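
/*
 * Note: the per-type kdebug codes above are 0xeff70010 + (copy_type << 2),
 * so for example COPYIN traces as 0xeff70010 and COPYINSTR as 0xeff70018.
 */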

static int
copyio_phys(addr64_t source, addr64_t sink, vm_size_t csize, int which)
{
	char	    *paddr;
	user_addr_t vaddr;
	int         ctype;

	if (which & cppvPsnk) {
		/* Physical sink: copy from user virtual to kernel physical. */
		paddr = (char *)sink;
		vaddr = (user_addr_t)source;
		ctype = COPYINPHYS;
	} else {
		/* Physical source: copy from kernel physical to user virtual. */
		paddr = (char *)source;
		vaddr = (user_addr_t)sink;
		ctype = COPYOUTPHYS;
	}
	return copyio(ctype, vaddr, paddr, csize, NULL, which & cppvKmap);
}

int
copyinmsg(const user_addr_t user_addr, char *kernel_addr, mach_msg_size_t nbytes)
{
	return copyio(COPYIN, user_addr, kernel_addr, nbytes, NULL, 0);
}

int
copyin(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
{
	return copyio(COPYIN, user_addr, kernel_addr, nbytes, NULL, 0);
}

int
copyinstr(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes, vm_size_t *lencopied)
{
	*lencopied = 0;

	return copyio(COPYINSTR, user_addr, kernel_addr, nbytes, lencopied, 0);
}

int
copyoutmsg(const char *kernel_addr, user_addr_t user_addr, mach_msg_size_t nbytes)
{
	return copyio(COPYOUT, user_addr, (char *)(uintptr_t)kernel_addr, nbytes, NULL, 0);
}

int
copyout(const void *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
	return copyio(COPYOUT, user_addr, (char *)(uintptr_t)kernel_addr, nbytes, NULL, 0);
}
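
/*
 * A caller-side sketch (hypothetical syscall handler, for illustration):
 *
 *	char kbuf[128];
 *	if (copyin(uaddr, kbuf, sizeof(kbuf)))
 *		return (EFAULT);
 *	...operate on kbuf...
 *	if (copyout(kbuf, uaddr, sizeof(kbuf)))
 *		return (EFAULT);
 */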

kern_return_t
copypv(addr64_t src64, addr64_t snk64, unsigned int size, int which)
{
	unsigned int	lop, csize;
	int		bothphys = 0;

	KERNEL_DEBUG(0xeff7004c | DBG_FUNC_START, (unsigned)src64,
		     (unsigned)snk64, size, which, 0);

	if ((which & (cppvPsrc | cppvPsnk)) == 0)				/* Make sure that only one is virtual */
		panic("copypv: no more than 1 parameter may be virtual\n");	/* Not allowed */

	if ((which & (cppvPsrc | cppvPsnk)) == (cppvPsrc | cppvPsnk))
		bothphys = 1;							/* both are physical */

	while (size) {

		if (bothphys) {
			lop = (unsigned int)(PAGE_SIZE - (snk64 & (PAGE_SIZE - 1)));		/* Assume sink smallest */

			if (lop > (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1))))
				lop = (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1)));	/* No, source is smaller */
		} else {
			/*
			 * only need to compute the resid for the physical page
			 * address... we don't care about where we start/finish in
			 * the virtual since we just call the normal copyin/copyout
			 */
			if (which & cppvPsrc)
				lop = (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1)));
			else
				lop = (unsigned int)(PAGE_SIZE - (snk64 & (PAGE_SIZE - 1)));
		}
		csize = size;					/* Assume we can copy it all */
		if (lop < csize)
			csize = lop;				/* Nope, we can't do it all */

		/*
		 * flush_dcache64 is currently a nop on the i386...
		 * it's used when copying to non-system memory such
		 * as video capture cards... on PPC there was a need
		 * to flush due to how we mapped this memory... not
		 * sure if it's needed on i386.
		 */
		if (which & cppvFsrc)
			flush_dcache64(src64, csize, 1);	/* If requested, flush source before move */
		if (which & cppvFsnk)
			flush_dcache64(snk64, csize, 1);	/* If requested, flush sink before move */

		if (bothphys)
			bcopy_phys(src64, snk64, csize);	/* Do a physical copy, virtually */
		else {
			if (copyio_phys(src64, snk64, csize, which))
				return (KERN_FAILURE);
		}

		if (which & cppvFsrc)
			flush_dcache64(src64, csize, 1);	/* If requested, flush source after move */
		if (which & cppvFsnk)
			flush_dcache64(snk64, csize, 1);	/* If requested, flush sink after move */

		size  -= csize;					/* Calculate what is left */
		snk64 += csize;					/* Bump sink to next physical address */
		src64 += csize;					/* Bump source to next physical address */
	}

	KERNEL_DEBUG(0xeff7004c | DBG_FUNC_END, (unsigned)src64,
		     (unsigned)snk64, size, which, 0);

	return KERN_SUCCESS;
}
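
/*
 * A copypv() usage sketch (illustrative): copy one page from a physical
 * frame to a user virtual address. Only the source is physical, so only
 * cppvPsrc is set; the virtual side is treated as a user address unless
 * cppvKmap is also passed:
 *
 *	if (copypv(src_phys, user_va, PAGE_SIZE, cppvPsrc) != KERN_SUCCESS)
 *		...handle failure...
 */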