 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 * @APPLE_LICENSE_HEADER_START@
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * @APPLE_LICENSE_HEADER_END@
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *    The Regents of the University of California. All rights reserved.
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *    @(#)vm_mmap.c    8.10 (Berkeley) 2/19/95
 * Mapped file (mmap) interface to VM
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/vadvise.h>
#include <sys/trace.h>
#include <mach/mach_types.h>
#include <kern/cpu_number.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <mach/vm_sync.h>
#include <mach/vm_behavior.h>
#include <mach/vm_inherit.h>
#include <mach/vm_statistics.h>
    struct sbrk_args *uap;
    /* Not yet implemented */
    struct sstk_args *uap;
    /* Not yet implemented */
ogetpagesize(p, uap, retval)
#endif /* COMPAT_43 */
osmmap(curp, uap, retval)
    register struct osmmap_args *uap;
#ifdef DOUBLE_ALIGN_PARAMS
    if ((uap->share == MAP_SHARED) || (uap->share == MAP_PRIVATE)) {
        newargs.addr = uap->addr;
        newargs.len = (size_t)uap->len;
        newargs.prot = uap->prot;
        newargs.flags = uap->share;
        newargs.fd = uap->fd;
        newargs.pos = (off_t)uap->pos;
        return (mmap(curp, &newargs, retval));
#ifdef DOUBLE_ALIGN_PARAMS
    struct mmap_args *uap;
     *    Map in special device (must be SHARED) or file
    register struct vnode *vp;
    kern_return_t result;
    vm_offset_t user_addr;
    vm_object_offset_t file_pos;
    boolean_t find_space, docow;
    user_addr = (vm_offset_t)uap->addr;
    user_size = (vm_size_t) uap->len;
    prot = (uap->prot & VM_PROT_ALL);
     * The vm code does not have prototypes and the compiler doesn't do
     * the right thing when you cast a 64-bit value and pass it in a
     * function call. So here it is.
    file_pos = (vm_object_offset_t)uap->pos;
    /* make sure mapping fits into numeric range etc */
    if ((file_pos + user_size > (vm_object_offset_t)-PAGE_SIZE_64) ||
        ((ssize_t) uap->len < 0) ||
        ((flags & MAP_ANON) && uap->fd != -1))
     * Align the file position to a page boundary,
     * and save its page offset component.
    pageoff = ((vm_offset_t)file_pos & PAGE_MASK);
    file_pos -= (vm_object_offset_t)pageoff;
    /* Adjust size for rounding (on both ends). */
    user_size += pageoff;                              /* low end... */
    user_size = (vm_size_t) round_page(user_size);     /* hi end */
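    /*
     * Worked example (assuming the common 4K page size): pos = 0x12345
     * and len = 0x100 give pageoff = 0x345, file_pos = 0x12000, and
     * user_size = round_page(0x100 + 0x345) = 0x1000, so the mapping
     * covers the whole page containing the requested bytes.
     */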
     * Check for illegal addresses. Watch out for address wrap... Note
     * that VM_*_ADDRESS are not constants due to casts (argh).
    if (flags & MAP_FIXED) {
         * The specified address must have the same remainder
         * as the file offset taken modulo PAGE_SIZE, so it
         * should be aligned after adjustment by pageoff.
        user_addr -= pageoff;
        if (user_addr & PAGE_MASK)
        /* Address range must be all in user VM space. */
        if (VM_MAX_ADDRESS > 0 && (user_addr + user_size > VM_MAX_ADDRESS))
        if (VM_MIN_ADDRESS > 0 && user_addr < VM_MIN_ADDRESS)
        if (user_addr + user_size < user_addr)
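    /*
     * Example of the MAP_FIXED rule above (assuming a 4K page size):
     * mapping file offset 0x12345 (pageoff = 0x345) at addr 0x40345 is
     * accepted because 0x40345 - 0x345 = 0x40000 is page aligned, while
     * addr 0x40000 would be rejected since 0x40000 - 0x345 = 0x3FCBB is not.
     */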
    /* Do not have APIs to get this info; need to wait until then */
     * XXX for non-fixed mappings where no hint is provided or
     * the hint would fall in the potential heap space,
     * place it after the end of the largest possible heap.
     * There should really be a pmap call to determine a reasonable
    else if (addr < round_page(p->p_vmspace->vm_daddr + MAXDSIZ))
        addr = round_page(p->p_vmspace->vm_daddr + MAXDSIZ);
    if (flags & MAP_ANON) {
         * Mapping blank space is trivial.
        maxprot = VM_PROT_ALL;
         * Mapping file, get fp for validation. Obtain vnode and make
         * sure it is of appropriate type.
        err = fdgetf(p, uap->fd, &fp);
        if (fp->f_type == DTYPE_PSXSHM) {
            uap->addr = user_addr;
            uap->len = user_size;
            return (pshm_mmap(p, uap, retval, fp, pageoff));
        if (fp->f_type != DTYPE_VNODE)
        vp = (struct vnode *)fp->f_data;
        if (vp->v_type != VREG && vp->v_type != VCHR)
         * XXX hack to handle use of /dev/zero to map anon memory (ala
        if (vp->v_type == VCHR || vp->v_type == VSTR) {
             * Ensure that file and memory protections are
             * compatible. Note that we only worry about
             * writability if mapping is shared; in this case,
             * current and max prot are dictated by the open file.
             * XXX use the vnode instead? Problem is: what
             * credentials do we use for determination? What if
             * proc does a setuid?
            maxprot = VM_PROT_EXECUTE;    /* ??? */
            if (fp->f_flag & FREAD)
                maxprot |= VM_PROT_READ;
            else if (prot & PROT_READ)
             * If we are sharing potential changes (either via
             * MAP_SHARED or via the implicit sharing of character
             * device mappings), and we are trying to get write
             * permission although we opened it without asking
            if ((flags & MAP_SHARED) != 0) {
                if ((fp->f_flag & FWRITE) != 0) {
                        (IMMUTABLE|APPEND)) == 0)
                        maxprot |= VM_PROT_WRITE;
                    else if (prot & PROT_WRITE)
                } else if ((prot & PROT_WRITE) != 0)
                maxprot |= VM_PROT_WRITE;
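            /*
             * In outline: FREAD grants VM_PROT_READ in maxprot; a
             * MAP_SHARED mapping gains VM_PROT_WRITE only when the file
             * was opened FWRITE and is neither immutable nor append-only;
             * a private (copy-on-write) mapping always gets VM_PROT_WRITE
             * since its stores never reach the file.
             */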
     * We bend a little - round the start and end addresses
     * to the nearest page boundary.
    user_size = round_page(user_size);
    if (file_pos & PAGE_MASK_64)
    user_map = current_map();
    if ((flags & MAP_FIXED) == 0) {
        user_addr = round_page(user_addr);
        if (user_addr != trunc_page(user_addr))
        (void) vm_deallocate(user_map, user_addr, user_size);
     * Lookup/allocate object.
    if (flags & MAP_ANON) {
         * Unnamed anonymous regions always start at 0.
    if (handle == NULL) {
#if defined(VM_PROT_READ_IS_EXEC)
        if (prot & VM_PROT_READ)
            prot |= VM_PROT_EXECUTE;
        if (maxprot & VM_PROT_READ)
            maxprot |= VM_PROT_EXECUTE;
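        /*
         * The #if block above widens prot and maxprot on configurations
         * where readable pages are implicitly executable
         * (VM_PROT_READ_IS_EXEC), so the vm_map protections match what
         * the hardware will actually enforce.
         */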
        result = vm_allocate(user_map, &user_addr, user_size, find_space);
        if (result != KERN_SUCCESS)
        UBCINFOCHECK("mmap", vp);
        pager = ubc_getpager(vp);
         * FIXME: if we're writing the file we need a way to
         * ensure that someone doesn't replace our R/W creds
         * with ones that only work for read.
        if ((flags & (MAP_ANON|MAP_SHARED)) == 0) {
#if defined(VM_PROT_READ_IS_EXEC)
        if (prot & VM_PROT_READ)
            prot |= VM_PROT_EXECUTE;
        if (maxprot & VM_PROT_READ)
            maxprot |= VM_PROT_EXECUTE;
        result = vm_map_64(user_map, &user_addr, user_size,
                0, find_space, pager, file_pos, docow,
    if (result != KERN_SUCCESS)
    if (flags & (MAP_SHARED|MAP_INHERIT)) {
        result = vm_inherit(user_map, user_addr, user_size,
        if (result != KERN_SUCCESS) {
            (void) vm_deallocate(user_map, user_addr, user_size);
    *fdflags(p, uap->fd) |= UF_MAPPED;
    *retval = (register_t)(user_addr + pageoff);
    case KERN_INVALID_ADDRESS:
    case KERN_PROTECTION_FAILURE:
msync(p, uap, retval)
    struct msync_args *uap;
    vm_size_t size, pageoff;
    vm_sync_t sync_flags = 0;
    addr = (vm_offset_t) uap->addr;
    pageoff = (addr & PAGE_MASK);
    size = (vm_size_t) round_page(size);
    if (addr + size < addr)
    user_map = current_map();
    if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
     * We cannot support this properly without maintaining a list of
     * all mmaps done. We cannot use vm_map_entry, as entries could be
     * split or coalesced by independent actions. So instead of returning
     * inaccurate results, let's just return an error for an invalid size
    if (flags & MS_KILLPAGES)
        sync_flags |= VM_SYNC_KILLPAGES;
    if (flags & MS_DEACTIVATE)
        sync_flags |= VM_SYNC_DEACTIVATE;
    if (flags & MS_INVALIDATE)
        sync_flags |= VM_SYNC_INVALIDATE;
    if (!(flags & (MS_KILLPAGES | MS_DEACTIVATE))) {
        if (flags & MS_ASYNC)
            sync_flags |= VM_SYNC_ASYNCHRONOUS;
        else
            sync_flags |= VM_SYNC_SYNCHRONOUS;
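    /*
     * The flag translation above maps the BSD msync() interface onto Mach
     * vm_msync(): a userland msync(addr, len, MS_SYNC) request ends up as
     * VM_SYNC_SYNCHRONOUS, MS_ASYNC becomes VM_SYNC_ASYNCHRONOUS, and
     * MS_KILLPAGES/MS_DEACTIVATE requests are passed through without
     * forcing a synchronous or asynchronous flush.
     */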
    rv = vm_msync(user_map, addr, size, sync_flags);
    case KERN_INVALID_ADDRESS:
        return (EINVAL);    /* Sun returns ENOMEM? */
    /* Not yet implemented */
munmap(p, uap, retval)
    struct munmap_args *uap;
    vm_offset_t user_addr;
    vm_size_t user_size, pageoff;
    kern_return_t result;
    user_addr = (vm_offset_t) uap->addr;
    user_size = (vm_size_t) uap->len;
    pageoff = (user_addr & PAGE_MASK);
    user_addr -= pageoff;
    user_size += pageoff;
    user_size = round_page(user_size);
    if (user_addr + user_size < user_addr)
    /* Address range must be all in user VM space. */
    if (VM_MAX_ADDRESS > 0 && (user_addr + user_size > VM_MAX_ADDRESS))
    if (VM_MIN_ADDRESS > 0 && user_addr < VM_MIN_ADDRESS)
    result = vm_deallocate(current_map(), user_addr, user_size);
    if (result != KERN_SUCCESS) {
     * XXX should vm_deallocate any regions mapped to this file
    *fdflags(p, fd) &= ~UF_MAPPED;
struct mprotect_args {
mprotect(p, uap, retval)
    struct mprotect_args *uap;
    register vm_prot_t prot;
    vm_offset_t user_addr;
    vm_size_t user_size, pageoff;
    kern_return_t result;
    user_addr = (vm_offset_t) uap->addr;
    user_size = (vm_size_t) uap->len;
    prot = (vm_prot_t)(uap->prot & VM_PROT_ALL);
#if defined(VM_PROT_READ_IS_EXEC)
    if (prot & VM_PROT_READ)
        prot |= VM_PROT_EXECUTE;
    pageoff = (user_addr & PAGE_MASK);
    user_addr -= pageoff;
    user_size += pageoff;
    user_size = round_page(user_size);
    if (user_addr + user_size < user_addr)
    user_map = current_map();
    result = vm_map_protect(user_map, user_addr, user_addr + user_size, prot,
    case KERN_PROTECTION_FAILURE:
struct minherit_args {
minherit(p, uap, retval)
    struct minherit_args *uap;
    vm_size_t size, pageoff;
    register vm_inherit_t inherit;
    kern_return_t result;
    addr = (vm_offset_t)uap->addr;
    inherit = uap->inherit;
    pageoff = (addr & PAGE_MASK);
    size = (vm_size_t) round_page(size);
    if (addr + size < addr)
    user_map = current_map();
    result = vm_inherit(user_map, addr, size,
    case KERN_PROTECTION_FAILURE:
struct madvise_args {
madvise(p, uap, retval)
    struct madvise_args *uap;
    vm_offset_t start, end;
    vm_behavior_t new_behavior;
    kern_return_t result;
     * Check for illegal addresses. Watch out for address wrap... Note
     * that VM_*_ADDRESS are not constants due to casts (argh).
    if (VM_MAX_ADDRESS > 0 &&
        ((vm_offset_t) uap->addr + uap->len) > VM_MAX_ADDRESS)
    if (VM_MIN_ADDRESS > 0 && uap->addr < VM_MIN_ADDRESS)
    if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)
     * Since this routine is only advisory, we default to conservative
    start = trunc_page((vm_offset_t) uap->addr);
    end = round_page((vm_offset_t) uap->addr + uap->len);
    user_map = current_map();
    switch (uap->behav) {
        new_behavior = VM_BEHAVIOR_RANDOM;
    case MADV_SEQUENTIAL:
        new_behavior = VM_BEHAVIOR_SEQUENTIAL;
        new_behavior = VM_BEHAVIOR_DEFAULT;
        new_behavior = VM_BEHAVIOR_WILLNEED;
        new_behavior = VM_BEHAVIOR_DONTNEED;
    result = vm_behavior_set(user_map, start, end, new_behavior);
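    /*
     * madvise() is thus a thin shim: each MADV_* hint from userland is
     * translated to the corresponding Mach VM_BEHAVIOR_* value and applied
     * to the page-rounded range with vm_behavior_set(); e.g.
     * madvise(addr, len, MADV_SEQUENTIAL) ends up as
     * vm_behavior_set(map, start, end, VM_BEHAVIOR_SEQUENTIAL).
     */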
    case KERN_INVALID_ADDRESS:
struct mincore_args {
mincore(p, uap, retval)
    struct mincore_args *uap;
    vm_offset_t addr, first_addr;
    int vecindex, lastvecindex;
     * Make sure that the addresses presented are valid for user
    first_addr = addr = trunc_page((vm_offset_t) uap->addr);
    end = addr + (vm_size_t)round_page(uap->len);
    if (VM_MAX_ADDRESS > 0 && end > VM_MAX_ADDRESS)
     * Address of byte vector
     * Do this on a map entry basis so that if the pages are not
     * in the current process's address space, we can easily look
     * up the pages elsewhere.
    for (addr; addr < end; addr += PAGE_SIZE) {
        ret = vm_map_page_query(map, addr, &pqueryinfo, &numref);
        if (ret != KERN_SUCCESS)
        if (pqueryinfo & VM_PAGE_QUERY_PAGE_PRESENT)
            mincoreinfo |= MINCORE_INCORE;
        if (pqueryinfo & VM_PAGE_QUERY_PAGE_REF)
            mincoreinfo |= MINCORE_REFERENCED;
        if (pqueryinfo & VM_PAGE_QUERY_PAGE_DIRTY)
            mincoreinfo |= MINCORE_MODIFIED;
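        /*
         * Each page thus contributes one status byte: MINCORE_INCORE when
         * the page is resident, plus MINCORE_REFERENCED and
         * MINCORE_MODIFIED as reported by vm_map_page_query(). The byte is
         * stored at vec[(addr - first_addr) >> PAGE_SHIFT], as computed
         * below.
         */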
         * calculate index into user supplied byte vector
        vecindex = (addr - first_addr) >> PAGE_SHIFT;
         * If we have skipped map entries, we need to make sure that
         * the byte vector is zeroed for those skipped entries.
        while ((lastvecindex + 1) < vecindex) {
            error = subyte(vec + lastvecindex, 0);
         * Pass the page information to the user
        error = subyte(vec + vecindex, mincoreinfo);
        lastvecindex = vecindex;
     * Zero the last entries in the byte vector.
    vecindex = (end - first_addr) >> PAGE_SHIFT;
    while ((lastvecindex + 1) < vecindex) {
        error = subyte(vec + lastvecindex, 0);
mlock(p, uap, retval)
    struct mlock_args *uap;
    vm_size_t size, pageoff;
    kern_return_t result;
    addr = (vm_offset_t) uap->addr;
    pageoff = (addr & PAGE_MASK);
    size = (vm_size_t) round_page(size);
    /* disable wrap around */
    if (addr + size < addr)
    /* Hmm.. What am I going to do with this? */
    if (atop(size) + cnt.v_wire_count > vm_page_max_wired)
#ifdef pmap_wired_count
    if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
        p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
    error = suser(p->p_ucred, &p->p_acflag);
    user_map = current_map();
    result = vm_map_wire(user_map, addr, (vm_offset_t)(addr + size), VM_PROT_NONE, TRUE);
    return (result == KERN_SUCCESS ? 0 : ENOMEM);
struct munlock_args {
munlock(p, uap, retval)
    struct munlock_args *uap;
    vm_size_t size, pageoff;
    kern_return_t result;
    addr = (vm_offset_t) uap->addr;
    pageoff = (addr & PAGE_MASK);
    size = (vm_size_t) round_page(size);
    /* disable wrap around */
    if (addr + size < addr)
    /* Hmm.. What am I going to do with this? */
#ifndef pmap_wired_count
    error = suser(p->p_ucred, &p->p_acflag);
    user_map = current_map();
    result = vm_wire(host_priv_self(), user_map, addr, size, VM_PROT_NONE);
    return (result == KERN_SUCCESS ? 0 : ENOMEM);
struct mlockall_args {
    struct mlockall_args *uap;
struct munlockall_args {
    struct munlockall_args *uap;
struct obreak_args {
obreak(p, uap, retval)
    struct obreak_args *uap;
    /* Not implemented, obsolete */
int print_map_addr = 0;
/* CDY need to fix interface to allow user to map above 32 bits */
kern_return_t
map_fd(
    boolean_t findspace,
    boolean_t funnel_state;
    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    ret = map_fd_funneled(fd, (vm_object_offset_t)offset,
            va, findspace, size);
    (void) thread_funnel_set(kernel_flock, FALSE);
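    /*
     * map_fd() is just a funnel wrapper: it grabs the kernel funnel,
     * calls map_fd_funneled() to do the real work, and drops the funnel
     * again before returning the result.
     */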
kern_return_t
map_fd_funneled(
    vm_object_offset_t offset,
    boolean_t findspace,
    kern_return_t result;
    vm_offset_t map_addr = 0;
    struct proc *p = (struct proc *)current_proc();
    extern int print_map_addr;
     * Find the inode; verify that it's a regular file.
    err = fdgetf(p, fd, &fp);
    if (fp->f_type != DTYPE_VNODE)
        return (KERN_INVALID_ARGUMENT);
    if (!(fp->f_flag & FREAD))
        return (KERN_PROTECTION_FAILURE);
    vp = (struct vnode *)fp->f_data;
    if (vp->v_type != VREG)
        return (KERN_INVALID_ARGUMENT);
    if (offset & PAGE_MASK_64) {
        printf("map_fd: file offset not page aligned(%d : %s)\n", p->p_pid, p->p_comm);
        return (KERN_INVALID_ARGUMENT);
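    /*
     * Unlike mmap(), map_fd() does not absorb a sub-page file offset:
     * e.g. offset 0x12345 is rejected outright above, while the size is
     * simply rounded up to a whole number of pages below.
     */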
    map_size = round_page(size);
     * Allow user to map in a zero length file.
        return (KERN_SUCCESS);
    UBCINFOCHECK("map_fd_funneled", vp);
    pager = (void *)ubc_getpager(vp);
        return (KERN_FAILURE);
    my_map = current_map();
            &map_addr, map_size, (vm_offset_t)0, TRUE,
            pager, offset, TRUE,
            VM_PROT_DEFAULT, VM_PROT_ALL,
            VM_INHERIT_DEFAULT);
    if (result != KERN_SUCCESS)
        vm_offset_t dst_addr;
        if (copyin(va, &dst_addr, sizeof (dst_addr)) ||
            trunc_page(dst_addr) != dst_addr) {
            (void) vm_map_remove(
                map_addr, map_addr + map_size,
            return (KERN_INVALID_ADDRESS);
        result = vm_map_copyin(
            map_addr, map_size, TRUE,
        if (result != KERN_SUCCESS) {
            (void) vm_map_remove(
                map_addr, map_addr + map_size,
        result = vm_map_copy_overwrite(
            dst_addr, tmp, FALSE);
        if (result != KERN_SUCCESS) {
            vm_map_copy_discard(tmp);
        if (copyout(&map_addr, va, sizeof (map_addr))) {
            (void) vm_map_remove(
                map_addr, map_addr + map_size,
            return (KERN_INVALID_ADDRESS);
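    /*
     * Result delivery, in outline: when the caller supplies a destination
     * (findspace is false), the address is read from *va with copyin(),
     * must be page aligned, and the kernel-chosen mapping is moved there
     * via vm_map_copyin()/vm_map_copy_overwrite(); otherwise the chosen
     * map_addr is written back to *va with copyout(). On failure the
     * intermediate mapping or copy is cleaned up with vm_map_remove() or
     * vm_map_copy_discard() before the error is returned.
     */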
    ubc_setcred(vp, current_proc());
    return (KERN_SUCCESS
);