/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.10 (Berkeley) 2/19/95
 */

/*
 * Mapped file (mmap) interface to VM
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/vadvise.h>
#include <sys/trace.h>
#include <sys/mman.h>
#include <sys/ubc.h>

#include <mach/mach_types.h>

#include <kern/cpu_number.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>

#include <mach/vm_sync.h>
#include <mach/vm_behavior.h>
#include <mach/vm_inherit.h>
#include <mach/vm_statistics.h>
	struct sbrk_args *uap;

	/* Not yet implemented */

	struct sstk_args *uap;

	/* Not yet implemented */

ogetpagesize(p, uap, retval)

#endif /* COMPAT_43 */

osmmap(curp, uap, retval)
	register struct osmmap_args *uap;

#ifdef DOUBLE_ALIGN_PARAMS

	if ((uap->share == MAP_SHARED) || (uap->share == MAP_PRIVATE)) {
		newargs.addr = uap->addr;
		newargs.len = (size_t)uap->len;
		newargs.prot = uap->prot;
		newargs.flags = uap->share;
		newargs.fd = uap->fd;
		newargs.pos = (off_t)uap->pos;
		return (mmap(curp, &newargs, retval));
#ifdef DOUBLE_ALIGN_PARAMS

	struct mmap_args *uap;

	/*
	 *	Map in special device (must be SHARED) or file
	 */
	register struct vnode *vp;

	kern_return_t result;
	vm_offset_t user_addr;

	vm_object_offset_t file_pos;
	boolean_t find_space, docow;

	user_addr = (vm_offset_t)uap->addr;
	user_size = (vm_size_t) uap->len;
	prot = (uap->prot & VM_PROT_ALL);

	/*
	 * The vm code does not have prototypes and the compiler doesn't do
	 * the right thing when you cast a 64bit value and pass it in a
	 * function call.  So here it is.
	 */
	file_pos = (vm_object_offset_t)uap->pos;

	/* make sure mapping fits into numeric range etc */
	if ((file_pos + user_size > (vm_object_offset_t)-PAGE_SIZE_64) ||
	    ((ssize_t) uap->len < 0) ||
	    ((flags & MAP_ANON) && uap->fd != -1))
		return (EINVAL);

	/*
	 *	Align the file position to a page boundary,
	 *	and save its page offset component.
	 */
	pageoff = ((vm_offset_t)file_pos & PAGE_MASK);
	file_pos -= (vm_object_offset_t)pageoff;

	/* Adjust size for rounding (on both ends). */
	user_size += pageoff;					/* low end... */
	user_size = (vm_size_t) round_page_32(user_size);	/* hi end */
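	/*
	 * Illustrative arithmetic (a sketch assuming 4K pages, so
	 * PAGE_MASK == 0xFFF): a request with pos == 0x12345 and
	 * len == 0x100 gives
	 *	pageoff   = 0x12345 & 0xFFF              = 0x345
	 *	file_pos  = 0x12345 - 0x345              = 0x12000
	 *	user_size = round_page_32(0x100 + 0x345) = 0x1000
	 * so the mapping starts on a page boundary and covers the whole
	 * page containing the request; pageoff is added back to the
	 * address returned to the caller at the end of this routine.
	 */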
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		user_addr -= pageoff;
		if (user_addr & PAGE_MASK)
			return (EINVAL);
		/* Address range must be all in user VM space. */
		if (VM_MAX_ADDRESS > 0 && (user_addr + user_size > VM_MAX_ADDRESS))
			return (EINVAL);
		if (VM_MIN_ADDRESS > 0 && user_addr < VM_MIN_ADDRESS)
			return (EINVAL);
		if (user_addr + user_size < user_addr)
			return (EINVAL);
	}
#ifdef notyet
	/* Do not have APIs to get this info, need to wait till then */
	/*
	 * XXX for non-fixed mappings where no hint is provided or
	 * the hint would fall in the potential heap space,
	 * place it after the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	else if (addr < round_page_32(p->p_vmspace->vm_daddr + MAXDSIZ))
		addr = round_page_32(p->p_vmspace->vm_daddr + MAXDSIZ);
#endif

	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 */
		maxprot = VM_PROT_ALL;
		/*
		 * Mapping file, get fp for validation. Obtain vnode and make
		 * sure it is of appropriate type.
		 */
		err = fdgetf(p, uap->fd, &fp);

		if (fp->f_type == DTYPE_PSXSHM) {
			uap->addr = (caddr_t)user_addr;
			uap->len = user_size;
			return (pshm_mmap(p, uap, retval, fp, pageoff));
		}

		if (fp->f_type != DTYPE_VNODE)
			return (EINVAL);
		vp = (struct vnode *)fp->f_data;

		if (vp->v_type != VREG && vp->v_type != VCHR)
			return (EINVAL);
		/*
		 * XXX hack to handle use of /dev/zero to map anon memory (ala
		 * SunOS).
		 */
		if (vp->v_type == VCHR || vp->v_type == VSTR) {
			return (ENODEV);
		} else {
			/*
			 * Ensure that file and memory protections are
			 * compatible.  Note that we only worry about
			 * writability if mapping is shared; in this case,
			 * current and max prot are dictated by the open file.
			 * XXX use the vnode instead?  Problem is: what
			 * credentials do we use for determination?  What if
			 * proc does a setuid?
			 */
			maxprot = VM_PROT_EXECUTE;	/* ??? */
			if (fp->f_flag & FREAD)
				maxprot |= VM_PROT_READ;
			else if (prot & PROT_READ)
				return (EACCES);
			/*
			 * If we are sharing potential changes (either via
			 * MAP_SHARED or via the implicit sharing of character
			 * device mappings), and we are trying to get write
			 * permission although we opened it without asking
			 * for it, bail out.
			 */
			if ((flags & MAP_SHARED) != 0) {
				if ((fp->f_flag & FWRITE) != 0) {

					if ((va.va_flags &
					    (IMMUTABLE|APPEND)) == 0)
						maxprot |= VM_PROT_WRITE;
					else if (prot & PROT_WRITE)
						return (EPERM);
				} else if ((prot & PROT_WRITE) != 0)
					return (EACCES);
			} else
				maxprot |= VM_PROT_WRITE;
	/*
	 * We bend a little - round the start and end addresses
	 * to the nearest page boundary.
	 */
	user_size = round_page_32(user_size);

	if (file_pos & PAGE_MASK_64)
		return (EINVAL);

	user_map = current_map();

	if ((flags & MAP_FIXED) == 0) {
		find_space = TRUE;
		user_addr = round_page_32(user_addr);
	} else {
		if (user_addr != trunc_page_32(user_addr))
			return (EINVAL);
		find_space = FALSE;
		(void) vm_deallocate(user_map, user_addr, user_size);
	}

	/*
	 *	Lookup/allocate object.
	 */
	if (flags & MAP_ANON) {
		/*
		 *	Unnamed anonymous regions always start at 0.
		 */

	if (handle == NULL) {

#if defined(VM_PROT_READ_IS_EXEC)
		if (prot & VM_PROT_READ)
			prot |= VM_PROT_EXECUTE;

		if (maxprot & VM_PROT_READ)
			maxprot |= VM_PROT_EXECUTE;
#endif

		result = vm_allocate(user_map, &user_addr, user_size, find_space);
		if (result != KERN_SUCCESS)
			goto out;

		result = vm_protect(user_map, user_addr, user_size, TRUE, maxprot);
		if (result != KERN_SUCCESS)
			goto out;
		result = vm_protect(user_map, user_addr, user_size, FALSE, prot);
		if (result != KERN_SUCCESS)
			goto out;
	} else {
		UBCINFOCHECK("mmap", vp);
		pager = (vm_pager_t)ubc_getpager(vp);
		/*
		 * FIXME: if we're writing the file we need a way to
		 * ensure that someone doesn't replace our R/W creds
		 * with ones that only work for read.
		 */

		if ((flags & (MAP_ANON|MAP_SHARED)) == 0) {

#if defined(VM_PROT_READ_IS_EXEC)
		if (prot & VM_PROT_READ)
			prot |= VM_PROT_EXECUTE;

		if (maxprot & VM_PROT_READ)
			maxprot |= VM_PROT_EXECUTE;
#endif

		result = vm_map_64(user_map, &user_addr, user_size,
				0, find_space, pager, file_pos, docow,

		if (result != KERN_SUCCESS)
			goto out;

		if (flags & MAP_SHARED) {
			result = vm_inherit(user_map, user_addr, user_size,
					VM_INHERIT_SHARE);
			if (result != KERN_SUCCESS) {
				(void) vm_deallocate(user_map, user_addr, user_size);
				goto out;
			}
		}
	}

out:
	switch (result) {
	case KERN_SUCCESS:
		*fdflags(p, uap->fd) |= UF_MAPPED;
		*retval = (register_t)(user_addr + pageoff);
		return (0);
	case KERN_INVALID_ADDRESS:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
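/*
 * Illustrative sketch of how this entry point is typically driven from
 * user space (standard <fcntl.h>/<sys/mman.h> interfaces; the file name
 * and sizes are hypothetical, error handling trimmed):
 *
 *	int fd = open("/tmp/data", O_RDONLY);
 *	void *p = mmap(NULL, 8192, PROT_READ, MAP_SHARED, fd, 0);
 *	if (p != MAP_FAILED) {
 *		...read through p...
 *		munmap(p, 8192);
 *	}
 *
 * An anonymous mapping passes MAP_ANON with fd == -1, which is exactly
 * what the (flags & MAP_ANON) && uap->fd != -1 check near the top of
 * this function enforces.
 */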
msync(p, uap, retval)
	struct msync_args *uap;

	vm_size_t size, pageoff;

	vm_sync_t sync_flags = 0;

	addr = (vm_offset_t) uap->addr;
	pageoff = (addr & PAGE_MASK);

	size = (vm_size_t) round_page_32(size);

	if (addr + size < addr)
		return (EINVAL);

	user_map = current_map();

	if ((flags & (MS_ASYNC|MS_SYNC)) == (MS_ASYNC|MS_SYNC))
		return (EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	if (size == 0) {
		/*
		 * We cannot support this properly without maintaining a
		 * list of all the mmaps that have been done.  We cannot
		 * use vm_map_entry, as entries could be split or coalesced
		 * by independent actions.  So instead of returning
		 * inaccurate results, just return an error for an invalid
		 * size.
		 */
		return (EINVAL);	/* XXX breaks posix apps */
	}

	if (flags & MS_KILLPAGES)
		sync_flags |= VM_SYNC_KILLPAGES;
	if (flags & MS_DEACTIVATE)
		sync_flags |= VM_SYNC_DEACTIVATE;
	if (flags & MS_INVALIDATE)
		sync_flags |= VM_SYNC_INVALIDATE;

	if ( !(flags & (MS_KILLPAGES | MS_DEACTIVATE))) {
		if (flags & MS_ASYNC)
			sync_flags |= VM_SYNC_ASYNCHRONOUS;
		else
			sync_flags |= VM_SYNC_SYNCHRONOUS;
	}

	rv = vm_msync(user_map, addr, size, sync_flags);

	switch (rv) {
	case KERN_INVALID_ADDRESS:
		return (EINVAL);	/* Sun returns ENOMEM? */
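/*
 * Illustrative user-space sketch of the flag rules enforced above
 * (standard <sys/mman.h> interface; the pointer and length are
 * hypothetical):
 *
 *	msync(buf, len, MS_SYNC);			ok, synchronous flush
 *	msync(buf, len, MS_ASYNC | MS_SYNC);		EINVAL, mutually exclusive
 *	msync(buf, len, MS_ASYNC | MS_INVALIDATE);	EINVAL, rejected above
 *	msync(buf, 0, MS_SYNC);				EINVAL here, although
 *							POSIX permits a zero length
 */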
	/* Not yet implemented */

munmap(p, uap, retval)
	struct munmap_args *uap;

	vm_offset_t user_addr;
	vm_size_t user_size, pageoff;
	kern_return_t result;

	user_addr = (vm_offset_t) uap->addr;
	user_size = (vm_size_t) uap->len;

	pageoff = (user_addr & PAGE_MASK);

	user_addr -= pageoff;
	user_size += pageoff;
	user_size = round_page_32(user_size);
	if (user_addr + user_size < user_addr)
		return (EINVAL);

	/* Address range must be all in user VM space. */
	if (VM_MAX_ADDRESS > 0 && (user_addr + user_size > VM_MAX_ADDRESS))
		return (EINVAL);
	if (VM_MIN_ADDRESS > 0 && user_addr < VM_MIN_ADDRESS)
		return (EINVAL);

	result = vm_deallocate(current_map(), user_addr, user_size);
	if (result != KERN_SUCCESS) {
		return (EINVAL);
	}
	return (0);

	/*
	 * XXX should vm_deallocate any regions mapped to this file
	 */
	*fdflags(p, fd) &= ~UF_MAPPED;
struct mprotect_args {

mprotect(p, uap, retval)
	struct mprotect_args *uap;

	register vm_prot_t prot;
	vm_offset_t user_addr;
	vm_size_t user_size, pageoff;
	kern_return_t result;

	user_addr = (vm_offset_t) uap->addr;
	user_size = (vm_size_t) uap->len;
	prot = (vm_prot_t)(uap->prot & VM_PROT_ALL);

#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;
#endif

	pageoff = (user_addr & PAGE_MASK);
	user_addr -= pageoff;
	user_size += pageoff;
	user_size = round_page_32(user_size);
	if (user_addr + user_size < user_addr)
		return (EINVAL);

	user_map = current_map();

	result = vm_map_protect(user_map, user_addr, user_addr + user_size, prot,
			FALSE);

	case KERN_PROTECTION_FAILURE:
		return (EACCES);
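/*
 * Illustrative user-space sketch (standard <sys/mman.h> interface;
 * the pointer and length are hypothetical):
 *
 *	mprotect(buf, len, PROT_READ);			read-only
 *	mprotect(buf, len, PROT_READ | PROT_WRITE);	read-write
 *
 * Note that on configurations where VM_PROT_READ_IS_EXEC is defined,
 * the code above ORs in VM_PROT_EXECUTE whenever read permission is
 * requested, so PROT_READ effectively implies execute there.
 */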
struct minherit_args {

minherit(p, uap, retval)
	struct minherit_args *uap;

	vm_size_t size, pageoff;
	register vm_inherit_t inherit;

	kern_return_t result;

	addr = (vm_offset_t)uap->addr;

	inherit = uap->inherit;

	pageoff = (addr & PAGE_MASK);

	size = (vm_size_t) round_page_32(size);
	if (addr + size < addr)
		return (EINVAL);

	user_map = current_map();
	result = vm_inherit(user_map, addr, size,
			inherit);

	case KERN_PROTECTION_FAILURE:
		return (EACCES);
struct madvise_args {

madvise(p, uap, retval)
	struct madvise_args *uap;

	vm_offset_t start, end;
	vm_behavior_t new_behavior;
	kern_return_t result;

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (VM_MAX_ADDRESS > 0 &&
	    ((vm_offset_t) uap->addr + uap->len) > VM_MAX_ADDRESS)
		return (EINVAL);
	if (VM_MIN_ADDRESS > 0 && uap->addr < VM_MIN_ADDRESS)
		return (EINVAL);

	if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page_32((vm_offset_t) uap->addr);
	end = round_page_32((vm_offset_t) uap->addr + uap->len);

	user_map = current_map();

	switch (uap->behav) {
	case MADV_RANDOM:
		new_behavior = VM_BEHAVIOR_RANDOM;
		break;
	case MADV_SEQUENTIAL:
		new_behavior = VM_BEHAVIOR_SEQUENTIAL;
		break;
	case MADV_NORMAL:
		new_behavior = VM_BEHAVIOR_DEFAULT;
		break;
	case MADV_WILLNEED:
		new_behavior = VM_BEHAVIOR_WILLNEED;
		break;
	case MADV_DONTNEED:
		new_behavior = VM_BEHAVIOR_DONTNEED;
		break;
	default:
		return (EINVAL);
	}

	result = vm_behavior_set(user_map, start, end, new_behavior);

	case KERN_INVALID_ADDRESS:
		return (EINVAL);
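/*
 * Illustrative user-space sketch (standard <sys/mman.h> interface;
 * the pointer and length are hypothetical):
 *
 *	madvise(buf, len, MADV_SEQUENTIAL);	expect sequential access
 *	madvise(buf, len, MADV_DONTNEED);	pages may be reclaimed
 *
 * Each MADV_* value is translated one-for-one into the matching Mach
 * VM_BEHAVIOR_* value and applied with vm_behavior_set(); since the
 * call is only advisory, the range is simply widened to whole pages
 * with trunc_page_32()/round_page_32().
 */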
struct mincore_args {

mincore(p, uap, retval)
	struct mincore_args *uap;

	vm_offset_t addr, first_addr;

	int vecindex, lastvecindex;

	/*
	 * Make sure that the addresses presented are valid for user
	 * address space.
	 */
	first_addr = addr = trunc_page_32((vm_offset_t) uap->addr);
	end = addr + (vm_size_t)round_page_32(uap->len);

	if (VM_MAX_ADDRESS > 0 && end > VM_MAX_ADDRESS)
		return (EINVAL);

	/*
	 * Address of byte vector
	 */

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	for (addr; addr < end; addr += PAGE_SIZE) {

		ret = vm_map_page_query(map, addr, &pqueryinfo, &numref);
		if (ret != KERN_SUCCESS)
			break;

		if (pqueryinfo & VM_PAGE_QUERY_PAGE_PRESENT)
			mincoreinfo |= MINCORE_INCORE;
		if (pqueryinfo & VM_PAGE_QUERY_PAGE_REF)
			mincoreinfo |= MINCORE_REFERENCED;
		if (pqueryinfo & VM_PAGE_QUERY_PAGE_DIRTY)
			mincoreinfo |= MINCORE_MODIFIED;

		/*
		 * calculate index into user supplied byte vector
		 */
		vecindex = (addr - first_addr) >> PAGE_SHIFT;

		/*
		 * If we have skipped map entries, we need to make sure that
		 * the byte vector is zeroed for those skipped entries.
		 */
		while ((lastvecindex + 1) < vecindex) {
			error = subyte( vec + lastvecindex, 0);
			if (error)
				return (EFAULT);
			++lastvecindex;
		}

		/*
		 * Pass the page information to the user
		 */
		error = subyte( vec + vecindex, mincoreinfo);

		lastvecindex = vecindex;
	}

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = (end - first_addr) >> PAGE_SHIFT;
	while ((lastvecindex + 1) < vecindex) {
		error = subyte( vec + lastvecindex, 0);
		if (error)
			return (EFAULT);
		++lastvecindex;
	}

	return (0);
}
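/*
 * Illustrative user-space sketch (one status byte per page of the
 * queried range; the pointer and page count are hypothetical and
 * PAGE_SIZE stands for the system page size):
 *
 *	char vec[4];
 *	if (mincore(buf, 4 * PAGE_SIZE, vec) == 0 &&
 *	    (vec[0] & MINCORE_INCORE))
 *		...first page is resident...
 *
 * The loop above fills vec[(addr - first_addr) >> PAGE_SHIFT] with
 * MINCORE_INCORE / MINCORE_REFERENCED / MINCORE_MODIFIED bits taken
 * from vm_map_page_query(), zero-filling any skipped entries.
 */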
mlock(p, uap, retval)
	struct mlock_args *uap;

	vm_size_t size, pageoff;

	kern_return_t result;

	addr = (vm_offset_t) uap->addr;

	pageoff = (addr & PAGE_MASK);

	size = (vm_size_t) round_page_32(size);

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

	/* Hmm.. What am I going to do with this? */
	if (atop(size) + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#ifdef pmap_wired_count
	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return (ENOMEM);
#else
	error = suser(p->p_ucred, &p->p_acflag);
	if (error)
		return (error);
#endif

	user_map = current_map();

	result = vm_map_wire(user_map, addr, (vm_offset_t)(addr + size), VM_PROT_NONE, TRUE);
	return (result == KERN_SUCCESS ? 0 : ENOMEM);
}
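/*
 * Illustrative user-space sketch (standard <sys/mman.h> interface;
 * the pointer and length are hypothetical):
 *
 *	if (mlock(buf, len) == 0) {
 *		...buf is now wired and will not be paged out...
 *		munlock(buf, len);
 *	}
 *
 * The actual wiring is a Mach operation (vm_map_wire() here and
 * vm_wire() in munlock() below), and any failure is collapsed to
 * ENOMEM for the caller.
 */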
struct munlock_args {

munlock(p, uap, retval)
	struct munlock_args *uap;

	vm_size_t size, pageoff;

	kern_return_t result;

	addr = (vm_offset_t) uap->addr;

	pageoff = (addr & PAGE_MASK);

	size = (vm_size_t) round_page_32(size);

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

	/* Hmm.. What am I going to do with this? */
#ifndef pmap_wired_count
	error = suser(p->p_ucred, &p->p_acflag);
	if (error)
		return (error);
#endif

	user_map = current_map();

	result = vm_wire(host_priv_self(), user_map, addr, size, VM_PROT_NONE);
	return (result == KERN_SUCCESS ? 0 : ENOMEM);
}

struct mlockall_args {

	struct mlockall_args *uap;

struct munlockall_args {

	struct munlockall_args *uap;

struct obreak_args {

obreak(p, uap, retval)
	struct obreak_args *uap;

	/* Not implemented, obsolete */
/* CDY need to fix interface to allow user to map above 32 bits */
/* USV: No!  need to obsolete map_fd()!  mmap() already supports 64 bits */

	boolean_t findspace,

	boolean_t funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	ret = map_fd_funneled( fd, (vm_object_offset_t)offset,
			va, findspace, size);

	(void) thread_funnel_set(kernel_flock, FALSE);
	vm_object_offset_t offset,
	boolean_t findspace,

	kern_return_t result;

	vm_offset_t map_addr = 0;

	struct proc *p = (struct proc *)current_proc();

	/*
	 * Find the inode; verify that it's a regular file.
	 */
	err = fdgetf(p, fd, &fp);

	if (fp->f_type != DTYPE_VNODE)
		return (KERN_INVALID_ARGUMENT);

	if (!(fp->f_flag & FREAD))
		return (KERN_PROTECTION_FAILURE);

	vp = (struct vnode *)fp->f_data;

	if (vp->v_type != VREG)
		return (KERN_INVALID_ARGUMENT);

	if (offset & PAGE_MASK_64) {
		printf("map_fd: file offset not page aligned(%d : %s)\n", p->p_pid, p->p_comm);
		return (KERN_INVALID_ARGUMENT);
	}

	map_size = round_page_32(size);

	/*
	 * Allow user to map in a zero length file.
	 */
		return (KERN_SUCCESS);

	UBCINFOCHECK("map_fd_funneled", vp);
	pager = (void *) ubc_getpager(vp);

		return (KERN_FAILURE);

	my_map = current_map();

			&map_addr, map_size, (vm_offset_t)0, TRUE,
			pager, offset, TRUE,
			VM_PROT_DEFAULT, VM_PROT_ALL,
			VM_INHERIT_DEFAULT);
	if (result != KERN_SUCCESS)

		vm_offset_t dst_addr;

		if (copyin(va, &dst_addr, sizeof (dst_addr)) ||
		    trunc_page_32(dst_addr) != dst_addr) {
			(void) vm_map_remove(
					map_addr, map_addr + map_size,

			return (KERN_INVALID_ADDRESS);
		}

		result = vm_map_copyin(
				map_addr, map_size, TRUE,

		if (result != KERN_SUCCESS) {

			(void) vm_map_remove(
					map_addr, map_addr + map_size,

		}

		result = vm_map_copy_overwrite(
				dst_addr, tmp, FALSE);
		if (result != KERN_SUCCESS) {
			vm_map_copy_discard(tmp);

		}

	if (copyout(&map_addr, va, sizeof (map_addr))) {
		(void) vm_map_remove(
				map_addr, map_addr + map_size,

		return (KERN_INVALID_ADDRESS);
	}

	ubc_setcred(vp, current_proc());

	return (KERN_SUCCESS);