/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.10 (Berkeley) 2/19/95
 */

/*
 * Mapped file (mmap) interface to VM
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/resourcevar.h>
#include <sys/vnode_internal.h>
#include <sys/file_internal.h>
#include <sys/vadvise.h>
#include <sys/trace.h>
#include <sys/sysproto.h>

#include <bsm/audit_kernel.h>
#include <bsm/audit_kevents.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/vm_sync.h>
#include <mach/vm_behavior.h>
#include <mach/vm_inherit.h>
#include <mach/vm_statistics.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <mach/host_priv.h>

#include <kern/cpu_number.h>
#include <kern/host.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
int
sbrk(__unused struct proc *p, __unused struct sbrk_args *uap, __unused register_t *retval)
{
	/* Not yet implemented */
	return (ENOTSUP);
}
int
sstk(__unused struct proc *p, __unused struct sstk_args *uap, __unused register_t *retval)
{
	/* Not yet implemented */
	return (ENOTSUP);
}
int
osmmap(
	struct proc *curp,
	register struct osmmap_args *uap,
	register_t *retval)
{
	struct mmap_args newargs;
	user_addr_t addr;
	int ret;

	if ((uap->share == MAP_SHARED) || (uap->share == MAP_PRIVATE)) {
		newargs.addr = CAST_USER_ADDR_T(uap->addr);
		newargs.len = CAST_USER_ADDR_T(uap->len);
		newargs.prot = uap->prot;
		newargs.flags = uap->share;
		newargs.fd = uap->fd;
		newargs.pos = (off_t)uap->pos;
		ret = mmap(curp, &newargs, &addr);
		if (ret == 0)
			*retval = CAST_DOWN(register_t, addr);
	} else
		ret = EINVAL;
	return ret;
}
int
mmap(struct proc *p, struct mmap_args *uap, user_addr_t *retval)
{
	/*
	 *	Map in special device (must be SHARED) or file
	 */
	register struct vnode	*vp;
	kern_return_t		result;
	mach_vm_offset_t	user_addr;
	mach_vm_size_t		user_size;
	vm_object_offset_t	pageoff;
	vm_object_offset_t	file_pos;

	user_addr = (mach_vm_offset_t)uap->addr;
	user_size = (mach_vm_size_t) uap->len;

	AUDIT_ARG(addr, user_addr);
	AUDIT_ARG(len, user_size);
	AUDIT_ARG(fd, uap->fd);

	prot = (uap->prot & VM_PROT_ALL);

	/*
	 * The vm code does not have prototypes & the compiler doesn't do
	 * the right thing when you cast a 64-bit value and pass it in a
	 * function call.  So here it is.
	 */
	file_pos = (vm_object_offset_t)uap->pos;
	/* make sure mapping fits into numeric range etc */
	if ((file_pos + user_size > (vm_object_offset_t)-PAGE_SIZE_64) ||
	    ((flags & MAP_ANON) && fd != -1))
		return (EINVAL);

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (file_pos & PAGE_MASK);
	file_pos -= (vm_object_offset_t)pageoff;

	/* Adjust size for rounding (on both ends). */
	user_size += pageoff;				/* low end... */
	user_size = mach_vm_round_page(user_size);	/* hi end */
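
	/*
	 * Worked example of the rounding above (illustrative numbers only,
	 * assuming a 4 KB page size): a caller passing pos = 0x12345 and
	 * len = 0x100 yields pageoff = 0x345, file_pos = 0x12000, and
	 * user_size = round_page(0x100 + 0x345) = 0x1000.  The mapping thus
	 * covers the whole page containing the requested range, and the
	 * address returned to the caller is later advanced by pageoff.
	 */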
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		user_addr -= pageoff;
		if (user_addr & PAGE_MASK)
			return (EINVAL);
	}
#ifdef notyet
	/* DO not have apis to get this info, need to wait till then */
	/*
	 * XXX for non-fixed mappings where no hint is provided or
	 * the hint would fall in the potential heap space,
	 * place it after the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	else if (addr < mach_vm_round_page(p->p_vmspace->vm_daddr + MAXDSIZ))
		addr = mach_vm_round_page(p->p_vmspace->vm_daddr + MAXDSIZ);
#endif
	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 */
		maxprot = VM_PROT_ALL;
	} else {
		struct vnode_attr va;
		struct vfs_context context;
		/*
		 * Mapping file, get fp for validation. Obtain vnode and make
		 * sure it is of appropriate type.
		 */
		err = fp_lookup(p, fd, &fp, 0);
		if (err)
			return (err);

		if (fp->f_fglob->fg_type == DTYPE_PSXSHM) {
			uap->addr = (user_addr_t)user_addr;
			uap->len = (user_size_t)user_size;
			error = pshm_mmap(p, uap, retval, fp, (off_t)pageoff);
			goto bad;
		}

		if (fp->f_fglob->fg_type != DTYPE_VNODE) {
			error = EINVAL;
			goto bad;
		}
		vp = (struct vnode *)fp->f_fglob->fg_data;
		error = vnode_getwithref(vp);
		if (error != 0)
			goto bad;

		if (vp->v_type != VREG && vp->v_type != VCHR) {
			(void)vnode_put(vp);
			error = EINVAL;
			goto bad;
		}

		AUDIT_ARG(vnpath, vp, ARG_VNODE1);

		/*
		 * conformance change - mmap needs to update access time for
		 * mapped files
		 */
		nanotime(&va.va_access_time);
		VATTR_SET_ACTIVE(&va, va_access_time);
		context.vc_ucred = kauth_cred_get();
		vnode_setattr(vp, &va, &context);
		/*
		 * XXX hack to handle use of /dev/zero to map anon memory (ala
		 * SunOS).
		 */
		if (vp->v_type == VCHR || vp->v_type == VSTR) {
			(void)vnode_put(vp);
			error = ENODEV;
			goto bad;
		} else {
			/*
			 * Ensure that file and memory protections are
			 * compatible.  Note that we only worry about
			 * writability if mapping is shared; in this case,
			 * current and max prot are dictated by the open file.
			 * XXX use the vnode instead?  Problem is: what
			 * credentials do we use for determination? What if
			 * proc does a setuid?
			 */
			maxprot = VM_PROT_EXECUTE;	/* ??? */
			if (fp->f_fglob->fg_flag & FREAD)
				maxprot |= VM_PROT_READ;
			else if (prot & PROT_READ) {
				(void)vnode_put(vp);
				error = EACCES;
				goto bad;
			}
			/*
			 * If we are sharing potential changes (either via
			 * MAP_SHARED or via the implicit sharing of character
			 * device mappings), and we are trying to get write
			 * permission although we opened it without asking
			 * for it, bail out.
			 */
			if ((flags & MAP_SHARED) != 0) {
				if ((fp->f_fglob->fg_flag & FWRITE) != 0) {
					/*
					 * check for write access
					 *
					 * Note that we already made this check when granting FWRITE
					 * against the file, so it seems redundant here.
					 */
					error = vnode_authorize(vp, NULL, KAUTH_VNODE_CHECKIMMUTABLE, &context);

					/* if not granted for any reason, but we wanted it, bad */
					if ((prot & PROT_WRITE) && (error != 0)) {
						vnode_put(vp);
						goto bad;
					}

					/* if writable, remember */
					if (error == 0)
						maxprot |= VM_PROT_WRITE;
				} else if ((prot & PROT_WRITE) != 0) {
					(void)vnode_put(vp);
					error = EACCES;
					goto bad;
				}
			} else
				maxprot |= VM_PROT_WRITE;
		}
	if (user_size == 0) {
		error = 0;
		goto bad;
	}

	/*
	 *	We bend a little - round the start and end addresses
	 *	to the nearest page boundary.
	 */
	user_size = mach_vm_round_page(user_size);

	if (file_pos & PAGE_MASK_64) {
		error = EINVAL;
		goto bad;
	}

	user_map = current_map();

	if ((flags & MAP_FIXED) == 0) {
		alloc_flags = VM_FLAGS_ANYWHERE;
		user_addr = mach_vm_round_page(user_addr);
	} else {
		if (user_addr != mach_vm_trunc_page(user_addr)) {
			error = EINVAL;
			goto bad;
		}
		/*
		 * mmap(MAP_FIXED) will replace any existing mappings in the
		 * specified range, if the new mapping is successful.
		 * If we just deallocate the specified address range here,
		 * another thread might jump in and allocate memory in that
		 * range before we get a chance to establish the new mapping,
		 * and we won't have a chance to restore the old mappings.
		 * So we use VM_FLAGS_OVERWRITE to let Mach VM know that it
		 * has to deallocate the existing mappings and establish the
		 * new ones atomically.
		 */
		alloc_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
	}
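
	/*
	 * Illustrative userland call that takes the VM_FLAGS_OVERWRITE path
	 * above (sketch; the address is hypothetical):
	 *
	 *	void *fixed = mmap((void *)0x20000000, 4096,
	 *	    PROT_READ | PROT_WRITE,
	 *	    MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
	 *
	 * Any mapping already covering [0x20000000, 0x20001000) is replaced
	 * in one Mach VM operation rather than being deallocated first, so
	 * another thread cannot slip an allocation into the hole.
	 */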
	/*
	 * Lookup/allocate object.
	 */
	if (handle == NULL) {
#if defined(VM_PROT_READ_IS_EXEC)
		if (prot & VM_PROT_READ)
			prot |= VM_PROT_EXECUTE;
		if (maxprot & VM_PROT_READ)
			maxprot |= VM_PROT_EXECUTE;
#endif
		result = mach_vm_map(user_map, &user_addr, user_size, 0,
				alloc_flags, IPC_PORT_NULL, 0,
				FALSE, prot, maxprot,
				(flags & MAP_SHARED) ? VM_INHERIT_SHARE :
						       VM_INHERIT_DEFAULT);
		if (result != KERN_SUCCESS)
			goto out;
	} else {
		UBCINFOCHECK("mmap", vp);
		pager = (vm_pager_t)ubc_getpager(vp);

		/*
		 * FIXME: if we're writing the file we need a way to
		 * ensure that someone doesn't replace our R/W creds
		 * with ones that only work for read.
		 */
		ubc_setthreadcred(vp, p, current_thread());

		if ((flags & (MAP_ANON|MAP_SHARED)) == 0) {
			docow = TRUE;
		}
#if defined(VM_PROT_READ_IS_EXEC)
		if (prot & VM_PROT_READ)
			prot |= VM_PROT_EXECUTE;
		if (maxprot & VM_PROT_READ)
			maxprot |= VM_PROT_EXECUTE;
#endif

		result = mach_vm_map(user_map, &user_addr, user_size,
				0, alloc_flags, (ipc_port_t)pager, file_pos,
				docow, prot, maxprot,
				(flags & MAP_SHARED) ? VM_INHERIT_SHARE :
						       VM_INHERIT_DEFAULT);

		if (result != KERN_SUCCESS) {
			(void)vnode_put(vp);
			goto out;
		}

		(void)ubc_map(vp, (prot & (PROT_EXEC | PROT_READ | PROT_WRITE)));
	}

out:
	switch (result) {
	case KERN_SUCCESS:
		*retval = user_addr + pageoff;
		error = 0;
		break;
	case KERN_INVALID_ADDRESS:
		error = ENOMEM;
		break;
	case KERN_PROTECTION_FAILURE:
		error = EACCES;
		break;
	default:
		error = EINVAL;
		break;
	}
bad:
	fp_drop(p, fd, fp, 0);
	return (error);
}
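
/*
 * Illustrative userland use of the path above (sketch; the file name and
 * sizes are hypothetical):
 *
 *	int fd = open("/tmp/data", O_RDONLY);
 *	void *base = mmap(NULL, 8192, PROT_READ, MAP_PRIVATE, fd, 4096);
 *
 * The address handed back to the caller is user_addr + pageoff, so a file
 * offset that is not page aligned is reflected in the returned pointer
 * while the underlying mapping starts at the truncated offset.
 */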
int
msync(__unused struct proc *p, struct msync_args *uap, __unused register_t *retval)
{
	mach_vm_offset_t addr;
	vm_sync_t sync_flags = 0;

	addr = (mach_vm_offset_t) uap->addr;
	size = (mach_vm_size_t)uap->len;

	if (addr & PAGE_MASK_64) {
		/* UNIX SPEC: user address is not page-aligned, return EINVAL */
		return (EINVAL);
	}
	if (size == 0) {
		/*
		 * We cannot support this properly without maintaining a list
		 * of all mmaps done.  Cannot use vm_map_entry as they could
		 * be split or coalesced by independent actions.  So instead
		 * of returning inaccurate results, let's just return an
		 * error for an invalid size.
		 */
		return (EINVAL); /* XXX breaks posix apps */
	}

	/* disallow contradictory flags */
	if ((flags & (MS_SYNC|MS_ASYNC)) == (MS_SYNC|MS_ASYNC) ||
	    (flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	if (flags & MS_KILLPAGES)
		sync_flags |= VM_SYNC_KILLPAGES;
	if (flags & MS_DEACTIVATE)
		sync_flags |= VM_SYNC_DEACTIVATE;
	if (flags & MS_INVALIDATE)
		sync_flags |= VM_SYNC_INVALIDATE;

	if ( !(flags & (MS_KILLPAGES | MS_DEACTIVATE))) {
		if (flags & MS_ASYNC)
			sync_flags |= VM_SYNC_ASYNCHRONOUS;
		else
			sync_flags |= VM_SYNC_SYNCHRONOUS;
	}

	sync_flags |= VM_SYNC_CONTIGUOUS;	/* complain if holes */
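
	/*
	 * Illustrative userland call that exercises the flag translation
	 * above (sketch):
	 *
	 *	msync(base, 8192, MS_SYNC | MS_INVALIDATE);
	 *
	 * becomes VM_SYNC_SYNCHRONOUS | VM_SYNC_INVALIDATE (plus
	 * VM_SYNC_CONTIGUOUS) before the call into mach_vm_msync().
	 */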
	user_map = current_map();
	rv = mach_vm_msync(user_map, addr, size, sync_flags);

	switch (rv) {
	case KERN_SUCCESS:
		break;
	case KERN_INVALID_ADDRESS:	/* hole in region being sync'ed */
		return (ENOMEM);
	default:
		return (EINVAL);
	}
	return (0);
}
	/* Not yet implemented */
int
munmap(__unused struct proc *p, struct munmap_args *uap, __unused register_t *retval)
{
	mach_vm_offset_t	user_addr;
	mach_vm_size_t		user_size;
	kern_return_t		result;

	user_addr = (mach_vm_offset_t) uap->addr;
	user_size = (mach_vm_size_t) uap->len;

	AUDIT_ARG(addr, user_addr);
	AUDIT_ARG(len, user_size);

	if (user_addr & PAGE_MASK_64) {
		/* UNIX SPEC: user address is not page-aligned, return EINVAL */
		return (EINVAL);
	}

	if (user_addr + user_size < user_addr)
		return (EINVAL);

	if (user_size == 0) {
		/* UNIX SPEC: size is 0, return EINVAL */
		return (EINVAL);
	}

	result = mach_vm_deallocate(current_map(), user_addr, user_size);
	if (result != KERN_SUCCESS) {
		return (EINVAL);
	}
	return (0);
}
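
/*
 * Example of the alignment rule enforced above (userland sketch): munmap()
 * on an address that is not page aligned fails outright, so callers must
 * unmap from the page base returned by mmap():
 *
 *	munmap(base, 8192);			// ok: base is page aligned
 *	munmap((char *)base + 100, 8192);	// EINVAL: mid-page address
 */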
int
mprotect(__unused struct proc *p, struct mprotect_args *uap, __unused register_t *retval)
{
	register vm_prot_t	prot;
	mach_vm_offset_t	user_addr;
	mach_vm_size_t		user_size;
	kern_return_t		result;

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);
	AUDIT_ARG(value, uap->prot);

	user_addr = (mach_vm_offset_t) uap->addr;
	user_size = (mach_vm_size_t) uap->len;
	prot = (vm_prot_t)(uap->prot & VM_PROT_ALL);

	if (user_addr & PAGE_MASK_64) {
		/* UNIX SPEC: user address is not page-aligned, return EINVAL */
		return (EINVAL);
	}

#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;
#endif

	user_map = current_map();

	result = mach_vm_protect(user_map, user_addr, user_size,
				 FALSE, prot);
	switch (result) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	case KERN_INVALID_ADDRESS:
		/* UNIX SPEC: for an invalid address range, return ENOMEM */
		return (ENOMEM);
	}
	return (EINVAL);
}
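
/*
 * Userland sketch of the error mapping above: changing protections on a
 * range that is not entirely mapped fails with ENOMEM (per the UNIX spec
 * comment), e.g.
 *
 *	mprotect(unmapped_guess, 4096, PROT_NONE);	// ENOMEM
 *
 * where unmapped_guess is a hypothetical address outside any mapping.
 */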
int
minherit(__unused struct proc *p, struct minherit_args *uap, __unused register_t *retval)
{
	mach_vm_offset_t addr;
	register vm_inherit_t inherit;
	kern_return_t	result;

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);
	AUDIT_ARG(value, uap->inherit);

	addr = (mach_vm_offset_t)uap->addr;
	size = (mach_vm_size_t)uap->len;
	inherit = uap->inherit;

	user_map = current_map();
	result = mach_vm_inherit(user_map, addr, size, inherit);
	switch (result) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}
int
madvise(__unused struct proc *p, struct madvise_args *uap, __unused register_t *retval)
{
	mach_vm_offset_t start;
	vm_behavior_t new_behavior;
	kern_return_t	result;

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	switch (uap->behav) {
	case MADV_RANDOM:
		new_behavior = VM_BEHAVIOR_RANDOM;
		break;
	case MADV_SEQUENTIAL:
		new_behavior = VM_BEHAVIOR_SEQUENTIAL;
		break;
	case MADV_NORMAL:
		new_behavior = VM_BEHAVIOR_DEFAULT;
		break;
	case MADV_WILLNEED:
		new_behavior = VM_BEHAVIOR_WILLNEED;
		break;
	case MADV_DONTNEED:
		new_behavior = VM_BEHAVIOR_DONTNEED;
		break;
	default:
		return (EINVAL);
	}

	start = (mach_vm_offset_t) uap->addr;
	size = (mach_vm_size_t) uap->len;

	user_map = current_map();

	result = mach_vm_behavior_set(user_map, start, size, new_behavior);
	switch (result) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
		return (ENOMEM);
	}
	return (EINVAL);
}
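
/*
 * Userland sketch of the translation above (illustrative):
 *
 *	madvise(base, 1 << 20, MADV_SEQUENTIAL);
 *
 * becomes mach_vm_behavior_set(map, base, 1 MB, VM_BEHAVIOR_SEQUENTIAL),
 * and a range that is not mapped reports ENOMEM via KERN_INVALID_ADDRESS.
 */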
int
mincore(__unused struct proc *p, struct mincore_args *uap, __unused register_t *retval)
{
	mach_vm_offset_t addr, first_addr, end;
	int vecindex, lastvecindex;

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = mach_vm_trunc_page(uap->addr);
	end = addr + mach_vm_round_page(uap->len);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for ( ; addr < end; addr += PAGE_SIZE ) {
		ret = vm_map_page_query(map, addr, &pqueryinfo, &numref);
		if (ret != KERN_SUCCESS)
			pqueryinfo = 0;
		mincoreinfo = 0;
		if (pqueryinfo & VM_PAGE_QUERY_PAGE_PRESENT)
			mincoreinfo |= MINCORE_INCORE;
		if (pqueryinfo & VM_PAGE_QUERY_PAGE_REF)
			mincoreinfo |= MINCORE_REFERENCED;
		if (pqueryinfo & VM_PAGE_QUERY_PAGE_DIRTY)
			mincoreinfo |= MINCORE_MODIFIED;

		/*
		 * calculate index into user supplied byte vector
		 */
		vecindex = (addr - first_addr) >> PAGE_SHIFT;

		/*
		 * If we have skipped map entries, we need to make sure that
		 * the byte vector is zeroed for those skipped entries.
		 */
		while ((lastvecindex + 1) < vecindex) {
			c = 0;
			error = copyout(&c, vec + lastvecindex, 1);
			if (error)
				return (EFAULT);
			++lastvecindex;
		}

		/*
		 * Pass the page information to the user
		 */
		c = (char)mincoreinfo;
		error = copyout(&c, vec + vecindex, 1);
		if (error)
			return (EFAULT);
		lastvecindex = vecindex;
	}

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = (end - first_addr) >> PAGE_SHIFT;
	while ((lastvecindex + 1) < vecindex) {
		c = 0;
		error = copyout(&c, vec + lastvecindex, 1);
		if (error)
			return (EFAULT);
		++lastvecindex;
	}

	return (0);
}
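
/*
 * Userland view of the byte vector filled above (illustrative sketch):
 *
 *	char vec[2];
 *	mincore(base, 2 * PAGE_SIZE, vec);
 *	// vec[0] & MINCORE_INCORE   -> first page is resident
 *	// vec[1] & MINCORE_MODIFIED -> second page is dirty
 *
 * One byte is written per page of the requested range, exactly as the
 * copyout() calls above populate it.
 */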
int
mlock(__unused struct proc *p, struct mlock_args *uap, __unused register_t *retval)
{
	vm_map_offset_t addr;
	vm_map_size_t size, pageoff;
	kern_return_t	result;

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);

	addr = (vm_map_offset_t) uap->addr;
	size = (vm_map_size_t)uap->len;

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size = vm_map_round_page(size+pageoff);

#ifdef notyet
	/* Hmm.. What am I going to do with this? */
	if (atop(size) + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#ifdef pmap_wired_count
	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return (ENOMEM);
#else
	error = suser(kauth_cred_get(), &p->p_acflag);
	if (error)
		return (error);
#endif
#endif	/* notyet */

	user_map = current_map();

	/* have to call vm_map_wire directly to pass "I don't know" protections */
	result = vm_map_wire(user_map, addr, addr+size, VM_PROT_NONE, TRUE);
	return (result == KERN_SUCCESS ? 0 : ENOMEM);
}
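
/*
 * Illustrative userland use of the path above (sketch):
 *
 *	mlock(buf, len);
 *
 * The address is truncated and the length rounded to page boundaries
 * before vm_map_wire() is called, so buf and len need not be aligned.
 */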
int
munlock(__unused struct proc *p, struct munlock_args *uap, __unused register_t *retval)
{
	mach_vm_offset_t addr;
	kern_return_t	result;

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);

	addr = (mach_vm_offset_t) uap->addr;
	size = (mach_vm_size_t)uap->len;

#ifdef notyet
	/* Hmm.. What am I going to do with this? */
#ifndef pmap_wired_count
	error = suser(kauth_cred_get(), &p->p_acflag);
	if (error)
		return (error);
#endif
#endif	/* notyet */

	user_map = current_map();

	/* JMM - need to remove all wirings by spec - this just removes one */
	result = mach_vm_wire(host_priv_self(), user_map, addr, size, VM_PROT_NONE);
	return (result == KERN_SUCCESS ? 0 : ENOMEM);
}
int
mlockall(__unused struct proc *p, __unused struct mlockall_args *uap, __unused register_t *retval)
{
	return (ENOSYS);
}

int
munlockall(__unused struct proc *p, __unused struct munlockall_args *uap, __unused register_t *retval)
{
	return (ENOSYS);
}
int
obreak(__unused struct proc *p, __unused struct obreak_args *uap, __unused register_t *retval)
{
	/* Not implemented, obsolete */
	return (ENOMEM);
}

int
ovadvise(__unused struct proc *p, __unused struct ovadvise_args *uap, __unused register_t *retval)
{
	return (0);
}
/* USV: No! need to obsolete map_fd()! mmap() already supports 64 bits */
kern_return_t
map_fd(struct map_fd_args *args)
{
	int		fd = args->fd;
	vm_offset_t	offset = args->offset;
	vm_offset_t	*va = args->va;
	boolean_t	findspace = args->findspace;
	vm_size_t	size = args->size;
	kern_return_t	ret;

	AUDIT_MACH_SYSCALL_ENTER(AUE_MAPFD);
	AUDIT_ARG(addr, CAST_DOWN(user_addr_t, va));

	ret = map_fd_funneled(fd, (vm_object_offset_t)offset, va, findspace, size);

	AUDIT_MACH_SYSCALL_EXIT(ret);
	return ret;
}
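
/*
 * Legacy caller sketch (illustrative; map_fd() is the old Mach trap this
 * file wants to retire in favor of mmap()):
 *
 *	vm_offset_t addr = 0;
 *	kern_return_t kr = map_fd(fd, 0, &addr, TRUE, length);
 *
 * With findspace = TRUE the kernel picks the address and copies it back
 * through the third argument, as map_fd_funneled() does below.
 */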
kern_return_t
map_fd_funneled(
	int			fd,
	vm_object_offset_t	offset,
	vm_offset_t		*va,
	boolean_t		findspace,
	vm_size_t		size)
{
	kern_return_t	result;
	vm_offset_t	map_addr = 0;
	struct proc	*p = (struct proc *)current_proc();
	struct vnode_attr vattr;
	struct vfs_context context;

	/*
	 *	Find the inode; verify that it's a regular file.
	 */
	err = fp_lookup(p, fd, &fp, 0);
	if (err)
		return (err);

	if (fp->f_fglob->fg_type != DTYPE_VNODE) {
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}

	if (!(fp->f_fglob->fg_flag & FREAD)) {
		err = KERN_PROTECTION_FAILURE;
		goto bad;
	}

	vp = (struct vnode *)fp->f_fglob->fg_data;
	err = vnode_getwithref(vp);
	if (err != 0)
		goto bad;

	if (vp->v_type != VREG) {
		(void)vnode_put(vp);
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}

	AUDIT_ARG(vnpath, vp, ARG_VNODE1);

	/*
	 * conformance change - mmap needs to update access time for
	 * mapped files
	 */
	nanotime(&vattr.va_access_time);
	VATTR_SET_ACTIVE(&vattr, va_access_time);
	context.vc_proc = p;
	context.vc_ucred = kauth_cred_get();
	vnode_setattr(vp, &vattr, &context);
	if (offset & PAGE_MASK_64) {
		printf("map_fd: file offset not page aligned(%d : %s)\n", p->p_pid, p->p_comm);
		(void)vnode_put(vp);
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}
	map_size = round_page(size);

	/*
	 * Allow user to map in a zero length file.
	 */
	if (size == 0) {
		(void)vnode_put(vp);
		err = KERN_SUCCESS;
		goto bad;
	}

	UBCINFOCHECK("map_fd_funneled", vp);
	pager = (void *) ubc_getpager(vp);
	if (pager == NULL) {
		(void)vnode_put(vp);
		err = KERN_FAILURE;
		goto bad;
	}

	my_map = current_map();

	result = vm_map_64(
			my_map,
			&map_addr, map_size, (vm_offset_t)0,
			VM_FLAGS_ANYWHERE, pager, offset, TRUE,
			VM_PROT_DEFAULT, VM_PROT_ALL,
			VM_INHERIT_DEFAULT);
	if (result != KERN_SUCCESS) {
		(void)vnode_put(vp);
		err = result;
		goto bad;
	}
	if (!findspace) {
		vm_offset_t	dst_addr;
		vm_map_copy_t	tmp;

		if (copyin(CAST_USER_ADDR_T(va), &dst_addr, sizeof (dst_addr)) ||
		    trunc_page_32(dst_addr) != dst_addr) {
			(void) vm_map_remove(
					my_map,
					map_addr, map_addr + map_size,
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = KERN_INVALID_ADDRESS;
			goto bad;
		}

		result = vm_map_copyin(my_map, (vm_map_address_t)map_addr,
				(vm_map_size_t)map_size, TRUE, &tmp);
		if (result != KERN_SUCCESS) {
			(void) vm_map_remove(my_map, vm_map_trunc_page(map_addr),
					vm_map_round_page(map_addr + map_size),
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = result;
			goto bad;
		}

		result = vm_map_copy_overwrite(my_map,
				(vm_map_address_t)dst_addr, tmp, FALSE);
		if (result != KERN_SUCCESS) {
			vm_map_copy_discard(tmp);
			(void)vnode_put(vp);
			err = result;
			goto bad;
		}
	} else {
		if (copyout(&map_addr, CAST_USER_ADDR_T(va), sizeof (map_addr))) {
			(void) vm_map_remove(my_map, vm_map_trunc_page(map_addr),
					vm_map_round_page(map_addr + map_size),
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = KERN_INVALID_ADDRESS;
			goto bad;
		}
	}

	ubc_setthreadcred(vp, current_proc(), current_thread());
	(void)ubc_map(vp, (PROT_READ | PROT_WRITE | PROT_EXEC));
	(void)vnode_put(vp);
	err = 0;
bad:
	fp_drop(p, fd, fp, 0);
	return (err);
}