/*
 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.10 (Berkeley) 2/19/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

/*
 * Mapped file (mmap) interface to VM
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/resourcevar.h>
#include <sys/vnode_internal.h>
#include <sys/file_internal.h>
#include <sys/vadvise.h>
#include <sys/trace.h>
#include <sys/ubc_internal.h>
#include <sys/sysproto.h>
#include <sys/syscall.h>
#include <sys/kdebug.h>

#include <security/audit/audit.h>
#include <bsm/audit_kevents.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/vm_sync.h>
#include <mach/vm_behavior.h>
#include <mach/vm_inherit.h>
#include <mach/vm_statistics.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <mach/host_priv.h>

#include <kern/cpu_number.h>
#include <kern/host.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_protos.h>
/* XXX the following function should probably be static */
kern_return_t map_fd_funneled(int, vm_object_offset_t, vm_offset_t *,
		boolean_t, vm_size_t);
/*
 * XXX Internally, we use VM_PROT_* somewhat interchangeably, but the correct
 * XXX usage is PROT_* from an interface perspective.  Thus the values of
 * XXX VM_PROT_* and PROT_* need to correspond.
 */
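/*
 * A minimal userspace sketch of the interface implemented below
 * (illustrative only, not part of this file; assumes <sys/mman.h>,
 * <fcntl.h> and a readable file):
 *
 *	int fd = open("/tmp/example", O_RDONLY);
 *	void *p = mmap(NULL, 4096, PROT_READ, MAP_FILE | MAP_SHARED, fd, 0);
 *	if (p == MAP_FAILED)
 *		perror("mmap");
 *
 * The PROT_* and MAP_* values the caller passes arrive here in uap->prot
 * and uap->flags.
 */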
int
mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval)
{
	/*
	 *	Map in special device (must be SHARED) or file
	 */
	register struct vnode	*vp;
	kern_return_t		result;
	mach_vm_offset_t	user_addr;
	mach_vm_size_t		user_size;
	vm_object_offset_t	pageoff;
	vm_object_offset_t	file_pos;
	memory_object_t		pager = MEMORY_OBJECT_NULL;
	memory_object_control_t	control;
	user_addr = (mach_vm_offset_t)uap->addr;
	user_size = (mach_vm_size_t) uap->len;

	AUDIT_ARG(addr, user_addr);
	AUDIT_ARG(len, user_size);
	AUDIT_ARG(fd, uap->fd);

	prot = (uap->prot & VM_PROT_ALL);
#if 3777787
	/*
	 * Since the hardware currently does not support writing without
	 * read-before-write, or execution-without-read, if the request is
	 * for write or execute access, we must imply read access as well;
	 * otherwise programs expecting this to work will fail to operate.
	 */
	if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
		prot |= VM_PROT_READ;
#endif	/* radar 3777787 */
	/*
	 * The vm code does not have prototypes & the compiler doesn't do
	 * the right thing when you cast a 64-bit value and pass it in a
	 * function call. So here it is.
	 */
	file_pos = (vm_object_offset_t)uap->pos;
	/* make sure mapping fits into numeric range etc */
	if (file_pos + user_size > (vm_object_offset_t)-PAGE_SIZE_64)
		return (EINVAL);

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (file_pos & PAGE_MASK);
	file_pos -= (vm_object_offset_t)pageoff;

	/* Adjust size for rounding (on both ends). */
	user_size += pageoff;				/* low end... */
	user_size = mach_vm_round_page(user_size);	/* hi end */
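	/*
	 * Worked example of the adjustment above (illustrative only; assumes
	 * a 4K page size): a caller passing pos = 0x12345 and len = 0x100 gets
	 *	pageoff   = 0x12345 & 0xfff              = 0x345
	 *	file_pos  = 0x12345 - 0x345              = 0x12000 (page-aligned)
	 *	user_size = round_page(0x100 + 0x345)    = 0x1000
	 * so the mapping covers the full page containing the requested offset.
	 */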
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		user_addr -= pageoff;
		if (user_addr & PAGE_MASK)
			return (EINVAL);
	}
#ifdef notyet
	/* Do not have APIs to get this info; need to wait till then. */
	/*
	 * XXX for non-fixed mappings where no hint is provided or
	 * the hint would fall in the potential heap space,
	 * place it after the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	else if (addr < mach_vm_round_page(p->p_vmspace->vm_daddr + MAXDSIZ))
		addr = mach_vm_round_page(p->p_vmspace->vm_daddr + MAXDSIZ);
#endif
	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.  Use positive fds as the alias
		 * value for memory tracking.
		 */
		/*
		 * Use "fd" to pass (some) Mach VM allocation flags,
		 * (see the VM_FLAGS_* definitions).
		 */
		alloc_flags = fd & (VM_FLAGS_ALIAS_MASK |
				    VM_FLAGS_PURGABLE);
		if (alloc_flags != fd) {
			/* reject if there are any extra flags */
			return EINVAL;
		}

		maxprot = VM_PROT_ALL;
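		/*
		 * Illustrative userspace sketch of the fd-as-flags convention
		 * above (not from the original source; assumes
		 * <mach/vm_statistics.h> for VM_MAKE_TAG):
		 *
		 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 *		       MAP_ANON | MAP_PRIVATE,
		 *		       VM_MAKE_TAG(VM_MEMORY_APPLICATION_SPECIFIC_1), 0);
		 *
		 * The tag travels through alloc_flags via VM_FLAGS_ALIAS_MASK
		 * and shows up as the region's user tag in tools such as vmmap.
		 */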
	} else {
		struct vnode_attr va;
		vfs_context_t ctx = vfs_context_current();

		/*
		 * Mapping file, get fp for validation. Obtain vnode and make
		 * sure it is of appropriate type.
		 */
		err = fp_lookup(p, fd, &fp, 0);
		if (fp->f_fglob->fg_type == DTYPE_PSXSHM) {
			uap->addr = (user_addr_t)user_addr;
			uap->len = (user_size_t)user_size;
			error = pshm_mmap(p, uap, retval, fp, (off_t)pageoff);
			goto bad;
		}
		if (fp->f_fglob->fg_type != DTYPE_VNODE) {
			error = EINVAL;
			goto bad;
		}
		vp = (struct vnode *)fp->f_fglob->fg_data;
		error = vnode_getwithref(vp);
		if (error != 0)
			goto bad;
		if (vp->v_type != VREG && vp->v_type != VCHR) {
			(void)vnode_put(vp);
			error = EINVAL;
			goto bad;
		}

		AUDIT_ARG(vnpath, vp, ARG_VNODE1);
		/*
		 * POSIX: mmap needs to update access time for mapped files
		 */
		if ((vnode_vfsvisflags(vp) & MNT_NOATIME) == 0) {
			VATTR_INIT(&va);
			nanotime(&va.va_access_time);
			VATTR_SET_ACTIVE(&va, va_access_time);
			vnode_setattr(vp, &va, ctx);
		}
		/*
		 * XXX hack to handle use of /dev/zero to map anon memory (ala
		 * SunOS).
		 */
		if (vp->v_type == VCHR || vp->v_type == VSTR) {
			(void)vnode_put(vp);
			error = ENODEV;
			goto bad;
		} else {
			/*
			 * Ensure that file and memory protections are
			 * compatible.  Note that we only worry about
			 * writability if mapping is shared; in this case,
			 * current and max prot are dictated by the open file.
			 * XXX use the vnode instead?  Problem is: what
			 * credentials do we use for determination? What if
			 * proc does a setuid?
			 */
			maxprot = VM_PROT_EXECUTE;	/* ??? */
			if (fp->f_fglob->fg_flag & FREAD)
				maxprot |= VM_PROT_READ;
			else if (prot & PROT_READ) {
				(void)vnode_put(vp);
				error = EACCES;
				goto bad;
			}
			/*
			 * If we are sharing potential changes (either via
			 * MAP_SHARED or via the implicit sharing of character
			 * device mappings), and we are trying to get write
			 * permission although we opened it without asking
			 * for it, bail out.
			 */
			if ((flags & MAP_SHARED) != 0) {
				if ((fp->f_fglob->fg_flag & FWRITE) != 0 &&
				    /*
				     * Do not allow writable mappings of
				     * swap files (see vm_swapfile_pager.c).
				     */
				    !vnode_isswap(vp)) {
					/*
					 * check for write access
					 *
					 * Note that we already made this check when granting FWRITE
					 * against the file, so it seems redundant here.
					 */
					error = vnode_authorize(vp, NULL, KAUTH_VNODE_CHECKIMMUTABLE, ctx);

					/* if not granted for any reason, but we wanted it, bad */
					if ((prot & PROT_WRITE) && (error != 0)) {
						vnode_put(vp);
						goto bad;
					}

					/* if writable, remember */
					if (error == 0)
						maxprot |= VM_PROT_WRITE;
				} else if ((prot & PROT_WRITE) != 0) {
					(void)vnode_put(vp);
					error = EACCES;
					goto bad;
				}
			} else
				maxprot |= VM_PROT_WRITE;
			error = mac_file_check_mmap(vfs_context_ucred(ctx),
			    fp->f_fglob, prot, flags, &maxprot);
	if (user_size == 0) {
		if (!mapanon)
			(void)vnode_put(vp);
		error = 0;
		goto bad;
	}

	/*
	 *	We bend a little - round the start and end addresses
	 *	to the nearest page boundary.
	 */
	user_size = mach_vm_round_page(user_size);

	if (file_pos & PAGE_MASK_64) {
		if (!mapanon)
			(void)vnode_put(vp);
		error = EINVAL;
		goto bad;
	}
	user_map = current_map();

	if ((flags & MAP_FIXED) == 0) {
		alloc_flags |= VM_FLAGS_ANYWHERE;
		user_addr = mach_vm_round_page(user_addr);
	} else {
		if (user_addr != mach_vm_trunc_page(user_addr)) {
			error = EINVAL;
			goto bad;
		}
		/*
		 * mmap(MAP_FIXED) will replace any existing mappings in the
		 * specified range, if the new mapping is successful.
		 * If we just deallocate the specified address range here,
		 * another thread might jump in and allocate memory in that
		 * range before we get a chance to establish the new mapping,
		 * and we won't have a chance to restore the old mappings.
		 * So we use VM_FLAGS_OVERWRITE to let Mach VM know that it
		 * has to deallocate the existing mappings and establish the
		 * new ones atomically.
		 */
		alloc_flags |= VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
	}
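	/*
	 * Illustrative contrast (not from the original source): a non-atomic
	 * version of the MAP_FIXED replacement described above would be racy:
	 *
	 *	(void) mach_vm_deallocate(user_map, user_addr, user_size);
	 *	// another thread could allocate in [user_addr, user_addr + user_size)
	 *	result = vm_map_enter_mem_object(user_map, &user_addr, ...);
	 *
	 * VM_FLAGS_OVERWRITE folds the deallocate/enter pair into a single
	 * Mach VM operation so no such window exists.
	 */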
	if (flags & MAP_NOCACHE)
		alloc_flags |= VM_FLAGS_NO_CACHE;
	/*
	 * Lookup/allocate object.
	 */
	if (handle == NULL) {
#if defined(VM_PROT_READ_IS_EXEC)
		if (prot & VM_PROT_READ)
			prot |= VM_PROT_EXECUTE;
		if (maxprot & VM_PROT_READ)
			maxprot |= VM_PROT_EXECUTE;
#endif

#if 3777787
		if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			prot |= VM_PROT_READ;
		if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			maxprot |= VM_PROT_READ;
#endif	/* radar 3777787 */
		result = vm_map_enter_mem_object(user_map,
						 &user_addr, user_size,
						 0, alloc_flags,
						 IPC_PORT_NULL, 0, FALSE,
						 prot, maxprot,
						 (flags & MAP_SHARED) ?
						 VM_INHERIT_SHARE :
						 VM_INHERIT_DEFAULT);
	} else {
		if (vnode_isswap(vp)) {
			/*
			 * Map swap files with a special pager
			 * that returns obfuscated contents.
			 */
			control = NULL;
			pager = swapfile_pager_setup(vp);
			if (pager != MEMORY_OBJECT_NULL) {
				control = swapfile_pager_control(pager);
			}
		} else {
			control = ubc_getobject(vp, UBC_FLAGS_NONE);
		}

		if (control == NULL) {
			(void)vnode_put(vp);
			error = ENOMEM;
			goto bad;
		}
		/*
		 *  FIXME: if we're writing the file we need a way to
		 *  ensure that someone doesn't replace our R/W creds
		 *  with ones that only work for read.
		 */

		ubc_setthreadcred(vp, p, current_thread());
		docow = FALSE;
		if ((flags & (MAP_ANON|MAP_SHARED)) == 0) {
			docow = TRUE;
		}
#if defined(VM_PROT_READ_IS_EXEC)
		if (prot & VM_PROT_READ)
			prot |= VM_PROT_EXECUTE;
		if (maxprot & VM_PROT_READ)
			maxprot |= VM_PROT_EXECUTE;
#endif

#if 3777787
		if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			prot |= VM_PROT_READ;
		if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			maxprot |= VM_PROT_READ;
#endif	/* radar 3777787 */
		result = vm_map_enter_mem_object_control(user_map,
						 &user_addr, user_size,
						 0, alloc_flags,
						 control, file_pos,
						 docow, prot, maxprot,
						 (flags & MAP_SHARED) ?
						 VM_INHERIT_SHARE :
						 VM_INHERIT_DEFAULT);
	}
	switch (result) {
	case KERN_SUCCESS:
		*retval = user_addr + pageoff;
		error = 0;
		break;
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		error = ENOMEM;
		break;
	case KERN_PROTECTION_FAILURE:
		error = EACCES;
		break;
	default:
		error = EINVAL;
		break;
	}
bad:
	if (pager != MEMORY_OBJECT_NULL) {
		/*
		 * Release the reference on the pager.
		 * If the mapping was successful, it now holds
		 * an extra reference.
		 */
		memory_object_deallocate(pager);
	}
	fp_drop(p, fd, fp, 0);
	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_mmap) | DBG_FUNC_NONE), fd, (uint32_t)(*retval), (uint32_t)user_size, error, 0);
	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO2, SYS_mmap) | DBG_FUNC_NONE), (uint32_t)(*retval >> 32), (uint32_t)(user_size >> 32),
			      (uint32_t)(file_pos >> 32), (uint32_t)file_pos, 0);

	return (error);
}
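/*
 * Note on the two trace points above (illustrative sketch; the hi_/lo_ names
 * are placeholders, not from the original source): each 64-bit value is
 * emitted as two 32-bit halves, so a kdebug consumer would reassemble them
 * roughly as
 *
 *	uint64_t ret  = ((uint64_t)hi_ret  << 32) | lo_ret;
 *	uint64_t size = ((uint64_t)hi_size << 32) | lo_size;
 *
 * with the low halves taken from the first event and the high halves from
 * the DBG_BSD_SC_EXTENDED_INFO2 event.
 */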
int
msync(__unused proc_t p, struct msync_args *uap, int32_t *retval)
{
	__pthread_testcancel(1);
	return (msync_nocancel(p, (struct msync_nocancel_args *)uap, retval));
}
int
msync_nocancel(__unused proc_t p, struct msync_nocancel_args *uap, __unused int32_t *retval)
{
	mach_vm_offset_t addr;
	vm_sync_t sync_flags = 0;
	addr = (mach_vm_offset_t) uap->addr;
	size = (mach_vm_size_t)uap->len;

	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_msync) | DBG_FUNC_NONE), (uint32_t)(addr >> 32), (uint32_t)(size >> 32), 0, 0, 0);
	if (addr & PAGE_MASK_64) {
		/* UNIX SPEC: user address is not page-aligned, return EINVAL */
		return EINVAL;
	}
	if (size == 0) {
		/*
		 * We cannot support this properly without maintaining
		 * a list of all mmaps done. Cannot use vm_map_entry as they could be
		 * split or coalesced by independent actions. So instead of
		 * returning inaccurate results, let's just return an error for an
		 * invalid size.
		 */
		return (EINVAL); /* XXX breaks posix apps */
	}
	/* disallow contradictory flags */
	if ((flags & (MS_SYNC|MS_ASYNC)) == (MS_SYNC|MS_ASYNC))
		return (EINVAL);

	if (flags & MS_KILLPAGES)
		sync_flags |= VM_SYNC_KILLPAGES;
	if (flags & MS_DEACTIVATE)
		sync_flags |= VM_SYNC_DEACTIVATE;
	if (flags & MS_INVALIDATE)
		sync_flags |= VM_SYNC_INVALIDATE;

	if ( !(flags & (MS_KILLPAGES | MS_DEACTIVATE))) {
		if (flags & MS_ASYNC)
			sync_flags |= VM_SYNC_ASYNCHRONOUS;
		else
			sync_flags |= VM_SYNC_SYNCHRONOUS;
	}

	sync_flags |= VM_SYNC_CONTIGUOUS;	/* complain if holes */
	user_map = current_map();
	rv = mach_vm_msync(user_map, addr, size, sync_flags);
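	/*
	 * Minimal userspace sketch of the flag mapping above (illustrative
	 * only; assumes <sys/mman.h> and a file-backed mapping at p of
	 * length len):
	 *
	 *	if (msync(p, len, MS_SYNC | MS_INVALIDATE) == -1)
	 *		perror("msync");
	 *
	 * MS_SYNC becomes VM_SYNC_SYNCHRONOUS and MS_INVALIDATE becomes
	 * VM_SYNC_INVALIDATE before the single mach_vm_msync() call above.
	 */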
	case KERN_INVALID_ADDRESS:	/* hole in region being sync'ed */
int
munmap(__unused proc_t p, struct munmap_args *uap, __unused int32_t *retval)
{
	mach_vm_offset_t	user_addr;
	mach_vm_size_t		user_size;
	kern_return_t		result;
	user_addr = (mach_vm_offset_t) uap->addr;
	user_size = (mach_vm_size_t) uap->len;

	AUDIT_ARG(addr, user_addr);
	AUDIT_ARG(len, user_size);
	if (user_addr & PAGE_MASK_64) {
		/* UNIX SPEC: user address is not page-aligned, return EINVAL */
		return EINVAL;
	}

	if (user_addr + user_size < user_addr)
		return (EINVAL);

	if (user_size == 0) {
		/* UNIX SPEC: size is 0, return EINVAL */
		return EINVAL;
	}
	result = mach_vm_deallocate(current_map(), user_addr, user_size);
	if (result != KERN_SUCCESS) {
		return (EINVAL);
	}
	return (0);
}
int
mprotect(__unused proc_t p, struct mprotect_args *uap, __unused int32_t *retval)
{
	register vm_prot_t	prot;
	mach_vm_offset_t	user_addr;
	mach_vm_size_t		user_size;
	kern_return_t		result;
	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);
	AUDIT_ARG(value32, uap->prot);

	user_addr = (mach_vm_offset_t) uap->addr;
	user_size = (mach_vm_size_t) uap->len;
	prot = (vm_prot_t)(uap->prot & VM_PROT_ALL);
	if (user_addr & PAGE_MASK_64) {
		/* UNIX SPEC: user address is not page-aligned, return EINVAL */
		return EINVAL;
	}
#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;
#endif

#if 3777787
	if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
		prot |= VM_PROT_READ;
#endif	/* radar 3777787 */
	user_map = current_map();
	/*
	 * The MAC check for mprotect is of limited use for 2 reasons:
	 * Without mmap revocation, the caller could have asked for the max
	 * protections initially instead of a reduced set, so an mprotect
	 * check would offer no new security.
	 * It is not possible to extract the vnode from the pager object(s)
	 * of the target memory range.
	 * However, the MAC check may be used to prevent a process from,
	 * e.g., making the stack executable.
	 */
	error = mac_proc_check_mprotect(p, user_addr,
			user_size, prot);
	if (error)
		return (error);
	result = mach_vm_protect(user_map, user_addr, user_size,
			FALSE, prot);
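	/*
	 * Illustrative userspace sketch of this path (not from the original
	 * source; assumes a page-aligned buffer p of length len):
	 *
	 *	if (mprotect(p, len, PROT_READ) == -1)
	 *		perror("mprotect");
	 *
	 * On targets where VM_PROT_READ_IS_EXEC is defined, the requested
	 * protection may be widened to include execute permission before
	 * mach_vm_protect() runs, per the block above.
	 */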
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	case KERN_INVALID_ADDRESS:
		/* UNIX SPEC: for an invalid address range, return ENOMEM */
		return ENOMEM;
int
minherit(__unused proc_t p, struct minherit_args *uap, __unused int32_t *retval)
{
	mach_vm_offset_t	addr;
	register vm_inherit_t	inherit;
	kern_return_t		result;
	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);
	AUDIT_ARG(value32, uap->inherit);

	addr = (mach_vm_offset_t)uap->addr;
	size = (mach_vm_size_t)uap->len;
	inherit = uap->inherit;

	user_map = current_map();
	result = mach_vm_inherit(user_map, addr, size,
				inherit);
	case KERN_PROTECTION_FAILURE:
int
madvise(__unused proc_t p, struct madvise_args *uap, __unused int32_t *retval)
{
	mach_vm_offset_t	start;
	vm_behavior_t		new_behavior;
	kern_return_t		result;
	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	switch (uap->behav) {
		case MADV_RANDOM:
			new_behavior = VM_BEHAVIOR_RANDOM;
			break;
		case MADV_SEQUENTIAL:
			new_behavior = VM_BEHAVIOR_SEQUENTIAL;
			break;
		case MADV_NORMAL:
			new_behavior = VM_BEHAVIOR_DEFAULT;
			break;
		case MADV_WILLNEED:
			new_behavior = VM_BEHAVIOR_WILLNEED;
			break;
		case MADV_DONTNEED:
			new_behavior = VM_BEHAVIOR_DONTNEED;
			break;
		case MADV_FREE:
			new_behavior = VM_BEHAVIOR_FREE;
			break;
		case MADV_ZERO_WIRED_PAGES:
			new_behavior = VM_BEHAVIOR_ZERO_WIRED_PAGES;
			break;
		case MADV_FREE_REUSABLE:
			new_behavior = VM_BEHAVIOR_REUSABLE;
			break;
		case MADV_FREE_REUSE:
			new_behavior = VM_BEHAVIOR_REUSE;
			break;
		case MADV_CAN_REUSE:
			new_behavior = VM_BEHAVIOR_CAN_REUSE;
			break;
		default:
			return (EINVAL);
	}
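	/*
	 * Illustrative userspace sketch of the MADV_FREE_REUSABLE /
	 * MADV_FREE_REUSE pair handled above (not from the original source;
	 * p/len are a previously mmap'ed anonymous region):
	 *
	 *	madvise(p, len, MADV_FREE_REUSABLE);	// pages may be reclaimed
	 *	...
	 *	madvise(p, len, MADV_FREE_REUSE);	// about to touch them again
	 *
	 * Each request is translated to the corresponding VM_BEHAVIOR_* value
	 * and applied with mach_vm_behavior_set() below.
	 */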
	start = (mach_vm_offset_t) uap->addr;
	size = (mach_vm_size_t) uap->len;

	user_map = current_map();

	result = mach_vm_behavior_set(user_map, start, size, new_behavior);
	case KERN_INVALID_ADDRESS:
int
mincore(__unused proc_t p, struct mincore_args *uap, __unused int32_t *retval)
{
	mach_vm_offset_t	addr, first_addr, end;
	int			vecindex, lastvecindex;
	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = mach_vm_trunc_page(uap->addr);
	end = addr + mach_vm_round_page(uap->len);
	/*
	 * Address of byte vector
	 */
	vec = uap->vec;
	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	for ( ; addr < end; addr += PAGE_SIZE ) {
		ret = mach_vm_page_query(map, addr, &pqueryinfo, &numref);
		if (ret != KERN_SUCCESS)
			break;
		if (pqueryinfo & VM_PAGE_QUERY_PAGE_PRESENT)
			mincoreinfo |= MINCORE_INCORE;
		if (pqueryinfo & VM_PAGE_QUERY_PAGE_REF)
			mincoreinfo |= MINCORE_REFERENCED;
		if (pqueryinfo & VM_PAGE_QUERY_PAGE_DIRTY)
			mincoreinfo |= MINCORE_MODIFIED;
		/*
		 * calculate index into user supplied byte vector
		 */
		vecindex = (addr - first_addr) >> PAGE_SHIFT;
		/*
		 * If we have skipped map entries, we need to make sure that
		 * the byte vector is zeroed for those skipped entries.
		 */
		while ((lastvecindex + 1) < vecindex) {
			c = 0;
			error = copyout(&c, vec + lastvecindex, 1);
			if (error) {
				return (EFAULT);
			}
			++lastvecindex;
		}
		/*
		 * Pass the page information to the user
		 */
		c = (char)mincoreinfo;
		error = copyout(&c, vec + vecindex, 1);
		if (error) {
			return (EFAULT);
		}
		lastvecindex = vecindex;
	}
	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = (end - first_addr) >> PAGE_SHIFT;
	while ((lastvecindex + 1) < vecindex) {
		c = 0;
		error = copyout(&c, vec + lastvecindex, 1);
		if (error) {
			return (EFAULT);
		}
		++lastvecindex;
	}

	return (0);
}
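/*
 * Minimal userspace sketch of the byte vector filled in above (illustrative
 * only; assumes <sys/mman.h>, a PAGE_SIZE definition, and a mapping at p of
 * length len):
 *
 *	char *vec = malloc((len + PAGE_SIZE - 1) / PAGE_SIZE);
 *	if (mincore(p, len, vec) == 0 && (vec[0] & MINCORE_INCORE))
 *		printf("first page is resident\n");
 *
 * Each byte holds the MINCORE_* bits computed per page in the loop above.
 */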
int
mlock(__unused proc_t p, struct mlock_args *uap, __unused int32_t *retvalval)
{
	vm_map_offset_t	addr;
	vm_map_size_t	size, pageoff;
	kern_return_t	result;
	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);

	addr = (vm_map_offset_t) uap->addr;
	size = (vm_map_size_t)uap->len;
	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

	pageoff = (addr & PAGE_MASK);
	size = vm_map_round_page(size + pageoff);
	user_map = current_map();
	/* have to call vm_map_wire directly to pass "I don't know" protections */
	result = vm_map_wire(user_map, addr, addr + size, VM_PROT_NONE, TRUE);
	if (result == KERN_RESOURCE_SHORTAGE)
		return EAGAIN;
	else if (result != KERN_SUCCESS)
		return ENOMEM;

	return 0;	/* KERN_SUCCESS */
}
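/*
 * Illustrative userspace sketch (not from the original source): because the
 * wiring above passes VM_PROT_NONE ("I don't know" protections) and rounds
 * the range out to page boundaries, a caller simply does
 *
 *	if (mlock(p, len) == -1)	// errno mapped from the kern_return_t above
 *		perror("mlock");
 *
 * and any partially covered page is wired in full.
 */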
int
munlock(__unused proc_t p, struct munlock_args *uap, __unused int32_t *retval)
{
	mach_vm_offset_t	addr;
	kern_return_t		result;
	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);

	addr = (mach_vm_offset_t) uap->addr;
	size = (mach_vm_size_t)uap->len;
	user_map = current_map();
	/* JMM - need to remove all wirings by spec - this just removes one */
	result = mach_vm_wire(host_priv_self(), user_map, addr, size, VM_PROT_NONE);
	return (result == KERN_SUCCESS ? 0 : ENOMEM);
}
int
mlockall(__unused proc_t p, __unused struct mlockall_args *uap, __unused int32_t *retval)
{
	return (ENOSYS);
}

int
munlockall(__unused proc_t p, __unused struct munlockall_args *uap, __unused int32_t *retval)
{
	return (ENOSYS);
}
/* USV: No! need to obsolete map_fd()! mmap() already supports 64 bits */
kern_return_t
map_fd(struct map_fd_args *args)
{
	int		fd = args->fd;
	vm_offset_t	offset = args->offset;
	vm_offset_t	*va = args->va;
	boolean_t	findspace = args->findspace;
	vm_size_t	size = args->size;
	kern_return_t	ret;
	AUDIT_MACH_SYSCALL_ENTER(AUE_MAPFD);
	AUDIT_ARG(addr, CAST_DOWN(user_addr_t, args->va));
	ret = map_fd_funneled(fd, (vm_object_offset_t)offset, va, findspace, size);

	AUDIT_MACH_SYSCALL_EXIT(ret);
	return ret;
}
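/*
 * Illustrative sketch of the mmap() equivalent suggested by the comment
 * above (not from the original source): a caller of
 * map_fd(fd, off, &va, TRUE, size) can generally use
 *
 *	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *		       MAP_FILE | MAP_PRIVATE, fd, off);
 *
 * which handles 64-bit user addresses, unlike the 32-bit copyout in
 * map_fd_funneled() below.
 */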
kern_return_t
map_fd_funneled(
	int			fd,
	vm_object_offset_t	offset,
	vm_offset_t		*va,
	boolean_t		findspace,
	vm_size_t		size)
{
	kern_return_t		result;
	struct fileproc		*fp;
	vm_offset_t		map_addr = 0;
	proc_t			p = current_proc();
	struct vnode_attr	vattr;
	/*
	 * Find the inode; verify that it's a regular file.
	 */
	err = fp_lookup(p, fd, &fp, 0);
	if (fp->f_fglob->fg_type != DTYPE_VNODE) {
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}
	if (!(fp->f_fglob->fg_flag & FREAD)) {
		err = KERN_PROTECTION_FAILURE;
		goto bad;
	}
	vp = (struct vnode *)fp->f_fglob->fg_data;
	err = vnode_getwithref(vp);
	if (err != 0)
		goto bad;
	if (vp->v_type != VREG) {
		(void)vnode_put(vp);
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}
	AUDIT_ARG(vnpath, vp, ARG_VNODE1);
	/*
	 * POSIX: mmap needs to update access time for mapped files
	 */
	if ((vnode_vfsvisflags(vp) & MNT_NOATIME) == 0) {
		VATTR_INIT(&vattr);
		nanotime(&vattr.va_access_time);
		VATTR_SET_ACTIVE(&vattr, va_access_time);
		vnode_setattr(vp, &vattr, vfs_context_current());
	}
	if (offset & PAGE_MASK_64) {
		printf("map_fd: file offset not page aligned(%d : %s)\n", p->p_pid, p->p_comm);
		(void)vnode_put(vp);
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}
	map_size = round_page(size);

	/*
	 * Allow user to map in a zero length file.
	 */
	if (size == 0) {
		(void)vnode_put(vp);
		err = KERN_SUCCESS;
		goto bad;
	}
	pager = (void *)ubc_getpager(vp);
	if (pager == NULL) {
		(void)vnode_put(vp);
		err = KERN_FAILURE;
		goto bad;
	}

	my_map = current_map();
	result = vm_map_64(
			my_map,
			&map_addr, map_size, (vm_offset_t)0,
			VM_FLAGS_ANYWHERE, pager, offset, TRUE,
			VM_PROT_DEFAULT, VM_PROT_ALL,
			VM_INHERIT_DEFAULT);
	if (result != KERN_SUCCESS) {
		(void)vnode_put(vp);
		err = result;
		goto bad;
	}
	if (!findspace) {
		//K64todo fix for 64bit user?
		uint32_t	dst_addr;
		vm_map_copy_t	tmp;

		if (copyin(CAST_USER_ADDR_T(va), &dst_addr, sizeof (dst_addr)) ||
				trunc_page(dst_addr) != dst_addr) {
			(void) vm_map_remove(
					my_map,
					map_addr, map_addr + map_size,
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = KERN_INVALID_ADDRESS;
			goto bad;
		}
		result = vm_map_copyin(my_map, (vm_map_address_t)map_addr,
				       (vm_map_size_t)map_size, TRUE, &tmp);
		if (result != KERN_SUCCESS) {
			(void) vm_map_remove(my_map, vm_map_trunc_page(map_addr),
					vm_map_round_page(map_addr + map_size),
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = result;
			goto bad;
		}
		result = vm_map_copy_overwrite(my_map,
				(vm_map_address_t)dst_addr, tmp, FALSE);
		if (result != KERN_SUCCESS) {
			vm_map_copy_discard(tmp);
			(void)vnode_put(vp);
			err = result;
			goto bad;
		}
	} else {
		// K64todo bug compatible now, should fix for 64bit user
		uint32_t user_map_addr = CAST_DOWN_EXPLICIT(uint32_t, map_addr);
		if (copyout(&user_map_addr, CAST_USER_ADDR_T(va), sizeof (user_map_addr))) {
			(void) vm_map_remove(my_map, vm_map_trunc_page(map_addr),
					vm_map_round_page(map_addr + map_size),
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = KERN_INVALID_ADDRESS;
			goto bad;
		}
	}
	ubc_setthreadcred(vp, current_proc(), current_thread());
	(void)vnode_put(vp);
	err = 0;
bad:
	fp_drop(p, fd, fp, 0);
	return (err);
}