/*
 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.10 (Berkeley) 2/19/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

/*
 * Mapped file (mmap) interface to VM
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/resourcevar.h>
#include <sys/vnode_internal.h>
#include <sys/file_internal.h>
#include <sys/vadvise.h>
#include <sys/trace.h>
#include <sys/ubc_internal.h>
#include <sys/sysproto.h>
#include <sys/cprotect.h>
#include <sys/syscall.h>
#include <sys/kdebug.h>
#include <sys/bsdtask_info.h>

#include <security/audit/audit.h>
#include <bsm/audit_kevents.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/vm_sync.h>
#include <mach/vm_behavior.h>
#include <mach/vm_inherit.h>
#include <mach/vm_statistics.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <mach/host_priv.h>

#include <machine/machine_routines.h>

#include <kern/cpu_number.h>
#include <kern/host.h>
#include <kern/task.h>
#include <kern/page_decrypt.h>

#include <IOKit/IOReturn.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_protos.h>
/*
 * XXX Internally, we use VM_PROT_* somewhat interchangeably, but the correct
 * XXX usage is PROT_* from an interface perspective.  Thus the values of
 * XXX VM_PROT_* and PROT_* need to correspond.
 */
int
mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval)
{
	/*
	 *	Map in special device (must be SHARED) or file
	 */
	struct fileproc		*fp;
	register struct vnode	*vp;
	int			flags;
	int			prot;
	int			err = 0;
	vm_map_t		user_map;
	kern_return_t		result;
	vm_map_offset_t		user_addr;
	vm_map_size_t		user_size;
	vm_object_offset_t	pageoff;
	vm_object_offset_t	file_pos;
	int			alloc_flags = 0;
	boolean_t		docow;
	vm_prot_t		maxprot;
	void			*handle;
	memory_object_t		pager = MEMORY_OBJECT_NULL;
	memory_object_control_t	control;
	int			mapanon = 0;
	int			fpref = 0;
	int			error = 0;
	int			fd = uap->fd;
	int			num_retries = 0;
	user_map = current_map();
	user_addr = (vm_map_offset_t)uap->addr;
	user_size = (vm_map_size_t) uap->len;

	AUDIT_ARG(addr, user_addr);
	AUDIT_ARG(len, user_size);
	AUDIT_ARG(fd, uap->fd);
	flags = uap->flags;
	prot = (uap->prot & VM_PROT_ALL);
#if 3777787
	/*
	 * Since the hardware currently does not support writing without
	 * read-before-write, or execution-without-read, if the request is
	 * for write or execute access, we must imply read access as well;
	 * otherwise programs expecting this to work will fail to operate.
	 */
	if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
		prot |= VM_PROT_READ;
#endif	/* radar 3777787 */
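
	/*
	 * Worked example (illustrative): a request for PROT_WRITE alone
	 * arrives here as VM_PROT_WRITE and leaves as
	 * VM_PROT_READ | VM_PROT_WRITE; a PROT_NONE request is unchanged.
	 */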
	/*
	 * The vm code does not have prototypes & compiler doesn't do
	 * the right thing when you cast 64bit value and pass it in function
	 * call. So here it is.
	 */
	file_pos = (vm_object_offset_t)uap->pos;
	/* make sure mapping fits into numeric range etc */
	if (file_pos + user_size > (vm_object_offset_t)-PAGE_SIZE_64)
		return (EINVAL);
	/*
	 *	Align the file position to a page boundary,
	 *	and save its page offset component.
	 */
	pageoff = (file_pos & vm_map_page_mask(user_map));
	file_pos -= (vm_object_offset_t)pageoff;
	/* Adjust size for rounding (on both ends). */
	user_size += pageoff;	/* low end... */
	user_size = vm_map_round_page(user_size,
				      vm_map_page_mask(user_map));	/* hi end */
	if ((flags & MAP_JIT) && ((flags & MAP_FIXED) || (flags & MAP_SHARED) || !(flags & MAP_ANON))) {
		return EINVAL;
	}
	/*
	 *	Check for illegal addresses.  Watch out for address wrap... Note
	 *	that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		user_addr -= pageoff;
		if (user_addr & vm_map_page_mask(user_map))
			return (EINVAL);
	}
#ifdef notyet
	/* DO not have apis to get this info, need to wait till then */
	/*
	 * XXX for non-fixed mappings where no hint is provided or
	 * the hint would fall in the potential heap space,
	 * place it after the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	else if (addr < vm_map_round_page(p->p_vmspace->vm_daddr + MAXDSIZ,
					  vm_map_page_mask(user_map)))
		addr = vm_map_round_page(p->p_vmspace->vm_daddr + MAXDSIZ,
					 vm_map_page_mask(user_map));
#endif
	if (flags & MAP_ANON) {

		maxprot = VM_PROT_ALL;
#if CONFIG_MACF
		/*
		 * Entitlement check.
		 */
		error = mac_proc_check_map_anon(p, user_addr, user_size, prot, flags, &maxprot);
		if (error) {
			return EINVAL;
		}
#endif /* MAC */

		/*
		 * Mapping blank space is trivial.  Use positive fds as the alias
		 * value for memory tracking.
		 */
		if (fd != -1) {
			/*
			 * Use "fd" to pass (some) Mach VM allocation flags,
			 * (see the VM_FLAGS_* definitions).
			 */
			alloc_flags = fd & (VM_FLAGS_ALIAS_MASK | VM_FLAGS_SUPERPAGE_MASK |
					    VM_FLAGS_PURGABLE);
			if (alloc_flags != fd) {
				/* reject if there are any extra flags */
				return EINVAL;
			}
		}

		handle = NULL;
		file_pos = 0;
		mapanon = 1;
	} else {
		struct vnode_attr va;
		vfs_context_t ctx = vfs_context_current();

		/*
		 * Mapping file, get fp for validation. Obtain vnode and make
		 * sure it is of appropriate type.
		 */
		err = fp_lookup(p, fd, &fp, 0);
		if (err)
			return (err);
		fpref = 1;
		switch (FILEGLOB_DTYPE(fp->f_fglob)) {
		case DTYPE_PSXSHM:
			uap->addr = (user_addr_t)user_addr;
			uap->len = (user_size_t)user_size;
			uap->prot = prot;
			uap->flags = flags;
			uap->pos = file_pos;
			error = pshm_mmap(p, uap, retval, fp, (off_t)pageoff);
			goto bad;
		case DTYPE_VNODE:
			break;
		default:
			error = EINVAL;
			goto bad;
		}
		vp = (struct vnode *)fp->f_fglob->fg_data;
		error = vnode_getwithref(vp);
		if (error != 0)
			goto bad;

		if (vp->v_type != VREG && vp->v_type != VCHR) {
			(void) vnode_put(vp);
			error = EINVAL;
			goto bad;
		}

		AUDIT_ARG(vnpath, vp, ARG_VNODE1);
		/*
		 * POSIX: mmap needs to update access time for mapped files
		 */
		if ((vnode_vfsvisflags(vp) & MNT_NOATIME) == 0) {
			VATTR_INIT(&va);
			nanotime(&va.va_access_time);
			VATTR_SET_ACTIVE(&va, va_access_time);
			vnode_setattr(vp, &va, ctx);
		}
		/*
		 * XXX hack to handle use of /dev/zero to map anon memory (ala
		 * SunOS).
		 */
		if (vp->v_type == VCHR || vp->v_type == VSTR) {
			(void)vnode_put(vp);
			error = ENODEV;
			goto bad;
		} else {
			/*
			 *  Ensure that file and memory protections are
			 *  compatible.  Note that we only worry about
			 *  writability if mapping is shared; in this case,
			 *  current and max prot are dictated by the open file.
			 *  XXX use the vnode instead?  Problem is: what
			 *  credentials do we use for determination? What if
			 *  proc does a setuid?
			 */
			maxprot = VM_PROT_EXECUTE;	/* ??? */
			if (fp->f_fglob->fg_flag & FREAD)
				maxprot |= VM_PROT_READ;
			else if (prot & PROT_READ) {
				(void)vnode_put(vp);
				error = EACCES;
				goto bad;
			}
			/*
			 * If we are sharing potential changes (either via
			 * MAP_SHARED or via the implicit sharing of character
			 * device mappings), and we are trying to get write
			 * permission although we opened it without asking
			 * for it, bail out.
			 */
			if ((flags & MAP_SHARED) != 0) {
				if ((fp->f_fglob->fg_flag & FWRITE) != 0 &&
				    /*
				     * Do not allow writable mappings of
				     * swap files (see vm_swapfile_pager.c).
				     */
				    !vnode_isswap(vp)) {
					/*
					 * check for write access
					 *
					 * Note that we already made this check when granting FWRITE
					 * against the file, so it seems redundant here.
					 */
					error = vnode_authorize(vp, NULL, KAUTH_VNODE_CHECKIMMUTABLE, ctx);

					/* if not granted for any reason, but we wanted it, bad */
					if ((prot & PROT_WRITE) && (error != 0)) {
						vnode_put(vp);
						goto bad;
					}

					/* if writable, remember */
					if (error == 0)
						maxprot |= VM_PROT_WRITE;
				}
			} else if ((prot & PROT_WRITE) != 0) {
				maxprot |= VM_PROT_WRITE;
			}
#if CONFIG_MACF
			error = mac_file_check_mmap(vfs_context_ucred(ctx),
			    fp->f_fglob, prot, flags, &maxprot);
			if (error) {
				(void)vnode_put(vp);
				goto bad;
			}
#endif /* MAC */

#if CONFIG_PROTECT
			error = cp_handle_vnop(vp, CP_READ_ACCESS | CP_WRITE_ACCESS, 0);
			if (error) {
				(void) vnode_put(vp);
				goto bad;
			}
#endif /* CONFIG_PROTECT */

			handle = (void *)vp;
		}
	}
	if (user_size == 0) {
		if (!mapanon)
			(void)vnode_put(vp);
		error = 0;
		goto bad;
	}

	/*
	 *	We bend a little - round the start and end addresses
	 *	to the nearest page boundary.
	 */
	user_size = vm_map_round_page(user_size,
				      vm_map_page_mask(user_map));

	if (file_pos & vm_map_page_mask(user_map)) {
		if (!mapanon)
			(void)vnode_put(vp);
		error = EINVAL;
		goto bad;
	}
	if ((flags & MAP_FIXED) == 0) {
		alloc_flags |= VM_FLAGS_ANYWHERE;
		user_addr = vm_map_round_page(user_addr,
					      vm_map_page_mask(user_map));
	} else {
		if (user_addr != vm_map_trunc_page(user_addr,
						   vm_map_page_mask(user_map))) {
			if (!mapanon)
				(void)vnode_put(vp);
			error = EINVAL;
			goto bad;
		}
		/*
		 * mmap(MAP_FIXED) will replace any existing mappings in the
		 * specified range, if the new mapping is successful.
		 * If we just deallocate the specified address range here,
		 * another thread might jump in and allocate memory in that
		 * range before we get a chance to establish the new mapping,
		 * and we won't have a chance to restore the old mappings.
		 * So we use VM_FLAGS_OVERWRITE to let Mach VM know that it
		 * has to deallocate the existing mappings and establish the
		 * new ones atomically.
		 */
		alloc_flags |= VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
	}
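
	/*
	 * Usage sketch (illustrative, not part of this file): a call such as
	 *
	 *	mmap(addr, size, PROT_READ, MAP_FIXED | MAP_PRIVATE, fd, 0);
	 *
	 * therefore replaces whatever was mapped at [addr, addr + size)
	 * in a single atomic Mach VM operation, instead of a separate
	 * munmap() followed by a racy re-allocation.
	 */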
	if (flags & MAP_NOCACHE)
		alloc_flags |= VM_FLAGS_NO_CACHE;

	if (flags & MAP_JIT) {
		alloc_flags |= VM_FLAGS_MAP_JIT;
	}
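
	/*
	 * Usage sketch (illustrative, not part of this file): the MAP_JIT
	 * path is reached by an anonymous, private, non-fixed request:
	 *
	 *	void *rwx = mmap(NULL, size,
	 *	    PROT_READ | PROT_WRITE | PROT_EXEC,
	 *	    MAP_PRIVATE | MAP_ANON | MAP_JIT, -1, 0);
	 *
	 * MAP_JIT combined with MAP_FIXED, MAP_SHARED, or a file
	 * descriptor was already rejected with EINVAL above.
	 */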
	/*
	 * Lookup/allocate object.
	 */
	if (handle == NULL) {
#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
		if (prot & VM_PROT_READ)
			prot |= VM_PROT_EXECUTE;
		if (maxprot & VM_PROT_READ)
			maxprot |= VM_PROT_EXECUTE;
#endif
#endif /* notyet */

#if 3777787
		if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			prot |= VM_PROT_READ;
		if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			maxprot |= VM_PROT_READ;
#endif	/* radar 3777787 */
map_anon_retry:
		result = vm_map_enter_mem_object(user_map,
						 &user_addr, user_size,
						 0, alloc_flags,
						 IPC_PORT_NULL, 0, FALSE,
						 prot, maxprot,
						 (flags & MAP_SHARED) ?
						 VM_INHERIT_SHARE :
						 VM_INHERIT_DEFAULT);

		/* If a non-binding address was specified for this anonymous
		 * mapping, retry the mapping with a zero base
		 * in the event the mapping operation failed due to
		 * lack of space between the address and the map's maximum.
		 */
		if ((result == KERN_NO_SPACE) && ((flags & MAP_FIXED) == 0) && user_addr && (num_retries++ == 0)) {
			user_addr = vm_map_page_size(user_map);
			goto map_anon_retry;
		}
	} else {
		if (vnode_isswap(vp)) {
			/*
			 * Map swap files with a special pager
			 * that returns obfuscated contents.
			 */
			control = NULL;
			pager = swapfile_pager_setup(vp);
			if (pager != MEMORY_OBJECT_NULL) {
				control = swapfile_pager_control(pager);
			}
		} else {
			control = ubc_getobject(vp, UBC_FLAGS_NONE);
		}

		if (control == NULL) {
			(void)vnode_put(vp);
			error = ENOMEM;
			goto bad;
		}

		/*
		 *  FIXME: if we're writing the file we need a way to
		 *  ensure that someone doesn't replace our R/W creds
		 *  with ones that only work for read.
		 */

		ubc_setthreadcred(vp, p, current_thread());
		docow = FALSE;
		if ((flags & (MAP_ANON|MAP_SHARED)) == 0) {
			docow = TRUE;
		}
#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
		if (prot & VM_PROT_READ)
			prot |= VM_PROT_EXECUTE;
		if (maxprot & VM_PROT_READ)
			maxprot |= VM_PROT_EXECUTE;
#endif
#endif /* notyet */

#if 3777787
		if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			prot |= VM_PROT_READ;
		if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			maxprot |= VM_PROT_READ;
#endif	/* radar 3777787 */
map_file_retry:
		result = vm_map_enter_mem_object_control(user_map,
						 &user_addr, user_size,
						 0, alloc_flags,
						 control, file_pos,
						 docow, prot, maxprot,
						 (flags & MAP_SHARED) ?
						 VM_INHERIT_SHARE :
						 VM_INHERIT_DEFAULT);

		/* If a non-binding address was specified for this file-backed
		 * mapping, retry the mapping with a zero base
		 * in the event the mapping operation failed due to
		 * lack of space between the address and the map's maximum.
		 */
		if ((result == KERN_NO_SPACE) && ((flags & MAP_FIXED) == 0) && user_addr && (num_retries++ == 0)) {
			user_addr = vm_map_page_size(user_map);
			goto map_file_retry;
		}
	}
	if (!mapanon) {
		(void)vnode_put(vp);
	}

	switch (result) {
	case KERN_SUCCESS:
		*retval = user_addr + pageoff;
		error = 0;
		break;
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		error = ENOMEM;
		break;
	case KERN_PROTECTION_FAILURE:
		error = EACCES;
		break;
	default:
		error = EINVAL;
		break;
	}
bad:
	if (pager != MEMORY_OBJECT_NULL) {
		/*
		 * Release the reference on the pager.
		 * If the mapping was successful, it now holds
		 * an extra reference.
		 */
		memory_object_deallocate(pager);
	}
	if (fpref)
		fp_drop(p, fd, fp, 0);

	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_mmap) | DBG_FUNC_NONE), fd, (uint32_t)(*retval), (uint32_t)user_size, error, 0);
	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO2, SYS_mmap) | DBG_FUNC_NONE), (uint32_t)(*retval >> 32), (uint32_t)(user_size >> 32),
			      (uint32_t)(file_pos >> 32), (uint32_t)file_pos, 0);
	return (error);
}
int
msync(__unused proc_t p, struct msync_args *uap, int32_t *retval)
{
	__pthread_testcancel(1);
	return (msync_nocancel(p, (struct msync_nocancel_args *)uap, retval));
}
int
msync_nocancel(__unused proc_t p, struct msync_nocancel_args *uap, __unused int32_t *retval)
{
	mach_vm_offset_t addr;
	mach_vm_size_t size;
	int flags;
	kern_return_t rv;
	vm_sync_t sync_flags = 0;
	vm_map_t user_map;

	user_map = current_map();
	addr = (mach_vm_offset_t) uap->addr;
	size = (mach_vm_size_t)uap->len;
	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_msync) | DBG_FUNC_NONE), (uint32_t)(addr >> 32), (uint32_t)(size >> 32), 0, 0, 0);
	if (addr & vm_map_page_mask(user_map)) {
		/* UNIX SPEC: user address is not page-aligned, return EINVAL */
		return EINVAL;
	}
	if (size == 0) {
		/*
		 * We cannot support this properly without maintaining
		 * a list of all the mmaps that are done.  We cannot use
		 * vm_map_entry as they could be split or coalesced by
		 * independent actions.  So instead of returning inaccurate
		 * results, let's just return an error for an invalid size.
		 */
		return (EINVAL); /* XXX breaks posix apps */
	}

	flags = uap->flags;
	/* disallow contradictory flags */
	if ((flags & (MS_SYNC|MS_ASYNC)) == (MS_SYNC|MS_ASYNC))
		return (EINVAL);

	if (flags & MS_KILLPAGES)
		sync_flags |= VM_SYNC_KILLPAGES;
	if (flags & MS_DEACTIVATE)
		sync_flags |= VM_SYNC_DEACTIVATE;
	if (flags & MS_INVALIDATE)
		sync_flags |= VM_SYNC_INVALIDATE;

	if ( !(flags & (MS_KILLPAGES | MS_DEACTIVATE))) {
		if (flags & MS_ASYNC)
			sync_flags |= VM_SYNC_ASYNCHRONOUS;
		else
			sync_flags |= VM_SYNC_SYNCHRONOUS;
	}

	sync_flags |= VM_SYNC_CONTIGUOUS;	/* complain if holes */
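
	/*
	 * Worked example (illustrative): msync(addr, len,
	 * MS_ASYNC | MS_INVALIDATE) reaches mach_vm_msync() below with
	 * VM_SYNC_INVALIDATE | VM_SYNC_ASYNCHRONOUS | VM_SYNC_CONTIGUOUS.
	 */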
	rv = mach_vm_msync(user_map, addr, size, sync_flags);

	switch (rv) {
	case KERN_SUCCESS:
		break;
	case KERN_INVALID_ADDRESS:	/* hole in region being sync'ed */
		return (ENOMEM);
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}
	return (0);
}
int
munmap(__unused proc_t p, struct munmap_args *uap, __unused int32_t *retval)
{
	mach_vm_offset_t	user_addr;
	mach_vm_size_t		user_size;
	kern_return_t		result;
	vm_map_t		user_map;

	user_map = current_map();
	user_addr = (mach_vm_offset_t) uap->addr;
	user_size = (mach_vm_size_t) uap->len;

	AUDIT_ARG(addr, user_addr);
	AUDIT_ARG(len, user_size);

	if (user_addr & vm_map_page_mask(user_map)) {
		/* UNIX SPEC: user address is not page-aligned, return EINVAL */
		return EINVAL;
	}

	if (user_addr + user_size < user_addr)
		return (EINVAL);

	if (user_size == 0) {
		/* UNIX SPEC: size is 0, return EINVAL */
		return EINVAL;
	}

	result = mach_vm_deallocate(user_map, user_addr, user_size);
	if (result != KERN_SUCCESS) {
		return (EINVAL);
	}
	return (0);
}
int
mprotect(__unused proc_t p, struct mprotect_args *uap, __unused int32_t *retval)
{
	register vm_prot_t	prot;
	mach_vm_offset_t	user_addr;
	mach_vm_size_t		user_size;
	kern_return_t		result;
	vm_map_t		user_map;
#if CONFIG_MACF
	int			error;
#endif

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);
	AUDIT_ARG(value32, uap->prot);

	user_map = current_map();
	user_addr = (mach_vm_offset_t) uap->addr;
	user_size = (mach_vm_size_t) uap->len;
	prot = (vm_prot_t)(uap->prot & (VM_PROT_ALL | VM_PROT_TRUSTED));

	if (user_addr & vm_map_page_mask(user_map)) {
		/* UNIX SPEC: user address is not page-aligned, return EINVAL */
		return EINVAL;
	}
#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;
#endif
#endif /* notyet */

	if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
		prot |= VM_PROT_READ;
#if CONFIG_MACF
	/*
	 * The MAC check for mprotect is of limited use for 2 reasons:
	 * Without mmap revocation, the caller could have asked for the max
	 * protections initially instead of a reduced set, so an mprotect
	 * check would offer no new security.
	 * It is not possible to extract the vnode from the pager object(s)
	 * of the target memory range.
	 * However, the MAC check may be used to prevent a process from,
	 * e.g., making the stack executable.
	 */
	error = mac_proc_check_mprotect(p, user_addr,
	    user_size, prot);
	if (error)
		return (error);
#endif

	if (prot & VM_PROT_TRUSTED) {
#if CONFIG_DYNAMIC_CODE_SIGNING
		/* CODE SIGNING ENFORCEMENT - JIT support */
		/* The special protection value VM_PROT_TRUSTED requests that we treat
		 * this page as if it had a valid code signature.
		 * If this is enabled, there MUST be a MAC policy implementing the
		 * mac_proc_check_mprotect() hook above.  Otherwise, code signing will be
		 * compromised because the check would always succeed and thus any
		 * process could sign dynamically. */
		result = vm_map_sign(
			user_map,
			vm_map_trunc_page(user_addr,
					  vm_map_page_mask(user_map)),
			vm_map_round_page(user_addr+user_size,
					  vm_map_page_mask(user_map)));
		switch (result) {
		case KERN_SUCCESS:
			break;
		case KERN_INVALID_ADDRESS:
			/* UNIX SPEC: for an invalid address range, return ENOMEM */
			return ENOMEM;
		default:
			return EINVAL;
		}
#else
		return ENOTSUP;
#endif
	}

	prot &= ~VM_PROT_TRUSTED;

	result = mach_vm_protect(user_map, user_addr, user_size,
				 FALSE, prot);
	switch (result) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	case KERN_INVALID_ADDRESS:
		/* UNIX SPEC: for an invalid address range, return ENOMEM */
		return ENOMEM;
	}
	return (EINVAL);
}
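
/*
 * Usage sketch (illustrative, not part of this file): an ordinary
 * mprotect(page, 4096, PROT_READ) arrives here without VM_PROT_TRUSTED
 * and is passed to mach_vm_protect() with set_maximum == FALSE, so it
 * changes only the current protection and must stay within the
 * region's maximum protection.
 */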
int
minherit(__unused proc_t p, struct minherit_args *uap, __unused int32_t *retval)
{
	mach_vm_offset_t addr;
	mach_vm_size_t size;
	register vm_inherit_t inherit;
	vm_map_t	user_map;
	kern_return_t	result;

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);
	AUDIT_ARG(value32, uap->inherit);

	addr = (mach_vm_offset_t)uap->addr;
	size = (mach_vm_size_t)uap->len;
	inherit = uap->inherit;

	user_map = current_map();
	result = mach_vm_inherit(user_map, addr, size,
				 inherit);
	switch (result) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}
int
madvise(__unused proc_t p, struct madvise_args *uap, __unused int32_t *retval)
{
	vm_map_t user_map;
	mach_vm_offset_t start;
	mach_vm_size_t size;
	vm_behavior_t new_behavior;
	kern_return_t	result;

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	switch (uap->behav) {
	case MADV_RANDOM:
		new_behavior = VM_BEHAVIOR_RANDOM;
		break;
	case MADV_SEQUENTIAL:
		new_behavior = VM_BEHAVIOR_SEQUENTIAL;
		break;
	case MADV_NORMAL:
		new_behavior = VM_BEHAVIOR_DEFAULT;
		break;
	case MADV_WILLNEED:
		new_behavior = VM_BEHAVIOR_WILLNEED;
		break;
	case MADV_DONTNEED:
		new_behavior = VM_BEHAVIOR_DONTNEED;
		break;
	case MADV_FREE:
		new_behavior = VM_BEHAVIOR_FREE;
		break;
	case MADV_ZERO_WIRED_PAGES:
		new_behavior = VM_BEHAVIOR_ZERO_WIRED_PAGES;
		break;
	case MADV_FREE_REUSABLE:
		new_behavior = VM_BEHAVIOR_REUSABLE;
		break;
	case MADV_FREE_REUSE:
		new_behavior = VM_BEHAVIOR_REUSE;
		break;
	case MADV_CAN_REUSE:
		new_behavior = VM_BEHAVIOR_CAN_REUSE;
		break;
	default:
		return (EINVAL);
	}

	start = (mach_vm_offset_t) uap->addr;
	size = (mach_vm_size_t) uap->len;

	user_map = current_map();

	result = mach_vm_behavior_set(user_map, start, size, new_behavior);
	switch (result) {
	case KERN_SUCCESS:
		return 0;
	case KERN_INVALID_ADDRESS:
		return EINVAL;
	case KERN_NO_SPACE:
		return ENOMEM;
	}

	return EINVAL;
}
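
/*
 * Usage sketch (illustrative, not part of this file): a userspace
 * allocator caching free memory maps onto the switch above as
 *
 *	madvise(base, size, MADV_FREE_REUSABLE);  // -> VM_BEHAVIOR_REUSABLE
 *	...
 *	madvise(base, size, MADV_FREE_REUSE);     // -> VM_BEHAVIOR_REUSE
 */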
int
mincore(__unused proc_t p, struct mincore_args *uap, __unused int32_t *retval)
{
	mach_vm_offset_t addr, first_addr, end;
	vm_map_t map;
	user_addr_t vec;
	int error;
	int vecindex, lastvecindex;
	int mincoreinfo = 0;
	int pqueryinfo;
	kern_return_t	ret;
	int numref;
	char c;

	map = current_map();

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = vm_map_trunc_page(uap->addr,
					      vm_map_page_mask(map));
	end = addr + vm_map_round_page(uap->len,
				       vm_map_page_mask(map));

	if (end < addr)
		return (EINVAL);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for ( ; addr < end; addr += PAGE_SIZE ) {
		pqueryinfo = 0;
		ret = mach_vm_page_query(map, addr, &pqueryinfo, &numref);
		if (ret != KERN_SUCCESS)
			break;
		mincoreinfo = 0;
		if (pqueryinfo & VM_PAGE_QUERY_PAGE_PRESENT)
			mincoreinfo |= MINCORE_INCORE;
		if (pqueryinfo & VM_PAGE_QUERY_PAGE_REF)
			mincoreinfo |= MINCORE_REFERENCED;
		if (pqueryinfo & VM_PAGE_QUERY_PAGE_DIRTY)
			mincoreinfo |= MINCORE_MODIFIED;

		/*
		 * calculate index into user supplied byte vector
		 */
		vecindex = (addr - first_addr) >> PAGE_SHIFT;

		/*
		 * If we have skipped map entries, we need to make sure that
		 * the byte vector is zeroed for those skipped entries.
		 */
		while ((lastvecindex + 1) < vecindex) {
			c = 0;
			error = copyout(&c, vec + lastvecindex, 1);
			if (error)
				return (EFAULT);
			++lastvecindex;
		}

		/*
		 * Pass the page information to the user
		 */
		c = (char)mincoreinfo;
		error = copyout(&c, vec + vecindex, 1);
		if (error)
			return (EFAULT);
		lastvecindex = vecindex;
	}

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = (end - first_addr) >> PAGE_SHIFT;
	while ((lastvecindex + 1) < vecindex) {
		c = 0;
		error = copyout(&c, vec + lastvecindex, 1);
		if (error)
			return (EFAULT);
		++lastvecindex;
	}

	return (0);
}
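
/*
 * Worked example (illustrative, assuming 4 KB pages): mincore(addr,
 * 2 * 4096, vec) fills vec[0] and vec[1]; each byte is 0 for a page
 * that is not resident, and has MINCORE_INCORE (plus possibly
 * MINCORE_REFERENCED and MINCORE_MODIFIED) set for a resident page.
 */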
int
mlock(__unused proc_t p, struct mlock_args *uap, __unused int32_t *retval)
{
	vm_map_t user_map;
	vm_map_offset_t addr;
	vm_map_size_t size, pageoff;
	kern_return_t	result;

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);

	addr = (vm_map_offset_t) uap->addr;
	size = (vm_map_size_t)uap->len;

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

	if (size == 0)
		return (0);

	user_map = current_map();
	pageoff = (addr & vm_map_page_mask(user_map));
	addr -= pageoff;
	size = vm_map_round_page(size+pageoff, vm_map_page_mask(user_map));

	/* have to call vm_map_wire directly to pass "I don't know" protections */
	result = vm_map_wire(user_map, addr, addr+size, VM_PROT_NONE, TRUE);

	if (result == KERN_RESOURCE_SHORTAGE)
		return EAGAIN;
	else if (result != KERN_SUCCESS)
		return ENOMEM;

	return 0;	/* KERN_SUCCESS */
}
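
/*
 * Worked example (illustrative, assuming 4 KB pages):
 * mlock((caddr_t)0x1ffc, 8) spans two pages, so pageoff = 0xffc and
 * the wired range is rounded out to [0x1000, 0x3000).
 */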
int
munlock(__unused proc_t p, struct munlock_args *uap, __unused int32_t *retval)
{
	mach_vm_offset_t addr;
	mach_vm_size_t size;
	vm_map_t user_map;
	kern_return_t	result;

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);

	addr = (mach_vm_offset_t) uap->addr;
	size = (mach_vm_size_t)uap->len;
	user_map = current_map();

	/* JMM - need to remove all wirings by spec - this just removes one */
	result = mach_vm_wire(host_priv_self(), user_map, addr, size, VM_PROT_NONE);
	return (result == KERN_SUCCESS ? 0 : ENOMEM);
}
int
mlockall(__unused proc_t p, __unused struct mlockall_args *uap, __unused int32_t *retval)
{
	return (ENOSYS);
}

int
munlockall(__unused proc_t p, __unused struct munlockall_args *uap, __unused int32_t *retval)
{
	return (ENOSYS);
}
#if CONFIG_CODE_DECRYPTION

int
mremap_encrypted(__unused struct proc *p, struct mremap_encrypted_args *uap, __unused int32_t *retval)
{
	mach_vm_offset_t	user_addr;
	mach_vm_size_t		user_size;
	kern_return_t		result;
	vm_map_t		user_map;
	uint32_t		cryptid;
	cpu_type_t		cputype;
	cpu_subtype_t		cpusubtype;
	pager_crypt_info_t	crypt_info;
	const char * cryptname = 0;
	char *vpath;
	int len, ret;
	struct proc_regioninfo_internal pinfo;
	vnode_t vp;
	uintptr_t vnodeaddr;
	uint32_t vid;
	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);

	user_map = current_map();
	user_addr = (mach_vm_offset_t) uap->addr;
	user_size = (mach_vm_size_t) uap->len;

	cryptid = uap->cryptid;
	cputype = uap->cputype;
	cpusubtype = uap->cpusubtype;

	if (user_addr & vm_map_page_mask(user_map)) {
		/* UNIX SPEC: user address is not page-aligned, return EINVAL */
		return EINVAL;
	}
	switch (cryptid) {
	case 0:
		/* not encrypted, just an empty load command */
		return 0;
	case 1:
		cryptname = "com.apple.unfree";
		break;
	case 0x10:
		/* some random cryptid that you could manually put into
		 * your binary if you want NULL */
		cryptname = "com.apple.null";
		break;
	default:
		return EINVAL;
	}

	if (NULL == text_crypter_create) return ENOTSUP;
	ret = fill_procregioninfo_onlymappedvnodes( proc_task(p), user_addr, &pinfo, &vnodeaddr, &vid);
	if (ret == 0 || !vnodeaddr) {
		/* No really, this returns 0 if the memory address is not backed by a file */
		return (EINVAL);
	}

	vp = (vnode_t)vnodeaddr;
	if ((vnode_getwithvid(vp, vid)) == 0) {
		MALLOC_ZONE(vpath, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
		if (vpath == NULL) {
			vnode_put(vp);
			return (ENOMEM);
		}

		len = MAXPATHLEN;
		ret = vn_getpath(vp, vpath, &len);
		if (ret) {
			FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
			vnode_put(vp);
			return (ret);
		}

		vnode_put(vp);
	} else {
		return (EINVAL);
	}
	kprintf("%s vpath %s cryptid 0x%08x cputype 0x%08x cpusubtype 0x%08x range 0x%016llx size 0x%016llx\n",
		__FUNCTION__, vpath, cryptid, cputype, cpusubtype, (uint64_t)user_addr, (uint64_t)user_size);
	/* set up decrypter first */
	crypt_file_data_t crypt_data = {
		.filename = vpath,
		.cputype = cputype,
		.cpusubtype = cpusubtype };
	result = text_crypter_create(&crypt_info, cryptname, (void*)&crypt_data);
	FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);

	if (result) {
		printf("%s: unable to create decrypter %s, kr=%d\n",
		       __FUNCTION__, cryptname, result);
		if (result == kIOReturnNotPrivileged) {
			/* text encryption returned decryption failure */
			return (EPERM);
		} else {
			return (ENOMEM);
		}
	}

	/* now remap using the decrypter */
	result = vm_map_apple_protected(user_map, user_addr, user_addr+user_size, &crypt_info);
	if (result) {
		printf("%s: mapping failed with %d\n", __FUNCTION__, result);
		crypt_info.crypt_end(crypt_info.crypt_ops);
		return (EPERM);
	}

	return 0;
}
#endif /* CONFIG_CODE_DECRYPTION */