/*
- * Copyright (c) 2007 Apple Inc. All Rights Reserved.
- *
+ * Copyright (c) 2007-2019 Apple Inc. All Rights Reserved.
+ *
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
- *
+ *
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
+ *
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
#include <vm/vm_pager.h>
#include <vm/vm_protos.h>
+#if CONFIG_MACF
+#include <security/mac_framework.h>
+#endif
+#include <os/overflow.h>
+
+#ifndef CONFIG_EMBEDDED
+#include <IOKit/IOBSD.h> /* for IOTaskHasEntitlement */
+#include <sys/csr.h> /* for csr_check */
+#define MAP_32BIT_ENTITLEMENT "com.apple.security.mmap-map-32bit"
+#endif
+
/*
* XXX Internally, we use VM_PROT_* somewhat interchangeably, but the correct
* XXX usage is PROT_* from an interface perspective. Thus the values of
* Map in special device (must be SHARED) or file
*/
struct fileproc *fp;
- struct vnode *vp;
- int flags;
- int prot;
- int err=0;
- vm_map_t user_map;
- kern_return_t result;
- vm_map_offset_t user_addr;
- vm_map_size_t user_size;
- vm_object_offset_t pageoff;
- vm_object_offset_t file_pos;
- int alloc_flags=0;
- boolean_t docow;
- vm_prot_t maxprot;
- void *handle;
- memory_object_t pager = MEMORY_OBJECT_NULL;
- memory_object_control_t control;
- int mapanon=0;
- int fpref=0;
- int error =0;
+ struct vnode *vp;
+ int flags;
+ int prot;
+ int err = 0;
+ vm_map_t user_map;
+ kern_return_t result;
+ vm_map_offset_t user_addr;
+ vm_map_offset_t sum;
+ vm_map_size_t user_size;
+ vm_object_offset_t pageoff;
+ vm_object_offset_t file_pos;
+ int alloc_flags = 0;
+ vm_tag_t tag = VM_KERN_MEMORY_NONE;
+ vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+ boolean_t docow;
+ vm_prot_t maxprot;
+ void *handle;
+ memory_object_t pager = MEMORY_OBJECT_NULL;
+ memory_object_control_t control;
+ int mapanon = 0;
+ int fpref = 0;
+ int error = 0;
int fd = uap->fd;
int num_retries = 0;
/*
* Note that for UNIX03 conformance, there is additional parameter checking for
- * mmap() system call in libsyscall prior to entering the kernel. The sanity
+ * the mmap() system call in libsyscall prior to entering the kernel. The sanity
* checks and argument validation done in this function are not the only places
* one can get returned errnos.
*/
AUDIT_ARG(len, user_size);
AUDIT_ARG(fd, uap->fd);
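+ /*
+ * Reject early any [addr, addr + size) range whose end would wrap
+ * around the address space, before any page rounding is applied to it.
+ */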
+ if (vm_map_range_overflows(user_addr, user_size)) {
+ return EINVAL;
+ }
prot = (uap->prot & VM_PROT_ALL);
#if 3777787
/*
* for write or execute access, we must imply read access as well;
* otherwise programs expecting this to work will fail to operate.
*/
- if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
+ if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE)) {
prot |= VM_PROT_READ;
-#endif /* radar 3777787 */
+ }
+#endif /* radar 3777787 */
flags = uap->flags;
vp = NULLVP;
/*
- * The vm code does not have prototypes & compiler doesn't do the'
- * the right thing when you cast 64bit value and pass it in function
+ * The vm code does not have prototypes & compiler doesn't do
+ * the right thing when you cast a 64-bit value and pass it in a function
* call. So here it is.
*/
file_pos = (vm_object_offset_t)uap->pos;
/* make sure mapping fits into numeric range etc */
- if (file_pos + user_size > (vm_object_offset_t)-PAGE_SIZE_64)
- return (EINVAL);
+ if (os_add3_overflow(file_pos, user_size, PAGE_SIZE_64 - 1, &sum)) {
+ return EINVAL;
+ }
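+ /*
+ * os_add3_overflow() reports any wrap in
+ * file_pos + user_size + (PAGE_SIZE - 1). E.g. with 64-bit offsets,
+ * file_pos = 0xFFFFFFFFFFFFF000 and user_size = 0x2000 wrap to 0x1000,
+ * which the old unsigned comparison above would have let through.
+ */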
/*
* Align the file position to a page boundary,
/* Adjust size for rounding (on both ends). */
- user_size += pageoff; /* low end... */
- user_size = vm_map_round_page(user_size,
- vm_map_page_mask(user_map)); /* hi end */
+ user_size += pageoff; /* low end... */
+ user_size = vm_map_round_page(user_size,
+ vm_map_page_mask(user_map)); /* hi end */
if (flags & MAP_JIT) {
if ((flags & MAP_FIXED) ||
(flags & MAP_JIT)) {
return EINVAL;
}
+ }
+ if (flags & MAP_RESILIENT_CODESIGN) {
if (prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
return EPERM;
}
}
+ if (flags & MAP_SHARED) {
+ /*
+ * MAP_RESILIENT_MEDIA is not valid with MAP_SHARED because
+ * there is no place to inject zero-filled pages without
+ * actually adding them to the file.
+ * Since we didn't reject that combination before, there might
+ * already be callers using it and getting a valid MAP_SHARED
+ * mapping but without the resilience.
+ * For backwards compatibility's sake, let's keep ignoring
+ * MAP_RESILIENT_MEDIA in that case.
+ */
+ flags &= ~MAP_RESILIENT_MEDIA;
+ }
+ if (flags & MAP_RESILIENT_MEDIA) {
+ if ((flags & MAP_ANON) ||
+ (flags & MAP_SHARED)) {
+ return EINVAL;
+ }
+ }
/*
* Check for illegal addresses. Watch out for address wrap... Note
* should be aligned after adjustment by pageoff.
*/
user_addr -= pageoff;
- if (user_addr & vm_map_page_mask(user_map))
- return (EINVAL);
+ if (user_addr & vm_map_page_mask(user_map)) {
+ return EINVAL;
+ }
}
#ifdef notyet
/* DO not have apis to get this info, need to wait till then*/
* location.
*/
else if (addr < vm_map_round_page(p->p_vmspace->vm_daddr + MAXDSIZ,
- vm_map_page_mask(user_map)))
+ vm_map_page_mask(user_map))) {
addr = vm_map_round_page(p->p_vmspace->vm_daddr + MAXDSIZ,
- vm_map_page_mask(user_map));
+ vm_map_page_mask(user_map));
+ }
#endif
alloc_flags = 0;
if (flags & MAP_ANON) {
-
maxprot = VM_PROT_ALL;
#if CONFIG_MACF
/*
error = mac_proc_check_map_anon(p, user_addr, user_size, prot, flags, &maxprot);
if (error) {
return EINVAL;
- }
+ }
#endif /* MAC */
/*
* Mapping blank space is trivial. Use positive fds as the alias
- * value for memory tracking.
+ * value for memory tracking.
*/
if (fd != -1) {
/*
* Use "fd" to pass (some) Mach VM allocation flags,
* (see the VM_FLAGS_* definitions).
*/
- alloc_flags = fd & (VM_FLAGS_ALIAS_MASK | VM_FLAGS_SUPERPAGE_MASK |
- VM_FLAGS_PURGABLE);
+ alloc_flags = fd & (VM_FLAGS_ALIAS_MASK |
+ VM_FLAGS_SUPERPAGE_MASK |
+ VM_FLAGS_PURGABLE |
+ VM_FLAGS_4GB_CHUNK);
if (alloc_flags != fd) {
/* reject if there are any extra flags */
return EINVAL;
}
+ VM_GET_FLAGS_ALIAS(alloc_flags, tag);
+ alloc_flags &= ~VM_FLAGS_ALIAS_MASK;
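+ /*
+ * For example, a userland caller can tag an anonymous mapping for
+ * the memory-accounting tools by passing a tag instead of an fd:
+ *
+ *	mmap(NULL, len, PROT_READ | PROT_WRITE,
+ *	    MAP_ANON | MAP_PRIVATE, VM_MAKE_TAG(VM_MEMORY_MALLOC), 0);
+ *
+ * VM_GET_FLAGS_ALIAS() above recovers that tag so the region is
+ * accounted under it.
+ */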
}
-
+
handle = NULL;
file_pos = 0;
mapanon = 1;
struct vnode_attr va;
vfs_context_t ctx = vfs_context_current();
- if (flags & MAP_JIT)
+ if (flags & MAP_JIT) {
return EINVAL;
+ }
/*
* Mapping file, get fp for validation. Obtain vnode and make
* sure it is of appropriate type.
*/
err = fp_lookup(p, fd, &fp, 0);
- if (err)
- return(err);
+ if (err) {
+ return err;
+ }
fpref = 1;
switch (FILEGLOB_DTYPE(fp->f_fglob)) {
case DTYPE_PSXSHM:
}
vp = (struct vnode *)fp->f_fglob->fg_data;
error = vnode_getwithref(vp);
- if(error != 0)
+ if (error != 0) {
goto bad;
+ }
if (vp->v_type != VREG && vp->v_type != VCHR) {
(void)vnode_put(vp);
}
AUDIT_ARG(vnpath, vp, ARG_VNODE1);
-
+
/*
* POSIX: mmap needs to update access time for mapped files
*/
* credentials do we use for determination? What if
* proc does a setuid?
*/
- maxprot = VM_PROT_EXECUTE; /* ??? */
- if (fp->f_fglob->fg_flag & FREAD)
+ maxprot = VM_PROT_EXECUTE; /* ??? */
+ if (fp->f_fglob->fg_flag & FREAD) {
maxprot |= VM_PROT_READ;
- else if (prot & PROT_READ) {
+ } else if (prot & PROT_READ) {
(void)vnode_put(vp);
error = EACCES;
goto bad;
* MAP_SHARED or via the implicit sharing of character
* device mappings), and we are trying to get write
* permission although we opened it without asking
- * for it, bail out.
+ * for it, bail out.
*/
if ((flags & MAP_SHARED) != 0) {
if ((fp->f_fglob->fg_flag & FWRITE) != 0 &&
/*
- * Do not allow writable mappings of
+ * Do not allow writable mappings of
* swap files (see vm_swapfile_pager.c).
*/
!vnode_isswap(vp)) {
- /*
- * check for write access
- *
- * Note that we already made this check when granting FWRITE
- * against the file, so it seems redundant here.
- */
- error = vnode_authorize(vp, NULL, KAUTH_VNODE_CHECKIMMUTABLE, ctx);
-
- /* if not granted for any reason, but we wanted it, bad */
- if ((prot & PROT_WRITE) && (error != 0)) {
- vnode_put(vp);
- goto bad;
- }
-
- /* if writable, remember */
- if (error == 0)
- maxprot |= VM_PROT_WRITE;
-
+ /*
+ * check for write access
+ *
+ * Note that we already made this check when granting FWRITE
+ * against the file, so it seems redundant here.
+ */
+ error = vnode_authorize(vp, NULL, KAUTH_VNODE_CHECKIMMUTABLE, ctx);
+
+ /* if not granted for any reason, but we wanted it, bad */
+ if ((prot & PROT_WRITE) && (error != 0)) {
+ vnode_put(vp);
+ goto bad;
+ }
+
+ /* if writable, remember */
+ if (error == 0) {
+ maxprot |= VM_PROT_WRITE;
+ }
} else if ((prot & PROT_WRITE) != 0) {
(void)vnode_put(vp);
error = EACCES;
goto bad;
}
- } else
+ } else {
maxprot |= VM_PROT_WRITE;
+ }
handle = (void *)vp;
#if CONFIG_MACF
goto bad;
}
#endif /* MAC */
+ /*
+ * Consult the file system to determine if this
+ * particular file object can be mapped.
+ */
+ error = VNOP_MMAP_CHECK(vp, prot, ctx);
+ if (error) {
+ (void)vnode_put(vp);
+ goto bad;
+ }
}
+
+ /*
+ * No copy-on-read for mmap() mappings themselves.
+ */
+ vmk_flags.vmkf_no_copy_on_read = 1;
}
- if (user_size == 0) {
- if (!mapanon)
+ if (user_size == 0) {
+ if (!mapanon) {
(void)vnode_put(vp);
+ }
error = 0;
goto bad;
}
* to the nearest page boundary.
*/
user_size = vm_map_round_page(user_size,
- vm_map_page_mask(user_map));
+ vm_map_page_mask(user_map));
if (file_pos & vm_map_page_mask(user_map)) {
- if (!mapanon)
+ if (!mapanon) {
(void)vnode_put(vp);
+ }
error = EINVAL;
goto bad;
}
if ((flags & MAP_FIXED) == 0) {
alloc_flags |= VM_FLAGS_ANYWHERE;
user_addr = vm_map_round_page(user_addr,
- vm_map_page_mask(user_map));
+ vm_map_page_mask(user_map));
} else {
if (user_addr != vm_map_trunc_page(user_addr,
- vm_map_page_mask(user_map))) {
- if (!mapanon)
- (void)vnode_put(vp);
+ vm_map_page_mask(user_map))) {
+ if (!mapanon) {
+ (void)vnode_put(vp);
+ }
error = EINVAL;
goto bad;
}
alloc_flags |= VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
}
- if (flags & MAP_NOCACHE)
+ if (flags & MAP_NOCACHE) {
alloc_flags |= VM_FLAGS_NO_CACHE;
+ }
if (flags & MAP_JIT) {
- alloc_flags |= VM_FLAGS_MAP_JIT;
+ vmk_flags.vmkf_map_jit = TRUE;
}
if (flags & MAP_RESILIENT_CODESIGN) {
alloc_flags |= VM_FLAGS_RESILIENT_CODESIGN;
}
+ if (flags & MAP_RESILIENT_MEDIA) {
+ alloc_flags |= VM_FLAGS_RESILIENT_MEDIA;
+ }
+
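+ /*
+ * MAP_32BIT requests a virtual address that fits in 32 bits; it is
+ * honored only when SIP allows untrusted kexts (csr_check() returns 0)
+ * or when the caller holds MAP_32BIT_ENTITLEMENT.
+ */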
+#ifndef CONFIG_EMBEDDED
+ if (flags & MAP_32BIT) {
+ if (csr_check(CSR_ALLOW_UNTRUSTED_KEXTS) == 0 ||
+ IOTaskHasEntitlement(current_task(), MAP_32BIT_ENTITLEMENT)) {
+ vmk_flags.vmkf_32bit_map_va = TRUE;
+ } else {
+ error = EPERM;
+ goto bad;
+ }
+ }
+#endif
/*
* Lookup/allocate object.
#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
- if (prot & VM_PROT_READ)
+ if (prot & VM_PROT_READ) {
prot |= VM_PROT_EXECUTE;
- if (maxprot & VM_PROT_READ)
+ }
+ if (maxprot & VM_PROT_READ) {
maxprot |= VM_PROT_EXECUTE;
+ }
#endif
#endif
#if 3777787
- if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
+ if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE)) {
prot |= VM_PROT_READ;
- if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
+ }
+ if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE)) {
maxprot |= VM_PROT_READ;
-#endif /* radar 3777787 */
+ }
+#endif /* radar 3777787 */
map_anon_retry:
result = vm_map_enter_mem_object(user_map,
- &user_addr, user_size,
- 0, alloc_flags,
- IPC_PORT_NULL, 0, FALSE,
- prot, maxprot,
- (flags & MAP_SHARED) ?
- VM_INHERIT_SHARE :
- VM_INHERIT_DEFAULT);
+ &user_addr, user_size,
+ 0, alloc_flags, vmk_flags,
+ tag,
+ IPC_PORT_NULL, 0, FALSE,
+ prot, maxprot,
+ (flags & MAP_SHARED) ?
+ VM_INHERIT_SHARE :
+ VM_INHERIT_DEFAULT);
/* If a non-binding address was specified for this anonymous
* mapping, retry the mapping with a zero base
} else {
control = ubc_getobject(vp, UBC_FLAGS_NONE);
}
-
+
if (control == NULL) {
(void)vnode_put(vp);
error = ENOMEM;
* Set credentials:
* FIXME: if we're writing the file we need a way to
* ensure that someone doesn't replace our R/W creds
- * with ones that only work for read.
+ * with ones that only work for read.
*/
ubc_setthreadcred(vp, p, current_thread());
docow = FALSE;
- if ((flags & (MAP_ANON|MAP_SHARED)) == 0) {
+ if ((flags & (MAP_ANON | MAP_SHARED)) == 0) {
docow = TRUE;
}
#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
- if (prot & VM_PROT_READ)
+ if (prot & VM_PROT_READ) {
prot |= VM_PROT_EXECUTE;
- if (maxprot & VM_PROT_READ)
+ }
+ if (maxprot & VM_PROT_READ) {
maxprot |= VM_PROT_EXECUTE;
+ }
#endif
#endif /* notyet */
#if 3777787
- if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
+ if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE)) {
prot |= VM_PROT_READ;
- if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
+ }
+ if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE)) {
maxprot |= VM_PROT_READ;
-#endif /* radar 3777787 */
+ }
+#endif /* radar 3777787 */
map_file_retry:
- if ((flags & MAP_RESILIENT_CODESIGN) ||
- (flags & MAP_RESILIENT_MEDIA)) {
+ if (flags & MAP_RESILIENT_CODESIGN) {
if (prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
assert(!mapanon);
vnode_put(vp);
/* strictly limit access to "prot" */
maxprot &= prot;
}
+
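+ /*
+ * user_size has been rounded up since the earlier overflow check, so
+ * re-verify that file_pos + user_size still cannot wrap.
+ */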
+ vm_object_offset_t end_pos = 0;
+ if (os_add_overflow(user_size, file_pos, &end_pos)) {
+ vnode_put(vp);
+ error = EINVAL;
+ goto bad;
+ }
+
result = vm_map_enter_mem_object_control(user_map,
- &user_addr, user_size,
- 0, alloc_flags,
- control, file_pos,
- docow, prot, maxprot,
- (flags & MAP_SHARED) ?
- VM_INHERIT_SHARE :
- VM_INHERIT_DEFAULT);
+ &user_addr, user_size,
+ 0, alloc_flags, vmk_flags,
+ tag,
+ control, file_pos,
+ docow, prot, maxprot,
+ (flags & MAP_SHARED) ?
+ VM_INHERIT_SHARE :
+ VM_INHERIT_DEFAULT);
/* If a non-binding address was specified for this file backed
* mapping, retry the mapping with a zero base
*/
memory_object_deallocate(pager);
}
- if (fpref)
+ if (fpref) {
fp_drop(p, fd, fp, 0);
+ }
KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_mmap) | DBG_FUNC_NONE), fd, (uint32_t)(*retval), (uint32_t)user_size, error, 0);
+#ifndef CONFIG_EMBEDDED
KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO2, SYS_mmap) | DBG_FUNC_NONE), (uint32_t)(*retval >> 32), (uint32_t)(user_size >> 32),
- (uint32_t)(file_pos >> 32), (uint32_t)file_pos, 0);
- return(error);
+ (uint32_t)(file_pos >> 32), (uint32_t)file_pos, 0);
+#endif
+ return error;
}
int
msync(__unused proc_t p, struct msync_args *uap, int32_t *retval)
{
__pthread_testcancel(1);
- return(msync_nocancel(p, (struct msync_nocancel_args *)uap, retval));
+ return msync_nocancel(p, (struct msync_nocancel_args *)uap, retval);
}
int
int flags;
vm_map_t user_map;
int rv;
- vm_sync_t sync_flags=0;
+ vm_sync_t sync_flags = 0;
user_map = current_map();
addr = (mach_vm_offset_t) uap->addr;
- size = (mach_vm_size_t)uap->len;
+ size = (mach_vm_size_t) uap->len;
+#ifndef CONFIG_EMBEDDED
KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_msync) | DBG_FUNC_NONE), (uint32_t)(addr >> 32), (uint32_t)(size >> 32), 0, 0, 0);
+#endif
+ if (mach_vm_range_overflows(addr, size)) {
+ return EINVAL;
+ }
if (addr & vm_map_page_mask(user_map)) {
/* UNIX SPEC: user address is not page-aligned, return EINVAL */
return EINVAL;
/*
* We cannot support this properly without maintaining
* list all mmaps done. Cannot use vm_map_entry as they could be
- * split or coalesced by indepenedant actions. So instead of
+ * split or coalesced by independent actions. So instead of
* inaccurate results, lets just return error as invalid size
* specified
*/
- return (EINVAL); /* XXX breaks posix apps */
+ return EINVAL; /* XXX breaks posix apps */
}
flags = uap->flags;
/* disallow contradictory flags */
- if ((flags & (MS_SYNC|MS_ASYNC)) == (MS_SYNC|MS_ASYNC))
- return (EINVAL);
+ if ((flags & (MS_SYNC | MS_ASYNC)) == (MS_SYNC | MS_ASYNC)) {
+ return EINVAL;
+ }
- if (flags & MS_KILLPAGES)
- sync_flags |= VM_SYNC_KILLPAGES;
- if (flags & MS_DEACTIVATE)
- sync_flags |= VM_SYNC_DEACTIVATE;
- if (flags & MS_INVALIDATE)
- sync_flags |= VM_SYNC_INVALIDATE;
+ if (flags & MS_KILLPAGES) {
+ sync_flags |= VM_SYNC_KILLPAGES;
+ }
+ if (flags & MS_DEACTIVATE) {
+ sync_flags |= VM_SYNC_DEACTIVATE;
+ }
+ if (flags & MS_INVALIDATE) {
+ sync_flags |= VM_SYNC_INVALIDATE;
+ }
- if ( !(flags & (MS_KILLPAGES | MS_DEACTIVATE))) {
- if (flags & MS_ASYNC)
- sync_flags |= VM_SYNC_ASYNCHRONOUS;
- else
- sync_flags |= VM_SYNC_SYNCHRONOUS;
+ if (!(flags & (MS_KILLPAGES | MS_DEACTIVATE))) {
+ if (flags & MS_ASYNC) {
+ sync_flags |= VM_SYNC_ASYNCHRONOUS;
+ } else {
+ sync_flags |= VM_SYNC_SYNCHRONOUS;
+ }
}
- sync_flags |= VM_SYNC_CONTIGUOUS; /* complain if holes */
+ sync_flags |= VM_SYNC_CONTIGUOUS; /* complain if holes */
rv = mach_vm_msync(user_map, addr, size, sync_flags);
switch (rv) {
case KERN_SUCCESS:
break;
- case KERN_INVALID_ADDRESS: /* hole in region being sync'ed */
- return (ENOMEM);
+ case KERN_INVALID_ADDRESS: /* hole in region being sync'ed */
+ return ENOMEM;
case KERN_FAILURE:
- return (EIO);
+ return EIO;
default:
- return (EINVAL);
+ return EINVAL;
}
- return (0);
+ return 0;
}
int
munmap(__unused proc_t p, struct munmap_args *uap, __unused int32_t *retval)
{
- mach_vm_offset_t user_addr;
- mach_vm_size_t user_size;
- kern_return_t result;
- vm_map_t user_map;
+ mach_vm_offset_t user_addr;
+ mach_vm_size_t user_size;
+ kern_return_t result;
+ vm_map_t user_map;
user_map = current_map();
user_addr = (mach_vm_offset_t) uap->addr;
return EINVAL;
}
- if (user_addr + user_size < user_addr)
- return(EINVAL);
+ if (mach_vm_range_overflows(user_addr, user_size)) {
+ return EINVAL;
+ }
if (user_size == 0) {
/* UNIX SPEC: size is 0, return EINVAL */
result = mach_vm_deallocate(user_map, user_addr, user_size);
if (result != KERN_SUCCESS) {
- return(EINVAL);
+ return EINVAL;
}
- return(0);
+ return 0;
}
int
mprotect(__unused proc_t p, struct mprotect_args *uap, __unused int32_t *retval)
{
vm_prot_t prot;
- mach_vm_offset_t user_addr;
- mach_vm_size_t user_size;
- kern_return_t result;
- vm_map_t user_map;
+ mach_vm_offset_t user_addr;
+ mach_vm_size_t user_size;
+ kern_return_t result;
+ vm_map_t user_map;
#if CONFIG_MACF
int error;
#endif
user_size = (mach_vm_size_t) uap->len;
prot = (vm_prot_t)(uap->prot & (VM_PROT_ALL | VM_PROT_TRUSTED | VM_PROT_STRIP_READ));
+ if (mach_vm_range_overflows(user_addr, user_size)) {
+ return EINVAL;
+ }
if (user_addr & vm_map_page_mask(user_map)) {
/* UNIX SPEC: user address is not page-aligned, return EINVAL */
return EINVAL;
}
-
+
#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
- if (prot & VM_PROT_READ)
+ if (prot & VM_PROT_READ) {
prot |= VM_PROT_EXECUTE;
+ }
#endif
#endif /* notyet */
#if 3936456
- if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
+ if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE)) {
prot |= VM_PROT_READ;
-#endif /* 3936456 */
+ }
+#endif /* 3936456 */
+#if defined(__arm64__)
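+ /*
+ * VM_PROT_STRIP_READ removes the read permission that the workaround
+ * above would otherwise imply, e.g. for execute-only mappings.
+ */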
+ if (prot & VM_PROT_STRIP_READ) {
+ prot &= ~(VM_PROT_READ | VM_PROT_STRIP_READ);
+ }
+#endif
#if CONFIG_MACF
/*
* e.g., making the stack executable.
*/
error = mac_proc_check_mprotect(p, user_addr,
- user_size, prot);
- if (error)
- return (error);
+ user_size, prot);
+ if (error) {
+ return error;
+ }
#endif
- if(prot & VM_PROT_TRUSTED) {
+ if (prot & VM_PROT_TRUSTED) {
#if CONFIG_DYNAMIC_CODE_SIGNING
/* CODE SIGNING ENFORCEMENT - JIT support */
/* The special protection value VM_PROT_TRUSTED requests that we treat
* this page as if it had a valid code signature.
- * If this is enabled, there MUST be a MAC policy implementing the
+ * If this is enabled, there MUST be a MAC policy implementing the
* mac_proc_check_mprotect() hook above. Otherwise, Codesigning will be
* compromised because the check would always succeed and thusly any
* process could sign dynamically. */
result = vm_map_sign(
- user_map,
+ user_map,
vm_map_trunc_page(user_addr,
- vm_map_page_mask(user_map)),
- vm_map_round_page(user_addr+user_size,
- vm_map_page_mask(user_map)));
+ vm_map_page_mask(user_map)),
+ vm_map_round_page(user_addr + user_size,
+ vm_map_page_mask(user_map)));
switch (result) {
- case KERN_SUCCESS:
- break;
- case KERN_INVALID_ADDRESS:
- /* UNIX SPEC: for an invalid address range, return ENOMEM */
- return ENOMEM;
- default:
- return EINVAL;
+ case KERN_SUCCESS:
+ break;
+ case KERN_INVALID_ADDRESS:
+ /* UNIX SPEC: for an invalid address range, return ENOMEM */
+ return ENOMEM;
+ default:
+ return EINVAL;
}
#else
return ENOTSUP;
#endif
}
prot &= ~VM_PROT_TRUSTED;
-
+
result = mach_vm_protect(user_map, user_addr, user_size,
- FALSE, prot);
+ FALSE, prot);
switch (result) {
case KERN_SUCCESS:
- return (0);
+ return 0;
case KERN_PROTECTION_FAILURE:
- return (EACCES);
+ return EACCES;
case KERN_INVALID_ADDRESS:
/* UNIX SPEC: for an invalid address range, return ENOMEM */
return ENOMEM;
}
- return (EINVAL);
+ return EINVAL;
}
mach_vm_offset_t addr;
mach_vm_size_t size;
vm_inherit_t inherit;
- vm_map_t user_map;
- kern_return_t result;
+ vm_map_t user_map;
+ kern_return_t result;
AUDIT_ARG(addr, uap->addr);
AUDIT_ARG(len, uap->len);
addr = (mach_vm_offset_t)uap->addr;
size = (mach_vm_size_t)uap->len;
inherit = uap->inherit;
-
+ if (mach_vm_range_overflows(addr, size)) {
+ return EINVAL;
+ }
user_map = current_map();
result = mach_vm_inherit(user_map, addr, size,
- inherit);
+ inherit);
switch (result) {
case KERN_SUCCESS:
- return (0);
+ return 0;
case KERN_PROTECTION_FAILURE:
- return (EACCES);
+ return EACCES;
}
- return (EINVAL);
+ return EINVAL;
}
int
mach_vm_offset_t start;
mach_vm_size_t size;
vm_behavior_t new_behavior;
- kern_return_t result;
+ kern_return_t result;
/*
* Since this routine is only advisory, we default to conservative
* behavior.
*/
switch (uap->behav) {
- case MADV_RANDOM:
- new_behavior = VM_BEHAVIOR_RANDOM;
- break;
- case MADV_SEQUENTIAL:
- new_behavior = VM_BEHAVIOR_SEQUENTIAL;
- break;
- case MADV_NORMAL:
- new_behavior = VM_BEHAVIOR_DEFAULT;
- break;
- case MADV_WILLNEED:
- new_behavior = VM_BEHAVIOR_WILLNEED;
- break;
- case MADV_DONTNEED:
- new_behavior = VM_BEHAVIOR_DONTNEED;
- break;
- case MADV_FREE:
- new_behavior = VM_BEHAVIOR_FREE;
- break;
- case MADV_ZERO_WIRED_PAGES:
- new_behavior = VM_BEHAVIOR_ZERO_WIRED_PAGES;
- break;
- case MADV_FREE_REUSABLE:
- new_behavior = VM_BEHAVIOR_REUSABLE;
- break;
- case MADV_FREE_REUSE:
- new_behavior = VM_BEHAVIOR_REUSE;
- break;
- case MADV_CAN_REUSE:
- new_behavior = VM_BEHAVIOR_CAN_REUSE;
- break;
- case MADV_PAGEOUT:
+ case MADV_RANDOM:
+ new_behavior = VM_BEHAVIOR_RANDOM;
+ break;
+ case MADV_SEQUENTIAL:
+ new_behavior = VM_BEHAVIOR_SEQUENTIAL;
+ break;
+ case MADV_NORMAL:
+ new_behavior = VM_BEHAVIOR_DEFAULT;
+ break;
+ case MADV_WILLNEED:
+ new_behavior = VM_BEHAVIOR_WILLNEED;
+ break;
+ case MADV_DONTNEED:
+ new_behavior = VM_BEHAVIOR_DONTNEED;
+ break;
+ case MADV_FREE:
+ new_behavior = VM_BEHAVIOR_FREE;
+ break;
+ case MADV_ZERO_WIRED_PAGES:
+ new_behavior = VM_BEHAVIOR_ZERO_WIRED_PAGES;
+ break;
+ case MADV_FREE_REUSABLE:
+ new_behavior = VM_BEHAVIOR_REUSABLE;
+ break;
+ case MADV_FREE_REUSE:
+ new_behavior = VM_BEHAVIOR_REUSE;
+ break;
+ case MADV_CAN_REUSE:
+ new_behavior = VM_BEHAVIOR_CAN_REUSE;
+ break;
+ case MADV_PAGEOUT:
#if MACH_ASSERT
- new_behavior = VM_BEHAVIOR_PAGEOUT;
- break;
+ new_behavior = VM_BEHAVIOR_PAGEOUT;
+ break;
#else /* MACH_ASSERT */
- return ENOTSUP;
+ return ENOTSUP;
#endif /* MACH_ASSERT */
- default:
- return(EINVAL);
+ default:
+ return EINVAL;
}
start = (mach_vm_offset_t) uap->addr;
size = (mach_vm_size_t) uap->len;
-
+ if (mach_vm_range_overflows(start, size)) {
+ return EINVAL;
+ }
+#if __arm64__
+ if (start == 0 &&
+ size != 0 &&
+ (uap->behav == MADV_FREE ||
+ uap->behav == MADV_FREE_REUSABLE)) {
+ printf("** FOURK_COMPAT: %d[%s] "
+ "failing madvise(0x%llx,0x%llx,%s)\n",
+ p->p_pid, p->p_comm, start, size,
+ ((uap->behav == MADV_FREE_REUSABLE)
+ ? "MADV_FREE_REUSABLE"
+ : "MADV_FREE"));
+ DTRACE_VM3(fourk_compat_madvise,
+ uint64_t, start,
+ uint64_t, size,
+ int, uap->behav);
+ return EINVAL;
+ }
+#endif /* __arm64__ */
user_map = current_map();
return 0;
case KERN_INVALID_ADDRESS:
return EINVAL;
- case KERN_NO_SPACE:
+ case KERN_NO_SPACE:
return ENOMEM;
}
int
mincore(__unused proc_t p, struct mincore_args *uap, __unused int32_t *retval)
{
- mach_vm_offset_t addr, first_addr, end;
- vm_map_t map;
- user_addr_t vec;
- int error;
- int vecindex, lastvecindex;
- int mincoreinfo=0;
- int pqueryinfo;
- kern_return_t ret;
- int numref;
-
- char c;
+ mach_vm_offset_t addr = 0, first_addr = 0, end = 0, cur_end = 0;
+ vm_map_t map = VM_MAP_NULL;
+ user_addr_t vec = 0;
+ int error = 0;
+ int lastvecindex = 0;
+ int mincoreinfo = 0;
+ int pqueryinfo = 0;
+ unsigned int pqueryinfo_vec_size = 0;
+ vm_page_info_basic_t info = NULL;
+ mach_msg_type_number_t count = 0;
+ char *kernel_vec = NULL;
+ uint64_t req_vec_size_pages = 0, cur_vec_size_pages = 0, vecindex = 0;
+ kern_return_t kr = KERN_SUCCESS;
map = current_map();
* mode.
*/
first_addr = addr = vm_map_trunc_page(uap->addr,
- vm_map_page_mask(map));
- end = addr + vm_map_round_page(uap->len,
- vm_map_page_mask(map));
+ vm_map_page_mask(map));
+ end = vm_map_round_page(uap->addr + uap->len,
+ vm_map_page_mask(map));
+
+ if (end < addr) {
+ return EINVAL;
+ }
- if (end < addr)
- return (EINVAL);
+ if (end == addr) {
+ return 0;
+ }
/*
- * Address of byte vector
+ * We are going to loop through the whole range of 'req_vec_size' pages
+ * in chunks of 'cur_vec_size' pages.
*/
- vec = uap->vec;
- map = current_map();
+ req_vec_size_pages = (end - addr) >> PAGE_SHIFT;
+ cur_vec_size_pages = MIN(req_vec_size_pages, (MAX_PAGE_RANGE_QUERY >> PAGE_SHIFT));
+
+ kernel_vec = (void*) _MALLOC(cur_vec_size_pages * sizeof(char), M_TEMP, M_WAITOK | M_ZERO);
+
+ if (kernel_vec == NULL) {
+ return ENOMEM;
+ }
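+ /*
+ * Each pass of the loop below queries at most MAX_PAGE_RANGE_QUERY
+ * worth of pages, reuses the same kernel buffers, and copies one
+ * status byte per page out to 'vec' before moving to the next chunk.
+ */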
/*
- * Do this on a map entry basis so that if the pages are not
- * in the current processes address space, we can easily look
- * up the pages elsewhere.
+ * Address of byte vector
*/
- lastvecindex = -1;
- for( ; addr < end; addr += PAGE_SIZE ) {
- pqueryinfo = 0;
- ret = mach_vm_page_query(map, addr, &pqueryinfo, &numref);
- if (ret != KERN_SUCCESS)
- pqueryinfo = 0;
- mincoreinfo = 0;
- if (pqueryinfo & VM_PAGE_QUERY_PAGE_PRESENT)
- mincoreinfo |= MINCORE_INCORE;
- if (pqueryinfo & VM_PAGE_QUERY_PAGE_REF)
- mincoreinfo |= MINCORE_REFERENCED;
- if (pqueryinfo & VM_PAGE_QUERY_PAGE_DIRTY)
- mincoreinfo |= MINCORE_MODIFIED;
-
-
- /*
- * calculate index into user supplied byte vector
- */
- vecindex = (addr - first_addr)>> PAGE_SHIFT;
+ vec = uap->vec;
+
+ pqueryinfo_vec_size = cur_vec_size_pages * sizeof(struct vm_page_info_basic);
+ info = (void*) _MALLOC(pqueryinfo_vec_size, M_TEMP, M_WAITOK);
+
+ if (info == NULL) {
+ FREE(kernel_vec, M_TEMP);
+ return ENOMEM;
+ }
+
+ while (addr < end) {
+ cur_end = addr + (cur_vec_size_pages * PAGE_SIZE_64);
+
+ count = VM_PAGE_INFO_BASIC_COUNT;
+ kr = vm_map_page_range_info_internal(map,
+ addr,
+ cur_end,
+ VM_PAGE_INFO_BASIC,
+ (vm_page_info_t) info,
+ &count);
+
+ assert(kr == KERN_SUCCESS);
/*
- * If we have skipped map entries, we need to make sure that
- * the byte vector is zeroed for those skipped entries.
+ * Do this on a map entry basis so that if the pages are not
+ * in the current processes address space, we can easily look
+ * up the pages elsewhere.
*/
- while((lastvecindex + 1) < vecindex) {
- c = 0;
- error = copyout(&c, vec + lastvecindex, 1);
- if (error) {
- return (EFAULT);
+ lastvecindex = -1;
+ for (; addr < cur_end; addr += PAGE_SIZE) {
+ pqueryinfo = info[lastvecindex + 1].disposition;
+
+ mincoreinfo = 0;
+
+ if (pqueryinfo & VM_PAGE_QUERY_PAGE_PRESENT) {
+ mincoreinfo |= MINCORE_INCORE;
}
- ++lastvecindex;
+ if (pqueryinfo & VM_PAGE_QUERY_PAGE_REF) {
+ mincoreinfo |= MINCORE_REFERENCED;
+ }
+ if (pqueryinfo & VM_PAGE_QUERY_PAGE_DIRTY) {
+ mincoreinfo |= MINCORE_MODIFIED;
+ }
+ if (pqueryinfo & VM_PAGE_QUERY_PAGE_PAGED_OUT) {
+ mincoreinfo |= MINCORE_PAGED_OUT;
+ }
+ if (pqueryinfo & VM_PAGE_QUERY_PAGE_COPIED) {
+ mincoreinfo |= MINCORE_COPIED;
+ }
+ if ((pqueryinfo & VM_PAGE_QUERY_PAGE_EXTERNAL) == 0) {
+ mincoreinfo |= MINCORE_ANONYMOUS;
+ }
+ /*
+ * calculate index into user supplied byte vector
+ */
+ vecindex = (addr - first_addr) >> PAGE_SHIFT;
+ kernel_vec[vecindex] = (char)mincoreinfo;
+ lastvecindex = vecindex;
}
- /*
- * Pass the page information to the user
- */
- c = (char)mincoreinfo;
- error = copyout(&c, vec + vecindex, 1);
+
+ assert(vecindex == (cur_vec_size_pages - 1));
+
+ error = copyout(kernel_vec, vec, cur_vec_size_pages * sizeof(char) /* a char per page */);
+
if (error) {
- return (EFAULT);
+ break;
}
- lastvecindex = vecindex;
+
+ /*
+ * For the next chunk, we'll need:
+ * - bump the location in the user buffer for our next disposition.
+ * - new length
+ * - starting address
+ */
+ vec += cur_vec_size_pages * sizeof(char);
+ req_vec_size_pages = (end - addr) >> PAGE_SHIFT;
+ cur_vec_size_pages = MIN(req_vec_size_pages, (MAX_PAGE_RANGE_QUERY >> PAGE_SHIFT));
+
+ first_addr = addr;
}
+ FREE(kernel_vec, M_TEMP);
+ FREE(info, M_TEMP);
- /*
- * Zero the last entries in the byte vector.
- */
- vecindex = (end - first_addr) >> PAGE_SHIFT;
- while((lastvecindex + 1) < vecindex) {
- c = 0;
- error = copyout(&c, vec + lastvecindex, 1);
- if (error) {
- return (EFAULT);
- }
- ++lastvecindex;
+ if (error) {
+ return EFAULT;
}
-
- return (0);
+
+ return 0;
}
int
vm_map_t user_map;
vm_map_offset_t addr;
vm_map_size_t size, pageoff;
- kern_return_t result;
+ kern_return_t result;
AUDIT_ARG(addr, uap->addr);
AUDIT_ARG(len, uap->len);
addr = (vm_map_offset_t) uap->addr;
size = (vm_map_size_t)uap->len;
- /* disable wrap around */
- if (addr + size < addr)
- return (EINVAL);
+ if (vm_map_range_overflows(addr, size)) {
+ return EINVAL;
+ }
- if (size == 0)
- return (0);
+ if (size == 0) {
+ return 0;
+ }
user_map = current_map();
pageoff = (addr & vm_map_page_mask(user_map));
addr -= pageoff;
- size = vm_map_round_page(size+pageoff, vm_map_page_mask(user_map));
+ size = vm_map_round_page(size + pageoff, vm_map_page_mask(user_map));
/* have to call vm_map_wire directly to pass "I don't know" protections */
- result = vm_map_wire(user_map, addr, addr+size, VM_PROT_NONE | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_MLOCK), TRUE);
+ result = vm_map_wire_kernel(user_map, addr, addr + size, VM_PROT_NONE, VM_KERN_MEMORY_MLOCK, TRUE);
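+ /*
+ * vm_map_wire_kernel() takes the accounting tag as an explicit
+ * parameter instead of packing it into the protection bits with
+ * VM_PROT_MEMORY_TAG_MAKE() as before.
+ */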
- if (result == KERN_RESOURCE_SHORTAGE)
+ if (result == KERN_RESOURCE_SHORTAGE) {
return EAGAIN;
- else if (result == KERN_PROTECTION_FAILURE)
+ } else if (result == KERN_PROTECTION_FAILURE) {
return EACCES;
- else if (result != KERN_SUCCESS)
+ } else if (result != KERN_SUCCESS) {
return ENOMEM;
+ }
- return 0; /* KERN_SUCCESS */
+ return 0; /* KERN_SUCCESS */
}
int
mach_vm_offset_t addr;
mach_vm_size_t size;
vm_map_t user_map;
- kern_return_t result;
+ kern_return_t result;
AUDIT_ARG(addr, uap->addr);
- AUDIT_ARG(addr, uap->len);
+ AUDIT_ARG(len, uap->len);
addr = (mach_vm_offset_t) uap->addr;
size = (mach_vm_size_t)uap->len;
user_map = current_map();
-
+ if (mach_vm_range_overflows(addr, size)) {
+ return EINVAL;
+ }
/* JMM - need to remove all wirings by spec - this just removes one */
- result = mach_vm_wire(host_priv_self(), user_map, addr, size, VM_PROT_NONE);
- return (result == KERN_SUCCESS ? 0 : ENOMEM);
+ result = mach_vm_wire_kernel(host_priv_self(), user_map, addr, size, VM_PROT_NONE, VM_KERN_MEMORY_MLOCK);
+ return result == KERN_SUCCESS ? 0 : ENOMEM;
}
int
mlockall(__unused proc_t p, __unused struct mlockall_args *uap, __unused int32_t *retval)
{
- return (ENOSYS);
+ return ENOSYS;
}
int
munlockall(__unused proc_t p, __unused struct munlockall_args *uap, __unused int32_t *retval)
{
- return(ENOSYS);
+ return ENOSYS;
}
#if CONFIG_CODE_DECRYPTION
int
mremap_encrypted(__unused struct proc *p, struct mremap_encrypted_args *uap, __unused int32_t *retval)
{
- mach_vm_offset_t user_addr;
- mach_vm_size_t user_size;
- kern_return_t result;
- vm_map_t user_map;
- uint32_t cryptid;
- cpu_type_t cputype;
- cpu_subtype_t cpusubtype;
- pager_crypt_info_t crypt_info;
- const char * cryptname = 0;
- char *vpath;
- int len, ret;
- struct proc_regioninfo_internal pinfo;
- vnode_t vp;
- uintptr_t vnodeaddr;
- uint32_t vid;
-
- AUDIT_ARG(addr, uap->addr);
- AUDIT_ARG(len, uap->len);
-
- user_map = current_map();
- user_addr = (mach_vm_offset_t) uap->addr;
- user_size = (mach_vm_size_t) uap->len;
-
- cryptid = uap->cryptid;
- cputype = uap->cputype;
- cpusubtype = uap->cpusubtype;
-
- if (user_addr & vm_map_page_mask(user_map)) {
- /* UNIX SPEC: user address is not page-aligned, return EINVAL */
- return EINVAL;
- }
-
- switch(cryptid) {
- case 0:
- /* not encrypted, just an empty load command */
- return 0;
- case 1:
- cryptname="com.apple.unfree";
- break;
- case 0x10:
- /* some random cryptid that you could manually put into
- * your binary if you want NULL */
- cryptname="com.apple.null";
- break;
- default:
- return EINVAL;
- }
-
- if (NULL == text_crypter_create) return ENOTSUP;
-
- ret = fill_procregioninfo_onlymappedvnodes( proc_task(p), user_addr, &pinfo, &vnodeaddr, &vid);
- if (ret == 0 || !vnodeaddr) {
- /* No really, this returns 0 if the memory address is not backed by a file */
- return (EINVAL);
- }
-
- vp = (vnode_t)vnodeaddr;
- if ((vnode_getwithvid(vp, vid)) == 0) {
- MALLOC_ZONE(vpath, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
- if(vpath == NULL) {
- vnode_put(vp);
- return (ENOMEM);
- }
-
- len = MAXPATHLEN;
- ret = vn_getpath(vp, vpath, &len);
- if(ret) {
- FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
- vnode_put(vp);
- return (ret);
- }
-
- vnode_put(vp);
- } else {
- return (EINVAL);
- }
+ mach_vm_offset_t user_addr;
+ mach_vm_size_t user_size;
+ kern_return_t result;
+ vm_map_t user_map;
+ uint32_t cryptid;
+ cpu_type_t cputype;
+ cpu_subtype_t cpusubtype;
+ pager_crypt_info_t crypt_info;
+ const char * cryptname = 0;
+ char *vpath;
+ int len, ret;
+ struct proc_regioninfo_internal pinfo;
+ vnode_t vp;
+ uintptr_t vnodeaddr;
+ uint32_t vid;
+
+ AUDIT_ARG(addr, uap->addr);
+ AUDIT_ARG(len, uap->len);
+
+ user_map = current_map();
+ user_addr = (mach_vm_offset_t) uap->addr;
+ user_size = (mach_vm_size_t) uap->len;
+
+ cryptid = uap->cryptid;
+ cputype = uap->cputype;
+ cpusubtype = uap->cpusubtype;
+
+ if (mach_vm_range_overflows(user_addr, user_size)) {
+ return EINVAL;
+ }
+ if (user_addr & vm_map_page_mask(user_map)) {
+ /* UNIX SPEC: user address is not page-aligned, return EINVAL */
+ return EINVAL;
+ }
+
+ switch (cryptid) {
+ case 0:
+ /* not encrypted, just an empty load command */
+ return 0;
+ case 1:
+ cryptname = "com.apple.unfree";
+ break;
+ case 0x10:
+ /* some random cryptid that you could manually put into
+ * your binary if you want NULL */
+ cryptname = "com.apple.null";
+ break;
+ default:
+ return EINVAL;
+ }
+
+ if (NULL == text_crypter_create) {
+ return ENOTSUP;
+ }
+
+ ret = fill_procregioninfo_onlymappedvnodes( proc_task(p), user_addr, &pinfo, &vnodeaddr, &vid);
+ if (ret == 0 || !vnodeaddr) {
+ /* No really, this returns 0 if the memory address is not backed by a file */
+ return EINVAL;
+ }
+
+ vp = (vnode_t)vnodeaddr;
+ if ((vnode_getwithvid(vp, vid)) == 0) {
+ MALLOC_ZONE(vpath, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
+ if (vpath == NULL) {
+ vnode_put(vp);
+ return ENOMEM;
+ }
+
+ len = MAXPATHLEN;
+ ret = vn_getpath(vp, vpath, &len);
+ if (ret) {
+ FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
+ vnode_put(vp);
+ return ret;
+ }
+
+ vnode_put(vp);
+ } else {
+ return EINVAL;
+ }
#if 0
- kprintf("%s vpath %s cryptid 0x%08x cputype 0x%08x cpusubtype 0x%08x range 0x%016llx size 0x%016llx\n",
- __FUNCTION__, vpath, cryptid, cputype, cpusubtype, (uint64_t)user_addr, (uint64_t)user_size);
+ kprintf("%s vpath %s cryptid 0x%08x cputype 0x%08x cpusubtype 0x%08x range 0x%016llx size 0x%016llx\n",
+ __FUNCTION__, vpath, cryptid, cputype, cpusubtype, (uint64_t)user_addr, (uint64_t)user_size);
#endif
- /* set up decrypter first */
- crypt_file_data_t crypt_data = {
- .filename = vpath,
- .cputype = cputype,
- .cpusubtype = cpusubtype };
- result = text_crypter_create(&crypt_info, cryptname, (void*)&crypt_data);
+ /* set up decrypter first */
+ crypt_file_data_t crypt_data = {
+ .filename = vpath,
+ .cputype = cputype,
+ .cpusubtype = cpusubtype
+ };
+ result = text_crypter_create(&crypt_info, cryptname, (void*)&crypt_data);
#if VM_MAP_DEBUG_APPLE_PROTECT
- if (vm_map_debug_apple_protect) {
- printf("APPLE_PROTECT: %d[%s] map %p [0x%llx:0x%llx] %s(%s) -> 0x%x\n",
- p->p_pid, p->p_comm,
- user_map,
- (uint64_t) user_addr,
- (uint64_t) (user_addr + user_size),
- __FUNCTION__, vpath, result);
- }
+ if (vm_map_debug_apple_protect) {
+ printf("APPLE_PROTECT: %d[%s] map %p [0x%llx:0x%llx] %s(%s) -> 0x%x\n",
+ p->p_pid, p->p_comm,
+ user_map,
+ (uint64_t) user_addr,
+ (uint64_t) (user_addr + user_size),
+ __FUNCTION__, vpath, result);
+ }
#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
- FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
-
- if(result) {
- printf("%s: unable to create decrypter %s, kr=%d\n",
- __FUNCTION__, cryptname, result);
- if (result == kIOReturnNotPrivileged) {
- /* text encryption returned decryption failure */
- return (EPERM);
- } else {
- return (ENOMEM);
- }
- }
-
- /* now remap using the decrypter */
- vm_object_offset_t crypto_backing_offset;
- crypto_backing_offset = -1; /* i.e. use map entry's offset */
- result = vm_map_apple_protected(user_map,
- user_addr,
- user_addr+user_size,
- crypto_backing_offset,
- &crypt_info);
- if (result) {
- printf("%s: mapping failed with %d\n", __FUNCTION__, result);
- }
-
- if (result) {
- return (EPERM);
- }
- return 0;
+ FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
+
+ if (result) {
+ printf("%s: unable to create decrypter %s, kr=%d\n",
+ __FUNCTION__, cryptname, result);
+ if (result == kIOReturnNotPrivileged) {
+ /* text encryption returned decryption failure */
+ return EPERM;
+ } else {
+ return ENOMEM;
+ }
+ }
+
+ /* now remap using the decrypter */
+ vm_object_offset_t crypto_backing_offset;
+ crypto_backing_offset = -1; /* i.e. use map entry's offset */
+ result = vm_map_apple_protected(user_map,
+ user_addr,
+ user_addr + user_size,
+ crypto_backing_offset,
+ &crypt_info);
+ if (result) {
+ printf("%s: mapping failed with %d\n", __FUNCTION__, result);
+ }
+
+ if (result) {
+ return EPERM;
+ }
+ return 0;
}
#endif /* CONFIG_CODE_DECRYPTION */