/*
- * Copyright (c) 2007 Apple Inc. All Rights Reserved.
+ * Copyright (c) 2007-2019 Apple Inc. All Rights Reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#if CONFIG_MACF
#include <security/mac_framework.h>
#endif
+#include <os/overflow.h>
+
+#ifndef CONFIG_EMBEDDED
+#include <IOKit/IOBSD.h> /* for IOTaskHasEntitlement */
+#include <sys/csr.h> /* for csr_check */
+#define MAP_32BIT_ENTITLEMENT "com.apple.security.mmap-map-32bit"
+#endif
/*
* XXX Internally, we use VM_PROT_* somewhat interchangeably, but the correct
vm_map_t user_map;
kern_return_t result;
vm_map_offset_t user_addr;
+ vm_map_offset_t sum;
vm_map_size_t user_size;
vm_object_offset_t pageoff;
vm_object_offset_t file_pos;
AUDIT_ARG(len, user_size);
AUDIT_ARG(fd, uap->fd);
+ if (vm_map_range_overflows(user_addr, user_size)) {
+ return EINVAL;
+ }
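/*
 * A minimal sketch of the guard used above, assuming only that
 * vm_map_range_overflows() rejects ranges whose end wraps the address
 * space; range_overflows_sketch() and its body are ours, built on the
 * real os_add_overflow() from <os/overflow.h>:
 */
static inline bool
range_overflows_sketch(vm_map_offset_t addr, vm_map_size_t size)
{
	vm_map_offset_t end;

	/* true (i.e. reject) when addr + size does not fit in 64 bits */
	return os_add_overflow(addr, size, &end);
}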
prot = (uap->prot & VM_PROT_ALL);
#if 3777787
/*
vp = NULLVP;
/*
- * The vm code does not have prototypes & compiler doesn't do the'
+ * The vm code does not have prototypes & the compiler doesn't do
 * the right thing when you cast a 64-bit value and pass it in a function
 * call. So here it is.
*/
/* make sure mapping fits into numeric range etc */
- if (file_pos + user_size > (vm_object_offset_t)-PAGE_SIZE_64) {
+ if (os_add3_overflow(file_pos, user_size, PAGE_SIZE_64 - 1, &sum)) {
return EINVAL;
}
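/*
 * Why three addends: the end of the mapping must still be representable
 * after rounding up to a page boundary. A made-up example, assuming
 * 4K pages (PAGE_SIZE_64 == 0x1000):
 *
 *	file_pos  = 0xFFFFFFFFFFFFD000
 *	user_size = 0x0000000000002800
 *	file_pos + user_size = 0xFFFFFFFFFFFFF800	(no wrap yet)
 *	+ (PAGE_SIZE_64 - 1) -> wraps past 2^64 - 1	-> EINVAL
 */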
(flags & MAP_JIT)) {
return EINVAL;
}
+ }
+ if (flags & MAP_RESILIENT_CODESIGN) {
if (prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
return EPERM;
}
}
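/*
 * Hypothetical caller's view of the check above (fd and len assumed
 * set up elsewhere): a MAP_RESILIENT_CODESIGN mapping must be
 * read-only, so requesting PROT_WRITE or PROT_EXEC fails with EPERM.
 */
void *p = mmap(NULL, len, PROT_READ,
    MAP_PRIVATE | MAP_RESILIENT_CODESIGN, fd, 0);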
+ if (flags & MAP_SHARED) {
+ /*
+ * MAP_RESILIENT_MEDIA is not valid with MAP_SHARED because
+ * there is no place to inject zero-filled pages without
+ * actually adding them to the file.
+ * Since we didn't reject that combination before, there might
+ * already be callers using it and getting a valid MAP_SHARED
+ * mapping but without the resilience.
+ * For backwards compatibility's sake, let's keep ignoring
+ * MAP_RESILIENT_MEDIA in that case.
+ */
+ flags &= ~MAP_RESILIENT_MEDIA;
+ }
+ if (flags & MAP_RESILIENT_MEDIA) {
+ if ((flags & MAP_ANON) ||
+ (flags & MAP_SHARED)) {
+ return EINVAL;
+ }
+ }
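/*
 * Hypothetical caller's view of the two MAP_RESILIENT_MEDIA rules
 * above (fd and len assumed set up elsewhere):
 */
/* allowed: private file mapping */
void *ok = mmap(NULL, len, PROT_READ,
    MAP_PRIVATE | MAP_RESILIENT_MEDIA, fd, 0);
/* EINVAL: anonymous memory has no backing media to be resilient against */
void *bad = mmap(NULL, len, PROT_READ,
    MAP_ANON | MAP_PRIVATE | MAP_RESILIENT_MEDIA, -1, 0);
/* with MAP_SHARED the flag is silently dropped and the mapping proceeds */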
/*
* Check for illegal addresses. Watch out for address wrap... Note
goto bad;
}
#endif /* MAC */
+ /*
+ * Consult the file system to determine if this
+ * particular file object can be mapped.
+ */
+ error = VNOP_MMAP_CHECK(vp, prot, ctx);
+ if (error) {
+ (void)vnode_put(vp);
+ goto bad;
+ }
}
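/*
 * Sketch of the filesystem side of that hook, assuming the
 * conventional vnop argument layout; myfs_mmap_check() and the
 * a_flags field name are assumptions, not copied from any real
 * filesystem. The hook lets an FS veto a mapping before any VM
 * state is created, e.g. refusing executable mappings:
 */
static int
myfs_mmap_check(struct vnop_mmap_check_args *ap)
{
	/* refuse executable mappings of this vnode */
	if (ap->a_flags & VM_PROT_EXECUTE) {
		return EPERM;
	}
	return 0;	/* 0 lets kern_mman.c continue the mapping */
}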
+
+ /*
+ * No copy-on-read for mmap() mappings themselves.
+ */
+ vmk_flags.vmkf_no_copy_on_read = 1;
}
if (user_size == 0) {
if (flags & MAP_RESILIENT_CODESIGN) {
alloc_flags |= VM_FLAGS_RESILIENT_CODESIGN;
}
+ if (flags & MAP_RESILIENT_MEDIA) {
+ alloc_flags |= VM_FLAGS_RESILIENT_MEDIA;
+ }
+
+#ifndef CONFIG_EMBEDDED
+ if (flags & MAP_32BIT) {
+ if (csr_check(CSR_ALLOW_UNTRUSTED_KEXTS) == 0 ||
+ IOTaskHasEntitlement(current_task(), MAP_32BIT_ENTITLEMENT)) {
+ vmk_flags.vmkf_32bit_map_va = TRUE;
+ } else {
+ error = EPERM;
+ goto bad;
+ }
+ }
+#endif
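/*
 * Hypothetical caller's view of the gate above: MAP_32BIT asks for a
 * low-4GB address, and without the com.apple.security.mmap-map-32bit
 * entitlement (or SIP configured to allow untrusted kexts) mmap()
 * fails with EPERM.
 */
void *low = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
    MAP_ANON | MAP_PRIVATE | MAP_32BIT, -1, 0);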
/*
* Lookup/allocate object.
#endif /* radar 3777787 */
map_file_retry:
- if ((flags & MAP_RESILIENT_CODESIGN) ||
- (flags & MAP_RESILIENT_MEDIA)) {
+ if (flags & MAP_RESILIENT_CODESIGN) {
if (prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
assert(!mapanon);
vnode_put(vp);
user_map = current_map();
addr = (mach_vm_offset_t) uap->addr;
- size = (mach_vm_size_t)uap->len;
+ size = (mach_vm_size_t) uap->len;
#ifndef CONFIG_EMBEDDED
KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_msync) | DBG_FUNC_NONE), (uint32_t)(addr >> 32), (uint32_t)(size >> 32), 0, 0, 0);
#endif
+ if (mach_vm_range_overflows(addr, size)) {
+ return EINVAL;
+ }
if (addr & vm_map_page_mask(user_map)) {
/* UNIX SPEC: user address is not page-aligned, return EINVAL */
return EINVAL;
return EINVAL;
}
- if (user_addr + user_size < user_addr) {
+ if (mach_vm_range_overflows(user_addr, user_size)) {
return EINVAL;
}
user_size = (mach_vm_size_t) uap->len;
prot = (vm_prot_t)(uap->prot & (VM_PROT_ALL | VM_PROT_TRUSTED | VM_PROT_STRIP_READ));
+ if (mach_vm_range_overflows(user_addr, user_size)) {
+ return EINVAL;
+ }
if (user_addr & vm_map_page_mask(user_map)) {
/* UNIX SPEC: user address is not page-aligned, return EINVAL */
return EINVAL;
addr = (mach_vm_offset_t)uap->addr;
size = (mach_vm_size_t)uap->len;
inherit = uap->inherit;
-
+ if (mach_vm_range_overflows(addr, size)) {
+ return EINVAL;
+ }
user_map = current_map();
result = mach_vm_inherit(user_map, addr, size,
inherit);
start = (mach_vm_offset_t) uap->addr;
size = (mach_vm_size_t) uap->len;
-
+ if (mach_vm_range_overflows(start, size)) {
+ return EINVAL;
+ }
#if __arm64__
if (start == 0 &&
size != 0 &&
addr = (vm_map_offset_t) uap->addr;
size = (vm_map_size_t)uap->len;
- /* disable wrap around */
- if (addr + size < addr) {
+ if (vm_map_range_overflows(addr, size)) {
return EINVAL;
}
kern_return_t result;
AUDIT_ARG(addr, uap->addr);
- AUDIT_ARG(addr, uap->len);
+ AUDIT_ARG(len, uap->len);
addr = (mach_vm_offset_t) uap->addr;
size = (mach_vm_size_t)uap->len;
user_map = current_map();
-
+ if (mach_vm_range_overflows(addr, size)) {
+ return EINVAL;
+ }
/* JMM - need to remove all wirings by spec - this just removes one */
result = mach_vm_wire_kernel(host_priv_self(), user_map, addr, size, VM_PROT_NONE, VM_KERN_MEMORY_MLOCK);
return result == KERN_SUCCESS ? 0 : ENOMEM;
cputype = uap->cputype;
cpusubtype = uap->cpusubtype;
+ if (mach_vm_range_overflows(user_addr, user_size)) {
+ return EINVAL;
+ }
if (user_addr & vm_map_page_mask(user_map)) {
/* UNIX SPEC: user address is not page-aligned, return EINVAL */
return EINVAL;