#include <sys/ubc.h>
#include <sys/ubc_internal.h>
#include <sys/sysproto.h>
+#if CONFIG_PROTECT
+#include <sys/cprotect.h>
+#endif
#include <sys/syscall.h>
#include <sys/kdebug.h>
-#include <bsm/audit_kernel.h>
+#include <security/audit/audit.h>
#include <bsm/audit_kevents.h>
#include <mach/mach_types.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
-
-struct osmmap_args {
- caddr_t addr;
- int len;
- int prot;
- int share;
- int fd;
- long pos;
-};
+#include <vm/vm_protos.h>
/* XXX the following function should probably be static */
kern_return_t map_fd_funneled(int, vm_object_offset_t, vm_offset_t *,
boolean_t, vm_size_t);
-/* XXX the following two functions aren't used anywhere */
-int osmmap(proc_t , struct osmmap_args *, register_t *);
-int mremap(void);
-
-int
-sbrk(__unused proc_t p, __unused struct sbrk_args *uap, __unused register_t *retval)
-{
- /* Not yet implemented */
- return (ENOTSUP);
-}
-
-int
-sstk(__unused proc_t p, __unused struct sstk_args *uap, __unused register_t *retval)
-{
- /* Not yet implemented */
- return (ENOTSUP);
-}
-
-
-int
-osmmap(
- proc_t curp,
- struct osmmap_args *uap,
- register_t *retval)
-{
- struct mmap_args newargs;
- user_addr_t addr;
- int ret;
-
- if ((uap->share == MAP_SHARED )|| (uap->share == MAP_PRIVATE )) {
- newargs.addr = CAST_USER_ADDR_T(uap->addr);
- newargs.len = CAST_USER_ADDR_T(uap->len);
- newargs.prot = uap->prot;
- newargs.flags = uap->share;
- newargs.fd = uap->fd;
- newargs.pos = (off_t)uap->pos;
- ret = mmap(curp, &newargs, &addr);
- if (ret == 0)
- *retval = CAST_DOWN(register_t, addr);
- } else
- ret = EINVAL;
- return ret;
-}
-
-
/*
* XXX Internally, we use VM_PROT_* somewhat interchangeably, but the correct
* XXX usage is PROT_* from an interface perspective. Thus the values of
boolean_t docow;
vm_prot_t maxprot;
void *handle;
- vm_pager_t pager;
+ memory_object_t pager = MEMORY_OBJECT_NULL;
+ memory_object_control_t control;
int mapanon=0;
int fpref=0;
int error =0;
int fd = uap->fd;
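+ /* Retry count: the mapping is retried at most once, at a PAGE_SIZE base, if the hinted address leaves no room (see the retry logic below). */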
+ int num_retries = 0;
user_addr = (mach_vm_offset_t)uap->addr;
user_size = (mach_vm_size_t) uap->len;
user_size += pageoff; /* low end... */
user_size = mach_vm_round_page(user_size); /* hi end */
-
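+ /* A MAP_JIT request cannot be combined with MAP_FIXED, MAP_SHARED, or MAP_FILE. */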
+ if ((flags & MAP_JIT) && ((flags & MAP_FIXED) || (flags & MAP_SHARED) || (flags & MAP_FILE))) {
+ return EINVAL;
+ }
/*
* Check for illegal addresses. Watch out for address wrap... Note
* that VM_*_ADDRESS are not constants due to casts (argh).
*/
user_addr -= pageoff;
if (user_addr & PAGE_MASK)
- return (EINVAL);
+ return (EINVAL);
}
#ifdef notyet
/* Do not have APIs to get this info; need to wait until then. */
alloc_flags = 0;
if (flags & MAP_ANON) {
+
+ maxprot = VM_PROT_ALL;
+#if CONFIG_MACF
+ /*
+ * Entitlement check.
+ * Re-enable once mac* is implemented.
+ */
+ /*error = mac_proc_check_map_anon(p, user_addr, user_size, prot, flags, &maxprot);
+ if (error) {
+ return EINVAL;
+ }*/
+#endif /* MAC */
+
/*
* Mapping blank space is trivial. Use positive fds as the alias
* value for memory tracking.
* Use "fd" to pass (some) Mach VM allocation flags,
* (see the VM_FLAGS_* definitions).
*/
- alloc_flags = fd & (VM_FLAGS_ALIAS_MASK |
+ alloc_flags = fd & (VM_FLAGS_ALIAS_MASK | VM_FLAGS_SUPERPAGE_MASK |
VM_FLAGS_PURGABLE);
if (alloc_flags != fd) {
/* reject if there are any extra flags */
}
handle = NULL;
- maxprot = VM_PROT_ALL;
file_pos = 0;
mapanon = 1;
} else {
VATTR_SET_ACTIVE(&va, va_access_time);
vnode_setattr(vp, &va, ctx);
}
-
+
/*
* XXX hack to handle use of /dev/zero to map anon memory (ala
* SunOS).
*/
if ((flags & MAP_SHARED) != 0) {
- if ((fp->f_fglob->fg_flag & FWRITE) != 0) {
+ if ((fp->f_fglob->fg_flag & FWRITE) != 0 &&
+ /*
+ * Do not allow writable mappings of
+ * swap files (see vm_swapfile_pager.c).
+ */
+ !vnode_isswap(vp)) {
/*
* check for write access
*
goto bad;
}
#endif /* MAC */
+
+#if CONFIG_PROTECT
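+ /* Check for content-protection access before allowing the mapping. */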
+ {
+ void *cnode;
+ if ((cnode = cp_get_protected_cnode(vp)) != NULL) {
+ error = cp_handle_vnop(cnode, CP_READ_ACCESS | CP_WRITE_ACCESS);
+ if (error) {
+ (void) vnode_put(vp);
+ goto bad;
+ }
+ }
+ }
+#endif /* CONFIG_PROTECT */
+
+
}
}
if (flags & MAP_NOCACHE)
alloc_flags |= VM_FLAGS_NO_CACHE;
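+ /* Carry a MAP_JIT request through to the Mach VM layer. */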
+ if (flags & MAP_JIT) {
+ alloc_flags |= VM_FLAGS_MAP_JIT;
+ }
/*
* Lookup/allocate object.
*/
if (handle == NULL) {
- pager = NULL;
+ control = NULL;
#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
maxprot |= VM_PROT_READ;
#endif /* radar 3777787 */
-
+map_anon_retry:
result = vm_map_enter_mem_object(user_map,
&user_addr, user_size,
0, alloc_flags,
(flags & MAP_SHARED) ?
VM_INHERIT_SHARE :
VM_INHERIT_DEFAULT);
- if (result != KERN_SUCCESS)
- goto out;
+
+ /* If a non-binding address was specified for this anonymous
+ * mapping, retry the mapping at a near-zero base (PAGE_SIZE)
+ * in the event the mapping operation failed due to
+ * lack of space between the requested address and the map's maximum.
+ */
+ if ((result == KERN_NO_SPACE) && ((flags & MAP_FIXED) == 0) && user_addr && (num_retries++ == 0)) {
+ user_addr = PAGE_SIZE;
+ goto map_anon_retry;
+ }
} else {
- pager = (vm_pager_t)ubc_getpager(vp);
+ if (vnode_isswap(vp)) {
+ /*
+ * Map swap files with a special pager
+ * that returns obfuscated contents.
+ */
+ control = NULL;
+ pager = swapfile_pager_setup(vp);
+ if (pager != MEMORY_OBJECT_NULL) {
+ control = swapfile_pager_control(pager);
+ }
+ } else {
+ control = ubc_getobject(vp, UBC_FLAGS_NONE);
+ }
- if (pager == NULL) {
+ if (control == NULL) {
(void)vnode_put(vp);
error = ENOMEM;
goto bad;
if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
maxprot |= VM_PROT_READ;
#endif /* radar 3777787 */
-
- result = vm_map_enter_mem_object(user_map,
+map_file_retry:
+ result = vm_map_enter_mem_object_control(user_map,
&user_addr, user_size,
0, alloc_flags,
- (ipc_port_t)pager, file_pos,
+ control, file_pos,
docow, prot, maxprot,
(flags & MAP_SHARED) ?
VM_INHERIT_SHARE :
VM_INHERIT_DEFAULT);
- if (result != KERN_SUCCESS) {
- (void)vnode_put(vp);
- goto out;
+ /* If a non-binding address was specified for this file-backed
+ * mapping, retry the mapping at a near-zero base (PAGE_SIZE)
+ * in the event the mapping operation failed due to
+ * lack of space between the requested address and the map's maximum.
+ */
+ if ((result == KERN_NO_SPACE) && ((flags & MAP_FIXED) == 0) && user_addr && (num_retries++ == 0)) {
+ user_addr = PAGE_SIZE;
+ goto map_file_retry;
}
}
- if (!mapanon)
+ if (!mapanon) {
(void)vnode_put(vp);
+ }
-out:
switch (result) {
case KERN_SUCCESS:
*retval = user_addr + pageoff;
break;
}
bad:
+ if (pager != MEMORY_OBJECT_NULL) {
+ /*
+ * Release the reference on the pager.
+ * If the mapping was successful, it now holds
+ * an extra reference.
+ */
+ memory_object_deallocate(pager);
+ }
if (fpref)
fp_drop(p, fd, fp, 0);
}
int
-msync(__unused proc_t p, struct msync_args *uap, register_t *retval)
+msync(__unused proc_t p, struct msync_args *uap, int32_t *retval)
{
__pthread_testcancel(1);
return(msync_nocancel(p, (struct msync_nocancel_args *)uap, retval));
}
int
-msync_nocancel(__unused proc_t p, struct msync_nocancel_args *uap, __unused register_t *retval)
+msync_nocancel(__unused proc_t p, struct msync_nocancel_args *uap, __unused int32_t *retval)
{
mach_vm_offset_t addr;
mach_vm_size_t size;
int
-mremap(void)
-{
- /* Not yet implemented */
- return (ENOTSUP);
-}
-
-int
-munmap(__unused proc_t p, struct munmap_args *uap, __unused register_t *retval)
+munmap(__unused proc_t p, struct munmap_args *uap, __unused int32_t *retval)
{
mach_vm_offset_t user_addr;
mach_vm_size_t user_size;
}
int
-mprotect(__unused proc_t p, struct mprotect_args *uap, __unused register_t *retval)
+mprotect(__unused proc_t p, struct mprotect_args *uap, __unused int32_t *retval)
{
register vm_prot_t prot;
mach_vm_offset_t user_addr;
AUDIT_ARG(addr, uap->addr);
AUDIT_ARG(len, uap->len);
- AUDIT_ARG(value, uap->prot);
+ AUDIT_ARG(value32, uap->prot);
user_addr = (mach_vm_offset_t) uap->addr;
user_size = (mach_vm_size_t) uap->len;
- prot = (vm_prot_t)(uap->prot & VM_PROT_ALL);
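+ /* VM_PROT_TRUSTED is a pseudo-protection; it is validated and stripped before the mach_vm_protect() call below. */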
+ prot = (vm_prot_t)(uap->prot & (VM_PROT_ALL | VM_PROT_TRUSTED));
if (user_addr & PAGE_MASK_64) {
/* UNIX SPEC: user address is not page-aligned, return EINVAL */
if (error)
return (error);
#endif
+
+ if (prot & VM_PROT_TRUSTED) {
+#if CONFIG_DYNAMIC_CODE_SIGNING
+ /* CODE SIGNING ENFORCEMENT - JIT support */
+ /* The special protection value VM_PROT_TRUSTED requests that we treat
+ * this page as if it had a valid code signature.
+ * If this is enabled, there MUST be a MAC policy implementing the
+ * mac_proc_check_mprotect() hook above. Otherwise, code signing would be
+ * compromised, because the check would always succeed, allowing any
+ * process to sign its pages dynamically. */
+ result = vm_map_sign(user_map,
+ vm_map_trunc_page(user_addr),
+ vm_map_round_page(user_addr+user_size));
+ switch (result) {
+ case KERN_SUCCESS:
+ break;
+ case KERN_INVALID_ADDRESS:
+ /* UNIX SPEC: for an invalid address range, return ENOMEM */
+ return ENOMEM;
+ default:
+ return EINVAL;
+ }
+#else
+ return ENOTSUP;
+#endif
+ }
+ prot &= ~VM_PROT_TRUSTED;
+
result = mach_vm_protect(user_map, user_addr, user_size,
FALSE, prot);
switch (result) {
int
-minherit(__unused proc_t p, struct minherit_args *uap, __unused register_t *retval)
+minherit(__unused proc_t p, struct minherit_args *uap, __unused int32_t *retval)
{
mach_vm_offset_t addr;
mach_vm_size_t size;
AUDIT_ARG(addr, uap->addr);
AUDIT_ARG(len, uap->len);
- AUDIT_ARG(value, uap->inherit);
+ AUDIT_ARG(value32, uap->inherit);
addr = (mach_vm_offset_t)uap->addr;
size = (mach_vm_size_t)uap->len;
}
int
-madvise(__unused proc_t p, struct madvise_args *uap, __unused register_t *retval)
+madvise(__unused proc_t p, struct madvise_args *uap, __unused int32_t *retval)
{
vm_map_t user_map;
mach_vm_offset_t start;
case MADV_DONTNEED:
new_behavior = VM_BEHAVIOR_DONTNEED;
break;
+ case MADV_FREE:
+ new_behavior = VM_BEHAVIOR_FREE;
+ break;
+ case MADV_ZERO_WIRED_PAGES:
+ new_behavior = VM_BEHAVIOR_ZERO_WIRED_PAGES;
+ break;
+ case MADV_FREE_REUSABLE:
+ new_behavior = VM_BEHAVIOR_REUSABLE;
+ break;
+ case MADV_FREE_REUSE:
+ new_behavior = VM_BEHAVIOR_REUSE;
+ break;
+ case MADV_CAN_REUSE:
+ new_behavior = VM_BEHAVIOR_CAN_REUSE;
+ break;
default:
return(EINVAL);
}
result = mach_vm_behavior_set(user_map, start, size, new_behavior);
switch (result) {
- case KERN_SUCCESS:
- return (0);
- case KERN_INVALID_ADDRESS:
- return (ENOMEM);
+ case KERN_SUCCESS:
+ return 0;
+ case KERN_INVALID_ADDRESS:
+ return EINVAL;
+ case KERN_NO_SPACE:
+ return ENOMEM;
}
- return (EINVAL);
+ return EINVAL;
}
int
-mincore(__unused proc_t p, struct mincore_args *uap, __unused register_t *retval)
+mincore(__unused proc_t p, struct mincore_args *uap, __unused int32_t *retval)
{
mach_vm_offset_t addr, first_addr, end;
vm_map_t map;
}
int
-mlock(__unused proc_t p, struct mlock_args *uap, __unused register_t *retvalval)
+mlock(__unused proc_t p, struct mlock_args *uap, __unused int32_t *retvalval)
{
vm_map_t user_map;
vm_map_offset_t addr;
}
int
-munlock(__unused proc_t p, struct munlock_args *uap, __unused register_t *retval)
+munlock(__unused proc_t p, struct munlock_args *uap, __unused int32_t *retval)
{
mach_vm_offset_t addr;
mach_vm_size_t size;
int
-mlockall(__unused proc_t p, __unused struct mlockall_args *uap, __unused register_t *retval)
+mlockall(__unused proc_t p, __unused struct mlockall_args *uap, __unused int32_t *retval)
{
return (ENOSYS);
}
int
-munlockall(__unused proc_t p, __unused struct munlockall_args *uap, __unused register_t *retval)
+munlockall(__unused proc_t p, __unused struct munlockall_args *uap, __unused int32_t *retval)
{
return(ENOSYS);
}
-
-/* BEGIN DEFUNCT */
-int
-obreak(__unused proc_t p, __unused struct obreak_args *uap, __unused register_t *retval)
-{
- /* Not implemented, obsolete */
- return (ENOMEM);
-}
-
-int both;
-
-int
-ovadvise(__unused proc_t p, __unused struct ovadvise_args *uap, __unused register_t *retval)
-{
-
-#ifdef lint
- both = 0;
-#endif
- return( 0 );
-}
-/* END DEFUNCT */
-
+#if !defined(CONFIG_EMBEDDED)
/* USV: No! need to obsolete map_fd()! mmap() already supports 64 bits */
kern_return_t
map_fd(struct map_fd_args *args)
kern_return_t ret;
AUDIT_MACH_SYSCALL_ENTER(AUE_MAPFD);
- AUDIT_ARG(addr, CAST_DOWN(user_addr_t, va));
+ AUDIT_ARG(addr, CAST_DOWN(user_addr_t, args->va));
AUDIT_ARG(fd, fd);
ret = map_fd_funneled( fd, (vm_object_offset_t)offset, va, findspace, size);
vm_offset_t map_addr=0;
vm_size_t map_size;
int err=0;
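+ /* maxprot may be narrowed by the MAC mmap check below. */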
+ vm_prot_t maxprot = VM_PROT_ALL;
vm_map_t my_map;
proc_t p = current_proc();
struct vnode_attr vattr;
goto bad;
}
+#if CONFIG_MACF
+ err = mac_file_check_mmap(vfs_context_ucred(vfs_context_current()),
+ fp->f_fglob, VM_PROT_DEFAULT, MAP_FILE, &maxprot);
+ if (err) {
+ (void)vnode_put(vp);
+ goto bad;
+ }
+#endif /* MAC */
+
+#if CONFIG_PROTECT
+ /* check for content protection access */
+ {
+ void *cnode;
+ if ((cnode = cp_get_protected_cnode(vp)) != NULL) {
+ err = cp_handle_vnop(cnode, CP_READ_ACCESS | CP_WRITE_ACCESS);
+ if (err != 0) {
+ (void)vnode_put(vp);
+ goto bad;
+ }
+ }
+ }
+#endif /* CONFIG_PROTECT */
+
AUDIT_ARG(vnpath, vp, ARG_VNODE1);
/*
my_map,
&map_addr, map_size, (vm_offset_t)0,
VM_FLAGS_ANYWHERE, pager, offset, TRUE,
- VM_PROT_DEFAULT, VM_PROT_ALL,
+ VM_PROT_DEFAULT, maxprot,
VM_INHERIT_DEFAULT);
if (result != KERN_SUCCESS) {
(void)vnode_put(vp);
if (!findspace) {
- vm_offset_t dst_addr;
+ // K64todo: fix for 64-bit user?
+ uint32_t dst_addr;
vm_map_copy_t tmp;
if (copyin(CAST_USER_ADDR_T(va), &dst_addr, sizeof (dst_addr)) ||
- trunc_page_32(dst_addr) != dst_addr) {
+ trunc_page(dst_addr) != dst_addr) {
(void) vm_map_remove(
my_map,
map_addr, map_addr + map_size,
goto bad;
}
} else {
- if (copyout(&map_addr, CAST_USER_ADDR_T(va), sizeof (map_addr))) {
+ // K64todo: bug-compatible for now; should fix for 64-bit user
+ uint32_t user_map_addr = CAST_DOWN_EXPLICIT(uint32_t, map_addr);
+ if (copyout(&user_map_addr, CAST_USER_ADDR_T(va), sizeof (user_map_addr))) {
(void) vm_map_remove(my_map, vm_map_trunc_page(map_addr),
vm_map_round_page(map_addr + map_size),
VM_MAP_NO_FLAGS);
fp_drop(p, fd, fp, 0);
return (err);
}
+#endif /* !defined(CONFIG_EMBEDDED) */