X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/b4c24cb9d3df001f2892dc4ed451bc769ff28a9f..5eebf7385fedb1517b66b53c28e5aa6bb0a2be50:/bsd/kern/kern_mman.c

diff --git a/bsd/kern/kern_mman.c b/bsd/kern/kern_mman.c
index 3febc75bf..e234d8955 100644
--- a/bsd/kern/kern_mman.c
+++ b/bsd/kern/kern_mman.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
  *
@@ -82,6 +82,9 @@
 #include 
 #include 
+#include 
+#include 
+
 #include 
 #include 
@@ -148,6 +151,7 @@ struct osmmap_args {
 	long pos;
 };
 
+int
 osmmap(curp, uap, retval)
 	struct proc *curp;
 	register struct osmmap_args *uap;
@@ -216,6 +220,10 @@ mmap(p, uap, retval)
 	user_addr = (vm_offset_t)uap->addr;
 	user_size = (vm_size_t) uap->len;
 
+	AUDIT_ARG(addr, (void *)user_addr);
+	AUDIT_ARG(len, (int) user_size);
+	AUDIT_ARG(fd, uap->fd);
+
 	prot = (uap->prot & VM_PROT_ALL);
 	flags = uap->flags;
 
@@ -243,7 +251,7 @@ mmap(p, uap, retval)
 	/* Adjust size for rounding (on both ends). */
 	user_size += pageoff;			/* low end... */
-	user_size = (vm_size_t) round_page(user_size);	/* hi end */
+	user_size = (vm_size_t) round_page_32(user_size);	/* hi end */
 
 
 	/*
@@ -277,8 +285,8 @@ mmap(p, uap, retval)
 	 * There should really be a pmap call to determine a reasonable
 	 * location.
 	 */
-	else if (addr < round_page(p->p_vmspace->vm_daddr + MAXDSIZ))
-		addr = round_page(p->p_vmspace->vm_daddr + MAXDSIZ);
+	else if (addr < round_page_32(p->p_vmspace->vm_daddr + MAXDSIZ))
+		addr = round_page_32(p->p_vmspace->vm_daddr + MAXDSIZ);
 
 #endif
 
@@ -300,7 +308,7 @@ mmap(p, uap, retval)
 		if (err)
 			return(err);
 		if(fp->f_type == DTYPE_PSXSHM) {
-			uap->addr = user_addr;
+			uap->addr = (caddr_t)user_addr;
 			uap->len = user_size;
 			uap->prot = prot;
 			uap->flags = flags;
@@ -314,12 +322,15 @@ mmap(p, uap, retval)
 		if (vp->v_type != VREG && vp->v_type != VCHR)
 			return (EINVAL);
+
+		AUDIT_ARG(vnpath, vp, ARG_VNODE1);
+
 		/*
 		 * XXX hack to handle use of /dev/zero to map anon memory (ala
 		 * SunOS).
 		 */
 		if (vp->v_type == VCHR || vp->v_type == VSTR) {
-			return(EOPNOTSUPP);
+			return(ENODEV);
 		} else {
 			/*
 			 * Ensure that file and memory protections are
@@ -371,7 +382,7 @@ mmap(p, uap, retval)
 	 * We bend a little - round the start and end addresses
 	 * to the nearest page boundary.
 	 */
-	user_size = round_page(user_size);
+	user_size = round_page_32(user_size);
 	if (file_pos & PAGE_MASK_64)
 		return (EINVAL);
 
@@ -380,9 +391,9 @@ mmap(p, uap, retval)
 
 	if ((flags & MAP_FIXED) == 0) {
 		find_space = TRUE;
-		user_addr = round_page(user_addr);
+		user_addr = round_page_32(user_addr);
 	} else {
-		if (user_addr != trunc_page(user_addr))
+		if (user_addr != trunc_page_32(user_addr))
 			return (EINVAL);
 		find_space = FALSE;
 		(void) vm_deallocate(user_map, user_addr, user_size);
@@ -416,9 +427,16 @@ mmap(p, uap, retval)
 		if (result != KERN_SUCCESS)
 			goto out;
+		result = vm_protect(user_map, user_addr, user_size, TRUE, maxprot);
+		if (result != KERN_SUCCESS)
+			goto out;
+		result = vm_protect(user_map, user_addr, user_size, FALSE, prot);
+		if (result != KERN_SUCCESS)
+			goto out;
+
 	} else {
 		UBCINFOCHECK("mmap", vp);
-		pager = ubc_getpager(vp);
+		pager = (vm_pager_t)ubc_getpager(vp);
 
 		if (pager == NULL)
 			return (ENOMEM);
@@ -458,7 +476,7 @@ mmap(p, uap, retval)
 			ubc_map(vp);
 	}
 
-	if (flags & (MAP_SHARED|MAP_INHERIT)) {
+	if (flags & MAP_SHARED) {
 		result = vm_inherit(user_map, user_addr, user_size,
 				VM_INHERIT_SHARE);
 		if (result != KERN_SUCCESS) {
@@ -507,7 +525,7 @@ msync(p, uap, retval)
 	pageoff = (addr & PAGE_MASK);
 	addr -= pageoff;
 	size = uap->len;
-	size = (vm_size_t) round_page(size);
+	size = (vm_size_t) round_page_32(size);
 	flags = uap->flags;
 
 	if (addr + size < addr)
@@ -515,6 +533,9 @@ msync(p, uap, retval)
 
 	user_map = current_map();
 
+	if ((flags & (MS_ASYNC|MS_SYNC)) == (MS_ASYNC|MS_SYNC))
+		return (EINVAL);
+
 	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
 		return (EINVAL);
 
@@ -526,7 +547,7 @@ msync(p, uap, retval)
 		 * inaccurate results, lets just return error as invalid size
 		 * specified
 		 */
-		return(EINVAL);
+		return (EINVAL); /* XXX breaks posix apps */
 	}
 
 	if (flags & MS_KILLPAGES)
@@ -556,10 +577,10 @@ msync(p, uap, retval)
 	}
 	return (0);
-
 }
 
+int
 mremap()
 {
 	/* Not yet implemented */
@@ -570,6 +591,7 @@ struct munmap_args {
 	caddr_t	addr;
 	int	len;
 };
+int
 munmap(p, uap, retval)
 	struct proc *p;
 	struct munmap_args *uap;
@@ -583,11 +605,14 @@ munmap(p, uap, retval)
 	user_addr = (vm_offset_t) uap->addr;
 	user_size = (vm_size_t) uap->len;
 
+	AUDIT_ARG(addr, (void *)user_addr);
+	AUDIT_ARG(len, (int) user_size);
+
 	pageoff = (user_addr & PAGE_MASK);
 	user_addr -= pageoff;
 	user_size += pageoff;
-	user_size = round_page(user_size);
+	user_size = round_page_32(user_size);
 
 	if (user_addr + user_size < user_addr)
 		return(EINVAL);
@@ -636,6 +661,9 @@ mprotect(p, uap, retval)
 	kern_return_t result;
 	vm_map_t	user_map;
 
+	AUDIT_ARG(addr, uap->addr);
+	AUDIT_ARG(len, uap->len);
+	AUDIT_ARG(value, uap->prot);
 	user_addr = (vm_offset_t) uap->addr;
 	user_size = (vm_size_t) uap->len;
 	prot = (vm_prot_t)(uap->prot & VM_PROT_ALL);
@@ -651,7 +679,7 @@ mprotect(p, uap, retval)
 	pageoff = (user_addr & PAGE_MASK);
 	user_addr -= pageoff;
 	user_size += pageoff;
-	user_size = round_page(user_size);
+	user_size = round_page_32(user_size);
 
 	if (user_addr + user_size < user_addr)
 		return(EINVAL);
@@ -687,6 +715,9 @@ minherit(p, uap, retval)
 	vm_map_t	user_map;
 	kern_return_t	result;
 
+	AUDIT_ARG(addr, uap->addr);
+	AUDIT_ARG(len, uap->len);
+	AUDIT_ARG(value, uap->inherit);
 	addr = (vm_offset_t)uap->addr;
 	size = uap->len;
 	inherit = uap->inherit;
@@ -694,7 +725,7 @@ minherit(p, uap, retval)
 	pageoff = (addr & PAGE_MASK);
 	addr -= pageoff;
 	size += pageoff;
-	size = (vm_size_t) round_page(size);
+	size = (vm_size_t) round_page_32(size);
 
 	if (addr + size < addr)
 		return(EINVAL);
@@ -744,8 +775,8 @@ madvise(p, uap, retval)
 	 * Since this routine is only advisory, we default to conservative
 	 * behavior.
 	 */
-	start = trunc_page((vm_offset_t) uap->addr);
-	end = round_page((vm_offset_t) uap->addr + uap->len);
+	start = trunc_page_32((vm_offset_t) uap->addr);
+	end = round_page_32((vm_offset_t) uap->addr + uap->len);
 
 	user_map = current_map();
 
@@ -809,8 +840,8 @@ mincore(p, uap, retval)
 	 * Make sure that the addresses presented are valid for user
 	 * mode.
	 */
-	first_addr = addr = trunc_page((vm_offset_t) uap->addr);
-	end = addr + (vm_size_t)round_page(uap->len);
+	first_addr = addr = trunc_page_32((vm_offset_t) uap->addr);
+	end = addr + (vm_size_t)round_page_32(uap->len);
 
 	if (VM_MAX_ADDRESS > 0 && end > VM_MAX_ADDRESS)
 		return (EINVAL);
@@ -904,13 +935,15 @@ mlock(p, uap, retval)
 	int error;
 	kern_return_t result;
 
+	AUDIT_ARG(addr, uap->addr);
+	AUDIT_ARG(len, uap->len);
 	addr = (vm_offset_t) uap->addr;
 	size = uap->len;
 
 	pageoff = (addr & PAGE_MASK);
 	addr -= pageoff;
 	size += pageoff;
-	size = (vm_size_t) round_page(size);
+	size = (vm_size_t) round_page_32(size);
 
 	/* disable wrap around */
 	if (addr + size < addr)
@@ -953,13 +986,15 @@ munlock(p, uap, retval)
 	vm_map_t user_map;
 	kern_return_t result;
 
+	AUDIT_ARG(addr, uap->addr);
+	AUDIT_ARG(len, uap->len);
 	addr = (vm_offset_t) uap->addr;
 	size = uap->len;
 
 	pageoff = (addr & PAGE_MASK);
 	addr -= pageoff;
 	size += pageoff;
-	size = (vm_size_t) round_page(size);
+	size = (vm_size_t) round_page_32(size);
 
 	/* disable wrap around */
 	if (addr + size < addr)
@@ -1011,6 +1046,7 @@ munlockall(p, uap)
 struct obreak_args {
 	char *nsiz;
 };
+int
 obreak(p, uap, retval)
 	struct proc *p;
 	struct obreak_args *uap;
@@ -1022,6 +1058,7 @@ obreak(p, uap, retval)
 int	both;
 
+int
 ovadvise()
 {
@@ -1030,12 +1067,11 @@ ovadvise()
 #endif
 }
 /* END DEFUNCT */
-#if 1
-int print_map_addr=0;
-#endif /* 1 */
 
 /* CDY need to fix interface to allow user to map above 32 bits */
-kern_return_t map_fd(
+/* USV: No! need to obsolete map_fd()! mmap() already supports 64 bits */
+kern_return_t
+map_fd(
 	int		fd,
 	vm_offset_t	offset,
 	vm_offset_t	*va,
@@ -1045,6 +1081,10 @@ kern_return_t map_fd(
 	kern_return_t ret;
 	boolean_t funnel_state;
 
+	AUDIT_MACH_SYSCALL_ENTER(AUE_MAPFD);
+	AUDIT_ARG(addr, va);
+	AUDIT_ARG(fd, fd);
+
 	funnel_state = thread_funnel_set(kernel_flock, TRUE);
 	ret = map_fd_funneled( fd, (vm_object_offset_t)offset,
@@ -1052,10 +1092,12 @@ kern_return_t map_fd(
 
 	(void) thread_funnel_set(kernel_flock, FALSE);
 
+	AUDIT_MACH_SYSCALL_EXIT(ret);
 	return ret;
 }
 
-kern_return_t map_fd_funneled(
+kern_return_t
+map_fd_funneled(
 	int			fd,
 	vm_object_offset_t	offset,
 	vm_offset_t		*va,
@@ -1072,9 +1114,6 @@ kern_return_t map_fd_funneled(
 	int		err=0;
 	vm_map_t	my_map;
 	struct proc	*p =(struct proc *)current_proc();
-#if 0
-	extern int print_map_addr;
-#endif /* 0 */
 
 	/*
 	 * Find the inode; verify that it's a regular file.
 	 */
@@ -1095,11 +1134,13 @@ kern_return_t map_fd_funneled(
 	if (vp->v_type != VREG)
 		return (KERN_INVALID_ARGUMENT);
 
+	AUDIT_ARG(vnpath, vp, ARG_VNODE1);
+
 	if (offset & PAGE_MASK_64) {
 		printf("map_fd: file offset not page aligned(%d : %s)\n",p->p_pid, p->p_comm);
 		return (KERN_INVALID_ARGUMENT);
 	}
-	map_size = round_page(size);
+	map_size = round_page_32(size);
 
 	/*
 	 * Allow user to map in a zero length file.
@@ -1132,7 +1173,7 @@ kern_return_t map_fd_funneled(
 		vm_map_copy_t	tmp;
 
 		if (copyin(va, &dst_addr, sizeof (dst_addr))	||
-		    trunc_page(dst_addr) != dst_addr) {
+		    trunc_page_32(dst_addr) != dst_addr) {
			(void) vm_map_remove(
					my_map,
					map_addr, map_addr + map_size,
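
Note (illustration, not part of the diff above): one user-visible change in this revision is the extra flag validation added to msync(), which now rejects the contradictory MS_ASYNC|MS_SYNC combination with EINVAL, alongside the existing MS_ASYNC|MS_INVALIDATE check. The short user-space sketch below demonstrates that semantic on a kernel carrying this change; the program and its output strings are illustrative only and are not taken from the xnu sources.

/*
 * Sketch: exercise the msync() flag check added in this diff.
 * MS_ASYNC and MS_SYNC are mutually exclusive, so passing both
 * is expected to fail with EINVAL on kernels with this change.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
	size_t len = (size_t)getpagesize();

	/* Anonymous shared page, large enough for one page-aligned region. */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_SHARED, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Conflicting flags: the updated kernel rejects this combination. */
	if (msync(p, len, MS_ASYNC | MS_SYNC) == -1)
		printf("msync(MS_ASYNC|MS_SYNC) failed: %s\n", strerror(errno));

	/* A single, valid flag behaves as before. */
	if (msync(p, len, MS_SYNC) == 0)
		printf("msync(MS_SYNC) succeeded\n");

	munmap(p, len);
	return 0;
}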