- user_addr = (mach_vm_offset_t)uap->addr;
- user_size = (mach_vm_size_t) uap->len;
+ user_map = current_map();
+ user_addr = (vm_map_offset_t)uap->addr;
+ user_size = (vm_map_size_t) uap->len;
AUDIT_ARG(addr, user_addr);
AUDIT_ARG(len, user_size);
* Align the file position to a page boundary,
* and save its page offset component.
*/
- user_size += pageoff; /* low end... */
- user_size = mach_vm_round_page(user_size); /* hi end */
+ user_size += pageoff; /* low end... */
+ user_size = vm_map_round_page(user_size,
+ vm_map_page_mask(user_map)); /* hi end */
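Both helpers now take the target map's page mask instead of baking in the kernel-wide PAGE_MASK, which keeps the arithmetic correct when a user map's page size differs from the kernel's. A minimal sketch of the mask-based rounding, assuming the conventional definitions (the names here are illustrative, not the XNU macros):

    /* Minimal sketch of mask-based page rounding; `mask` is the map's
     * page size minus one (e.g. 0xFFF for 4 KB pages). */
    static inline vm_map_offset_t
    round_page_mask(vm_map_offset_t off, vm_map_offset_t mask)
    {
        return (off + mask) & ~mask;    /* up to the next boundary */
    }

    static inline vm_map_offset_t
    trunc_page_mask(vm_map_offset_t off, vm_map_offset_t mask)
    {
        return off & ~mask;             /* down to the boundary */
    }

With 4 KB pages, round_page_mask(0x1001, 0xFFF) yields 0x2000 and trunc_page_mask(0x1001, 0xFFF) yields 0x1000.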
- else if (addr < mach_vm_round_page(p->p_vmspace->vm_daddr + MAXDSIZ))
- addr = mach_vm_round_page(p->p_vmspace->vm_daddr + MAXDSIZ);
+ else if (addr < vm_map_round_page(p->p_vmspace->vm_daddr + MAXDSIZ,
+ vm_map_page_mask(user_map)))
+ addr = vm_map_round_page(p->p_vmspace->vm_daddr + MAXDSIZ,
+ vm_map_page_mask(user_map));
- /*error = mac_proc_check_map_anon(p, user_addr, user_size, prot, flags, &maxprot);
- if (error) {
- return EINVAL;
- }*/
+ error = mac_proc_check_map_anon(p, user_addr, user_size, prot, flags, &maxprot);
+ if (error) {
+ return EINVAL;
+ }
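The MACF entitlement check on anonymous mappings goes from commented-out to live. A hedged sketch of the enabled call site; the EINVAL conversion follows the old commented block, and the preprocessor guard is the usual MACF convention:

    #if CONFIG_MACF
        /* Reject the anonymous mapping if any loaded MAC policy
         * vetoes it; policies may also narrow maxprot. */
        error = mac_proc_check_map_anon(p, user_addr, user_size,
                                        prot, flags, &maxprot);
        if (error != 0)
            return EINVAL;
    #endif /* MAC */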
/*
* Mapping file, get fp for validation. Obtain vnode and make
* sure it is of appropriate type.
uap->pos = file_pos;
error = pshm_mmap(p, uap, retval, fp, (off_t)pageoff);
goto bad;
- void *cnode;
- if ((cnode = cp_get_protected_cnode(vp)) != NULL) {
- error = cp_handle_vnop(cnode, CP_READ_ACCESS | CP_WRITE_ACCESS);
- if (error) {
- (void) vnode_put(vp);
- goto bad;
- }
- }
+ error = cp_handle_vnop(vp, CP_READ_ACCESS | CP_WRITE_ACCESS, 0);
+ if (error) {
+ (void) vnode_put(vp);
+ goto bad;
+ }
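cp_handle_vnop() now takes the vnode directly plus an extra ioflag argument (0 here), absorbing the cp_get_protected_cnode() lookup that callers used to do themselves. A hypothetical sketch of what the new entry point might look like internally, under that assumption (cp_handle_vnop_internal is invented for illustration):

    /* Hypothetical internal shape of the new entry point, assuming it
     * performs the cnode lookup on the caller's behalf. */
    int
    cp_handle_vnop(struct vnode *vp, int vnop, int ioflag)
    {
        void *cnode;

        if ((cnode = cp_get_protected_cnode(vp)) == NULL)
            return 0;   /* not content-protected: nothing to check */

        return cp_handle_vnop_internal(cnode, vnop, ioflag); /* hypothetical */
    }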
* lack of space between the address and the map's maximum.
*/
if ((result == KERN_NO_SPACE) && ((flags & MAP_FIXED) == 0) && user_addr && (num_retries++ == 0)) {
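This clause retries a failed allocation exactly once (num_retries guards the loop) when the caller supplied a non-fixed address hint that left no room below the map's maximum. A sketch of the pattern; the reset value and label name are illustrative:

    /* One-shot retry: drop the caller's hint and let the kernel pick
     * the address itself on the second pass. */
    if ((result == KERN_NO_SPACE) && ((flags & MAP_FIXED) == 0) &&
        user_addr && (num_retries++ == 0)) {
        user_addr = 0;          /* assumption: forget the hint */
        goto map_retry;         /* hypothetical label */
    }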
KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_mmap) | DBG_FUNC_NONE), fd, (uint32_t)(*retval), (uint32_t)user_size, error, 0);
KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO2, SYS_mmap) | DBG_FUNC_NONE), (uint32_t)(*retval >> 32), (uint32_t)(user_size >> 32),
(uint32_t)(file_pos >> 32), (uint32_t)file_pos, 0);
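The kdebug arguments here are 32 bits wide on the trace wire, so each 64-bit quantity is logged as a low/high pair split across the EXTENDED_INFO and EXTENDED_INFO2 trace points. A sketch of the split:

    /* Splitting a 64-bit value into the two 32-bit halves carried by
     * the paired trace points above. */
    uint64_t v  = (uint64_t)*retval;
    uint32_t lo = (uint32_t)v;          /* low half  -> EXTENDED_INFO  */
    uint32_t hi = (uint32_t)(v >> 32);  /* high half -> EXTENDED_INFO2 */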
KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_msync) | DBG_FUNC_NONE), (uint32_t)(addr >> 32), (uint32_t)(size >> 32), 0, 0, 0);
rv = mach_vm_msync(user_map, addr, size, sync_flags);
switch (rv) {
munmap(__unused proc_t p, struct munmap_args *uap, __unused int32_t *retval)
{
mach_vm_offset_t user_addr;
user_addr = (mach_vm_offset_t) uap->addr;
user_size = (mach_vm_size_t) uap->len;
AUDIT_ARG(addr, user_addr);
AUDIT_ARG(len, user_size);
AUDIT_ARG(len, uap->len);
AUDIT_ARG(value32, uap->prot);
user_addr = (mach_vm_offset_t) uap->addr;
user_size = (mach_vm_size_t) uap->len;
prot = (vm_prot_t)(uap->prot & (VM_PROT_ALL | VM_PROT_TRUSTED));
* mac_proc_check_mprotect() hook above. Otherwise, Codesigning will be
* compromised because the check would always succeed and thusly any
* process could sign dynamically. */
- result = vm_map_sign(user_map,
- vm_map_trunc_page(user_addr),
- vm_map_round_page(user_addr+user_size));
+ result = vm_map_sign(
+ user_map,
+ vm_map_trunc_page(user_addr,
+ vm_map_page_mask(user_map)),
+ vm_map_round_page(user_addr+user_size,
+ vm_map_page_mask(user_map)));
- first_addr = addr = mach_vm_trunc_page(uap->addr);
- end = addr + mach_vm_round_page(uap->len);
+ first_addr = addr = vm_map_trunc_page(uap->addr,
+ vm_map_page_mask(map));
+ end = addr + vm_map_round_page(uap->len,
+ vm_map_page_mask(map));
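mincore() reports one byte per page, so the user range is widened to page boundaries first: the start truncates down and the length rounds up, now against the map's own mask. A sketch of the walk this sets up (the loop body is elided):

    /* Walk the widened range one page at a time; the page size is the
     * mask plus one. One result byte per page is the contract. */
    vm_map_size_t pgsz = vm_map_page_mask(map) + 1;
    for (vm_map_offset_t a = addr; a < end; a += pgsz) {
        /* query residency of the page at `a`, emit one byte */
    }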
/* have to call vm_map_wire directly to pass "I don't know" protections */
result = vm_map_wire(user_map, addr, addr+size, VM_PROT_NONE, TRUE);
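The comment is the whole story: mlock() cannot know which access the caller will perform, so it bypasses the protection-checking wire path and passes VM_PROT_NONE as an "I don't know" sentinel rather than a request for zero access. A sketch of the surrounding tail, with the errno mapping an assumption:

    /* VM_PROT_NONE here means "wire for whatever the entries allow",
     * not "no access"; TRUE marks a user-requested wiring. */
    result = vm_map_wire(user_map, addr, addr + size,
                         VM_PROT_NONE, TRUE);
    return (result == KERN_SUCCESS) ? 0 : ENOMEM;  /* assumed mapping */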
/* USV: No! need to obsolete map_fd()! mmap() already supports 64 bits */
kern_return_t
map_fd(struct map_fd_args *args)
- void *cnode;
- if ((cnode = cp_get_protected_cnode(vp)) != NULL) {
- err = cp_handle_vnop(cnode, CP_READ_ACCESS | CP_WRITE_ACCESS);
- if (err != 0) {
- (void)vnode_put(vp);
- goto bad;
- }
- }
+ err = cp_handle_vnop(vp, CP_READ_ACCESS | CP_WRITE_ACCESS, 0);
+ if (err != 0) {
+ (void) vnode_put(vp);
+ goto bad;
+ }
printf("map_fd: file offset not page aligned(%d : %s)\n",p->p_pid, p->p_comm);
(void)vnode_put(vp);
err = KERN_INVALID_ARGUMENT;
goto bad;
}
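map_fd() can only map whole pages, so a file offset with any page-offset bits set is rejected up front, after logging the offending pid and process name. A sketch of the predicate with a worked value:

    /* Alignment predicate: with 4 KB pages the mask is 0xFFF, so an
     * offset of 0x1200 fails (0x1200 & 0xFFF == 0x200 != 0). */
    if (offset & PAGE_MASK_64)
        err = KERN_INVALID_ARGUMENT;    /* unaligned: reject */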
printf("map_fd: file offset not page aligned(%d : %s)\n",p->p_pid, p->p_comm);
(void)vnode_put(vp);
err = KERN_INVALID_ARGUMENT;
goto bad;
}
result = vm_map_64(
my_map,
&map_addr, map_size, (vm_offset_t)0,
(vm_map_size_t)map_size, TRUE, &tmp);
if (result != KERN_SUCCESS) {
- (void) vm_map_remove(my_map, vm_map_trunc_page(map_addr),
- vm_map_round_page(map_addr + map_size),
- VM_MAP_NO_FLAGS);
+ (void) vm_map_remove(
+ my_map,
+ vm_map_trunc_page(map_addr,
+ vm_map_page_mask(my_map)),
+ vm_map_round_page(map_addr + map_size,
+ vm_map_page_mask(my_map)),
+ VM_MAP_NO_FLAGS);
// K64todo bug compatible now, should fix for 64bit user
uint32_t user_map_addr = CAST_DOWN_EXPLICIT(uint32_t, map_addr);
if (copyout(&user_map_addr, CAST_USER_ADDR_T(va), sizeof (user_map_addr))) {
- (void) vm_map_remove(my_map, vm_map_trunc_page(map_addr),
- vm_map_round_page(map_addr + map_size),
- VM_MAP_NO_FLAGS);
+ (void) vm_map_remove(
+ my_map,
+ vm_map_trunc_page(map_addr,
+ vm_map_page_mask(my_map)),
+ vm_map_round_page(map_addr + map_size,
+ vm_map_page_mask(my_map)),
+ VM_MAP_NO_FLAGS);
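The K64todo comment above marks the known wart this hunk keeps "bug compatible": only 32 bits of the mapped address are copied out to user space, so a mapping placed above 4 GB would be silently truncated. A worked sketch of the failure mode:

    /* The truncation the comment keeps bug compatible: an address
     * above 4 GB loses its high bits on the 32-bit copyout. */
    vm_map_offset_t map_addr64 = 0x0000000120034000ULL; /* illustrative */
    uint32_t truncated = (uint32_t)map_addr64;          /* == 0x20034000 */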