int fd = uap->fd;
int num_retries = 0;
+ user_map = current_map();
user_addr = (vm_map_offset_t)uap->addr;
user_size = (vm_map_size_t) uap->len;
* Align the file position to a page boundary,
* and save its page offset component.
*/
- pageoff = (file_pos & PAGE_MASK);
+ pageoff = (file_pos & vm_map_page_mask(user_map));
file_pos -= (vm_object_offset_t)pageoff;
/* Adjust size for rounding (on both ends). */
- user_size += pageoff; /* low end... */
- user_size = mach_vm_round_page(user_size); /* hi end */
+ user_size += pageoff; /* low end... */
+ user_size = vm_map_round_page(user_size,
+ vm_map_page_mask(user_map)); /* hi end */
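All of the alignment in this change derives from the target map's own page mask instead of the build-time PAGE_MASK, which matters once 4 KB and 16 KB maps coexist. A worked example with hypothetical values, assuming a 4 KB map (mask 0xfff):

    /* file_pos = 0x12345, uap->len = 0x1000, mask = 0xfff:
     *   pageoff   = 0x12345 & 0xfff             = 0x345
     *   file_pos -= pageoff                     -> 0x12000 (page base)
     *   user_size = 0x1000 + 0x345              = 0x1345
     *   rounded   = (0x1345 + 0xfff) & ~0xfff   = 0x2000 (two pages)
     * On a 16 KB map (mask 0x3fff) the same request rounds to a
     * single 16 KB page instead. */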
if ((flags & MAP_JIT) && ((flags & MAP_FIXED) || (flags & MAP_SHARED) || !(flags & MAP_ANON))){
return EINVAL;
* should be aligned after adjustment by pageoff.
*/
user_addr -= pageoff;
- if (user_addr & PAGE_MASK)
+ if (user_addr & vm_map_page_mask(user_map))
return (EINVAL);
}
#ifdef notyet
* There should really be a pmap call to determine a reasonable
* location.
*/
- else if (addr < mach_vm_round_page(p->p_vmspace->vm_daddr + MAXDSIZ))
- addr = mach_vm_round_page(p->p_vmspace->vm_daddr + MAXDSIZ);
+ else if (addr < vm_map_round_page(p->p_vmspace->vm_daddr + MAXDSIZ,
+ vm_map_page_mask(user_map)))
+ addr = vm_map_round_page(p->p_vmspace->vm_daddr + MAXDSIZ,
+ vm_map_page_mask(user_map));
#endif
if (err)
return(err);
fpref = 1;
- if(fp->f_fglob->fg_type == DTYPE_PSXSHM) {
+ switch (FILEGLOB_DTYPE(fp->f_fglob)) {
+ case DTYPE_PSXSHM:
uap->addr = (user_addr_t)user_addr;
uap->len = (user_size_t)user_size;
uap->prot = prot;
uap->pos = file_pos;
error = pshm_mmap(p, uap, retval, fp, (off_t)pageoff);
goto bad;
- }
-
- if (fp->f_fglob->fg_type != DTYPE_VNODE) {
+ case DTYPE_VNODE:
+ break;
+ default:
error = EINVAL;
goto bad;
}
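The old code tested fg_type directly; the new switch goes through the FILEGLOB_DTYPE() accessor, which (assuming the definition in bsd/sys/file_internal.h of contemporary xnu) derives the type from the fileops vector rather than a cached field:

    /* Sketch of the accessor the switch relies on: */
    #define FILEGLOB_DTYPE(fg) ((const file_type_t)((fg)->fg_ops->fo_type))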
* We bend a little - round the start and end addresses
* to the nearest page boundary.
*/
- user_size = mach_vm_round_page(user_size);
+ user_size = vm_map_round_page(user_size,
+ vm_map_page_mask(user_map));
- if (file_pos & PAGE_MASK_64) {
+ if (file_pos & vm_map_page_mask(user_map)) {
if (!mapanon)
(void)vnode_put(vp);
error = EINVAL;
goto bad;
}
- user_map = current_map();
-
if ((flags & MAP_FIXED) == 0) {
alloc_flags |= VM_FLAGS_ANYWHERE;
- user_addr = mach_vm_round_page(user_addr);
+ user_addr = vm_map_round_page(user_addr,
+ vm_map_page_mask(user_map));
} else {
- if (user_addr != mach_vm_trunc_page(user_addr)) {
+ if (user_addr != vm_map_trunc_page(user_addr,
+ vm_map_page_mask(user_map))) {
if (!mapanon)
(void)vnode_put(vp);
error = EINVAL;
* lack of space between the address and the map's maximum.
*/
if ((result == KERN_NO_SPACE) && ((flags & MAP_FIXED) == 0) && user_addr && (num_retries++ == 0)) {
- user_addr = PAGE_SIZE;
+ user_addr = vm_map_page_size(user_map);
goto map_anon_retry;
}
} else {
* lack of space between the address and the map's maximum.
*/
if ((result == KERN_NO_SPACE) && ((flags & MAP_FIXED) == 0) && user_addr && (num_retries++ == 0)) {
- user_addr = PAGE_SIZE;
+ user_addr = vm_map_page_size(user_map);
goto map_file_retry;
}
}
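Both retry paths encode the same policy: a non-MAP_FIXED request whose hint cannot be honored is retried once from the map's lowest page, presumably vm_map_page_size() rather than 0 so that page zero is never handed out. From userspace the hint is simply advisory; a minimal illustration (hint value arbitrary):

    #include <sys/mman.h>
    #include <stdio.h>

    int main(void) {
        void *hint = (void *)0x100000000000UL;   /* may well be occupied */
        void *p = mmap(hint, 4096, PROT_READ | PROT_WRITE,
            MAP_ANON | MAP_PRIVATE, -1, 0);
        /* Without MAP_FIXED the kernel may relocate the mapping; only a
         * MAP_FIXED request fails instead of moving. */
        printf("asked %p, got %p\n", hint, p);
        return 0;
    }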
int rv;
vm_sync_t sync_flags = 0;
+ user_map = current_map();
addr = (mach_vm_offset_t) uap->addr;
size = (mach_vm_size_t)uap->len;
KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_msync) | DBG_FUNC_NONE), (uint32_t)(addr >> 32), (uint32_t)(size >> 32), 0, 0, 0);
- if (addr & PAGE_MASK_64) {
+ if (addr & vm_map_page_mask(user_map)) {
/* UNIX SPEC: user address is not page-aligned, return EINVAL */
return EINVAL;
}
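Since msync() rejects an address that is unaligned with respect to the caller's map, a caller syncing an interior byte range should truncate the pointer to its page base and widen the length to match; a small sketch (helper name hypothetical):

    #include <sys/mman.h>
    #include <stdint.h>
    #include <unistd.h>

    /* Round an arbitrary byte range out to page granularity, then sync. */
    static int msync_range(void *p, size_t len) {
        uintptr_t mask = (uintptr_t)sysconf(_SC_PAGESIZE) - 1;
        char *base = (char *)((uintptr_t)p & ~mask);
        return msync(base, (size_t)((char *)p - base) + len, MS_SYNC);
    }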
sync_flags |= VM_SYNC_CONTIGUOUS; /* complain if holes */
- user_map = current_map();
rv = mach_vm_msync(user_map, addr, size, sync_flags);
switch (rv) {
munmap(__unused proc_t p, struct munmap_args *uap, __unused int32_t *retval)
{
mach_vm_offset_t user_addr;
- mach_vm_size_t user_size;
- kern_return_t result;
+ mach_vm_size_t user_size;
+ kern_return_t result;
+ vm_map_t user_map;
+ user_map = current_map();
user_addr = (mach_vm_offset_t) uap->addr;
user_size = (mach_vm_size_t) uap->len;
AUDIT_ARG(addr, user_addr);
AUDIT_ARG(len, user_size);
- if (user_addr & PAGE_MASK_64) {
+ if (user_addr & vm_map_page_mask(user_map)) {
/* UNIX SPEC: user address is not page-aligned, return EINVAL */
return EINVAL;
}
return EINVAL;
}
- result = mach_vm_deallocate(current_map(), user_addr, user_size);
+ result = mach_vm_deallocate(user_map, user_addr, user_size);
if (result != KERN_SUCCESS) {
return(EINVAL);
}
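munmap() now bottoms out in a single mach_vm_deallocate() on the cached user_map; the same operation is reachable from userspace through the Mach API, e.g.:

    #include <mach/mach.h>
    #include <mach/mach_vm.h>

    /* Sketch: the Mach-level twin of munmap() for the current task;
     * addr and size obey the same page-alignment rules. */
    static kern_return_t unmap(mach_vm_address_t addr, mach_vm_size_t size) {
        return mach_vm_deallocate(mach_task_self(), addr, size);
    }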
AUDIT_ARG(len, uap->len);
AUDIT_ARG(value32, uap->prot);
+ user_map = current_map();
user_addr = (mach_vm_offset_t) uap->addr;
user_size = (mach_vm_size_t) uap->len;
prot = (vm_prot_t)(uap->prot & (VM_PROT_ALL | VM_PROT_TRUSTED));
- if (user_addr & PAGE_MASK_64) {
+ if (user_addr & vm_map_page_mask(user_map)) {
/* UNIX SPEC: user address is not page-aligned, return EINVAL */
return EINVAL;
}
prot |= VM_PROT_READ;
#endif /* 3936456 */
- user_map = current_map();
-
#if CONFIG_MACF
/*
* The MAC check for mprotect is of limited use for 2 reasons:
* mac_proc_check_mprotect() hook above. Otherwise, Codesigning will be
 * compromised because the check would always succeed and thus any
* process could sign dynamically. */
- result = vm_map_sign(user_map,
- vm_map_trunc_page(user_addr),
- vm_map_round_page(user_addr+user_size));
+ result = vm_map_sign(
+ user_map,
+ vm_map_trunc_page(user_addr,
+ vm_map_page_mask(user_map)),
+ vm_map_round_page(user_addr+user_size,
+ vm_map_page_mask(user_map)));
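Truncating the start down and rounding the end up guarantees the signed region covers every page the [user_addr, user_addr + user_size) span touches, at the map's own granularity. For instance, assuming a 16 KB map (mask 0x3fff):

    /* user_addr = 0x6001, user_size = 0x100:
     *   start = 0x6001 & ~0x3fff             = 0x4000
     *   end   = (0x6101 + 0x3fff) & ~0x3fff  = 0x8000
     * so both partially covered pages fall inside the signed range. */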
switch (result) {
case KERN_SUCCESS:
break;
* Make sure that the addresses presented are valid for user
* mode.
*/
- first_addr = addr = mach_vm_trunc_page(uap->addr);
- end = addr + mach_vm_round_page(uap->len);
+ first_addr = addr = vm_map_trunc_page(uap->addr,
+ vm_map_page_mask(map));
+ end = addr + vm_map_round_page(uap->len,
+ vm_map_page_mask(map));
if (end < addr)
return (EINVAL);
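The end < addr comparison is an overflow guard: a length that pushes the rounded end past the top of the address space wraps modulo 2^64, and the wrapped value is the only evidence. A hypothetical 64-bit case with 4 KB pages:

    /* uap->addr = 0xfffffffffffff000, uap->len = 0x2000:
     *   addr = trunc(uap->addr)      = 0xfffffffffffff000
     *   end  = addr + round(0x2000)  = 0x1000   (wrapped)
     *   end < addr, so mincore() returns EINVAL. */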
if (size == 0)
return (0);
- pageoff = (addr & PAGE_MASK);
- addr -= pageoff;
- size = vm_map_round_page(size+pageoff);
user_map = current_map();
+ pageoff = (addr & vm_map_page_mask(user_map));
+ addr -= pageoff;
+ size = vm_map_round_page(size+pageoff, vm_map_page_mask(user_map));
/* have to call vm_map_wire directly to pass "I don't know" protections */
result = vm_map_wire(user_map, addr, addr+size, VM_PROT_NONE, TRUE);
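Because the range is widened to whole pages before vm_map_wire(), locking even one byte wires the entire page containing it, and VM_PROT_NONE is passed precisely because the caller's eventual access mode is unknown (the "I don't know" protections noted above). A userspace illustration:

    #include <sys/mman.h>
    #include <stdlib.h>

    int main(void) {
        char *buf = malloc(1);
        mlock(buf, 1);      /* wires the whole page holding buf */
        munlock(buf, 1);    /* unwiring is page-granular too */
        free(buf);
        return 0;
    }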
return(ENOSYS);
}
-#if !defined(CONFIG_EMBEDDED)
/* USV: No! need to obsolete map_fd()! mmap() already supports 64 bits */
kern_return_t
map_fd(struct map_fd_args *args)
proc_t p = current_proc();
struct vnode_attr vattr;
+ my_map = current_map();
+
/*
* Find the inode; verify that it's a regular file.
*/
if (err)
return(err);
- if (fp->f_fglob->fg_type != DTYPE_VNODE){
+ if (FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_VNODE) {
err = KERN_INVALID_ARGUMENT;
goto bad;
}
vnode_setattr(vp, &vattr, vfs_context_current());
}
- if (offset & PAGE_MASK_64) {
+ if (offset & vm_map_page_mask(my_map)) {
printf("map_fd: file offset not page aligned(%d : %s)\n",p->p_pid, p->p_comm);
(void)vnode_put(vp);
err = KERN_INVALID_ARGUMENT;
goto bad;
}
- map_size = round_page(size);
+ map_size = vm_map_round_page(size, vm_map_page_mask(my_map));
/*
* Allow user to map in a zero length file.
goto bad;
}
-
- my_map = current_map();
-
result = vm_map_64(
my_map,
&map_addr, map_size, (vm_offset_t)0,
vm_map_copy_t tmp;
if (copyin(CAST_USER_ADDR_T(va), &dst_addr, sizeof (dst_addr)) ||
- trunc_page(dst_addr) != dst_addr) {
+ trunc_page(dst_addr) != dst_addr) {
(void) vm_map_remove(
my_map,
map_addr, map_addr + map_size,
(vm_map_size_t)map_size, TRUE, &tmp);
if (result != KERN_SUCCESS) {
- (void) vm_map_remove(my_map, vm_map_trunc_page(map_addr),
- vm_map_round_page(map_addr + map_size),
- VM_MAP_NO_FLAGS);
+ (void) vm_map_remove(
+ my_map,
+ vm_map_trunc_page(map_addr,
+ vm_map_page_mask(my_map)),
+ vm_map_round_page(map_addr + map_size,
+ vm_map_page_mask(my_map)),
+ VM_MAP_NO_FLAGS);
(void)vnode_put(vp);
err = result;
goto bad;
// K64todo bug compatible now, should fix for 64bit user
uint32_t user_map_addr = CAST_DOWN_EXPLICIT(uint32_t, map_addr);
if (copyout(&user_map_addr, CAST_USER_ADDR_T(va), sizeof (user_map_addr))) {
- (void) vm_map_remove(my_map, vm_map_trunc_page(map_addr),
- vm_map_round_page(map_addr + map_size),
- VM_MAP_NO_FLAGS);
+ (void) vm_map_remove(
+ my_map,
+ vm_map_trunc_page(map_addr,
+ vm_map_page_mask(my_map)),
+ vm_map_round_page(map_addr + map_size,
+ vm_map_page_mask(my_map)),
+ VM_MAP_NO_FLAGS);
(void)vnode_put(vp);
err = KERN_INVALID_ADDRESS;
goto bad;
fp_drop(p, fd, fp, 0);
return (err);
}
-#endif /* !defined(CONFIG_EMBEDDED) */
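The copyout path narrows map_addr to 32 bits via CAST_DOWN_EXPLICIT for bug-compatibility, which is what the K64todo note flags: a mapping placed above 4 GB would be silently truncated. A worked illustration with hypothetical values:

    /* map_addr           = 0x12c000000   (above 4 GB)
     * CAST_DOWN_EXPLICIT -> 0x2c000000   (high bits lost)
     * The caller reads a plausible but wrong address; mmap(2), which
     * returns a full user_addr_t, does not have this problem. */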