/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
long pos;
};
+int
osmmap(curp, uap, retval)
struct proc *curp;
register struct osmmap_args *uap;
/* Adjust size for rounding (on both ends). */
user_size += pageoff; /* low end... */
- user_size = (vm_size_t) round_page(user_size); /* hi end */
+ user_size = (vm_size_t) round_page_32(user_size); /* hi end */
/*
* There should really be a pmap call to determine a reasonable
* location.
*/
- else if (addr < round_page(p->p_vmspace->vm_daddr + MAXDSIZ))
- addr = round_page(p->p_vmspace->vm_daddr + MAXDSIZ);
+ else if (addr < round_page_32(p->p_vmspace->vm_daddr + MAXDSIZ))
+ addr = round_page_32(p->p_vmspace->vm_daddr + MAXDSIZ);
#endif
if (err)
return(err);
if(fp->f_type == DTYPE_PSXSHM) {
- uap->addr = user_addr;
+ uap->addr = (caddr_t)user_addr;
uap->len = user_size;
uap->prot = prot;
uap->flags = flags;
* SunOS).
*/
if (vp->v_type == VCHR || vp->v_type == VSTR) {
- return(EOPNOTSUPP);
+ return(ENODEV);
} else {
/*
* Ensure that file and memory protections are
* We bend a little - round the start and end addresses
* to the nearest page boundary.
*/
- user_size = round_page(user_size);
+ user_size = round_page_32(user_size);
if (file_pos & PAGE_MASK_64)
return (EINVAL);
if ((flags & MAP_FIXED) == 0) {
find_space = TRUE;
- user_addr = round_page(user_addr);
+ user_addr = round_page_32(user_addr);
} else {
- if (user_addr != trunc_page(user_addr))
+ if (user_addr != trunc_page_32(user_addr))
return (EINVAL);
find_space = FALSE;
(void) vm_deallocate(user_map, user_addr, user_size);
if (result != KERN_SUCCESS)
goto out;
+ result = vm_protect(user_map, user_addr, user_size, TRUE, maxprot);
+ if (result != KERN_SUCCESS)
+ goto out;
+ result = vm_protect(user_map, user_addr, user_size, FALSE, prot);
+ if (result != KERN_SUCCESS)
+ goto out;
+
} else {
UBCINFOCHECK("mmap", vp);
- pager = ubc_getpager(vp);
+ pager = (vm_pager_t)ubc_getpager(vp);
if (pager == NULL)
return (ENOMEM);
ubc_map(vp);
}
- if (flags & (MAP_SHARED|MAP_INHERIT)) {
+ if (flags & MAP_SHARED) {
result = vm_inherit(user_map, user_addr, user_size,
VM_INHERIT_SHARE);
if (result != KERN_SUCCESS) {
pageoff = (addr & PAGE_MASK);
addr -= pageoff;
size = uap->len;
- size = (vm_size_t) round_page(size);
+ size = (vm_size_t) round_page_32(size);
flags = uap->flags;
if (addr + size < addr)
user_map = current_map();
+ if ((flags & (MS_ASYNC|MS_SYNC)) == (MS_ASYNC|MS_SYNC))
+ return (EINVAL);
+
if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
return (EINVAL);
* inaccurate results, lets just return error as invalid size
* specified
*/
- return(EINVAL);
+	return (EINVAL);	/* XXX breaks POSIX apps */
}
if (flags & MS_KILLPAGES)
}
return (0);
-
}
+int
mremap()
{
/* Not yet implemented */
caddr_t addr;
int len;
};
+int
munmap(p, uap, retval)
struct proc *p;
struct munmap_args *uap;
user_addr -= pageoff;
user_size += pageoff;
- user_size = round_page(user_size);
+ user_size = round_page_32(user_size);
if (user_addr + user_size < user_addr)
return(EINVAL);
pageoff = (user_addr & PAGE_MASK);
user_addr -= pageoff;
user_size += pageoff;
- user_size = round_page(user_size);
+ user_size = round_page_32(user_size);
if (user_addr + user_size < user_addr)
return(EINVAL);
pageoff = (addr & PAGE_MASK);
addr -= pageoff;
size += pageoff;
- size = (vm_size_t) round_page(size);
+ size = (vm_size_t) round_page_32(size);
if (addr + size < addr)
return(EINVAL);
* Since this routine is only advisory, we default to conservative
* behavior.
*/
- start = trunc_page((vm_offset_t) uap->addr);
- end = round_page((vm_offset_t) uap->addr + uap->len);
+ start = trunc_page_32((vm_offset_t) uap->addr);
+ end = round_page_32((vm_offset_t) uap->addr + uap->len);
user_map = current_map();
* Make sure that the addresses presented are valid for user
* mode.
*/
- first_addr = addr = trunc_page((vm_offset_t) uap->addr);
- end = addr + (vm_size_t)round_page(uap->len);
+ first_addr = addr = trunc_page_32((vm_offset_t) uap->addr);
+ end = addr + (vm_size_t)round_page_32(uap->len);
if (VM_MAX_ADDRESS > 0 && end > VM_MAX_ADDRESS)
return (EINVAL);
pageoff = (addr & PAGE_MASK);
addr -= pageoff;
size += pageoff;
- size = (vm_size_t) round_page(size);
+ size = (vm_size_t) round_page_32(size);
/* disable wrap around */
if (addr + size < addr)
pageoff = (addr & PAGE_MASK);
addr -= pageoff;
size += pageoff;
- size = (vm_size_t) round_page(size);
+ size = (vm_size_t) round_page_32(size);
/* disable wrap around */
if (addr + size < addr)
struct obreak_args {
char *nsiz;
};
+int
obreak(p, uap, retval)
struct proc *p;
struct obreak_args *uap;
int both;
+int
ovadvise()
{
#endif
}
/* END DEFUNCT */
-#if 1
-int print_map_addr=0;
-#endif /* 1 */
/* CDY need to fix interface to allow user to map above 32 bits */
-kern_return_t map_fd(
+/* USV: no — map_fd() should be obsoleted instead; mmap() already supports 64 bits */
+kern_return_t
+map_fd(
int fd,
vm_offset_t offset,
vm_offset_t *va,
return ret;
}
-kern_return_t map_fd_funneled(
+kern_return_t
+map_fd_funneled(
int fd,
vm_object_offset_t offset,
vm_offset_t *va,
int err=0;
vm_map_t my_map;
struct proc *p =(struct proc *)current_proc();
-#if 0
- extern int print_map_addr;
-#endif /* 0 */
/*
* Find the inode; verify that it's a regular file.
printf("map_fd: file offset not page aligned(%d : %s)\n",p->p_pid, p->p_comm);
return (KERN_INVALID_ARGUMENT);
}
- map_size = round_page(size);
+ map_size = round_page_32(size);
/*
* Allow user to map in a zero length file.
vm_map_copy_t tmp;
if (copyin(va, &dst_addr, sizeof (dst_addr)) ||
- trunc_page(dst_addr) != dst_addr) {
+ trunc_page_32(dst_addr) != dst_addr) {
(void) vm_map_remove(
my_map,
map_addr, map_addr + map_size,