/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
*
* @(#)vfs_vnops.c 8.14 (Berkeley) 6/15/95
*
- * History
- * 10-20-1997 Umesh Vaishampayan
- * Fixed the count to be off_t rather than int.
*/
#include <sys/param.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/ubc.h>
-#include <mach/kern_return.h>
-#include <mach/memory_object_control.h>
-#include <mach/vm_prot.h>
+#include <sys/conf.h>
+#include <sys/disk.h>
+
+#include <vm/vm_kern.h>
+
+#include <miscfs/specfs/specdev.h>
+
+static int vn_closefile __P((struct file *fp, struct proc *p));
+static int vn_ioctl __P((struct file *fp, u_long com, caddr_t data,
+ struct proc *p));
+static int vn_read __P((struct file *fp, struct uio *uio,
+ struct ucred *cred, int flags, struct proc *p));
+static int vn_write __P((struct file *fp, struct uio *uio,
+ struct ucred *cred, int flags, struct proc *p));
+static int vn_select __P(( struct file *fp, int which, void * wql,
+ struct proc *p));
struct fileops vnops =
{ vn_read, vn_write, vn_ioctl, vn_select, vn_closefile };
/*
 * Common code for vnode open operations.
* Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
*/
+int
vn_open(ndp, fmode, cmode)
register struct nameidata *ndp;
int fmode, cmode;
struct vattr vat;
struct vattr *vap = &vat;
int error;
+ int didhold = 0;
if (fmode & O_CREAT) {
ndp->ni_cnd.cn_nameiop = CREATE;
ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
if ((fmode & O_EXCL) == 0)
ndp->ni_cnd.cn_flags |= FOLLOW;
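+ /*
+ * About to create and write a file: let the buffer cache throttle this
+ * thread first if too many delayed writes are already pending (assumed
+ * bwillwrite() semantics).
+ */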
+ bwillwrite();
if (error = namei(ndp))
return (error);
if (ndp->ni_vp == NULL) {
error = EOPNOTSUPP;
goto bad;
}
+
+#if DIAGNOSTIC
+ if (UBCINFOMISSING(vp))
+ panic("vn_open: ubc_info_init");
+#endif /* DIAGNOSTIC */
+
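+ /*
+ * Take a hold on the vnode's ubc_info so the backing memory object stays
+ * referenced for the duration of this open; a failed hold is treated as
+ * the vnode having gone away and returns ENOENT.
+ */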
+ if (UBCINFOEXISTS(vp) && ((didhold = ubc_hold(vp)) == 0)) {
+ error = ENOENT;
+ goto bad;
+ }
+
if ((fmode & O_CREAT) == 0) {
if (fmode & FREAD && fmode & (FWRITE | O_TRUNC)) {
int err = 0;
if (fmode & O_TRUNC) {
VOP_UNLOCK(vp, 0, p); /* XXX */
VOP_LEASE(vp, p, cred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); /* XXX */
+ (void)vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); /* XXX */
VATTR_NULL(vap);
vap->va_size = 0;
if (error = VOP_SETATTR(vp, vap, cred, p))
goto bad;
}
- if (error = VOP_OPEN(vp, fmode, cred, p))
+
+ if (error = VOP_OPEN(vp, fmode, cred, p)) {
goto bad;
- if (UBCINFOMISSING(vp))
- panic("vn_open: ubc_info_init");
- if (UBCINFOEXISTS(vp) && !ubc_hold(vp))
- panic("vn_open: hold");
+ }
+
if (fmode & FWRITE)
if (++vp->v_writecount <= 0)
panic("vn_open: v_writecount");
return (0);
bad:
- vput(vp);
+ VOP_UNLOCK(vp, 0, p);
+ if (didhold)
+ ubc_rele(vp);
+ vrele(vp);
return (error);
}
/*
 * Check for write permissions on the specified vnode.
* Prototype text segments cannot be written.
*/
+int
vn_writechk(vp)
register struct vnode *vp;
{
/*
* Vnode close call
*/
+int
vn_close(vp, flags, cred, p)
register struct vnode *vp;
int flags;
struct proc *p;
{
int error;
- vm_map_t user_map;
- vm_offset_t addr, addr1;
- vm_size_t size, pageoff;
if (flags & FWRITE)
vp->v_writecount--;
/*
* Package up an I/O request on a vnode into a uio and do it.
*/
+int
vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, p)
enum uio_rw rw;
struct vnode *vp;
/* FIXME XXX */
if ((ioflg & IO_NODELOCKED) == 0)
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ (void)vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
aiov.iov_base = base;
/*
* File table vnode read routine.
*/
-vn_read(fp, uio, cred)
+static int
+vn_read(fp, uio, cred, flags, p)
struct file *fp;
struct uio *uio;
struct ucred *cred;
+ int flags;
+ struct proc *p;
{
- struct vnode *vp = (struct vnode *)fp->f_data;
- struct proc *p = uio->uio_procp;
- int error;
+ struct vnode *vp;
+ int error, ioflag;
off_t count;
+ if (p != uio->uio_procp)
+ panic("vn_read: uio_procp does not match p");
+
+ vp = (struct vnode *)fp->f_data;
+ ioflag = 0;
+ if (fp->f_flag & FNONBLOCK)
+ ioflag |= IO_NDELAY;
VOP_LEASE(vp, p, cred, LEASE_READ);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- uio->uio_offset = fp->f_offset;
+ error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ if (error)
+ return (error);
+ if ((flags & FOF_OFFSET) == 0)
+ uio->uio_offset = fp->f_offset;
count = uio->uio_resid;
- error = VOP_READ(vp, uio, (fp->f_flag & FNONBLOCK) ? IO_NDELAY : 0, cred);
-
- fp->f_offset += count - uio->uio_resid;
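+ /*
+ * For ubc-backed vnodes, run every iovec through vm_conflict_check() to
+ * detect a collision between the destination buffer and a mapping of this
+ * same file/offset.  If all iovecs report KERN_ALREADY_WAITING the request
+ * is marked complete without calling VOP_READ; inconsistent or failing
+ * checks are rejected with EINVAL.
+ */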
+ if(UBCINFOEXISTS(vp)) {
+ memory_object_t pager;
+ struct iovec *iov;
+ off_t file_off;
+ kern_return_t kr = KERN_SUCCESS;
+ kern_return_t ret = KERN_SUCCESS;
+ int count;
+
+ pager = (memory_object_t)ubc_getpager(vp);
+ file_off = uio->uio_offset;
+ iov = uio->uio_iov;
+ count = uio->uio_iovcnt;
+ while(count) {
+ kr = vm_conflict_check(current_map(),
+ (vm_offset_t)iov->iov_base, iov->iov_len,
+ pager, file_off);
+ if(kr == KERN_ALREADY_WAITING) {
+ if((count != uio->uio_iovcnt) &&
+ (ret != KERN_ALREADY_WAITING)) {
+ error = EINVAL;
+ goto done;
+ }
+ ret = KERN_ALREADY_WAITING;
+ } else if (kr != KERN_SUCCESS) {
+ error = EINVAL;
+ goto done;
+ }
+ if(kr != ret) {
+ error = EINVAL;
+ goto done;
+ }
+ file_off += iov->iov_len;
+ iov++;
+ count--;
+ }
+ if(ret == KERN_ALREADY_WAITING) {
+ uio->uio_resid = 0;
+ if ((flags & FOF_OFFSET) == 0)
+ fp->f_offset +=
+ count - uio->uio_resid;
+ error = 0;
+ goto done;
+ }
+ }
+ error = VOP_READ(vp, uio, ioflag, cred);
+ if ((flags & FOF_OFFSET) == 0)
+ fp->f_offset += count - uio->uio_resid;
+done:
VOP_UNLOCK(vp, 0, p);
return (error);
}
/*
* File table vnode write routine.
*/
-vn_write(fp, uio, cred)
+static int
+vn_write(fp, uio, cred, flags, p)
struct file *fp;
struct uio *uio;
struct ucred *cred;
+ int flags;
+ struct proc *p;
{
- struct vnode *vp = (struct vnode *)fp->f_data;
- struct proc *p = uio->uio_procp;
- int error, ioflag = IO_UNIT;
+ struct vnode *vp;
+ int error, ioflag;
off_t count;
+ if (p != uio->uio_procp)
+ panic("vn_write: uio_procp does not match p");
+
+ vp = (struct vnode *)fp->f_data;
+ ioflag = IO_UNIT;
+ if (vp->v_type == VREG)
+ bwillwrite();
if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
ioflag |= IO_APPEND;
if (fp->f_flag & FNONBLOCK)
ioflag |= IO_NDELAY;
if ((fp->f_flag & O_FSYNC) ||
(vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
ioflag |= IO_SYNC;
VOP_LEASE(vp, p, cred, LEASE_WRITE);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- uio->uio_offset = fp->f_offset;
- count = uio->uio_resid;
+ error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ if (error)
+ return (error);
+ if ((flags & FOF_OFFSET) == 0) {
+ uio->uio_offset = fp->f_offset;
+ count = uio->uio_resid;
+ }
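+ /*
+ * Same per-iovec collision check as in vn_read(): a uniform
+ * KERN_ALREADY_WAITING result completes the write without calling
+ * VOP_WRITE, while mixed or failing results return EINVAL.
+ */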
+ if(UBCINFOEXISTS(vp)) {
+ memory_object_t pager;
+ struct iovec *iov;
+ off_t file_off;
+ kern_return_t kr = KERN_SUCCESS;
+ kern_return_t ret = KERN_SUCCESS;
+ int count;
+
+ pager = (memory_object_t)ubc_getpager(vp);
+ file_off = uio->uio_offset;
+ iov = uio->uio_iov;
+ count = uio->uio_iovcnt;
+ while(count) {
+ kr = vm_conflict_check(current_map(),
+ (vm_offset_t)iov->iov_base,
+ iov->iov_len, pager, file_off);
+ if(kr == KERN_ALREADY_WAITING) {
+ if((count != uio->uio_iovcnt) &&
+ (ret != KERN_ALREADY_WAITING)) {
+ error = EINVAL;
+ goto done;
+ }
+ ret = KERN_ALREADY_WAITING;
+ } else if (kr != KERN_SUCCESS) {
+ error = EINVAL;
+ goto done;
+ }
+ if(kr != ret) {
+ error = EINVAL;
+ goto done;
+ }
+ file_off += iov->iov_len;
+ iov++;
+ count--;
+ }
+ if(ret == KERN_ALREADY_WAITING) {
+ uio->uio_resid = 0;
+ if ((flags & FOF_OFFSET) == 0)
+ fp->f_offset +=
+ count - uio->uio_resid;
+ error = 0;
+ goto done;
+ }
+ }
error = VOP_WRITE(vp, uio, ioflag, cred);
- if (ioflag & IO_APPEND)
- fp->f_offset = uio->uio_offset;
- else
- fp->f_offset += count - uio->uio_resid;
+ if ((flags & FOF_OFFSET) == 0) {
+ if (ioflag & IO_APPEND)
+ fp->f_offset = uio->uio_offset;
+ else
+ fp->f_offset += count - uio->uio_resid;
+ }
+
/*
* Set the credentials on successful writes
*/
ubc_setcred(vp, p);
}
+done:
VOP_UNLOCK(vp, 0, p);
return (error);
}
/*
* File table vnode stat routine.
*/
+int
vn_stat(vp, sb, p)
struct vnode *vp;
register struct stat *sb;
/*
* File table vnode ioctl routine.
*/
+static int
vn_ioctl(fp, com, data, p)
struct file *fp;
u_long com;
register struct vnode *vp = ((struct vnode *)fp->f_data);
struct vattr vattr;
int error;
-
+ struct vnode *ttyvp;
+
switch (vp->v_type) {
case VREG:
case VFIFO:
case VCHR:
case VBLK:
- error = VOP_IOCTL(vp, com, data, fp->f_flag, p->p_ucred, p);
- if (error == 0 && com == TIOCSCTTY) {
- if (p->p_session->s_ttyvp)
- vrele(p->p_session->s_ttyvp);
- p->p_session->s_ttyvp = vp;
- VREF(vp);
- }
- return (error);
+
+ /* Should not be able to set block size from user space */
+ if(com == DKIOCSETBLOCKSIZE)
+ return (EPERM);
+
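+ /*
+ * FIODTYPE reports the device type (d_type) straight from the block or
+ * character device switch entry for this vnode's dev_t.
+ */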
+ if (com == FIODTYPE) {
+ if (vp->v_type == VBLK) {
+ if (major(vp->v_rdev) >= nblkdev)
+ return (ENXIO);
+ *(int *)data = bdevsw[major(vp->v_rdev)].d_type;
+ } else if (vp->v_type == VCHR) {
+ if (major(vp->v_rdev) >= nchrdev)
+ return (ENXIO);
+ *(int *)data = cdevsw[major(vp->v_rdev)].d_type;
+ } else {
+ return (ENOTTY);
+ }
+ return (0);
+ }
+ error = VOP_IOCTL(vp, com, data, fp->f_flag, p->p_ucred, p);
+ if (error == 0 && com == TIOCSCTTY) {
+ VREF(vp);
+ ttyvp = p->p_session->s_ttyvp;
+ p->p_session->s_ttyvp = vp;
+ if (ttyvp)
+ vrele(ttyvp);
+ }
+ return (error);
}
}
/*
* File table vnode select routine.
*/
-vn_select(fp, which, p)
+static int
+vn_select(fp, which, wql, p)
struct file *fp;
int which;
+ void * wql;
struct proc *p;
{
- return (VOP_SELECT(((struct vnode *)fp->f_data), which, fp->f_flag,
- fp->f_cred, p));
+ return(VOP_SELECT(((struct vnode *)fp->f_data), which, fp->f_flag,
+ fp->f_cred, wql, p));
}
/*
while (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
simple_unlock(&vp->v_interlock);
- tsleep((caddr_t)vp, PINOD, "vn_lock", 0);
+ (void)tsleep((caddr_t)vp, PINOD, "vn_lock", 0);
}
error = ENOENT;
} else {
/*
* File table vnode close routine.
*/
+static int
vn_closefile(fp, p)
struct file *fp;
struct proc *p;