/*
- * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
ndp->ni_op = OP_LINK;
#endif
/* Inherit USEDVP, vnode_open() supported flags only */
- ndp->ni_cnd.cn_flags &= (USEDVP | NOCROSSMOUNT | DOWHITEOUT);
+ ndp->ni_cnd.cn_flags &= (USEDVP | NOCROSSMOUNT);
ndp->ni_cnd.cn_flags |= LOCKPARENT | LOCKLEAF | AUDITVNPATH1;
ndp->ni_flag = NAMEI_COMPOUNDOPEN;
#if NAMEDRSRCFORK
if (vp == NULL) {
/* must have attributes for a new file */
if (vap == NULL) {
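+ /*
+ * The compound-open lookup returned dvp with an iocount;
+ * release it before the early return so it isn't leaked.
+ */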
+ vnode_put(dvp);
error = EINVAL;
goto out;
}
ndp->ni_cnd.cn_nameiop = LOOKUP;
/* Inherit USEDVP, vnode_open() supported flags only */
- ndp->ni_cnd.cn_flags &= (USEDVP | NOCROSSMOUNT | DOWHITEOUT);
+ ndp->ni_cnd.cn_flags &= (USEDVP | NOCROSSMOUNT);
ndp->ni_cnd.cn_flags |= FOLLOW | LOCKLEAF | AUDITVNPATH1 | WANTPARENT;
#if NAMEDRSRCFORK
/* open calls are allowed for resource forks. */
if (vnode_isspec(vp))
(void)vnode_rele_ext(vp, flags, 0);
+ /*
+ * On HFS, we flush when the last writer closes. We do this
+ * because resource fork vnodes hold a reference on data fork
+ * vnodes and that will prevent them from getting VNOP_INACTIVE
+ * which will delay when we flush cached data. In the future, we
+ * might find it beneficial to do this for all file systems.
+ * Note that it's OK to access v_writecount without the lock
+ * in this context.
+ */
+ if (vp->v_tag == VT_HFS && (flags & FWRITE) && vp->v_writecount == 1)
+ VNOP_FSYNC(vp, MNT_NOWAIT, ctx);
+
error = VNOP_CLOSE(vp, flags, ctx);
#if CONFIG_FSE
return (error);
}
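+/*
+ * vn_offset_lock()/vn_offset_unlock() implement a sleep lock that
+ * serializes updates of fg_offset when multiple threads perform
+ * implicit-offset I/O on the same fileglob. FG_OFF_LOCKED is the
+ * lock bit; a contending thread sets FG_OFF_LOCKWANT and msleep()s
+ * on fg_lflags (msleep() drops fg_lock while the thread waits).
+ */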
+static inline void
+vn_offset_lock(struct fileglob *fg)
+{
+ lck_mtx_lock_spin(&fg->fg_lock);
+ while (fg->fg_lflags & FG_OFF_LOCKED) {
+ fg->fg_lflags |= FG_OFF_LOCKWANT;
+ msleep(&fg->fg_lflags, &fg->fg_lock, PVFS | PSPIN,
+ "fg_offset_lock_wait", 0);
+ }
+ fg->fg_lflags |= FG_OFF_LOCKED;
+ lck_mtx_unlock(&fg->fg_lock);
+}
+
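+/*
+ * Release the offset lock and wake waiters, but only if a contender
+ * recorded interest via FG_OFF_LOCKWANT while we held it.
+ */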
+static inline void
+vn_offset_unlock(struct fileglob *fg)
+{
+ int lock_wanted = 0;
+
+ lck_mtx_lock_spin(&fg->fg_lock);
+ if (fg->fg_lflags & FG_OFF_LOCKWANT) {
+ lock_wanted = 1;
+ }
+ fg->fg_lflags &= ~(FG_OFF_LOCKED | FG_OFF_LOCKWANT);
+ lck_mtx_unlock(&fg->fg_lock);
+ if (lock_wanted) {
+ wakeup(&fg->fg_lflags);
+ }
+}
+
/*
* File table vnode read routine.
*/
int error;
int ioflag;
off_t count;
+ int offset_locked = 0;
vp = (struct vnode *)fp->f_fglob->fg_data;
if ( (error = vnode_getwithref(vp)) ) {
if (fp->f_fglob->fg_flag & FNORDAHEAD)
ioflag |= IO_RAOFF;
- if ((flags & FOF_OFFSET) == 0)
+ if ((flags & FOF_OFFSET) == 0) {
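+ /*
+ * No explicit offset from the caller, so we read at and
+ * update fg_offset; serialize that, but only for regular,
+ * non-swap files.
+ */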
+ if ((vnode_vtype(vp) == VREG) && !vnode_isswap(vp)) {
+ vn_offset_lock(fp->f_fglob);
+ offset_locked = 1;
+ }
uio->uio_offset = fp->f_fglob->fg_offset;
+ }
count = uio_resid(uio);
if (vnode_isswap(vp)) {
} else {
error = VNOP_READ(vp, uio, ioflag, ctx);
}
- if ((flags & FOF_OFFSET) == 0)
+ if ((flags & FOF_OFFSET) == 0) {
fp->f_fglob->fg_offset += count - uio_resid(uio);
+ if (offset_locked) {
+ vn_offset_unlock(fp->f_fglob);
+ offset_locked = 0;
+ }
+ }
(void)vnode_put(vp);
return (error);
int clippedsize = 0;
int partialwrite = 0;
int residcount, oldcount;
+ int offset_locked = 0;
proc_t p = vfs_context_proc(ctx);
count = 0;
}
if ((flags & FOF_OFFSET) == 0) {
+ if ((vnode_vtype(vp) == VREG) && !vnode_isswap(vp)) {
+ vn_offset_lock(fp->f_fglob);
+ offset_locked = 1;
+ }
uio->uio_offset = fp->f_fglob->fg_offset;
count = uio_resid(uio);
}
}
if (clippedsize >= residcount) {
psignal(p, SIGXFSZ);
- vnode_put(vp);
- return (EFBIG);
+ error = EFBIG;
+ goto error_out;
}
partialwrite = 1;
uio_setresid(uio, residcount-clippedsize);
if (p && (vp->v_type == VREG) &&
((rlim_t)uio->uio_offset >= p->p_rlimit[RLIMIT_FSIZE].rlim_cur)) {
psignal(p, SIGXFSZ);
- vnode_put(vp);
- return (EFBIG);
+ error = EFBIG;
+ goto error_out;
}
if (p && (vp->v_type == VREG) &&
((rlim_t)(uio->uio_offset + uio_resid(uio)) > p->p_rlimit[RLIMIT_FSIZE].rlim_cur)) {
fp->f_fglob->fg_offset = uio->uio_offset;
else
fp->f_fglob->fg_offset += count - uio_resid(uio);
+ if (offset_locked) {
+ vn_offset_unlock(fp->f_fglob);
+ offset_locked = 0;
+ }
}
}
(void)vnode_put(vp);
return (error);
+
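+/*
+ * Common failure exit: drop the offset lock if we took it, then the
+ * iocount acquired by vnode_getwithref().
+ */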
+error_out:
+ if (offset_locked) {
+ vn_offset_unlock(fp->f_fglob);
+ }
+ (void)vnode_put(vp);
+ return (error);
}
/*
 * File table vnode ioctl routine.
 */
off_t file_size;
int error;
struct vnode *ttyvp;
- int funnel_state;
struct session * sessp;
if ( (error = vnode_getwithref(vp)) ) {
error = VNOP_IOCTL(vp, com, data, fp->f_fglob->fg_flag, ctx);
if (error == 0 && com == TIOCSCTTY) {
- error = vnode_ref_ext(vp, 0, VNODE_REF_FORCE);
- if (error != 0) {
- panic("vnode_ref_ext() failed despite VNODE_REF_FORCE?!");
- }
-
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
sessp = proc_session(vfs_context_proc(ctx));
session_lock(sessp);
sessp->s_ttyvid = vnode_vid(vp);
session_unlock(sessp);
session_rele(sessp);
- thread_funnel_set(kernel_flock, funnel_state);
-
- if (ttyvp)
- vnode_rele(ttyvp);
}
}
out: