/*
- * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <kern/task.h>
#include <kern/sched_prim.h>
+#define NFS_VNOP_DBG(...) NFS_DBG(NFS_FAC_VNOP, 7, ## __VA_ARGS__)
+
/*
* NFS vnode ops
*/
* in the cache.
*/
+	/*
+	 * In addition, if the kernel is only asking whether access *could* be
+	 * granted (KAUTH_VNODE_ACCESS is not set), just return success.  At
+	 * this moment we do not know the state of the server, so whatever we
+	 * got back — be it yea or nay — would be stale.  Finder (Desktop
+	 * services/FileURL) might hang going over the wire when merely asking
+	 * getattrlist for the root's FSID, since we would be called to see if
+	 * we're authorized for search.  Because we return without checking the
+	 * cache and/or going over the wire, it makes no sense to update the
+	 * cache.
+	 *
+	 * N.B. This is also the strategy that SMB is using.
+	 */
+ if (!(ap->a_action & KAUTH_VNODE_ACCESS))
+ return (0);
+
/*
* Convert KAUTH primitives to NFS access rights.
*/
NP(np, "nfs_vnop_open: error %d, %d", error, kauth_cred_getuid(noop->noo_cred));
if (noop)
nfs_open_owner_rele(noop);
+ if (!error && vtype == VREG && (ap->a_mode & FWRITE)) {
+ lck_mtx_lock(&nmp->nm_lock);
+ nmp->nm_state &= ~NFSSTA_SQUISHY;
+ nmp->nm_curdeadtimeout = nmp->nm_deadtimeout;
+ if (nmp->nm_curdeadtimeout <= 0)
+ nmp->nm_deadto_start = 0;
+ nmp->nm_writers++;
+ lck_mtx_unlock(&nmp->nm_lock);
+ }
+
return (error);
}
+/*
+ * Return the total number of outstanding write opens on the node.
+ *
+ * Walks every nfs_open_file record on np->n_opens and sums all of the
+ * per-mode open counters that include write access: nof_w / nof_rw are
+ * write and read-write opens across the deny modes (plain, _dw, _drw),
+ * and the nof_d_* fields are presumably the delegated variants of the
+ * same counters — confirm against the nfs_open_file definition.
+ *
+ * NOTE(review): no locking is taken here; this appears to assume the
+ * caller serializes access to np->n_opens — verify at the call sites.
+ */
+static uint32_t
+nfs_no_of_open_file_writers(nfsnode_t np)
+{
+	uint32_t writers = 0;
+	struct nfs_open_file *nofp;
+
+	/* accumulate every write-capable open count on each open-file record */
+	TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
+		writers += nofp->nof_w + nofp->nof_rw + nofp->nof_w_dw + nofp->nof_rw_dw +
+			nofp->nof_w_drw + nofp->nof_rw_drw + nofp->nof_d_w_dw +
+			nofp->nof_d_rw_dw + nofp->nof_d_w_drw + nofp->nof_d_rw_drw +
+			nofp->nof_d_w + nofp->nof_d_rw;
+	}
+
+	return (writers);
+}
/*
* NFS close vnode op
* Guess this is the final close.
* We should unlock all locks and close all opens.
*/
+ uint32_t writers;
mount_t mp = vnode_mount(vp);
int force = (!mp || (mp->mnt_kern_flag & MNTK_FRCUNMOUNT));
+
+ writers = nfs_no_of_open_file_writers(np);
nfs_release_open_state_for_node(np, force);
+ if (writers) {
+ lck_mtx_lock(&nmp->nm_lock);
+ if (writers > nmp->nm_writers) {
+ NP(np, "nfs_vnop_close: number of write opens for mount underrun. Node has %d"
+ " opens for write. Mount has total of %d opens for write\n",
+ writers, nmp->nm_writers);
+ nmp->nm_writers = 0;
+ } else {
+ nmp->nm_writers -= writers;
+ }
+ lck_mtx_unlock(&nmp->nm_lock);
+ }
+
return (error);
+ } else if (fflag & FWRITE) {
+ lck_mtx_lock(&nmp->nm_lock);
+ if (nmp->nm_writers == 0) {
+ NP(np, "nfs_vnop_close: removing open writer from mount, but mount has no files open for writing");
+ } else {
+ nmp->nm_writers--;
+ }
+ lck_mtx_unlock(&nmp->nm_lock);
}
+
noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 0);
if (!noop) {
struct nfs_lock_owner *nlop;
int error = 0, changed = 0, delegated = 0, closed = 0, downgrade = 0;
uint32_t newAccessMode, newDenyMode;
-
+
/* warn if modes don't match current state */
if (((accessMode & nofp->nof_access) != accessMode) || ((denyMode & nofp->nof_deny) != denyMode))
NP(np, "nfs_close: mode mismatch %d %d, current %d %d, %d",
NP(np, "nfs_close: LOST%s, %d", !nofp->nof_opencnt ? " (last)" : "",
kauth_cred_getuid(nofp->nof_owner->noo_cred));
}
+
return (error);
}
cache_purge(vp);
np->n_ncgen++;
NFS_CHANGED_UPDATE_NC(nfsvers, np, nvap);
+ NFS_VNOP_DBG("Purge directory 0x%llx\n",
+ (uint64_t)VM_KERNEL_ADDRPERM(vp));
}
if (NFS_CHANGED(nfsvers, np, nvap)) {
FSDBG(513, -1, np, -1, np);
- if (vtype == VDIR)
+ if (vtype == VDIR) {
+ NFS_VNOP_DBG("Invalidate directory 0x%llx\n",
+ (uint64_t)VM_KERNEL_ADDRPERM(vp));
nfs_invaldir(np);
+ }
nfs_node_unlock(np);
if (wanted)
wakeup(np);
/*
* NFS getattr call from vfs.
*/
+
+/*
+ * The attributes we support over the wire.
+ * We also get the fsid, but the vfs layer gets it out of the mount
+ * structure after calling us, so there's no need to return it here;
+ * Finder expects a getattrlist asking only for the FSID to succeed
+ * without hanging on a non-responsive server.
+ */
+#define NFS3_SUPPORTED_VATTRS \
+ (VNODE_ATTR_va_rdev | \
+ VNODE_ATTR_va_nlink | \
+ VNODE_ATTR_va_data_size | \
+ VNODE_ATTR_va_data_alloc | \
+ VNODE_ATTR_va_uid | \
+ VNODE_ATTR_va_gid | \
+ VNODE_ATTR_va_mode | \
+ VNODE_ATTR_va_modify_time | \
+ VNODE_ATTR_va_change_time | \
+ VNODE_ATTR_va_access_time | \
+ VNODE_ATTR_va_fileid | \
+ VNODE_ATTR_va_type)
+
int
nfs3_vnop_getattr(
struct vnop_getattr_args /* {
struct vnode_attr *vap = ap->a_vap;
dev_t rdev;
+	/*
+	 * Don't go over the wire if we don't support any of the requested
+	 * attributes; just fall through and let the VFS layer cons up what
+	 * it needs.
+	 */
+ /* Return the io size no matter what, since we don't go over the wire for this */
+ VATTR_RETURN(vap, va_iosize, nfs_iosize);
+ if ((vap->va_active & NFS3_SUPPORTED_VATTRS) == 0)
+ return (0);
+
+ if (VATTR_IS_ACTIVE(ap->a_vap, va_name))
+ NFS_VNOP_DBG("Getting attrs for 0x%llx, vname is %s\n",
+ (uint64_t)VM_KERNEL_ADDRPERM(ap->a_vp),
+ ap->a_vp->v_name ? ap->a_vp->v_name : "empty");
error = nfs_getattr(VTONFS(ap->a_vp), &nva, ap->a_context, NGA_CACHED);
if (error)
return (error);
VATTR_RETURN(vap, va_fileid, nva.nva_fileid);
VATTR_RETURN(vap, va_data_size, nva.nva_size);
VATTR_RETURN(vap, va_data_alloc, nva.nva_bytes);
- VATTR_RETURN(vap, va_iosize, nfs_iosize);
vap->va_access_time.tv_sec = nva.nva_timesec[NFSTIME_ACCESS];
vap->va_access_time.tv_nsec = nva.nva_timensec[NFSTIME_ACCESS];
VATTR_SET_SUPPORTED(vap, va_access_time);
/* FALLTHROUGH */
case -1:
/* cache hit, not really an error */
- OSAddAtomic(1, &nfsstats.lookupcache_hits);
+ OSAddAtomic64(1, &nfsstats.lookupcache_hits);
nfs_node_clear_busy(dnp);
busyerror = ENOENT;
error = 0;
newvp = NULLVP;
- OSAddAtomic(1, &nfsstats.lookupcache_misses);
+ OSAddAtomic64(1, &nfsstats.lookupcache_misses);
error = nmp->nm_funcs->nf_lookup_rpc_async(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &req);
nfsmout_if(error);
return (error);
}
- OSAddAtomic(1, &nfsstats.biocache_readlinks);
+ OSAddAtomic64(1, &nfsstats.biocache_readlinks);
error = nfs_buf_get(np, 0, NFS_MAXPATHLEN, vfs_context_thread(ctx), NBLK_READ, &bp);
if (error) {
FSDBG(531, np, 0xd1e0002, 0, error);
return (error);
}
if (!ISSET(bp->nb_flags, NB_CACHE)) {
- OSAddAtomic(1, &nfsstats.readlink_bios);
+ OSAddAtomic64(1, &nfsstats.readlink_bios);
buflen = bp->nb_bufsize;
error = nmp->nm_funcs->nf_readlink_rpc(np, bp->nb_data, &buflen, ctx);
if (error) {
}
do {
- OSAddAtomic(1, &nfsstats.biocache_writes);
+ OSAddAtomic64(1, &nfsstats.biocache_writes);
lbn = uio_offset(uio) / biosize;
on = uio_offset(uio) % biosize;
n = biosize - on;
{
struct nfsmount *nmp;
int error = 0, nfsvers;
- int backup, wverfset, commit, committed;
+ int wverfset, commit, committed;
uint64_t wverf = 0, wverf2;
size_t nmwsize, totalsize, tsiz, len, rlen;
struct nfsreq rq, *req = &rq;
uint32_t stategenid = 0, vrestart = 0, restart = 0;
+ uio_t uio_save = NULL;
#if DIAGNOSTIC
/* XXX limitation based on need to back up uio on short write */
return (EFBIG);
}
+ uio_save = uio_duplicate(uio);
+ if (uio_save == NULL) {
+ return (EIO);
+ }
+
while (tsiz > 0) {
len = (tsiz > nmwsize) ? nmwsize : tsiz;
FSDBG(537, np, uio_offset(uio), len, 0);
/* check for a short write */
if (rlen < len) {
- backup = len - rlen;
- uio_pushback(uio, backup);
+ /* Reset the uio to reflect the actual transfer */
+ *uio = *uio_save;
+ uio_update(uio, totalsize - (tsiz - rlen));
len = rlen;
}
error = EIO;
break;
}
- backup = totalsize - tsiz;
- uio_pushback(uio, backup);
+ *uio = *uio_save; // Reset the uio back to the start
committed = NFS_WRITE_FILESYNC;
wverfset = 0;
tsiz = totalsize;
}
}
+ if (uio_save)
+ uio_free(uio_save);
if (wverfset && wverfp)
*wverfp = wverf;
*iomodep = committed;
int error = 0, lockerror = ENOENT, busyerror = ENOENT, status, wccpostattr = 0;
struct timespec premtime = { 0, 0 };
u_int32_t rdev;
- u_int64_t xid, dxid;
+ u_int64_t xid = 0, dxid;
int nfsvers, gotuid, gotgid;
struct nfsm_chain nmreq, nmrep;
struct nfsreq rq, *req = &rq;
struct timespec premtime = { 0, 0 };
vnode_t newvp = NULL;
int nfsvers, gotuid, gotgid;
- u_int64_t xid, dxid;
+ u_int64_t xid = 0, dxid;
nfsnode_t np = NULL;
nfsnode_t dnp = VTONFS(dvp);
struct nfsmount *nmp;
int error = 0, lockerror = ENOENT, busyerror = ENOENT, status, wccpostattr = 0;
struct timespec premtime = { 0, 0 };
int nfsvers, gotuid, gotgid;
- u_int64_t xid, dxid;
+ u_int64_t xid= 0, dxid;
fhandle_t fh;
struct nfsm_chain nmreq, nmrep;
struct nfsreq rq, *req = &rq;
}
while (!error && !done) {
- OSAddAtomic(1, &nfsstats.biocache_readdirs);
+ OSAddAtomic64(1, &nfsstats.biocache_readdirs);
cookie = nextcookie;
getbuffer:
error = nfs_buf_get(dnp, lbn, NFS_DIRBLKSIZ, thd, NBLK_READ, &bp);
if (cookie == dnp->n_eofcookie) { /* EOF cookie */
nfs_node_unlock(dnp);
- OSAddAtomic(1, &nfsstats.direofcache_hits);
+ OSAddAtomic64(1, &nfsstats.direofcache_hits);
*ptc = 0;
return (-1);
}
/* found a match for this cookie */
*lbnp = ndcc->cookies[i].lbn;
nfs_node_unlock(dnp);
- OSAddAtomic(1, &nfsstats.direofcache_hits);
+ OSAddAtomic64(1, &nfsstats.direofcache_hits);
*ptc = 0;
return (0);
}
if (eofptc) {
/* but 32-bit match hit the EOF cookie */
nfs_node_unlock(dnp);
- OSAddAtomic(1, &nfsstats.direofcache_hits);
+ OSAddAtomic64(1, &nfsstats.direofcache_hits);
return (-1);
}
if (iptc >= 0) {
/* but 32-bit match got a hit */
*lbnp = ndcc->cookies[iptc].lbn;
nfs_node_unlock(dnp);
- OSAddAtomic(1, &nfsstats.direofcache_hits);
+ OSAddAtomic64(1, &nfsstats.direofcache_hits);
return (0);
}
nfs_node_unlock(dnp);
}
lck_mtx_unlock(nfs_buf_mutex);
if (found) {
- OSAddAtomic(1, &nfsstats.direofcache_hits);
+ OSAddAtomic64(1, &nfsstats.direofcache_hits);
return (0);
}
/* still not found... oh well, just start a new block */
*lbnp = cookie;
- OSAddAtomic(1, &nfsstats.direofcache_misses);
+ OSAddAtomic64(1, &nfsstats.direofcache_misses);
return (0);
}
} else {
cookie = bp->nb_lblkno;
/* increment with every buffer read */
- OSAddAtomic(1, &nfsstats.readdir_bios);
+ OSAddAtomic64(1, &nfsstats.readdir_bios);
}
lastcookie = cookie;
space_free = nfs_dir_buf_freespace(bp, rdirplus);
dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
/* increment with every buffer read */
- OSAddAtomic(1, &nfsstats.readdir_bios);
+ OSAddAtomic64(1, &nfsstats.readdir_bios);
}
nmrepsave = nmrep;
dp->d_fileno = fileno;
{
vfs_context_t ctx = ap->a_context;
vnode_t vp = ap->a_vp;
+ struct nfsmount *mp = VTONMP(vp);
int error = ENOTTY;
+ if (mp == NULL)
+ return (ENXIO);
+
switch (ap->a_command) {
case F_FULLFSYNC:
if (vnode_vfsisrdonly(vp))
return (EROFS);
- if (!VTONMP(vp))
- return (ENXIO);
error = nfs_flush(VTONFS(vp), MNT_WAIT, vfs_context_thread(ctx), 0);
break;
-
+ case NFS_FSCTL_DESTROY_CRED:
+ error = nfs_gss_clnt_ctx_destroy(mp, vfs_context_ucred(ctx));
+ break;
}
return (error);
if (size <= 0) {
printf("nfs_pagein: invalid size %ld", size);
if (!nofreeupl)
- (void) ubc_upl_abort(pl, 0);
+ (void) ubc_upl_abort_range(pl, pl_offset, size, 0);
return (EINVAL);
}
if (f_offset < 0 || f_offset >= (off_t)np->n_size || (f_offset & PAGE_MASK_64)) {
#if UPL_DEBUG
upl_ubc_alias_set(pl, (uintptr_t) current_thread(), (uintptr_t) 2);
#endif /* UPL_DEBUG */
- OSAddAtomic(1, &nfsstats.pageins);
+ OSAddAtomic64(1, &nfsstats.pageins);
error = nmp->nm_funcs->nf_read_rpc_async_finish(np, req[nextwait], uio, &retsize, NULL);
req[nextwait] = NULL;
nextwait = (nextwait + 1) % MAXPAGINGREQS;
* erroneous.
*/
char nfs_pageouterrorhandler(int);
-enum actiontype {NOACTION, DUMP, DUMPANDLOG, RETRY, RETRYWITHSLEEP, SEVER};
+enum actiontype {NOACTION, DUMP, DUMPANDLOG, RETRY, SEVER};
#define NFS_ELAST 88
static u_char errorcount[NFS_ELAST+1]; /* better be zeros when initialized */
static const char errortooutcome[NFS_ELAST+1] = {
if (size <= 0) {
printf("nfs_pageout: invalid size %ld", size);
if (!nofreeupl)
- ubc_upl_abort(pl, 0);
+ ubc_upl_abort_range(pl, pl_offset, size, 0);
return (EINVAL);
}
nfs_data_unlock_noupdate(np);
/* no panic. just tell vm we are busy */
if (!nofreeupl)
- ubc_upl_abort(pl, 0);
+ ubc_upl_abort_range(pl, pl_offset, size, 0);
return (EBUSY);
}
if (bp->nb_dirtyend > 0) {
lck_mtx_unlock(nfs_buf_mutex);
nfs_data_unlock_noupdate(np);
if (!nofreeupl)
- ubc_upl_abort(pl, 0);
+ ubc_upl_abort_range(pl, pl_offset, size, 0);
return (EBUSY);
}
if ((bp->nb_dirtyoff < start) ||
uio_reset(auio, txoffset, UIO_SYSSPACE, UIO_WRITE);
uio_addiov(auio, CAST_USER_ADDR_T(txaddr), iosize);
FSDBG(323, uio_offset(auio), iosize, txaddr, txsize);
- OSAddAtomic(1, &nfsstats.pageouts);
+ OSAddAtomic64(1, &nfsstats.pageouts);
nfs_node_lock_force(np);
np->n_numoutput++;
nfs_node_unlock(np);
case RETRY:
abortflags = UPL_ABORT_FREE_ON_EMPTY;
break;
- case RETRYWITHSLEEP:
- abortflags = UPL_ABORT_FREE_ON_EMPTY;
- /* pri unused. PSOCK for placeholder. */
- tsleep(&lbolt, PSOCK, "nfspageout", 0);
- break;
case SEVER: /* not implemented */
default:
NP(np, "nfs_pageout: action %d not expected", action);