/*
- * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <kern/task.h>
#include <kern/sched_prim.h>
+#define NFS_VNOP_DBG(...) NFS_DBG(NFS_FAC_VNOP, 7, ## __VA_ARGS__)
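+/*
+ * Default for the nfs_readlink_nocache tunable below: 0 serves readlink
+ * replies from the buffer cache, 1 revalidates a cached link once the
+ * attribute cache times out, and values greater than 1 force an
+ * over-the-wire readlink on every call.
+ */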
+#define DEFAULT_READLINK_NOCACHE 0
+
/*
* NFS vnode ops
*/
{ &fifo_nfsv4nodeop_p, fifo_nfsv4nodeop_entries };
#endif /* FIFO */
-
int nfs_sillyrename(nfsnode_t,nfsnode_t,struct componentname *,vfs_context_t);
+int nfs_getattr_internal(nfsnode_t, struct nfs_vattr *, vfs_context_t, int);
+int nfs_refresh_fh(nfsnode_t, vfs_context_t);
/*
* Find the slot in the access cache for this UID.
}
int
-nfs3_access_rpc(nfsnode_t np, u_int32_t *access, vfs_context_t ctx)
+nfs3_access_rpc(nfsnode_t np, u_int32_t *access, int rpcflags, vfs_context_t ctx)
{
int error = 0, lockerror = ENOENT, status, slot;
uint32_t access_result = 0;
nfsm_chain_add_32(error, &nmreq, *access);
nfsm_chain_build_done(error, &nmreq);
nfsmout_if(error);
- error = nfs_request(np, NULL, &nmreq, NFSPROC_ACCESS, ctx, NULL, &nmrep, &xid, &status);
+ error = nfs_request2(np, NULL, &nmreq, NFSPROC_ACCESS,
+ vfs_context_thread(ctx), vfs_context_ucred(ctx),
+ NULL, rpcflags, &nmrep, &xid, &status);
if ((lockerror = nfs_node_lock(np)))
error = lockerror;
nfsm_chain_postop_attr_update(error, &nmrep, np, &xid);
{
vfs_context_t ctx = ap->a_context;
vnode_t vp = ap->a_vp;
- int error = 0, slot, dorpc;
+ int error = 0, slot, dorpc, rpcflags = 0;
u_int32_t access, waccess;
nfsnode_t np = VTONFS(vp);
struct nfsmount *nmp;
uid_t uid;
nmp = VTONMP(vp);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
nfsvers = nmp->nm_vers;
* Does our cached result allow us to give a definite yes to
* this request?
*/
- uid = kauth_cred_getuid(vfs_context_ucred(ctx));
+ if (auth_is_kerberized(np->n_auth) || auth_is_kerberized(nmp->nm_auth))
+ uid = nfs_cred_getasid2uid(vfs_context_ucred(ctx));
+ else
+ uid = kauth_cred_getuid(vfs_context_ucred(ctx));
slot = nfs_node_access_slot(np, uid, 0);
dorpc = 1;
if (access == 0) {
waccess = 0;
} else if (NACCESSVALID(np, slot)) {
microuptime(&now);
- if ((now.tv_sec < (np->n_accessstamp[slot] + nfs_access_cache_timeout)) &&
- ((np->n_access[slot] & access) == access)) {
+ if (((now.tv_sec < (np->n_accessstamp[slot] + nfs_access_cache_timeout)) &&
+ ((np->n_access[slot] & access) == access)) || nfs_use_cache(nmp)) {
/* OSAddAtomic(1, &nfsstats.accesscache_hits); */
dorpc = 0;
waccess = np->n_access[slot];
if (dorpc) {
/* Either a no, or a don't know. Go to the wire. */
/* OSAddAtomic(1, &nfsstats.accesscache_misses); */
- error = nmp->nm_funcs->nf_access_rpc(np, &waccess, ctx);
+
+ /*
+ * Allow an access call to timeout if we have it cached
+ * so we won't hang if the server isn't responding.
+ */
+ if (NACCESSVALID(np, slot))
+ rpcflags |= R_SOFT;
+
+ error = nmp->nm_funcs->nf_access_rpc(np, &waccess, rpcflags, ctx);
+
+ /*
+	 * If the server didn't respond, return the cached access.
+ */
+ if ((error == ETIMEDOUT) && (rpcflags & R_SOFT)) {
+ error = 0;
+ waccess = np->n_access[slot];
+ }
}
if (!error && ((waccess & access) != access))
error = EACCES;
return (EINVAL);
nmp = VTONMP(vp);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
if (np->n_flag & NREVOKE)
return (EIO);
*/
uint32_t writers;
mount_t mp = vnode_mount(vp);
- int force = (!mp || (mp->mnt_kern_flag & MNTK_FRCUNMOUNT));
+ int force = (!mp || vfs_isforce(mp));
writers = nfs_no_of_open_file_writers(np);
nfs_release_open_state_for_node(np, force);
}
-
-
int
nfs3_getattr_rpc(
nfsnode_t np,
int error = 0, status, nfsvers, rpcflags = 0;
struct nfsm_chain nmreq, nmrep;
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
nfsvers = nmp->nm_vers;
if (flags & NGA_MONITOR) /* vnode monitor requests should be soft */
rpcflags = R_RECOVER;
+ if (flags & NGA_SOFT) /* Return ETIMEDOUT if server not responding */
+ rpcflags |= R_SOFT;
+
nfsm_chain_null(&nmreq);
nfsm_chain_null(&nmrep);
return (error);
}
+/*
+ * nfs_refresh_fh will attempt to update the file handle for the node.
+ *
+ * It only does this for symbolic links and regular files that are not currently open.
+ *
+ * On success, returns 0 and the node's file handle has been updated; returns ESTALE on failure.
+ */
+int
+nfs_refresh_fh(nfsnode_t np, vfs_context_t ctx)
+{
+ vnode_t dvp, vp = NFSTOV(np);
+ nfsnode_t dnp;
+ const char *v_name = vnode_getname(vp);
+ char *name;
+ int namelen, fhsize, refreshed;
+ int error, wanted = 0;
+ uint8_t *fhp;
+ struct timespec ts = {2, 0};
+
+ NFS_VNOP_DBG("vnode is %d\n", vnode_vtype(vp));
+
+ dvp = vnode_parent(vp);
+ if ((vnode_vtype(vp) != VREG && vnode_vtype(vp) != VLNK) ||
+ v_name == NULL || *v_name == '\0' || dvp == NULL) {
+ if (v_name != NULL)
+ vnode_putname(v_name);
+ return (ESTALE);
+ }
+ dnp = VTONFS(dvp);
+
+ namelen = strlen(v_name);
+ MALLOC(name, char *, namelen + 1, M_TEMP, M_WAITOK);
+ if (name == NULL) {
+ vnode_putname(v_name);
+ return (ESTALE);
+ }
+ bcopy(v_name, name, namelen+1);
+ NFS_VNOP_DBG("Trying to refresh %s : %s\n", v_name, name);
+ vnode_putname(v_name);
+
+ /* Allocate the maximum size file handle */
+ MALLOC(fhp, uint8_t *, NFS4_FHSIZE, M_TEMP, M_WAITOK);
+ if (fhp == NULL) {
+ FREE(name, M_TEMP);
+ return (ESTALE);
+ }
+
+ if ((error = nfs_node_lock(np))) {
+ FREE(name, M_TEMP);
+ FREE(fhp, M_TEMP);
+ return (ESTALE);
+ }
+
+ fhsize = np->n_fhsize;
+ bcopy(np->n_fhp, fhp, fhsize);
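+	/*
+	 * Serialize refreshes: if another thread is already refreshing this
+	 * node's file handle, wait for it to finish (or for a signal), then
+	 * check whether the handle changed while we slept.
+	 */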
+ while (ISSET(np->n_flag, NREFRESH)) {
+ SET(np->n_flag, NREFRESHWANT);
+ NFS_VNOP_DBG("Waiting for refresh of %s\n", name);
+ msleep(np, &np->n_lock, PZERO-1, "nfsrefreshwant", &ts);
+ if ((error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0)))
+ break;
+ }
+ refreshed = error ? 0 : !NFS_CMPFH(np, fhp, fhsize);
+ SET(np->n_flag, NREFRESH);
+ nfs_node_unlock(np);
+
+ NFS_VNOP_DBG("error = %d, refreshed = %d\n", error, refreshed);
+ if (error || refreshed)
+ goto nfsmout;
+
+ /* Check that there are no open references for this file */
+ lck_mtx_lock(&np->n_openlock);
+ if (np->n_openrefcnt || !TAILQ_EMPTY(&np->n_opens) || !TAILQ_EMPTY(&np->n_lock_owners)) {
+ int cnt = 0;
+ struct nfs_open_file *ofp;
+
+ TAILQ_FOREACH(ofp, &np->n_opens, nof_link) {
+ cnt += ofp->nof_opencnt;
+ }
+ if (cnt) {
+ lck_mtx_unlock(&np->n_openlock);
+			NFS_VNOP_DBG("Cannot refresh file handle for %s with open state\n", name);
+ NFS_VNOP_DBG("\topenrefcnt = %d, opens = %d lock_owners = %d\n",
+ np->n_openrefcnt, cnt, !TAILQ_EMPTY(&np->n_lock_owners));
+ error = ESTALE;
+ goto nfsmout;
+ }
+ }
+ lck_mtx_unlock(&np->n_openlock);
+ /*
+ * Since the FH is currently stale we should not be able to
+ * establish any open state until the FH is refreshed.
+ */
+
+ error = nfs_node_lock(np);
+ nfsmout_if(error);
+	/*
+	 * Symlinks should never need invalidation: the readlink path holds
+	 * the node's one and only nfsbuf in an uncached, acquired state, so
+	 * invalidating here would hang. Only in the VREG case do we need to
+	 * invalidate.
+	 */
+ if (vnode_vtype(vp) == VREG) {
+ np->n_flag &= ~NNEEDINVALIDATE;
+ nfs_node_unlock(np);
+ error = nfs_vinvalbuf(vp, V_IGNORE_WRITEERR, ctx, 1);
+ if (error)
+ NFS_VNOP_DBG("nfs_vinvalbuf returned %d\n", error);
+ nfsmout_if(error);
+ } else {
+ nfs_node_unlock(np);
+ }
+
+ NFS_VNOP_DBG("Looking up %s\n", name);
+ error = nfs_lookitup(dnp, name, namelen, ctx, &np);
+ if (error)
+ NFS_VNOP_DBG("nfs_lookitup returned %d\n", error);
+
+nfsmout:
+ nfs_node_lock_force(np);
+ wanted = ISSET(np->n_flag, NREFRESHWANT);
+ CLR(np->n_flag, NREFRESH|NREFRESHWANT);
+ nfs_node_unlock(np);
+ if (wanted)
+ wakeup(np);
+
+ if (error == 0)
+ NFS_VNOP_DBG("%s refreshed file handle\n", name);
+
+ FREE(name, M_TEMP);
+ FREE(fhp, M_TEMP);
+
+ return (error ? ESTALE : 0);
+}
int
nfs_getattr(nfsnode_t np, struct nfs_vattr *nvap, vfs_context_t ctx, int flags)
+{
+ int error;
+
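+	/*
+	 * If the getattr fails because the file handle has gone stale,
+	 * try to refresh the handle and, if that works, retry the getattr.
+	 */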
+retry:
+ error = nfs_getattr_internal(np, nvap, ctx, flags);
+ if (error == ESTALE) {
+ error = nfs_refresh_fh(np, ctx);
+ if (!error)
+ goto retry;
+ }
+ return (error);
+}
+
+int
+nfs_getattr_internal(nfsnode_t np, struct nfs_vattr *nvap, vfs_context_t ctx, int flags)
{
struct nfsmount *nmp;
int error = 0, nfsvers, inprogset = 0, wanted = 0, avoidfloods;
FSDBG_TOP(513, np->n_size, np, np->n_vattr.nva_size, np->n_flag);
- if (!(nmp = NFSTONMP(np)))
+ nmp = NFSTONMP(np);
+
+ if (nfs_mount_gone(nmp))
return (ENXIO);
nfsvers = nmp->nm_vers;
/*
* Use the cache or wait for any getattr in progress if:
* - it's a cached request, or
- * - we have a delegation
+ * - we have a delegation, or
+ * - the server isn't responding
*/
while (1) {
error = nfs_getattrcache(np, nvap, flags);
nfs_node_unlock(np);
nmp = NFSTONMP(np);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
error = ENXIO;
if (error)
goto nfsmout;
+		/*
+		 * If the cached attributes are still valid and this is some
+		 * softened-up style of mount, make the request soft so we can
+		 * return the cached attributes when the server doesn't respond.
+		 */
+ if (NATTRVALID(np) && nfs_use_cache(nmp))
+ flags |= NGA_SOFT;
+
/*
* We might want to try to get both the attributes and access info by
* making an ACCESS call and seeing if it returns updated attributes.
if (nfs_attrcachetimeout(np) > 0) {
/* OSAddAtomic(1, &nfsstats.accesscache_misses); */
u_int32_t access = NFS_ACCESS_ALL;
- error = nmp->nm_funcs->nf_access_rpc(np, &access, ctx);
+ int rpcflags = 0;
+
+ /* Return cached attrs if server doesn't respond */
+ if (flags & NGA_SOFT)
+ rpcflags |= R_SOFT;
+
+ error = nmp->nm_funcs->nf_access_rpc(np, &access, rpcflags, ctx);
+
+ if (error == ETIMEDOUT)
+ goto returncached;
+
if (error)
goto nfsmout;
nfs_node_lock_force(np);
}
avoidfloods = 0;
+
tryagain:
error = nmp->nm_funcs->nf_getattr_rpc(np, NULL, np->n_fhp, np->n_fhsize, flags, ctx, nvap, &xid);
if (!error) {
error = nfs_loadattrcache(np, nvap, &xid, 0);
nfs_node_unlock(np);
}
+
+ /*
+ * If the server didn't respond, return cached attributes.
+ */
+returncached:
+ if ((flags & NGA_SOFT) && (error == ETIMEDOUT)) {
+ nfs_node_lock_force(np);
+ error = nfs_getattrcache(np, nvap, flags);
+		if (error != ENOENT) {
+ nfs_node_unlock(np);
+ goto nfsmout;
+ }
+ nfs_node_unlock(np);
+ }
nfsmout_if(error);
+
if (!xid) { /* out-of-order rpc - attributes were dropped */
FSDBG(513, -1, np, np->n_xid >> 32, np->n_xid);
if (avoidfloods++ < 20)
cache_purge(vp);
np->n_ncgen++;
NFS_CHANGED_UPDATE_NC(nfsvers, np, nvap);
+ NFS_VNOP_DBG("Purge directory 0x%llx\n",
+ (uint64_t)VM_KERNEL_ADDRPERM(vp));
}
if (NFS_CHANGED(nfsvers, np, nvap)) {
FSDBG(513, -1, np, -1, np);
- if (vtype == VDIR)
+ if (vtype == VDIR) {
+ NFS_VNOP_DBG("Invalidate directory 0x%llx\n",
+ (uint64_t)VM_KERNEL_ADDRPERM(vp));
nfs_invaldir(np);
+ }
nfs_node_unlock(np);
if (wanted)
wakeup(np);
/*
* NFS getattr call from vfs.
*/
+
+/*
+ * The attributes we support over the wire.
+ * We also get fsid, but the vfs layer gets it out of the mount
+ * structure after calling us, so there's no need to return it.
+ * That also lets Finder call getattrlist looking just for the FSID
+ * without hanging on a nonresponsive server.
+ */
+#define NFS3_SUPPORTED_VATTRS \
+ (VNODE_ATTR_va_rdev | \
+ VNODE_ATTR_va_nlink | \
+ VNODE_ATTR_va_data_size | \
+ VNODE_ATTR_va_data_alloc | \
+ VNODE_ATTR_va_uid | \
+ VNODE_ATTR_va_gid | \
+ VNODE_ATTR_va_mode | \
+ VNODE_ATTR_va_modify_time | \
+ VNODE_ATTR_va_change_time | \
+ VNODE_ATTR_va_access_time | \
+ VNODE_ATTR_va_fileid | \
+ VNODE_ATTR_va_type)
+
int
nfs3_vnop_getattr(
struct vnop_getattr_args /* {
struct vnode_attr *vap = ap->a_vap;
dev_t rdev;
+	/*
+	 * Don't go over the wire if we don't support any of the requested
+	 * attributes; just fall through and let the VFS layer cons up what it needs.
+	 */
+ /* Return the io size no matter what, since we don't go over the wire for this */
+ VATTR_RETURN(vap, va_iosize, nfs_iosize);
+ if ((vap->va_active & NFS3_SUPPORTED_VATTRS) == 0)
+ return (0);
+
+ if (VATTR_IS_ACTIVE(ap->a_vap, va_name))
+ NFS_VNOP_DBG("Getting attrs for 0x%llx, vname is %s\n",
+ (uint64_t)VM_KERNEL_ADDRPERM(ap->a_vp),
+ ap->a_vp->v_name ? ap->a_vp->v_name : "empty");
error = nfs_getattr(VTONFS(ap->a_vp), &nva, ap->a_context, NGA_CACHED);
if (error)
return (error);
VATTR_RETURN(vap, va_fileid, nva.nva_fileid);
VATTR_RETURN(vap, va_data_size, nva.nva_size);
VATTR_RETURN(vap, va_data_alloc, nva.nva_bytes);
- VATTR_RETURN(vap, va_iosize, nfs_iosize);
vap->va_access_time.tv_sec = nva.nva_timesec[NFSTIME_ACCESS];
vap->va_access_time.tv_nsec = nva.nva_timensec[NFSTIME_ACCESS];
VATTR_SET_SUPPORTED(vap, va_access_time);
struct nfs_open_file *nofp = NULL;
nmp = VTONMP(vp);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
nfsvers = nmp->nm_vers;
namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
u_int64_t xid, nextxid;
struct nfsm_chain nmreq, nmrep;
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
nfsvers = nmp->nm_vers;
mp = vnode_mount(dvp);
nmp = VFSTONFS(mp);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
error = ENXIO;
goto error_return;
}
/* do we know this name is too long? */
nmp = VTONMP(dvp);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
error = ENXIO;
goto error_return;
}
return (error);
}
+int nfs_readlink_nocache = DEFAULT_READLINK_NOCACHE;
+
/*
* NFS readlink call
*/
uint32_t buflen;
uio_t uio = ap->a_uio;
struct nfsbuf *bp = NULL;
+ struct timespec ts;
+ int timeo;
if (vnode_vtype(ap->a_vp) != VLNK)
return (EPERM);
return (EINVAL);
nmp = VTONMP(ap->a_vp);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
nfsvers = nmp->nm_vers;
+
/* nfs_getattr() will check changed and purge caches */
- if ((error = nfs_getattr(np, NULL, ctx, NGA_CACHED))) {
+ if ((error = nfs_getattr(np, NULL, ctx, nfs_readlink_nocache ? NGA_UNCACHED : NGA_CACHED))) {
FSDBG(531, np, 0xd1e0001, 0, error);
return (error);
}
+ if (nfs_readlink_nocache) {
+ timeo = nfs_attrcachetimeout(np);
+ nanouptime(&ts);
+ }
+
+retry:
OSAddAtomic64(1, &nfsstats.biocache_readlinks);
- error = nfs_buf_get(np, 0, NFS_MAXPATHLEN, vfs_context_thread(ctx), NBLK_READ, &bp);
+ error = nfs_buf_get(np, 0, NFS_MAXPATHLEN, vfs_context_thread(ctx), NBLK_META, &bp);
if (error) {
FSDBG(531, np, 0xd1e0002, 0, error);
return (error);
}
+
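+	/*
+	 * In nocache mode, toss a cached link once it outlives the attribute
+	 * cache timeout (or always, when nfs_readlink_nocache > 1) and reread
+	 * it from the server.
+	 */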
+ if (nfs_readlink_nocache) {
+ NFS_VNOP_DBG("timeo = %d ts.tv_sec = %ld need refresh = %d cached = %d\n", timeo, ts.tv_sec,
+ (np->n_rltim.tv_sec + timeo) < ts.tv_sec || nfs_readlink_nocache > 1,
+ ISSET(bp->nb_flags, NB_CACHE) == NB_CACHE);
+ /* n_rltim is synchronized by the associated nfs buf */
+ if (ISSET(bp->nb_flags, NB_CACHE) && ((nfs_readlink_nocache > 1) || ((np->n_rltim.tv_sec + timeo) < ts.tv_sec))) {
+ SET(bp->nb_flags, NB_INVAL);
+ nfs_buf_release(bp, 0);
+ goto retry;
+ }
+ }
if (!ISSET(bp->nb_flags, NB_CACHE)) {
+readagain:
OSAddAtomic64(1, &nfsstats.readlink_bios);
buflen = bp->nb_bufsize;
error = nmp->nm_funcs->nf_readlink_rpc(np, bp->nb_data, &buflen, ctx);
if (error) {
+ if (error == ESTALE) {
+ NFS_VNOP_DBG("Stale FH from readlink rpc\n");
+ error = nfs_refresh_fh(np, ctx);
+ if (error == 0)
+ goto readagain;
+ }
SET(bp->nb_flags, NB_ERROR);
bp->nb_error = error;
+ NFS_VNOP_DBG("readlink failed %d\n", error);
} else {
bp->nb_validoff = 0;
bp->nb_validend = buflen;
+ np->n_rltim = ts;
+ NFS_VNOP_DBG("readlink of %.*s\n", bp->nb_validend, (char *)bp->nb_data);
}
+ } else {
+ NFS_VNOP_DBG("got cached link of %.*s\n", bp->nb_validend, (char *)bp->nb_data);
}
+
if (!error && (bp->nb_validend > 0))
error = uiomove(bp->nb_data, bp->nb_validend, uio);
FSDBG(531, np, bp->nb_validend, 0, error);
struct nfsm_chain nmreq, nmrep;
nmp = NFSTONMP(np);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
nfsvers = nmp->nm_vers;
nfsm_chain_null(&nmreq);
FSDBG_TOP(536, np, uio_offset(uio), uio_resid(uio), 0);
nmp = NFSTONMP(np);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
nfsvers = nmp->nm_vers;
nmrsize = nmp->nm_rsize;
struct nfsm_chain nmreq;
nmp = NFSTONMP(np);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
nfsvers = nmp->nm_vers;
struct nfsm_chain nmrep;
nmp = NFSTONMP(np);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
nfs_request_async_cancel(req);
return (ENXIO);
}
goto out;
if (((uio_offset(uio) + uio_resid(uio)) > (off_t)np->n_size) && !(ioflag & IO_APPEND)) {
- /* it looks like we'll be extending the file, so take the data lock exclusive */
+ /*
+ * It looks like we'll be extending the file, so take the data lock exclusive.
+ */
nfs_data_unlock(np);
nfs_data_lock(np, NFS_DATA_LOCK_EXCLUSIVE);
+
+ /*
+ * Also, if the write begins after the previous EOF buffer, make sure to zero
+ * and validate the new bytes in that buffer.
+ */
+ struct nfsbuf *eofbp = NULL;
+ daddr64_t eofbn = np->n_size / biosize;
+ int eofoff = np->n_size % biosize;
+ lbn = uio_offset(uio) / biosize;
+
+ if (eofoff && (eofbn < lbn)) {
+ if ((error = nfs_buf_get(np, eofbn, biosize, thd, NBLK_WRITE|NBLK_ONLYVALID, &eofbp)))
+ goto out;
+ np->n_size += (biosize - eofoff);
+ nfs_node_lock_force(np);
+ CLR(np->n_flag, NUPDATESIZE);
+ np->n_flag |= NMODIFIED;
+ nfs_node_unlock(np);
+ FSDBG(516, np, np->n_size, np->n_vattr.nva_size, 0xf00d0001);
+ ubc_setsize(vp, (off_t)np->n_size); /* XXX errors */
+ if (eofbp) {
+ /*
+ * For the old last page, don't zero bytes if there
+ * are invalid bytes in that page (i.e. the page isn't
+ * currently valid).
+ * For pages after the old last page, zero them and
+ * mark them as valid.
+ */
+ char *d;
+ int i;
+ if (ioflag & IO_NOCACHE)
+ SET(eofbp->nb_flags, NB_NOCACHE);
+ NFS_BUF_MAP(eofbp);
+ FSDBG(516, eofbp, eofoff, biosize - eofoff, 0xe0fff01e);
+ d = eofbp->nb_data;
+ i = eofoff/PAGE_SIZE;
+ while (eofoff < biosize) {
+ int poff = eofoff & PAGE_MASK;
+ if (!poff || NBPGVALID(eofbp,i)) {
+ bzero(d + eofoff, PAGE_SIZE - poff);
+ NBPGVALID_SET(eofbp, i);
+ }
+ eofoff += PAGE_SIZE - poff;
+ i++;
+ }
+ nfs_buf_release(eofbp, 1);
+ }
+ }
}
do {
* and zero the new bytes.
*/
if ((uio_offset(uio) + n) > (off_t)np->n_size) {
- struct nfsbuf *eofbp = NULL;
daddr64_t eofbn = np->n_size / biosize;
- int eofoff = np->n_size % biosize;
int neweofoff = (uio_offset(uio) + n) % biosize;
FSDBG(515, 0xb1ffa000, uio_offset(uio) + n, eofoff, neweofoff);
- if (eofoff && (eofbn < lbn) &&
- ((error = nfs_buf_get(np, eofbn, biosize, thd, NBLK_WRITE|NBLK_ONLYVALID, &eofbp))))
- goto out;
-
/* if we're extending within the same last block */
/* and the block is flagged as being cached... */
if ((lbn == eofbn) && ISSET(bp->nb_flags, NB_CACHE)) {
nfs_node_unlock(np);
FSDBG(516, np, np->n_size, np->n_vattr.nva_size, 0xf00d0001);
ubc_setsize(vp, (off_t)np->n_size); /* XXX errors */
- if (eofbp) {
- /*
- * We may need to zero any previously invalid data
- * after the old EOF in the previous EOF buffer.
- *
- * For the old last page, don't zero bytes if there
- * are invalid bytes in that page (i.e. the page isn't
- * currently valid).
- * For pages after the old last page, zero them and
- * mark them as valid.
- */
- char *d;
- int i;
- if (ioflag & IO_NOCACHE)
- SET(eofbp->nb_flags, NB_NOCACHE);
- NFS_BUF_MAP(eofbp);
- FSDBG(516, eofbp, eofoff, biosize - eofoff, 0xe0fff01e);
- d = eofbp->nb_data;
- i = eofoff/PAGE_SIZE;
- while (eofoff < biosize) {
- int poff = eofoff & PAGE_MASK;
- if (!poff || NBPGVALID(eofbp,i)) {
- bzero(d + eofoff, PAGE_SIZE - poff);
- NBPGVALID_SET(eofbp, i);
- }
- if (bp->nb_validend == eofoff)
- bp->nb_validend += PAGE_SIZE - poff;
- eofoff += PAGE_SIZE - poff;
- i++;
- }
- nfs_buf_release(eofbp, 1);
- }
}
/*
* If dirtyend exceeds file size, chop it down. This should
{
struct nfsmount *nmp;
int error = 0, nfsvers;
- int backup, wverfset, commit, committed;
+ int wverfset, commit, committed;
uint64_t wverf = 0, wverf2;
size_t nmwsize, totalsize, tsiz, len, rlen;
struct nfsreq rq, *req = &rq;
uint32_t stategenid = 0, vrestart = 0, restart = 0;
+ uio_t uio_save = NULL;
#if DIAGNOSTIC
/* XXX limitation based on need to back up uio on short write */
#endif
FSDBG_TOP(537, np, uio_offset(uio), uio_resid(uio), *iomodep);
nmp = NFSTONMP(np);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
nfsvers = nmp->nm_vers;
nmwsize = nmp->nm_wsize;
return (EFBIG);
}
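+	/*
+	 * Keep a copy of the original uio so that a short or failed write can
+	 * rewind to the correct offset instead of pushing bytes back.
+	 */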
+ uio_save = uio_duplicate(uio);
+ if (uio_save == NULL) {
+ return (EIO);
+ }
+
while (tsiz > 0) {
len = (tsiz > nmwsize) ? nmwsize : tsiz;
FSDBG(537, np, uio_offset(uio), len, 0);
if (!error)
error = nmp->nm_funcs->nf_write_rpc_async_finish(np, req, &commit, &rlen, &wverf2);
nmp = NFSTONMP(np);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
error = ENXIO;
if (nmp && (nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error) &&
(++restart <= nfs_mount_state_max_restarts(nmp))) { /* guard against no progress */
/* check for a short write */
if (rlen < len) {
- backup = len - rlen;
- uio_pushback(uio, backup);
+ /* Reset the uio to reflect the actual transfer */
+ *uio = *uio_save;
+ uio_update(uio, totalsize - (tsiz - rlen));
len = rlen;
}
error = EIO;
break;
}
- backup = totalsize - tsiz;
- uio_pushback(uio, backup);
+			*uio = *uio_save;	/* reset the uio back to the start */
committed = NFS_WRITE_FILESYNC;
wverfset = 0;
tsiz = totalsize;
}
}
+ if (uio_save)
+ uio_free(uio_save);
if (wverfset && wverfp)
*wverfp = wverf;
*iomodep = committed;
struct nfsm_chain nmreq;
nmp = NFSTONMP(np);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
nfsvers = nmp->nm_vers;
struct nfsm_chain nmrep;
nmp = NFSTONMP(np);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
nfs_request_async_cancel(req);
return (ENXIO);
}
if (error == EINPROGRESS) /* async request restarted */
return (error);
nmp = NFSTONMP(np);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
error = ENXIO;
if (!error && (lockerror = nfs_node_lock(np)))
error = lockerror;
int error = 0, lockerror = ENOENT, busyerror = ENOENT, status, wccpostattr = 0;
struct timespec premtime = { 0, 0 };
u_int32_t rdev;
- u_int64_t xid, dxid;
+ u_int64_t xid = 0, dxid;
int nfsvers, gotuid, gotgid;
struct nfsm_chain nmreq, nmrep;
struct nfsreq rq, *req = &rq;
nmp = VTONMP(dvp);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
nfsvers = nmp->nm_vers;
struct nfs_dulookup dul;
nmp = VTONMP(dvp);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
nfsvers = nmp->nm_vers;
/* XXX prevent removing a sillyrenamed file? */
nmp = NFSTONMP(dnp);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
nfsvers = nmp->nm_vers;
namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
nfs_removeit(struct nfs_sillyrename *nsp)
{
struct nfsmount *nmp = NFSTONMP(nsp->nsr_dnp);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
return nmp->nm_funcs->nf_remove_rpc(nsp->nsr_dnp, nsp->nsr_name, nsp->nsr_namlen, NULL, nsp->nsr_cred);
}
struct nfsm_chain nmreq, nmrep;
nmp = NFSTONMP(dnp);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
nfsvers = nmp->nm_vers;
if ((nfsvers == NFS_VER2) && (namelen > NFS_MAXNAMLEN))
tnp = tvp ? VTONFS(tvp) : NULL;
nmp = NFSTONMP(fdnp);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
nfsvers = nmp->nm_vers;
struct nfsm_chain nmreq, nmrep;
nmp = NFSTONMP(fdnp);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
nfsvers = nmp->nm_vers;
if ((nfsvers == NFS_VER2) &&
return (EXDEV);
nmp = VTONMP(vp);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
nfsvers = nmp->nm_vers;
if ((nfsvers == NFS_VER2) && (cnp->cn_namelen > NFS_MAXNAMLEN))
struct timespec premtime = { 0, 0 };
vnode_t newvp = NULL;
int nfsvers, gotuid, gotgid;
- u_int64_t xid, dxid;
+ u_int64_t xid = 0, dxid;
nfsnode_t np = NULL;
nfsnode_t dnp = VTONFS(dvp);
struct nfsmount *nmp;
struct nfs_dulookup dul;
nmp = VTONMP(dvp);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
nfsvers = nmp->nm_vers;
int error = 0, lockerror = ENOENT, busyerror = ENOENT, status, wccpostattr = 0;
struct timespec premtime = { 0, 0 };
int nfsvers, gotuid, gotgid;
- u_int64_t xid, dxid;
+	u_int64_t xid = 0, dxid;
fhandle_t fh;
struct nfsm_chain nmreq, nmrep;
struct nfsreq rq, *req = &rq;
struct nfs_dulookup dul;
nmp = VTONMP(dvp);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
nfsvers = nmp->nm_vers;
if ((nfsvers == NFS_VER2) && (cnp->cn_namelen > NFS_MAXNAMLEN))
struct nfs_dulookup dul;
nmp = VTONMP(vp);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
nfsvers = nmp->nm_vers;
if ((nfsvers == NFS_VER2) && (cnp->cn_namelen > NFS_MAXNAMLEN))
thread_t thd;
nmp = VTONMP(dvp);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
nfsvers = nmp->nm_vers;
bigcookies = (nmp->nm_state & NFSSTA_BIGCOOKIES);
* Let's search the directory's buffers for the cookie.
*/
nmp = NFSTONMP(dnp);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
dpptc = NULL;
found = 0;
daddr64_t lbn, nextlbn;
int dotunder = (cnp->cn_namelen > 2) && (cnp->cn_nameptr[0] == '.') && (cnp->cn_nameptr[1] == '_');
- if (!(nmp = NFSTONMP(dnp)))
+ nmp = NFSTONMP(dnp);
+ if (nfs_mount_gone(nmp))
return (ENXIO);
if (!purge)
*npp = NULL;
struct timeval now;
nmp = NFSTONMP(dnp);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
nfsvers = nmp->nm_vers;
nmreaddirsize = nmp->nm_readdirsize;
struct nfsmount *nmp;
nmp = NFSTONMP(dnp);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
nfs_name_cache_purge(dnp, np, cnp, ctx);
int error = 0, nfsvers;
nmp = NFSTONMP(dnp);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
nfsvers = nmp->nm_vers;
struct nfsm_chain nmrep;
nmp = NFSTONMP(dnp);
+ if (nmp == NULL)
+ return (ENXIO);
nfsvers = nmp->nm_vers;
nfsm_chain_null(&nmrep);
struct nfsreq rq, *req = &rq;
nmp = NFSTONMP(dnp);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXNAME) &&
nmp = NFSTONMP(np);
FSDBG(521, np, offset, count, nmp ? nmp->nm_state : 0);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
if (!(nmp->nm_state & NFSSTA_HASWRITEVERF))
return (0);
struct nfsmount *nmp = NFSTONMP(np);
uint32_t val = 0;
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
nfsvers = nmp->nm_vers;
error = status;
nfsm_chain_get_32(error, &nmrep, nfsap->nfsa_maxlink);
nfsm_chain_get_32(error, &nmrep, nfsap->nfsa_maxname);
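+	/* Clear the flag bits first so stale bits from a previous reply can't linger. */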
+ nfsap->nfsa_flags &= ~(NFS_FSFLAG_NO_TRUNC|NFS_FSFLAG_CHOWN_RESTRICTED|NFS_FSFLAG_CASE_INSENSITIVE|NFS_FSFLAG_CASE_PRESERVING);
nfsm_chain_get_32(error, &nmrep, val);
if (val)
nfsap->nfsa_flags |= NFS_FSFLAG_NO_TRUNC;
{
nmp->nm_fsattr.nfsa_maxlink = nfsap->nfsa_maxlink;
nmp->nm_fsattr.nfsa_maxname = nfsap->nfsa_maxname;
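+	/* Reset the cached flags before merging in the bits from the new reply. */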
+ nmp->nm_fsattr.nfsa_flags &= ~(NFS_FSFLAG_NO_TRUNC|NFS_FSFLAG_CHOWN_RESTRICTED|NFS_FSFLAG_CASE_INSENSITIVE|NFS_FSFLAG_CASE_PRESERVING);
nmp->nm_fsattr.nfsa_flags |= nfsap->nfsa_flags & NFS_FSFLAG_NO_TRUNC;
nmp->nm_fsattr.nfsa_flags |= nfsap->nfsa_flags & NFS_FSFLAG_CHOWN_RESTRICTED;
nmp->nm_fsattr.nfsa_flags |= nfsap->nfsa_flags & NFS_FSFLAG_CASE_INSENSITIVE;
uint nbits;
nmp = VTONMP(vp);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
switch (ap->a_name) {
if (error)
return (error);
nmp = VTONMP(vp);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
lck_mtx_lock(&nmp->nm_lock);
if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_HOMOGENEOUS) {
if (error)
return (error);
nmp = VTONMP(vp);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
lck_mtx_lock(&nmp->nm_lock);
nfsap = &nfsa;
{
vfs_context_t ctx = ap->a_context;
vnode_t vp = ap->a_vp;
+ struct nfsmount *mp = VTONMP(vp);
+ struct user_nfs_gss_principal gprinc;
+ uint32_t len;
int error = ENOTTY;
+ if (mp == NULL)
+ return (ENXIO);
+
switch (ap->a_command) {
case F_FULLFSYNC:
if (vnode_vfsisrdonly(vp))
return (EROFS);
- if (!VTONMP(vp))
- return (ENXIO);
error = nfs_flush(VTONFS(vp), MNT_WAIT, vfs_context_thread(ctx), 0);
break;
-
+ case NFS_FSCTL_DESTROY_CRED:
+ if (!auth_is_kerberized(mp->nm_auth))
+ return (ENOTSUP);
+ error = nfs_gss_clnt_ctx_remove(mp, vfs_context_ucred(ctx));
+ break;
+ case NFS_FSCTL_SET_CRED:
+ if (!auth_is_kerberized(mp->nm_auth))
+ return (ENOTSUP);
+		NFS_DBG(NFS_FAC_GSS, 7, "Enter NFS_FSCTL_SET_CRED (64-bit process: %d) data = %p\n", vfs_context_is64bit(ctx), (void *)ap->a_data);
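+		/* Normalize the caller's principal argument to the 64-bit user form. */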
+ if (vfs_context_is64bit(ctx)) {
+ gprinc = *(struct user_nfs_gss_principal *)ap->a_data;
+ } else {
+ struct nfs_gss_principal *tp;
+ tp = (struct nfs_gss_principal *)ap->a_data;
+ gprinc.princlen = tp->princlen;
+ gprinc.nametype = tp->nametype;
+ gprinc.principal = CAST_USER_ADDR_T(tp->principal);
+ }
+ if (gprinc.princlen > MAXPATHLEN)
+ return (EINVAL);
+ NFS_DBG(NFS_FAC_GSS, 7, "Received principal length %d name type = %d\n", gprinc.princlen, gprinc.nametype);
+ uint8_t *p;
+ MALLOC(p, uint8_t *, gprinc.princlen+1, M_TEMP, M_WAITOK|M_ZERO);
+ if (p == NULL)
+ return (ENOMEM);
+ error = copyin(gprinc.principal, p, gprinc.princlen);
+ if (error) {
+			NFS_DBG(NFS_FAC_GSS, 7, "NFS_FSCTL_SET_CRED could not copy in principal data of len %d: %d\n",
+ gprinc.princlen, error);
+ FREE(p, M_TEMP);
+ return (error);
+ }
+		NFS_DBG(NFS_FAC_GSS, 7, "Setting credential to principal %s\n", p);
+		error = nfs_gss_clnt_ctx_set_principal(mp, ctx, p, gprinc.princlen, gprinc.nametype);
+		NFS_DBG(NFS_FAC_GSS, 7, "Setting credential to principal %s returned %d\n", p, error);
+ FREE(p, M_TEMP);
+ break;
+ case NFS_FSCTL_GET_CRED:
+ if (!auth_is_kerberized(mp->nm_auth))
+ return (ENOTSUP);
+ error = nfs_gss_clnt_ctx_get_principal(mp, ctx, &gprinc);
+ if (error)
+ break;
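+		/*
+		 * Copy out as much of the principal as the caller's buffer holds,
+		 * but report the full length so a too-small buffer can be detected.
+		 */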
+ if (vfs_context_is64bit(ctx)) {
+ struct user_nfs_gss_principal *upp = (struct user_nfs_gss_principal *)ap->a_data;
+ len = upp->princlen;
+ if (gprinc.princlen < len)
+ len = gprinc.princlen;
+ upp->princlen = gprinc.princlen;
+ upp->nametype = gprinc.nametype;
+ upp->flags = gprinc.flags;
+ if (gprinc.principal)
+ error = copyout((void *)gprinc.principal, upp->principal, len);
+ else
+ upp->principal = USER_ADDR_NULL;
+ } else {
+ struct nfs_gss_principal *u32pp = (struct nfs_gss_principal *)ap->a_data;
+ len = u32pp->princlen;
+ if (gprinc.princlen < len)
+ len = gprinc.princlen;
+ u32pp->princlen = gprinc.princlen;
+ u32pp->nametype = gprinc.nametype;
+ u32pp->flags = gprinc.flags;
+ if (gprinc.principal)
+ error = copyout((void *)gprinc.principal, u32pp->principal, len);
+ else
+ u32pp->principal = (user32_addr_t)0;
+ }
+ if (error) {
+			NFS_DBG(NFS_FAC_GSS, 7, "NFS_FSCTL_GET_CRED could not copy out principal data of len %d: %d\n",
+ gprinc.princlen, error);
+ }
+ FREE(gprinc.principal, M_TEMP);
}
return (error);
&uio_buf, sizeof(uio_buf));
nmp = VTONMP(vp);
- if (!nmp) {
+ if (nfs_mount_gone(nmp)) {
if (!nofreeupl)
ubc_upl_abort_range(pl, pl_offset, size,
UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
* erroneous.
*/
char nfs_pageouterrorhandler(int);
-enum actiontype {NOACTION, DUMP, DUMPANDLOG, RETRY, RETRYWITHSLEEP, SEVER};
+enum actiontype {NOACTION, DUMP, DUMPANDLOG, RETRY, SEVER};
#define NFS_ELAST 88
static u_char errorcount[NFS_ELAST+1]; /* better be zeros when initialized */
static const char errortooutcome[NFS_ELAST+1] = {
case RETRY:
abortflags = UPL_ABORT_FREE_ON_EMPTY;
break;
- case RETRYWITHSLEEP:
- abortflags = UPL_ABORT_FREE_ON_EMPTY;
- /* pri unused. PSOCK for placeholder. */
- tsleep(&lbolt, PSOCK, "nfspageout", 0);
- break;
case SEVER: /* not implemented */
default:
NP(np, "nfs_pageout: action %d not expected", action);
vnode_t vp = ap->a_vp;
struct nfsmount *nmp = VTONMP(vp);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
biosize = nmp->nm_biosize;
vnode_t vp = ap->a_vp;
struct nfsmount *nmp = VTONMP(vp);
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
biosize = nmp->nm_biosize;
struct nfsmount *nmp = VTONMP(ap->a_vp);
int error = 0;
- if (!nmp)
+ if (nfs_mount_gone(nmp))
return (ENXIO);
/* make sure that the vnode's monitoring status is up to date */