+out:
+ if (nofp) {
+ nfs_open_file_clear_busy(nofp);
+ }
+ if (nfs_mount_state_in_use_end(nmp, error)) {
+ nofp = NULL;
+ goto restart;
+ }
+ if (!error) {
+ error = error1;
+ }
+ if (error) {
+ NP(np, "nfs_vnop_close: error %d, %d", error, kauth_cred_getuid(noop->noo_cred));
+ }
+ if (noop) {
+ nfs_open_owner_rele(noop);
+ }
+ return error;
+}
+
/*
 * nfs_close(): common function that does all the heavy lifting of file closure
 *
 * Takes an open file structure and a set of access/deny modes and figures out how
 * to update the open file structure (and the state on the server) appropriately.
 *
 * np         - the node being closed
 * nofp       - per-open-owner open-file state whose share counts are released
 * accessMode - NFS_OPEN_SHARE_ACCESS_* bits being dropped
 * denyMode   - NFS_OPEN_SHARE_DENY_* bits being dropped
 * ctx        - caller's context; only referenced on the NFSv4 paths (hence __unused)
 *
 * Returns 0 on success, an RPC error, or EIO if the open state was marked lost.
 */
int
nfs_close(
    nfsnode_t np,
    struct nfs_open_file *nofp,
    uint32_t accessMode,
    uint32_t denyMode,
    __unused vfs_context_t ctx)
{
#if CONFIG_NFS4
    struct nfs_lock_owner *nlop;
#endif
    int error = 0, changed = 0, delegated = 0, closed = 0, downgrade = 0;
    uint8_t newAccessMode, newDenyMode;

    /* warn if modes don't match current state */
    if (((accessMode & nofp->nof_access) != accessMode) || ((denyMode & nofp->nof_deny) != denyMode)) {
        NP(np, "nfs_close: mode mismatch %d %d, current %d %d, %d",
            accessMode, denyMode, nofp->nof_access, nofp->nof_deny,
            kauth_cred_getuid(nofp->nof_owner->noo_cred));
    }

    /*
     * If we're closing a write-only open, we may not have a write-only count
     * if we also grabbed read access.  So, check the read-write count: when the
     * plain and delegated write counts for this deny mode are both zero but a
     * read-write count exists, release the read-write open instead.
     */
    if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
        if ((accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) &&
            (nofp->nof_w == 0) && (nofp->nof_d_w == 0) &&
            (nofp->nof_rw || nofp->nof_d_rw)) {
            accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
        }
    } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
        if ((accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) &&
            (nofp->nof_w_dw == 0) && (nofp->nof_d_w_dw == 0) &&
            (nofp->nof_rw_dw || nofp->nof_d_rw_dw)) {
            accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
        }
    } else { /* NFS_OPEN_SHARE_DENY_BOTH */
        if ((accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) &&
            (nofp->nof_w_drw == 0) && (nofp->nof_d_w_drw == 0) &&
            (nofp->nof_rw_drw || nofp->nof_d_rw_drw)) {
            accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
        }
    }

    /* Compute what the access/deny modes would be after this close (without applying it yet). */
    nfs_open_file_remove_open_find(nofp, accessMode, denyMode, &newAccessMode, &newDenyMode, &delegated);
    if ((newAccessMode != nofp->nof_access) || (newDenyMode != nofp->nof_deny)) {
        changed = 1;
    } else {
        changed = 0;
    }

    /* NOTE(review): NFSTONMP() result is not checked for a dead mount here — confirm callers hold the mount in use. */
    if (NFSTONMP(np)->nm_vers < NFS_VER4) {
        /* NFS v2/v3 closes simply need to remove the open. */
        goto v3close;
    }
#if CONFIG_NFS4
    if ((newAccessMode == 0) || (nofp->nof_opencnt == 1)) {
        /*
         * No more access after this close, so clean up and close it.
         * Don't send a close RPC if we're closing a delegated open.
         */
        nfs_wait_bufs(np);
        closed = 1;
        if (!delegated && !(nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
            error = nfs4_close_rpc(np, nofp, vfs_context_thread(ctx), vfs_context_ucred(ctx), 0);
        }
        if (error == NFSERR_LOCKS_HELD) {
            /*
             * Hmm... the server says we have locks we need to release first
             * Find the lock owner and try to unlock everything.
             */
            nlop = nfs_lock_owner_find(np, vfs_context_proc(ctx), 0);
            if (nlop) {
                nfs4_unlock_rpc(np, nlop, F_WRLCK, 0, UINT64_MAX,
                    0, vfs_context_thread(ctx), vfs_context_ucred(ctx));
                nfs_lock_owner_rele(nlop);
            }
            /* retry the close now that the locks have been released */
            error = nfs4_close_rpc(np, nofp, vfs_context_thread(ctx), vfs_context_ucred(ctx), 0);
        }
    } else if (changed) {
        /*
         * File is still open but with less access, so downgrade the open.
         * Don't send a downgrade RPC if we're closing a delegated open.
         */
        if (!delegated && !(nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
            downgrade = 1;
            /*
             * If we have delegated opens, we should probably claim them before sending
             * the downgrade because the server may not know the open we are downgrading to.
             */
            if (nofp->nof_d_rw_drw || nofp->nof_d_w_drw || nofp->nof_d_r_drw ||
                nofp->nof_d_rw_dw || nofp->nof_d_w_dw || nofp->nof_d_r_dw ||
                nofp->nof_d_rw || nofp->nof_d_w || nofp->nof_d_r) {
                nfs4_claim_delegated_state_for_open_file(nofp, 0);
            }
            /* need to remove the open before sending the downgrade */
            nfs_open_file_remove_open(nofp, accessMode, denyMode);
            error = nfs4_open_downgrade_rpc(np, nofp, ctx);
            if (error) { /* Hmm.. that didn't work. Add the open back in. */
                nfs_open_file_add_open(nofp, accessMode, denyMode, delegated);
            }
        }
    }
#endif
v3close:
    if (error) {
        NP(np, "nfs_close: error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
        return error;
    }

    /* Apply the close to the local open counts (the downgrade path already did this above). */
    if (!downgrade) {
        nfs_open_file_remove_open(nofp, accessMode, denyMode);
    }

    if (closed) {
        lck_mtx_lock(&nofp->nof_lock);
        /* Sanity check: every share-mode count should now be zero (modulo the creator's single rw open). */
        if (nofp->nof_r || nofp->nof_d_r || nofp->nof_w || nofp->nof_d_w || nofp->nof_d_rw ||
            (nofp->nof_rw && !((nofp->nof_flags & NFS_OPEN_FILE_CREATE) && !nofp->nof_creator && (nofp->nof_rw == 1))) ||
            nofp->nof_r_dw || nofp->nof_d_r_dw || nofp->nof_w_dw || nofp->nof_d_w_dw ||
            nofp->nof_rw_dw || nofp->nof_d_rw_dw || nofp->nof_r_drw || nofp->nof_d_r_drw ||
            nofp->nof_w_drw || nofp->nof_d_w_drw || nofp->nof_rw_drw || nofp->nof_d_rw_drw) {
            NP(np, "nfs_close: unexpected count: %u.%u %u.%u %u.%u dw %u.%u %u.%u %u.%u drw %u.%u %u.%u %u.%u flags 0x%x, %d",
                nofp->nof_r, nofp->nof_d_r, nofp->nof_w, nofp->nof_d_w,
                nofp->nof_rw, nofp->nof_d_rw, nofp->nof_r_dw, nofp->nof_d_r_dw,
                nofp->nof_w_dw, nofp->nof_d_w_dw, nofp->nof_rw_dw, nofp->nof_d_rw_dw,
                nofp->nof_r_drw, nofp->nof_d_r_drw, nofp->nof_w_drw, nofp->nof_d_w_drw,
                nofp->nof_rw_drw, nofp->nof_d_rw_drw, nofp->nof_flags,
                kauth_cred_getuid(nofp->nof_owner->noo_cred));
        }
        /* clear out all open info, just to be safe */
        nofp->nof_access = nofp->nof_deny = 0;
        nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
        nofp->nof_r = nofp->nof_d_r = 0;
        nofp->nof_w = nofp->nof_d_w = 0;
        nofp->nof_rw = nofp->nof_d_rw = 0;
        nofp->nof_r_dw = nofp->nof_d_r_dw = 0;
        nofp->nof_w_dw = nofp->nof_d_w_dw = 0;
        nofp->nof_rw_dw = nofp->nof_d_rw_dw = 0;
        nofp->nof_r_drw = nofp->nof_d_r_drw = 0;
        nofp->nof_w_drw = nofp->nof_d_w_drw = 0;
        nofp->nof_rw_drw = nofp->nof_d_rw_drw = 0;
        nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
        lck_mtx_unlock(&nofp->nof_lock);
        /* XXX we may potentially want to clean up idle/unused open file structures */
    }
    if (nofp->nof_flags & NFS_OPEN_FILE_LOST) {
        /* The open state was lost; report EIO to the caller. */
        error = EIO;
        NP(np, "nfs_close: LOST%s, %d", !nofp->nof_opencnt ? " (last)" : "",
            kauth_cred_getuid(nofp->nof_owner->noo_cred));
    }

    return error;
}
+
+
+int
+nfs3_getattr_rpc(
+ nfsnode_t np,
+ mount_t mp,
+ u_char *fhp,
+ size_t fhsize,
+ int flags,
+ vfs_context_t ctx,
+ struct nfs_vattr *nvap,
+ u_int64_t *xidp)
+{
+ struct nfsmount *nmp = mp ? VFSTONFS(mp) : NFSTONMP(np);
+ int error = 0, status = 0, nfsvers, rpcflags = 0;
+ struct nfsm_chain nmreq, nmrep;
+
+ if (nfs_mount_gone(nmp)) {
+ return ENXIO;
+ }
+ nfsvers = nmp->nm_vers;
+
+ if (flags & NGA_MONITOR) { /* vnode monitor requests should be soft */
+ rpcflags = R_RECOVER;
+ }
+
+ if (flags & NGA_SOFT) { /* Return ETIMEDOUT if server not responding */
+ rpcflags |= R_SOFT;
+ }
+
+ nfsm_chain_null(&nmreq);
+ nfsm_chain_null(&nmrep);
+
+ nfsm_chain_build_alloc_init(error, &nmreq, NFSX_FH(nfsvers));
+ if (nfsvers != NFS_VER2) {
+ nfsm_chain_add_32(error, &nmreq, fhsize);
+ }
+ nfsm_chain_add_opaque(error, &nmreq, fhp, fhsize);
+ nfsm_chain_build_done(error, &nmreq);
+ nfsmout_if(error);
+ error = nfs_request2(np, mp, &nmreq, NFSPROC_GETATTR,
+ vfs_context_thread(ctx), vfs_context_ucred(ctx),
+ NULL, rpcflags, &nmrep, xidp, &status);
+ if (!error) {
+ error = status;
+ }
+ nfsmout_if(error);
+ error = nfs_parsefattr(nmp, &nmrep, nfsvers, nvap);
+nfsmout:
+ nfsm_chain_cleanup(&nmreq);
+ nfsm_chain_cleanup(&nmrep);
+ return error;
+}
+
/*
 * nfs_refresh_fh will attempt to update the file handle for the node.
 *
 * It only does this for symbolic links and regular files that are not currently opened.
 *
 * On Success returns 0 and the nodes file handle is updated, or ESTALE on failure.
 */
int
nfs_refresh_fh(nfsnode_t np, vfs_context_t ctx)
{
    vnode_t dvp, vp = NFSTOV(np);
    nfsnode_t dnp;
    const char *v_name = vnode_getname(vp);
    char *name;            /* private heap copy of the vnode name */
    int namelen, refreshed;
    uint32_t fhsize;
    int error, wanted = 0;
    uint8_t *fhp;          /* snapshot of the node's current file handle */
    struct timespec ts = {.tv_sec = 2, .tv_nsec = 0};

    NFS_VNOP_DBG("vnode is %d\n", vnode_vtype(vp));

    /* Only regular files and symlinks with a usable name and parent can be refreshed. */
    dvp = vnode_parent(vp);
    if ((vnode_vtype(vp) != VREG && vnode_vtype(vp) != VLNK) ||
        v_name == NULL || *v_name == '\0' || dvp == NULL) {
        if (v_name != NULL) {
            vnode_putname(v_name);
        }
        return ESTALE;
    }
    dnp = VTONFS(dvp);

    /* Copy the name so the vnode name reference can be dropped right away. */
    namelen = NFS_STRLEN_INT(v_name);
    MALLOC(name, char *, namelen + 1, M_TEMP, M_WAITOK);
    if (name == NULL) {
        vnode_putname(v_name);
        return ESTALE;
    }
    bcopy(v_name, name, namelen + 1);
    NFS_VNOP_DBG("Trying to refresh %s : %s\n", v_name, name);
    vnode_putname(v_name);

    /* Allocate the maximum size file handle */
    MALLOC(fhp, uint8_t *, NFS4_FHSIZE, M_FHANDLE, M_WAITOK);
    if (fhp == NULL) {
        FREE(name, M_TEMP);
        return ESTALE;
    }

    if ((error = nfs_node_lock(np))) {
        FREE(name, M_TEMP);
        FREE(fhp, M_FHANDLE);
        return ESTALE;
    }

    /*
     * Snapshot the current FH, then wait out any refresh already in progress
     * (NREFRESH).  Each msleep pass waits up to ts (2s) and rechecks for
     * signals.  After the wait, if the node's FH no longer matches our
     * snapshot, another thread refreshed it for us.
     */
    fhsize = np->n_fhsize;
    bcopy(np->n_fhp, fhp, fhsize);
    while (ISSET(np->n_flag, NREFRESH)) {
        SET(np->n_flag, NREFRESHWANT);
        NFS_VNOP_DBG("Waiting for refresh of %s\n", name);
        msleep(np, &np->n_lock, PZERO - 1, "nfsrefreshwant", &ts);
        if ((error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0))) {
            break;
        }
    }
    refreshed = error ? 0 : !NFS_CMPFH(np, fhp, fhsize);
    SET(np->n_flag, NREFRESH); /* we are now the active refresher */
    nfs_node_unlock(np);

    NFS_VNOP_DBG("error = %d, refreshed = %d\n", error, refreshed);
    if (error || refreshed) {
        goto nfsmout;
    }

    /* Check that there are no open references for this file */
    lck_mtx_lock(&np->n_openlock);
    if (np->n_openrefcnt || !TAILQ_EMPTY(&np->n_opens) || !TAILQ_EMPTY(&np->n_lock_owners)) {
        int cnt = 0;
        struct nfs_open_file *ofp;

        /* Sum the actual open counts; stale open-file structures may linger with zero opens. */
        TAILQ_FOREACH(ofp, &np->n_opens, nof_link) {
            cnt += ofp->nof_opencnt;
        }
        if (cnt) {
            lck_mtx_unlock(&np->n_openlock);
            NFS_VNOP_DBG("Can not refresh file handle for %s with open state\n", name);
            NFS_VNOP_DBG("\topenrefcnt = %d, opens = %d lock_owners = %d\n",
                np->n_openrefcnt, cnt, !TAILQ_EMPTY(&np->n_lock_owners));
            error = ESTALE;
            goto nfsmout;
        }
    }
    lck_mtx_unlock(&np->n_openlock);
    /*
     * Since the FH is currently stale we should not be able to
     * establish any open state until the FH is refreshed.
     */

    error = nfs_node_lock(np);
    nfsmout_if(error);
    /*
     * Symlinks should never need invalidations and are holding
     * the one and only nfsbuf in an uncached acquired state
     * trying to do a readlink. So we will hang if we invalidate
     * in that case. Only in in the VREG case do we need to
     * invalidate.
     */
    if (vnode_vtype(vp) == VREG) {
        np->n_flag &= ~NNEEDINVALIDATE;
        nfs_node_unlock(np);
        error = nfs_vinvalbuf(vp, V_IGNORE_WRITEERR, ctx, 1);
        if (error) {
            NFS_VNOP_DBG("nfs_vinvalbuf returned %d\n", error);
        }
        nfsmout_if(error);
    } else {
        nfs_node_unlock(np);
    }

    /* Re-lookup the name in the parent directory to obtain the fresh file handle. */
    NFS_VNOP_DBG("Looking up %s\n", name);
    error = nfs_lookitup(dnp, name, namelen, ctx, &np);
    if (error) {
        NFS_VNOP_DBG("nfs_lookitup returned %d\n", error);
    }

nfsmout:
    /* Done refreshing (success or not): clear the flags and wake any waiters. */
    nfs_node_lock_force(np);
    wanted = ISSET(np->n_flag, NREFRESHWANT);
    CLR(np->n_flag, NREFRESH | NREFRESHWANT);
    nfs_node_unlock(np);
    if (wanted) {
        wakeup(np);
    }

    if (error == 0) {
        NFS_VNOP_DBG("%s refreshed file handle\n", name);
    }

    FREE(name, M_TEMP);
    FREE(fhp, M_FHANDLE);

    /* All failures are collapsed to ESTALE for the caller. */
    return error ? ESTALE : 0;
}
+
+int
+nfs_getattr(nfsnode_t np, struct nfs_vattr *nvap, vfs_context_t ctx, int flags)
+{
+ int error;
+
+retry:
+ error = nfs_getattr_internal(np, nvap, ctx, flags);
+ if (error == ESTALE) {
+ error = nfs_refresh_fh(np, ctx);
+ if (!error) {
+ goto retry;
+ }
+ }
+ return error;
+}
+
+int
+nfs_getattr_internal(nfsnode_t np, struct nfs_vattr *nvap, vfs_context_t ctx, int flags)
+{
+ struct nfsmount *nmp;
+ int error = 0, nfsvers, inprogset = 0, wanted = 0, avoidfloods = 0;
+ struct nfs_vattr *nvattr = NULL;
+ struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
+ u_int64_t xid = 0;
+
+ FSDBG_TOP(513, np->n_size, np, np->n_vattr.nva_size, np->n_flag);
+
+ nmp = NFSTONMP(np);
+
+ if (nfs_mount_gone(nmp)) {
+ return ENXIO;
+ }
+ nfsvers = nmp->nm_vers;
+
+ if (!nvap) {
+ MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
+ nvap = nvattr;
+ }
+ NVATTR_INIT(nvap);
+
+ /* Update local times for special files. */