}
lck_mtx_lock(&nmp->nm_lock);
if (nmp->nm_vers == NFS_VER3) {
if (!(nmp->nm_state & NFSSTA_GOTPATHCONF)) {
}
lck_mtx_lock(&nmp->nm_lock);
if (nmp->nm_vers == NFS_VER3) {
if (!(nmp->nm_state & NFSSTA_GOTPATHCONF)) {
*npp = NULL;
error = ENXIO;
FSDBG_BOT(263, mp, dnp, 0xd1e, error);
*npp = NULL;
error = ENXIO;
FSDBG_BOT(263, mp, dnp, 0xd1e, error);
for (np = nhpp->lh_first; np != 0; np = np->n_hash.le_next) {
mp2 = (np->n_hflag & NHINIT) ? np->n_mount : NFSTOMP(np);
if (mp != mp2 || np->n_fhsize != fhsize ||
for (np = nhpp->lh_first; np != 0; np = np->n_hash.le_next) {
mp2 = (np->n_hflag & NHINIT) ? np->n_mount : NFSTOMP(np);
if (mp != mp2 || np->n_fhsize != fhsize ||
if (nvap && (nvap->nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) &&
cnp && (cnp->cn_namelen > (fhsize - (int)sizeof(dnp)))) {
/* The name was too long to fit in the file handle. Check it against the node's name. */
int namecmp = 0;
const char *vname = vnode_getname(NFSTOV(np));
if (vname) {
if (nvap && (nvap->nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) &&
cnp && (cnp->cn_namelen > (fhsize - (int)sizeof(dnp)))) {
/* The name was too long to fit in the file handle. Check it against the node's name. */
int namecmp = 0;
const char *vname = vnode_getname(NFSTOV(np));
if (vname) {
}
FSDBG(263, dnp, np, np->n_flag, 0xcace0000);
/* if the node is locked, sleep on it */
}
FSDBG(263, dnp, np, np->n_flag, 0xcace0000);
/* if the node is locked, sleep on it */
* changed identity, no need to wait.
*/
FSDBG_BOT(263, dnp, *npp, 0xcace0d1e, error);
* changed identity, no need to wait.
*/
FSDBG_BOT(263, dnp, *npp, 0xcace0d1e, error);
if (flags & NG_NOCREATE) {
*npp = 0;
FSDBG_BOT(263, dnp, *npp, 0xcaced1e0, ENOENT);
if (flags & NG_NOCREATE) {
*npp = 0;
FSDBG_BOT(263, dnp, *npp, 0xcaced1e0, ENOENT);
error = nfs_loadattrcache(np, nvap, xidp, 0);
error = nfs_loadattrcache(np, nvap, xidp, 0);
*
* This is clearly not perfect due to races, but this is
* as good as its going to get. You can defeat the
*
* This is clearly not perfect due to races, but this is
* as good as its going to get. You can defeat the
- if (vp->v_name && cnp->cn_namelen && (*cmp)(cnp->cn_nameptr, vp->v_name, cnp->cn_namelen))
+ if (vp->v_name && (size_t)cnp->cn_namelen != strnlen(vp->v_name, MAXPATHLEN)) {
+ update_flags |= VNODE_UPDATE_NAME;
+ }
+ if (vp->v_name && cnp->cn_namelen && (*cmp)(cnp->cn_nameptr, vp->v_name, cnp->cn_namelen)) {
- vp->v_name, cnp->cn_namelen, cnp->cn_nameptr ? cnp->cn_nameptr : "", update_flags);
+ vp->v_name, cnp->cn_namelen, cnp->cn_nameptr ? cnp->cn_nameptr : "", update_flags);
vnode_update_identity(vp, NFSTOV(dnp), cnp->cn_nameptr, cnp->cn_namelen, 0, update_flags);
}
}
vnode_update_identity(vp, NFSTOV(dnp), cnp->cn_nameptr, cnp->cn_namelen, 0, update_flags);
}
}
*npp = np;
}
FSDBG_BOT(263, dnp, *npp, 0xcace0000, error);
*npp = np;
}
FSDBG_BOT(263, dnp, *npp, 0xcace0000, error);
}
FSDBG(263, mp, dnp, npp, 0xaaaaaaaa);
}
FSDBG(263, mp, dnp, npp, 0xaaaaaaaa);
lck_mtx_unlock(nfs_node_hash_mutex);
*npp = 0;
FSDBG_BOT(263, dnp, *npp, 0x80000001, ENOENT);
lck_mtx_unlock(nfs_node_hash_mutex);
*npp = 0;
FSDBG_BOT(263, dnp, *npp, 0x80000001, ENOENT);
lck_mtx_unlock(nfs_node_hash_mutex);
*npp = 0;
FSDBG_BOT(263, dnp, *npp, 0x80000001, ENOMEM);
lck_mtx_unlock(nfs_node_hash_mutex);
*npp = 0;
FSDBG_BOT(263, dnp, *npp, 0x80000001, ENOMEM);
/* ugh... need to keep track of ".zfs" directories to workaround server bugs */
if ((nvap->nva_type == VDIR) && cnp && (cnp->cn_namelen == 4) &&
(cnp->cn_nameptr[0] == '.') && (cnp->cn_nameptr[1] == 'z') &&
/* ugh... need to keep track of ".zfs" directories to workaround server bugs */
if ((nvap->nva_type == VDIR) && cnp && (cnp->cn_namelen == 4) &&
(cnp->cn_nameptr[0] == '.') && (cnp->cn_nameptr[1] == 'z') &&
if (dnp && cnp && ((cnp->cn_namelen != 2) ||
(cnp->cn_nameptr[0] != '.') || (cnp->cn_nameptr[1] != '.'))) {
vnode_t dvp = NFSTOV(dnp);
if (!vnode_get(dvp)) {
if (dnp && cnp && ((cnp->cn_namelen != 2) ||
(cnp->cn_nameptr[0] != '.') || (cnp->cn_nameptr[1] != '.'))) {
vnode_t dvp = NFSTOV(dnp);
if (!vnode_get(dvp)) {
/* setup node's file handle */
if (fhsize > NFS_SMALLFH) {
MALLOC_ZONE(np->n_fhp, u_char *,
/* setup node's file handle */
if (fhsize > NFS_SMALLFH) {
MALLOC_ZONE(np->n_fhp, u_char *,
if (!np->n_fhp) {
lck_mtx_unlock(nfs_node_hash_mutex);
FREE_ZONE(np, sizeof *np, M_NFSNODE);
*npp = 0;
FSDBG_BOT(263, dnp, *npp, 0x80000002, ENOMEM);
if (!np->n_fhp) {
lck_mtx_unlock(nfs_node_hash_mutex);
FREE_ZONE(np, sizeof *np, M_NFSNODE);
*npp = 0;
FSDBG_BOT(263, dnp, *npp, 0x80000002, ENOMEM);
lck_mtx_destroy(&np->n_lock, nfs_node_lck_grp);
lck_rw_destroy(&np->n_datalock, nfs_data_lck_grp);
lck_mtx_destroy(&np->n_openlock, nfs_open_grp);
lck_mtx_destroy(&np->n_lock, nfs_node_lck_grp);
lck_rw_destroy(&np->n_datalock, nfs_data_lck_grp);
lck_mtx_destroy(&np->n_openlock, nfs_open_grp);
FREE_ZONE(np, sizeof *np, M_NFSNODE);
*npp = 0;
FSDBG_BOT(263, dnp, *npp, 0x80000003, error);
FREE_ZONE(np, sizeof *np, M_NFSNODE);
*npp = 0;
FSDBG_BOT(263, dnp, *npp, 0x80000003, error);
struct vnode_trigger_param vtp;
bzero(&vtp, sizeof(vtp));
bcopy(&vfsp, &vtp.vnt_params, sizeof(vfsp));
vtp.vnt_resolve_func = nfs_mirror_mount_trigger_resolve;
vtp.vnt_unresolve_func = nfs_mirror_mount_trigger_unresolve;
vtp.vnt_rearm_func = nfs_mirror_mount_trigger_rearm;
struct vnode_trigger_param vtp;
bzero(&vtp, sizeof(vtp));
bcopy(&vfsp, &vtp.vnt_params, sizeof(vfsp));
vtp.vnt_resolve_func = nfs_mirror_mount_trigger_resolve;
vtp.vnt_unresolve_func = nfs_mirror_mount_trigger_unresolve;
vtp.vnt_rearm_func = nfs_mirror_mount_trigger_rearm;
error = vnode_create(VNCREATE_TRIGGER, VNCREATE_TRIGGER_SIZE, &vtp, &np->n_vnode);
} else
#endif
{
error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &np->n_vnode);
}
error = vnode_create(VNCREATE_TRIGGER, VNCREATE_TRIGGER_SIZE, &vtp, &np->n_vnode);
} else
#endif
{
error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &np->n_vnode);
}
if (error) {
FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
nfs_node_unlock(np);
lck_mtx_lock(nfs_node_hash_mutex);
LIST_REMOVE(np, n_hash);
if (error) {
FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
nfs_node_unlock(np);
lck_mtx_lock(nfs_node_hash_mutex);
LIST_REMOVE(np, n_hash);
lck_mtx_destroy(&np->n_lock, nfs_node_lck_grp);
lck_rw_destroy(&np->n_datalock, nfs_data_lck_grp);
lck_mtx_destroy(&np->n_openlock, nfs_open_grp);
lck_mtx_destroy(&np->n_lock, nfs_node_lck_grp);
lck_rw_destroy(&np->n_datalock, nfs_data_lck_grp);
lck_mtx_destroy(&np->n_openlock, nfs_open_grp);
FREE_ZONE(np, sizeof *np, M_NFSNODE);
*npp = 0;
FSDBG_BOT(263, dnp, *npp, 0x80000004, error);
FREE_ZONE(np, sizeof *np, M_NFSNODE);
*npp = 0;
FSDBG_BOT(263, dnp, *npp, 0x80000004, error);
*npp = np;
FSDBG_BOT(263, dnp, vp, *npp, error);
*npp = np;
FSDBG_BOT(263, dnp, vp, *npp, error);
TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
lck_mtx_lock(&nofp->nof_lock);
if (nofp->nof_flags & NFS_OPEN_FILE_BUSY) {
TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
lck_mtx_lock(&nofp->nof_lock);
if (nofp->nof_flags & NFS_OPEN_FILE_BUSY) {
* node has gone inactive without being open, we need to
* clean up (close) the open done in the create.
*/
* node has gone inactive without being open, we need to
* clean up (close) the open done in the create.
*/
if ((nofp->nof_flags & NFS_OPEN_FILE_CREATE) && nofp->nof_creator && !force) {
if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
lck_mtx_unlock(&np->n_openlock);
if ((nofp->nof_flags & NFS_OPEN_FILE_CREATE) && nofp->nof_creator && !force) {
if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
lck_mtx_unlock(&np->n_openlock);
NP(np, "nfs_vnop_inactive: create close error: %d", error);
nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
}
NP(np, "nfs_vnop_inactive: create close error: %d", error);
nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
}
if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
/*
* If the file is marked as needing reopen, but this was the only
if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
/*
* If the file is marked as needing reopen, but this was the only
}
error = nfs_close(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
if (error) {
NP(np, "nfs_vnop_inactive: need close error: %d", error);
nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
}
}
error = nfs_close(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
if (error) {
NP(np, "nfs_vnop_inactive: need close error: %d", error);
nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
}
if (!force && (nofp->nof_access || nofp->nof_deny ||
nofp->nof_mmap_access || nofp->nof_mmap_deny ||
nofp->nof_r || nofp->nof_w || nofp->nof_rw ||
if (!force && (nofp->nof_access || nofp->nof_deny ||
nofp->nof_mmap_access || nofp->nof_mmap_deny ||
nofp->nof_r || nofp->nof_w || nofp->nof_rw ||
nofp->nof_d_r_dw || nofp->nof_d_w_dw || nofp->nof_d_rw_dw ||
nofp->nof_d_r_drw || nofp->nof_d_w_drw || nofp->nof_d_rw_drw)) {
NP(np, "nfs_vnop_inactive: non-zero access: %d %d %d %d # %u.%u %u.%u %u.%u dw %u.%u %u.%u %u.%u drw %u.%u %u.%u %u.%u",
nofp->nof_d_r_dw || nofp->nof_d_w_dw || nofp->nof_d_rw_dw ||
nofp->nof_d_r_drw || nofp->nof_d_w_drw || nofp->nof_d_rw_drw)) {
NP(np, "nfs_vnop_inactive: non-zero access: %d %d %d %d # %u.%u %u.%u %u.%u dw %u.%u %u.%u %u.%u drw %u.%u %u.%u %u.%u",
- nofp->nof_access, nofp->nof_deny,
- nofp->nof_mmap_access, nofp->nof_mmap_deny,
- nofp->nof_r, nofp->nof_d_r,
- nofp->nof_w, nofp->nof_d_w,
- nofp->nof_rw, nofp->nof_d_rw,
- nofp->nof_r_dw, nofp->nof_d_r_dw,
- nofp->nof_w_dw, nofp->nof_d_w_dw,
- nofp->nof_rw_dw, nofp->nof_d_rw_dw,
- nofp->nof_r_drw, nofp->nof_d_r_drw,
- nofp->nof_w_drw, nofp->nof_d_w_drw,
- nofp->nof_rw_drw, nofp->nof_d_rw_drw);
+ nofp->nof_access, nofp->nof_deny,
+ nofp->nof_mmap_access, nofp->nof_mmap_deny,
+ nofp->nof_r, nofp->nof_d_r,
+ nofp->nof_w, nofp->nof_d_w,
+ nofp->nof_rw, nofp->nof_d_rw,
+ nofp->nof_r_dw, nofp->nof_d_r_dw,
+ nofp->nof_w_dw, nofp->nof_d_w_dw,
+ nofp->nof_rw_dw, nofp->nof_d_rw_dw,
+ nofp->nof_r_drw, nofp->nof_d_r_drw,
+ nofp->nof_w_drw, nofp->nof_d_w_drw,
+ nofp->nof_rw_drw, nofp->nof_d_rw_drw);
np->n_flag &= (NMODIFIED);
nfs_node_unlock(np);
FSDBG_BOT(264, vp, np, np->n_flag, 0);
np->n_flag &= (NMODIFIED);
nfs_node_unlock(np);
FSDBG_BOT(264, vp, np, np->n_flag, 0);
if (unhash && vnode_isinuse(vp, 0)) {
/* vnode now inuse after silly remove? */
if (unhash && vnode_isinuse(vp, 0)) {
/* vnode now inuse after silly remove? */
vnode_rele(NFSTOV(nsp->nsr_dnp));
FREE_ZONE(nsp, sizeof(*nsp), M_NFSREQ);
FSDBG_BOT(264, vp, np, np->n_flag, 0);
vnode_rele(NFSTOV(nsp->nsr_dnp));
FREE_ZONE(nsp, sizeof(*nsp), M_NFSREQ);
FSDBG_BOT(264, vp, np, np->n_flag, 0);
FSDBG_TOP(265, vp, np, np->n_flag, 0);
force = (!mp || vfs_isforce(mp) || nfs_mount_gone(nmp));
FSDBG_TOP(265, vp, np, np->n_flag, 0);
force = (!mp || vfs_isforce(mp) || nfs_mount_gone(nmp));
if (nmp && (nmp->nm_vers >= NFS_VER4)) {
/* need to drop a delegation */
if (np->n_dreturn.tqe_next != NFSNOLIST) {
if (nmp && (nmp->nm_vers >= NFS_VER4)) {
/* need to drop a delegation */
if (np->n_dreturn.tqe_next != NFSNOLIST) {
/* try to return the delegation */
np->n_openflags &= ~N_DELEG_MASK;
nfs4_delegreturn_rpc(nmp, np->n_fhp, np->n_fhsize, &np->n_dstateid,
/* try to return the delegation */
np->n_openflags &= ~N_DELEG_MASK;
nfs4_delegreturn_rpc(nmp, np->n_fhp, np->n_fhsize, &np->n_dstateid,
/* clean up file locks */
TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
if (!(nflp->nfl_flags & NFS_FILE_LOCK_DEAD) && !force) {
NP(np, "nfs_vnop_reclaim: lock 0x%llx 0x%llx 0x%x (bc %d)",
/* clean up file locks */
TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
if (!(nflp->nfl_flags & NFS_FILE_LOCK_DEAD) && !force) {
NP(np, "nfs_vnop_reclaim: lock 0x%llx 0x%llx 0x%x (bc %d)",
- nflp->nfl_start, nflp->nfl_end, nflp->nfl_flags, nflp->nfl_blockcnt);
+ nflp->nfl_start, nflp->nfl_end, nflp->nfl_flags, nflp->nfl_blockcnt);
nmp->nm_funcs->nf_unlock_rpc(np, nflp->nfl_owner, F_WRLCK, nflp->nfl_start, nflp->nfl_end, R_RECOVER,
nmp->nm_funcs->nf_unlock_rpc(np, nflp->nfl_owner, F_WRLCK, nflp->nfl_start, nflp->nfl_end, R_RECOVER,
lck_mtx_lock(&nflp->nfl_owner->nlo_lock);
TAILQ_REMOVE(&nflp->nfl_owner->nlo_locks, nflp, nfl_lolink);
lck_mtx_unlock(&nflp->nfl_owner->nlo_lock);
lck_mtx_lock(&nflp->nfl_owner->nlo_lock);
TAILQ_REMOVE(&nflp->nfl_owner->nlo_locks, nflp, nfl_lolink);
lck_mtx_unlock(&nflp->nfl_owner->nlo_lock);
}
/* clean up lock owners */
TAILQ_FOREACH_SAFE(nlop, &np->n_lock_owners, nlo_link, nextnlop) {
}
/* clean up lock owners */
TAILQ_FOREACH_SAFE(nlop, &np->n_lock_owners, nlo_link, nextnlop) {
TAILQ_REMOVE(&np->n_lock_owners, nlop, nlo_link);
nfs_lock_owner_destroy(nlop);
}
/* clean up open state */
TAILQ_REMOVE(&np->n_lock_owners, nlop, nlo_link);
nfs_lock_owner_destroy(nlop);
}
/* clean up open state */
TAILQ_FOREACH_SAFE(nofp, &np->n_opens, nof_link, nextnofp) {
TAILQ_FOREACH_SAFE(nofp, &np->n_opens, nof_link, nextnofp) {
if (!force && (nofp->nof_access || nofp->nof_deny ||
nofp->nof_mmap_access || nofp->nof_mmap_deny ||
nofp->nof_r || nofp->nof_w || nofp->nof_rw ||
if (!force && (nofp->nof_access || nofp->nof_deny ||
nofp->nof_mmap_access || nofp->nof_mmap_deny ||
nofp->nof_r || nofp->nof_w || nofp->nof_rw ||
nofp->nof_d_r_dw || nofp->nof_d_w_dw || nofp->nof_d_rw_dw ||
nofp->nof_d_r_drw || nofp->nof_d_w_drw || nofp->nof_d_rw_drw)) {
NP(np, "nfs_vnop_reclaim: non-zero access: %d %d %d %d # %u.%u %u.%u %u.%u dw %u.%u %u.%u %u.%u drw %u.%u %u.%u %u.%u",
nofp->nof_d_r_dw || nofp->nof_d_w_dw || nofp->nof_d_rw_dw ||
nofp->nof_d_r_drw || nofp->nof_d_w_drw || nofp->nof_d_rw_drw)) {
NP(np, "nfs_vnop_reclaim: non-zero access: %d %d %d %d # %u.%u %u.%u %u.%u dw %u.%u %u.%u %u.%u drw %u.%u %u.%u %u.%u",
- nofp->nof_access, nofp->nof_deny,
- nofp->nof_mmap_access, nofp->nof_mmap_deny,
- nofp->nof_r, nofp->nof_d_r,
- nofp->nof_w, nofp->nof_d_w,
- nofp->nof_rw, nofp->nof_d_rw,
- nofp->nof_r_dw, nofp->nof_d_r_dw,
- nofp->nof_w_dw, nofp->nof_d_w_dw,
- nofp->nof_rw_dw, nofp->nof_d_rw_dw,
- nofp->nof_r_drw, nofp->nof_d_r_drw,
- nofp->nof_w_drw, nofp->nof_d_w_drw,
- nofp->nof_rw_drw, nofp->nof_d_rw_drw);
+ nofp->nof_access, nofp->nof_deny,
+ nofp->nof_mmap_access, nofp->nof_mmap_deny,
+ nofp->nof_r, nofp->nof_d_r,
+ nofp->nof_w, nofp->nof_d_w,
+ nofp->nof_rw, nofp->nof_d_rw,
+ nofp->nof_r_dw, nofp->nof_d_r_dw,
+ nofp->nof_w_dw, nofp->nof_d_w_dw,
+ nofp->nof_rw_dw, nofp->nof_d_rw_dw,
+ nofp->nof_r_drw, nofp->nof_d_r_drw,
+ nofp->nof_w_drw, nofp->nof_d_w_drw,
+ nofp->nof_rw_drw, nofp->nof_d_rw_drw);
+#if CONFIG_NFS4
/* try sending a close RPC if it wasn't delegated */
if (nofp->nof_r || nofp->nof_w || nofp->nof_rw ||
nofp->nof_r_dw || nofp->nof_w_dw || nofp->nof_rw_dw ||
/* try sending a close RPC if it wasn't delegated */
if (nofp->nof_r || nofp->nof_w || nofp->nof_rw ||
nofp->nof_r_dw || nofp->nof_w_dw || nofp->nof_rw_dw ||
nfs4_close_rpc(np, nofp, NULL, nofp->nof_owner->noo_cred, R_RECOVER);
nfs4_close_rpc(np, nofp, NULL, nofp->nof_owner->noo_cred, R_RECOVER);
/* then remove this node from the monitored node list. */
lck_mtx_lock(&nmp->nm_lock);
while (np->n_mflag & NMMONSCANINPROG) {
/* then remove this node from the monitored node list. */
lck_mtx_lock(&nmp->nm_lock);
while (np->n_mflag & NMMONSCANINPROG) {
- msleep(&np->n_mflag, &nmp->nm_lock, PZERO-1, "nfswaitmonscan", &ts);
+ msleep(&np->n_mflag, &nmp->nm_lock, PZERO - 1, "nfswaitmonscan", &ts);
}
if (np->n_monlink.le_next != NFSNOLIST) {
LIST_REMOVE(np, n_monlink);
}
if (np->n_monlink.le_next != NFSNOLIST) {
LIST_REMOVE(np, n_monlink);
lck_mtx_unlock(nfs_buf_mutex);
nfs_vinvalbuf(vp, V_IGNORE_WRITEERR, ap->a_context, 0);
lck_mtx_lock(nfs_node_hash_mutex);
if ((vnode_vtype(vp) != VDIR) && np->n_sillyrename) {
lck_mtx_unlock(nfs_buf_mutex);
nfs_vinvalbuf(vp, V_IGNORE_WRITEERR, ap->a_context, 0);
lck_mtx_lock(nfs_node_hash_mutex);
if ((vnode_vtype(vp) != VDIR) && np->n_sillyrename) {
vnode_rele(NFSTOV(np->n_sillyrename->nsr_dnp));
FREE_ZONE(np->n_sillyrename, sizeof(*np->n_sillyrename), M_NFSREQ);
}
vnode_rele(NFSTOV(np->n_sillyrename->nsr_dnp));
FREE_ZONE(np->n_sillyrename, sizeof(*np->n_sillyrename), M_NFSREQ);
}
FSDBG_BOT(265, vp, np, np->n_flag, 0xd1ed1e);
FREE_ZONE(np, sizeof(struct nfsnode), M_NFSNODE);
FSDBG_BOT(265, vp, np, np->n_flag, 0xd1ed1e);
FREE_ZONE(np, sizeof(struct nfsnode), M_NFSNODE);
if (!force && !(np->n_hflag && NHHASHED)) {
FSDBG_BOT(268, np, 0xdead, 0, 0);
lck_mtx_unlock(&np->n_lock);
if (!force && !(np->n_hflag && NHHASHED)) {
FSDBG_BOT(268, np, 0xdead, 0, 0);
lck_mtx_unlock(&np->n_lock);
}
FSDBG_BOT(268, np, force, 0, 0);
}
FSDBG_BOT(268, np, force, 0, 0);
while (ISSET(np->n_flag, NBUSY)) {
SET(np->n_flag, NBUSYWANT);
while (ISSET(np->n_flag, NBUSY)) {
SET(np->n_flag, NBUSYWANT);
- msleep(np, &np->n_lock, PZERO-1, "nfsbusywant", &ts);
- if ((error = nfs_sigintr(NFSTONMP(np), NULL, thd, 0)))
+ msleep(np, &np->n_lock, PZERO - 1, "nfsbusywant", &ts);
+ if ((error = nfs_sigintr(NFSTONMP(np), NULL, thd, 0))) {
- if ((error = nfs_node_set_busy(first, thd)))
- return (error);
- if (np1 == np2)
- return (error);
- if ((error = nfs_node_set_busy(second, thd)))
+ if ((error = nfs_node_set_busy(first, thd))) {
+ return error;
+ }
+ if (np1 == np2) {
+ return error;
+ }
+ if ((error = nfs_node_set_busy(second, thd))) {
}
/*
 * Clear the busy state on a pair of nodes previously busied together
 * (the counterpart of nfs_node_set_busy2).
 *
 * The two arguments may refer to the same node; in that case the busy
 * state was only taken once and must only be dropped once.  The original
 * body ignored np2 entirely, leaking its busy flag and leaving any
 * thread sleeping in nfs_node_set_busy() on that node stuck forever.
 */
void
nfs_node_clear_busy2(nfsnode_t np1, nfsnode_t np2)
{
	nfs_node_clear_busy(np1);
	if (np1 != np2) {
		/* distinct second node: drop its busy state too */
		nfs_node_clear_busy(np2);
	}
}
void
nfs_node_clear_busy2(nfsnode_t np1, nfsnode_t np2)
{
nfs_node_clear_busy(np1);
nb[0] = (np3 > np4) ? np3 : np4;
nb[1] = (np3 > np4) ? np4 : np3;
for (a = b = i = lcnt = 0; i < 4; i++) {
nb[0] = (np3 > np4) ? np3 : np4;
nb[1] = (np3 > np4) ? np4 : np3;
for (a = b = i = lcnt = 0; i < 4; i++) {
nfs_node_sort4(np1, np2, np3, np4, list, &lcnt);
/* Now we can lock using list[0 - lcnt-1] */
nfs_node_sort4(np1, np2, np3, np4, list, &lcnt);
/* Now we can lock using list[0 - lcnt-1] */
if ((error = nfs_node_set_busy(list[i], thd))) {
/* Drop any locks we acquired. */
if ((error = nfs_node_set_busy(list[i], thd))) {
/* Drop any locks we acquired. */
int lcnt;
nfs_node_sort4(np1, np2, np3, np4, list, &lcnt);
int lcnt;
nfs_node_sort4(np1, np2, np3, np4, list, &lcnt);
{
FSDBG_TOP(270, np, locktype, np->n_datalockowner, 0);
if (locktype == NFS_DATA_LOCK_SHARED) {
{
FSDBG_TOP(270, np, locktype, np->n_datalockowner, 0);
if (locktype == NFS_DATA_LOCK_SHARED) {
lck_rw_lock_shared(&np->n_datalock);
} else {
lck_rw_lock_exclusive(&np->n_datalock);
np->n_datalockowner = current_thread();
lck_rw_lock_shared(&np->n_datalock);
} else {
lck_rw_lock_exclusive(&np->n_datalock);
np->n_datalockowner = current_thread();
}
FSDBG_BOT(270, np, locktype, np->n_datalockowner, 0);
}
}
FSDBG_BOT(270, np, locktype, np->n_datalockowner, 0);
}
{
int mine = (np->n_datalockowner == current_thread());
FSDBG_TOP(271, np, np->n_datalockowner, current_thread(), 0);
{
int mine = (np->n_datalockowner == current_thread());
FSDBG_TOP(271, np, np->n_datalockowner, current_thread(), 0);
FSDBG_BOT(271, np, np->n_datalockowner, current_thread(), 0);
}
FSDBG_BOT(271, np, np->n_datalockowner, current_thread(), 0);
}
}
error = nfs_node_lock(np);
if (error || !ISSET(np->n_flag, NUPDATESIZE)) {
}
error = nfs_node_lock(np);
if (error || !ISSET(np->n_flag, NUPDATESIZE)) {
FSDBG_BOT(272, np, np->n_flag, np->n_size, np->n_newsize);
return;
}
FSDBG_BOT(272, np, np->n_flag, np->n_size, np->n_newsize);
return;
}
NFS_DBG(NFS_FAC_SOCK, 7, "mount_is_dirty for %s took %lld mics for %ld slots and %ld nodes return %d\n",
NFS_DBG(NFS_FAC_SOCK, 7, "mount_is_dirty for %s took %lld mics for %ld slots and %ld nodes return %d\n",
- vfs_statfs(mp)->f_mntfromname, (uint64_t)diff.tv_sec * 1000000LL + diff.tv_usec, i, ncnt, (i <= nfsnodehash));
+ vfs_statfs(mp)->f_mntfromname, (uint64_t)diff.tv_sec * 1000000LL + diff.tv_usec, i, ncnt, (i <= nfsnodehash));