TAILQ_HEAD(nfsrv_reqcache_lru, nfsrvcache) nfsrv_reqcache_lruhead;
u_long nfsrv_reqcache_hash;
-lck_grp_t *nfsrv_reqcache_lck_grp;
-lck_mtx_t *nfsrv_reqcache_mutex;
+static LCK_GRP_DECLARE(nfsrv_reqcache_lck_grp, "nfsrv_reqcache");
+LCK_MTX_DECLARE(nfsrv_reqcache_mutex, &nfsrv_reqcache_lck_grp);
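/*
 * The change above replaces dynamically allocated lock objects with
 * statically declared ones. Before, the group and mutex were presumably
 * created at init time along these lines (illustrative sketch only, not
 * code from this file):
 *
 *	nfsrv_reqcache_lck_grp = lck_grp_alloc_init("nfsrv_reqcache", LCK_GRP_ATTR_NULL);
 *	nfsrv_reqcache_mutex = lck_mtx_alloc_init(nfsrv_reqcache_lck_grp, LCK_ATTR_NULL);
 *
 * With LCK_GRP_DECLARE/LCK_MTX_DECLARE the objects live in static storage
 * and need no runtime allocation, so every user below takes the address of
 * the mutex (&nfsrv_reqcache_mutex) instead of passing a pointer variable.
 */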
/*
* Static array that defines which nfs rpc's are nonidempotent
return;
}
- lck_mtx_lock(nfsrv_reqcache_mutex);
+ lck_mtx_lock(&nfsrv_reqcache_mutex);
/* init nfs server request cache hash table */
nfsrv_reqcache_hashtbl = hashinit(nfsrv_reqcache_size, M_NFSD, &nfsrv_reqcache_hash);
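/*
 * hashinit() sizes the table to a power of two and stores the matching
 * mask in nfsrv_reqcache_hash, which NFSRCHASH() uses to pick a bucket
 * from the request xid.
 */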
TAILQ_INIT(&nfsrv_reqcache_lruhead);
- lck_mtx_unlock(nfsrv_reqcache_mutex);
+ lck_mtx_unlock(&nfsrv_reqcache_mutex);
}
/*
if (!nd->nd_nam2) {
return RC_DOIT;
}
- lck_mtx_lock(nfsrv_reqcache_mutex);
+ lck_mtx_lock(&nfsrv_reqcache_mutex);
loop:
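/*
 * Search the hash chain for a cached entry matching this request's xid,
 * procedure, and client address; if a matching entry is currently locked,
 * mark it wanted, sleep on it (releasing the cache mutex), and rescan.
 */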
for (rp = NFSRCHASH(nd->nd_retxid)->lh_first; rp != 0;
rp = rp->rc_hash.le_next) {
if (nd->nd_retxid == rp->rc_xid && nd->nd_procnum == rp->rc_proc &&
    netaddr_match(rp->rc_family, &rp->rc_haddr, nd->nd_nam)) {
if ((rp->rc_flag & RC_LOCKED) != 0) {
rp->rc_flag |= RC_WANTED;
- msleep(rp, nfsrv_reqcache_mutex, PZERO - 1, "nfsrc", NULL);
+ msleep(rp, &nfsrv_reqcache_mutex, PZERO - 1, "nfsrc", NULL);
goto loop;
}
rp->rc_flag |= RC_LOCKED;
rp->rc_flag &= ~RC_WANTED;
wakeup(rp);
}
- lck_mtx_unlock(nfsrv_reqcache_mutex);
+ lck_mtx_unlock(&nfsrv_reqcache_mutex);
return ret;
}
}
if (!rp) {
/* no entry to reuse? */
/* OK, we just won't be able to cache this request */
- lck_mtx_unlock(nfsrv_reqcache_mutex);
+ lck_mtx_unlock(&nfsrv_reqcache_mutex);
return RC_DOIT;
}
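/*
 * Reuse the least recently used entry. If it is locked, mark it wanted
 * and sleep until the holder releases it; the cache mutex is dropped
 * during msleep(), so re-fetch the LRU head afterwards because it may
 * have changed.
 */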
while ((rp->rc_flag & RC_LOCKED) != 0) {
rp->rc_flag |= RC_WANTED;
- msleep(rp, nfsrv_reqcache_mutex, PZERO - 1, "nfsrc", NULL);
+ msleep(rp, &nfsrv_reqcache_mutex, PZERO - 1, "nfsrc", NULL);
rp = nfsrv_reqcache_lruhead.tqh_first;
}
rp->rc_flag |= RC_LOCKED;
rp->rc_flag &= ~RC_WANTED;
wakeup(rp);
}
- lck_mtx_unlock(nfsrv_reqcache_mutex);
+ lck_mtx_unlock(&nfsrv_reqcache_mutex);
return RC_DOIT;
}
if (!nd->nd_nam2) {
return;
}
- lck_mtx_lock(nfsrv_reqcache_mutex);
+ lck_mtx_lock(&nfsrv_reqcache_mutex);
loop:
for (rp = NFSRCHASH(nd->nd_retxid)->lh_first; rp != 0;
rp = rp->rc_hash.le_next) {
if (nd->nd_retxid == rp->rc_xid && nd->nd_procnum == rp->rc_proc &&
    netaddr_match(rp->rc_family, &rp->rc_haddr, nd->nd_nam)) {
if ((rp->rc_flag & RC_LOCKED) != 0) {
rp->rc_flag |= RC_WANTED;
- msleep(rp, nfsrv_reqcache_mutex, PZERO - 1, "nfsrc", NULL);
+ msleep(rp, &nfsrv_reqcache_mutex, PZERO - 1, "nfsrc", NULL);
goto loop;
}
rp->rc_flag |= RC_LOCKED;
rp->rc_flag &= ~RC_WANTED;
wakeup(rp);
}
- lck_mtx_unlock(nfsrv_reqcache_mutex);
+ lck_mtx_unlock(&nfsrv_reqcache_mutex);
return;
}
}
- lck_mtx_unlock(nfsrv_reqcache_mutex);
+ lck_mtx_unlock(&nfsrv_reqcache_mutex);
}
/*
{
struct nfsrvcache *rp, *nextrp;
- lck_mtx_lock(nfsrv_reqcache_mutex);
+ lck_mtx_lock(&nfsrv_reqcache_mutex);
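/*
 * Tear down the cache: walk the LRU list and unhash each entry, then
 * release the hash table itself.
 */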
for (rp = nfsrv_reqcache_lruhead.tqh_first; rp != 0; rp = nextrp) {
nextrp = rp->rc_lru.tqe_next;
LIST_REMOVE(rp, rc_hash);
}
nfsrv_reqcache_count = 0;
FREE(nfsrv_reqcache_hashtbl, M_TEMP);
- lck_mtx_unlock(nfsrv_reqcache_mutex);
+ lck_mtx_unlock(&nfsrv_reqcache_mutex);
}
#endif /* CONFIG_NFS_SERVER */