/*
- * Copyright (c) 2002-2014 Apple Inc. All rights reserved.
+ * Copyright (c) 2002-2016 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
- *
+ *
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
+ *
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*-
* from BSDI nfs_lock.c,v 2.4 1998/12/14 23:49:56 jch Exp
*/
+#include <nfs/nfs_conf.h>
+#if CONFIG_NFS_CLIENT
+
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
-#include <sys/kernel.h> /* for hz */
+#include <sys/kernel.h> /* for hz */
#include <sys/file_internal.h>
#include <sys/malloc.h>
-#include <sys/lockf.h> /* for hz */ /* Must come after sys/malloc.h */
+#include <sys/lockf.h> /* for hz */ /* Must come after sys/malloc.h */
#include <sys/kpi_mbuf.h>
#include <sys/mount_internal.h>
-#include <sys/proc_internal.h> /* for p_start */
+#include <sys/proc_internal.h> /* for p_start */
#include <sys/kauth.h>
#include <sys/resourcevar.h>
#include <sys/socket.h>
* kept sorted by transaction ID (xid).
*/
static uint64_t nfs_lockxid = 0;
-static LOCKD_MSG_QUEUE nfs_pendlockq;
+static LOCKD_MSG_QUEUE nfs_pendlockq = TAILQ_HEAD_INITIALIZER(nfs_pendlockq);
/* list of mounts that are (potentially) making lockd requests */
-TAILQ_HEAD(nfs_lockd_mount_list,nfsmount) nfs_lockd_mount_list;
+TAILQ_HEAD(nfs_lockd_mount_list, nfsmount) nfs_lockd_mount_list =
+ TAILQ_HEAD_INITIALIZER(nfs_lockd_mount_list);
-static lck_grp_t *nfs_lock_lck_grp;
-static lck_mtx_t *nfs_lock_mutex;
+static LCK_GRP_DECLARE(nfs_lock_lck_grp, "nfs_lock");
+static LCK_MTX_DECLARE(nfs_lock_mutex, &nfs_lock_lck_grp);
void nfs_lockdmsg_enqueue(LOCKD_MSG_REQUEST *);
void nfs_lockdmsg_dequeue(LOCKD_MSG_REQUEST *);
uint64_t nfs_lockxid_get(void);
int nfs_lockd_send_request(LOCKD_MSG *, int);
-/*
- * initialize global nfs lock state
- */
-void
-nfs_lockinit(void)
-{
- TAILQ_INIT(&nfs_pendlockq);
- TAILQ_INIT(&nfs_lockd_mount_list);
-
- nfs_lock_lck_grp = lck_grp_alloc_init("nfs_lock", LCK_GRP_ATTR_NULL);
- nfs_lock_mutex = lck_mtx_alloc_init(nfs_lock_lck_grp, LCK_ATTR_NULL);
-}
-
/*
* Register a mount as (potentially) making lockd requests.
*/
void
nfs_lockd_mount_register(struct nfsmount *nmp)
{
- lck_mtx_lock(nfs_lock_mutex);
+ lck_mtx_lock(&nfs_lock_mutex);
TAILQ_INSERT_HEAD(&nfs_lockd_mount_list, nmp, nm_ldlink);
nfs_lockd_mounts++;
- lck_mtx_unlock(nfs_lock_mutex);
+ lck_mtx_unlock(&nfs_lock_mutex);
}
/*
mach_port_t lockd_port = IPC_PORT_NULL;
kern_return_t kr;
- lck_mtx_lock(nfs_lock_mutex);
+ lck_mtx_lock(&nfs_lock_mutex);
if (nmp->nm_ldlink.tqe_next == NFSNOLIST) {
- lck_mtx_unlock(nfs_lock_mutex);
+ lck_mtx_unlock(&nfs_lock_mutex);
return;
}
-
+
TAILQ_REMOVE(&nfs_lockd_mount_list, nmp, nm_ldlink);
nmp->nm_ldlink.tqe_next = NFSNOLIST;
/* send a shutdown request if there are no more lockd mounts */
send_shutdown = ((nfs_lockd_mounts == 0) && nfs_lockd_request_sent);
- if (send_shutdown)
+ if (send_shutdown) {
nfs_lockd_request_sent = 0;
+ }
- lck_mtx_unlock(nfs_lock_mutex);
+ lck_mtx_unlock(&nfs_lock_mutex);
- if (!send_shutdown)
+ if (!send_shutdown) {
return;
+ }
/*
* Let lockd know that it is no longer needed for any NFS mounts
kr = host_get_lockd_port(host_priv_self(), &lockd_port);
if ((kr != KERN_SUCCESS) || !IPC_PORT_VALID(lockd_port)) {
printf("nfs_lockd_mount_change: shutdown couldn't get port, kr %d, port %s\n",
- kr, (lockd_port == IPC_PORT_NULL) ? "NULL" :
- (lockd_port == IPC_PORT_DEAD) ? "DEAD" : "VALID");
+ kr, (lockd_port == IPC_PORT_NULL) ? "NULL" :
+ (lockd_port == IPC_PORT_DEAD) ? "DEAD" : "VALID");
return;
}
kr = lockd_shutdown(lockd_port);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
printf("nfs_lockd_mount_change: shutdown %d\n", kr);
+ }
ipc_port_release_send(lockd_port);
}
LOCKD_MSG_REQUEST *mr;
TAILQ_FOREACH(mr, &nfs_pendlockq, lmr_next) {
- if (mr->lmr_msg.lm_xid == lockxid)
+ if (mr->lmr_msg.lm_xid == lockxid) {
return mr;
- if (mr->lmr_msg.lm_xid > lockxid)
+ }
+ if (mr->lmr_msg.lm_xid > lockxid) {
return NULL;
+ }
}
return mr;
}
int
nfs_lockdmsg_compare_to_answer(LOCKD_MSG_REQUEST *msgreq, struct lockd_ans *ansp)
{
- if (!(ansp->la_flags & LOCKD_ANS_LOCK_INFO))
+ if (!(ansp->la_flags & LOCKD_ANS_LOCK_INFO)) {
return 1;
- if (msgreq->lmr_msg.lm_fl.l_pid != ansp->la_pid)
+ }
+ if (msgreq->lmr_msg.lm_fl.l_pid != ansp->la_pid) {
return 1;
- if (msgreq->lmr_msg.lm_fl.l_start != ansp->la_start)
+ }
+ if (msgreq->lmr_msg.lm_fl.l_start != ansp->la_start) {
return 1;
- if (msgreq->lmr_msg.lm_fl.l_len != ansp->la_len)
+ }
+ if (msgreq->lmr_msg.lm_fl.l_len != ansp->la_len) {
return 1;
- if (msgreq->lmr_msg.lm_fh_len != ansp->la_fh_len)
+ }
+ if (msgreq->lmr_msg.lm_fh_len != ansp->la_fh_len) {
return 1;
- if (bcmp(msgreq->lmr_msg.lm_fh, ansp->la_fh, ansp->la_fh_len))
+ }
+ if (bcmp(msgreq->lmr_msg.lm_fh, ansp->la_fh, ansp->la_fh_len)) {
return 1;
+ }
return 0;
}
{
LOCKD_MSG_REQUEST *mr;
- if (!(ansp->la_flags & LOCKD_ANS_LOCK_INFO))
+ if (!(ansp->la_flags & LOCKD_ANS_LOCK_INFO)) {
return NULL;
+ }
TAILQ_FOREACH(mr, &nfs_pendlockq, lmr_next) {
- if (!nfs_lockdmsg_compare_to_answer(mr, ansp))
+ if (!nfs_lockdmsg_compare_to_answer(mr, ansp)) {
break;
+ }
}
return mr;
}
/* make sure we get a unique xid */
do {
/* Skip zero xid if it should ever happen. */
- if (++nfs_lockxid == 0)
+ if (++nfs_lockxid == 0) {
nfs_lockxid++;
+ }
if (!(mr = TAILQ_LAST(&nfs_pendlockq, nfs_lock_msg_queue)) ||
- (mr->lmr_msg.lm_xid < nfs_lockxid)) {
+ (mr->lmr_msg.lm_xid < nfs_lockxid)) {
/* fast path: empty queue or new largest xid */
break;
}
mach_port_t lockd_port = IPC_PORT_NULL;
kr = host_get_lockd_port(host_priv_self(), &lockd_port);
- if (kr != KERN_SUCCESS || !IPC_PORT_VALID(lockd_port))
- return (ENOTSUP);
+ if (kr != KERN_SUCCESS || !IPC_PORT_VALID(lockd_port)) {
+ return ENOTSUP;
+ }
do {
/* In the kernel all mach messaging is interruptable */
(uint32_t *)&msg->lm_cred,
msg->lm_fh_len,
msg->lm_fh);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
printf("lockd_request received %d!\n", kr);
+ }
} while (!interruptable && kr == MACH_SEND_INTERRUPTED);
} while (kr == MIG_SERVER_DIED && retries++ < MACH_MAX_TRIES);
ipc_port_release_send(lockd_port);
switch (kr) {
- case MACH_SEND_INTERRUPTED:
- return (EINTR);
+ case MACH_SEND_INTERRUPTED:
+ return EINTR;
default:
/*
* Other MACH or MIG errors we will retry. Eventually
- * we will call nfs_down and allow the user to disable
+ * we will call nfs_down and allow the user to disable
* locking.
*/
- return (EAGAIN);
+ return EAGAIN;
}
- return (kr);
}
-
/*
* NFS advisory byte-level locks (client)
int interruptable, slpflag;
struct nfsmount *nmp;
struct timeval now;
- int timeo, starttime, endtime, lastmsg, wentdown = 0;
+ int timeo, wentdown = 0;
+ long starttime, endtime, lastmsg;
struct timespec ts;
struct sockaddr *saddr;
nmp = NFSTONMP(np);
- if (!nmp || !nmp->nm_saddr)
- return (ENXIO);
+ if (!nmp || !nmp->nm_saddr) {
+ return ENXIO;
+ }
lck_mtx_lock(&nmp->nm_lock);
saddr = nmp->nm_saddr;
bcopy(saddr, &msg->lm_addr, min(sizeof msg->lm_addr, saddr->sa_len));
- if (nmp->nm_vers == NFS_VER3)
+ if (nmp->nm_vers == NFS_VER3) {
msg->lm_flags |= LOCKD_MSG_NFSV3;
+ }
- if (nmp->nm_sotype != SOCK_DGRAM)
+ if (nmp->nm_sotype != SOCK_DGRAM) {
msg->lm_flags |= LOCKD_MSG_TCP;
+ }
microuptime(&now);
starttime = now.tv_sec;
interruptable = NMFLAG(nmp, INTR);
lck_mtx_unlock(&nmp->nm_lock);
- lck_mtx_lock(nfs_lock_mutex);
+ lck_mtx_lock(&nfs_lock_mutex);
/* allocate unique xid */
msg->lm_xid = nfs_lockxid_get();
nfs_lockd_request_sent = 1;
/* need to drop nfs_lock_mutex while calling nfs_lockd_send_request() */
- lck_mtx_unlock(nfs_lock_mutex);
+ lck_mtx_unlock(&nfs_lock_mutex);
error = nfs_lockd_send_request(msg, interruptable);
- lck_mtx_lock(nfs_lock_mutex);
- if (error && error != EAGAIN)
+ lck_mtx_lock(&nfs_lock_mutex);
+ if (error && error != EAGAIN) {
break;
+ }
/*
* Always wait for an answer. Not waiting for unlocks could
while (now.tv_sec < endtime) {
error = error2 = 0;
if (!msgreq->lmr_answered) {
- error = msleep(msgreq, nfs_lock_mutex, slpflag | PUSER, "lockd", &ts);
+ error = msleep(msgreq, &nfs_lock_mutex, slpflag | PUSER, "lockd", &ts);
slpflag = 0;
}
if (msgreq->lmr_answered) {
}
break;
}
- if (error != EWOULDBLOCK)
+ if (error != EWOULDBLOCK) {
break;
+ }
/* check that we still have our mount... */
/* ...and that we still support locks */
/* ...and that there isn't a recovery pending */
nmp = NFSTONMP(np);
if ((error2 = nfs_sigintr(nmp, NULL, NULL, 0))) {
error = error2;
- if (type == F_UNLCK)
+ if (type == F_UNLCK) {
printf("nfs3_lockd_request: aborting unlock request, error %d\n", error);
+ }
break;
}
lck_mtx_lock(&nmp->nm_lock);
if ((error2 = nfs_sigintr(nmp, NULL, NULL, 0))) {
error = error2;
if (error2 != EINTR) {
- if (type == F_UNLCK)
+ if (type == F_UNLCK) {
printf("nfs3_lockd_request: aborting unlock request, error %d\n", error);
+ }
break;
}
}
/* ...and that we still support locks */
lck_mtx_lock(&nmp->nm_lock);
if (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED) {
- if (error == EWOULDBLOCK)
+ if (error == EWOULDBLOCK) {
error = ENOTSUP;
+ }
lck_mtx_unlock(&nmp->nm_lock);
break;
}
((lastmsg + nmp->nm_tprintf_delay) < now.tv_sec)) {
lck_mtx_unlock(&nmp->nm_lock);
lastmsg = now.tv_sec;
- nfs_down(nmp, thd, 0, NFSSTA_LOCKTIMEO, "lockd not responding", 0);
+ nfs_down(nmp, thd, 0, NFSSTA_LOCKTIMEO, "lockd not responding", 1);
wentdown = 1;
- } else
+ } else {
lck_mtx_unlock(&nmp->nm_lock);
+ }
if (msgreq->lmr_errno == EINPROGRESS) {
/*
/*
* We timed out, so we will resend the request.
*/
- if (!(flags & R_RECOVER))
+ if (!(flags & R_RECOVER)) {
timeo *= 2;
- if (timeo > 30)
+ }
+ if (timeo > 30) {
timeo = 30;
+ }
/* resend request */
continue;
}
	/* we got a response, so the server's lockd is OK */
nfs_up(NFSTONMP(np), thd, NFSSTA_LOCKTIMEO,
- wentdown ? "lockd alive again" : NULL);
+ wentdown ? "lockd alive again" : NULL);
wentdown = 0;
if (msgreq->lmr_answered && (msg->lm_flags & LOCKD_MSG_DENIED_GRACE)) {
* higher levels can resend the request.
*/
msg->lm_flags &= ~LOCKD_MSG_CANCEL;
- nfs_lockdmsg_dequeue(msgreq);
error = NFSERR_DENIED;
+ /* Will dequeue msgreq after the following break at the end of this routine */
break;
}
* for this mount.
*/
nfs_lockdmsg_dequeue(msgreq);
- lck_mtx_unlock(nfs_lock_mutex);
+ lck_mtx_unlock(&nfs_lock_mutex);
lck_mtx_lock(&nmp->nm_lock);
if (nmp->nm_lockmode == NFS_LOCK_MODE_ENABLED) {
nmp->nm_lockmode = NFS_LOCK_MODE_DISABLED;
nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
lck_mtx_unlock(&nmp->nm_lock);
printf("lockd returned ENOTSUP, disabling locks for nfs server: %s\n",
- vfs_statfs(nmp->nm_mountp)->f_mntfromname);
- return (error);
+ vfs_statfs(nmp->nm_mountp)->f_mntfromname);
+ return error;
}
if (!error) {
/* record that NFS file locking has worked on this mount */
if (nmp) {
lck_mtx_lock(&nmp->nm_lock);
- if (!(nmp->nm_state & NFSSTA_LOCKSWORK))
+ if (!(nmp->nm_state & NFSSTA_LOCKSWORK)) {
nmp->nm_state |= NFSSTA_LOCKSWORK;
+ }
lck_mtx_unlock(&nmp->nm_lock);
}
}
nfs_lockdmsg_dequeue(msgreq);
- lck_mtx_unlock(nfs_lock_mutex);
+ lck_mtx_unlock(&nfs_lock_mutex);
- return (error);
+ return error;
}
/*
LOCKD_MSG *msg;
nmp = NFSTONMP(np);
- if (nfs_mount_gone(nmp))
- return (ENXIO);
+ if (nfs_mount_gone(nmp)) {
+ return ENXIO;
+ }
if (!nlop->nlo_open_owner) {
nfs_open_owner_ref(nofp->nof_owner);
nlop->nlo_open_owner = nofp->nof_owner;
}
- if ((error = nfs_lock_owner_set_busy(nlop, thd)))
- return (error);
+ if ((error = nfs_lock_owner_set_busy(nlop, thd))) {
+ return error;
+ }
/* set up lock message request structure */
bzero(&msgreq, sizeof(msgreq));
msg = &msgreq.lmr_msg;
msg->lm_version = LOCKD_MSG_VERSION;
- if ((nflp->nfl_flags & NFS_FILE_LOCK_WAIT) && !reclaim)
+ if ((nflp->nfl_flags & NFS_FILE_LOCK_WAIT) && !reclaim) {
msg->lm_flags |= LOCKD_MSG_BLOCK;
- if (reclaim)
+ }
+ if (reclaim) {
msg->lm_flags |= LOCKD_MSG_RECLAIM;
+ }
msg->lm_fh_len = (nmp->nm_vers == NFS_VER2) ? NFSX_V2FH : np->n_fhsize;
bcopy(np->n_fhp, msg->lm_fh, msg->lm_fh_len);
cru2x(cred, &msg->lm_cred);
error = nfs3_lockd_request(np, 0, &msgreq, flags, thd);
nfs_lock_owner_clear_busy(nlop);
- return (error);
+ return error;
}
/*
LOCKD_MSG *msg;
nmp = NFSTONMP(np);
- if (!nmp)
- return (ENXIO);
+ if (!nmp) {
+ return ENXIO;
+ }
/* set up lock message request structure */
bzero(&msgreq, sizeof(msgreq));
msg->lm_fl.l_type = F_UNLCK;
msg->lm_fl.l_pid = nlop->nlo_pid;
- return (nfs3_lockd_request(np, F_UNLCK, &msgreq, flags, thd));
+ return nfs3_lockd_request(np, F_UNLCK, &msgreq, flags, thd);
}
/*
LOCKD_MSG *msg;
nmp = NFSTONMP(np);
- if (nfs_mount_gone(nmp))
- return (ENXIO);
+ if (nfs_mount_gone(nmp)) {
+ return ENXIO;
+ }
/* set up lock message request structure */
bzero(&msgreq, sizeof(msgreq));
fl->l_start = msg->lm_fl.l_start;
fl->l_len = msg->lm_fl.l_len;
fl->l_whence = SEEK_SET;
- } else
+ } else {
fl->l_type = F_UNLCK;
+ }
}
- return (error);
+ return error;
}
/*
/* Let root make this call. */
error = proc_suser(p);
- if (error)
- return (error);
+ if (error) {
+ return error;
+ }
/* the version should match, or we're out of sync */
- if (ansp->la_version != LOCKD_ANS_VERSION)
- return (EINVAL);
+ if (ansp->la_version != LOCKD_ANS_VERSION) {
+ return EINVAL;
+ }
- lck_mtx_lock(nfs_lock_mutex);
+ lck_mtx_lock(&nfs_lock_mutex);
/* try to find the lockd message by transaction id (cookie) */
msgreq = nfs_lockdmsg_find_by_xid(ansp->la_xid);
* If no message was found or it doesn't match the answer,
* we look for the lockd message by the answer's lock info.
*/
- if (!msgreq || nfs_lockdmsg_compare_to_answer(msgreq, ansp))
+ if (!msgreq || nfs_lockdmsg_compare_to_answer(msgreq, ansp)) {
msgreq = nfs_lockdmsg_find_by_answer(ansp);
+ }
/*
* We need to make sure this request isn't being cancelled
* If it is, we don't want to accept the granted message.
*/
- if (msgreq && (msgreq->lmr_msg.lm_flags & LOCKD_MSG_CANCEL))
+ if (msgreq && (msgreq->lmr_msg.lm_flags & LOCKD_MSG_CANCEL)) {
msgreq = NULL;
+ }
}
if (!msgreq) {
- lck_mtx_unlock(nfs_lock_mutex);
- return (EPIPE);
+ lck_mtx_unlock(&nfs_lock_mutex);
+ return EPIPE;
}
msgreq->lmr_errno = ansp->la_errno;
if ((msgreq->lmr_msg.lm_flags & LOCKD_MSG_TEST) && msgreq->lmr_errno == 0) {
if (ansp->la_flags & LOCKD_ANS_LOCK_INFO) {
- if (ansp->la_flags & LOCKD_ANS_LOCK_EXCL)
+ if (ansp->la_flags & LOCKD_ANS_LOCK_EXCL) {
msgreq->lmr_msg.lm_fl.l_type = F_WRLCK;
- else
+ } else {
msgreq->lmr_msg.lm_fl.l_type = F_RDLCK;
+ }
msgreq->lmr_msg.lm_fl.l_pid = ansp->la_pid;
msgreq->lmr_msg.lm_fl.l_start = ansp->la_start;
msgreq->lmr_msg.lm_fl.l_len = ansp->la_len;
msgreq->lmr_msg.lm_fl.l_type = F_UNLCK;
}
}
- if (ansp->la_flags & LOCKD_ANS_DENIED_GRACE)
+ if (ansp->la_flags & LOCKD_ANS_DENIED_GRACE) {
msgreq->lmr_msg.lm_flags |= LOCKD_MSG_DENIED_GRACE;
+ }
msgreq->lmr_answered = 1;
- lck_mtx_unlock(nfs_lock_mutex);
+ lck_mtx_unlock(&nfs_lock_mutex);
wakeup(msgreq);
- return (0);
+ return 0;
}
/*
/* Let root make this call. */
error = proc_suser(p);
- if (error)
- return (error);
+ if (error) {
+ return error;
+ }
headsize = (char*)&ln.ln_addr[0] - (char*)&ln.ln_version;
error = copyin(argp, &ln, headsize);
- if (error)
- return (error);
- if (ln.ln_version != LOCKD_NOTIFY_VERSION)
- return (EINVAL);
- if ((ln.ln_addrcount < 1) || (ln.ln_addrcount > 128))
- return (EINVAL);
+ if (error) {
+ return error;
+ }
+ if (ln.ln_version != LOCKD_NOTIFY_VERSION) {
+ return EINVAL;
+ }
+ if ((ln.ln_addrcount < 1) || (ln.ln_addrcount > 128)) {
+ return EINVAL;
+ }
argp += headsize;
saddr = (struct sockaddr *)&ln.ln_addr[0];
- lck_mtx_lock(nfs_lock_mutex);
+ lck_mtx_lock(&nfs_lock_mutex);
- for (i=0; i < ln.ln_addrcount; i++) {
+ for (i = 0; i < ln.ln_addrcount; i++) {
error = copyin(argp, &ln.ln_addr[0], sizeof(ln.ln_addr[0]));
- if (error)
+ if (error) {
break;
+ }
argp += sizeof(ln.ln_addr[0]);
/* scan lockd mount list for match to this address */
TAILQ_FOREACH(nmp, &nfs_lockd_mount_list, nm_ldlink) {
/* check if address matches this mount's server address */
- if (!nmp->nm_saddr || nfs_sockaddr_cmp(saddr, nmp->nm_saddr))
+ if (!nmp->nm_saddr || nfs_sockaddr_cmp(saddr, nmp->nm_saddr)) {
continue;
+ }
/* We have a match! Mark it as needing recovery. */
lck_mtx_lock(&nmp->nm_lock);
nfs_need_recover(nmp, 0);
}
}
- lck_mtx_unlock(nfs_lock_mutex);
+ lck_mtx_unlock(&nfs_lock_mutex);
- return (error);
+ return error;
}
+#endif /* CONFIG_NFS_CLIENT */