/*
 * Copyright (c) 2002-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from BSDI nfs_lock.c,v 2.4 1998/12/14 23:49:56 jch Exp
 */

#include <nfs/nfs_conf.h>
#if CONFIG_NFS_CLIENT

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>         /* for hz */
#include <sys/file_internal.h>
#include <sys/malloc.h>
#include <sys/lockf.h>          /* for hz */ /* Must come after sys/malloc.h */
#include <sys/kpi_mbuf.h>
#include <sys/mount_internal.h>
#include <sys/proc_internal.h>  /* for p_start */
#include <sys/kauth.h>
#include <sys/resourcevar.h>
#include <sys/socket.h>
#include <sys/unistd.h>
#include <sys/user.h>
#include <sys/vnode_internal.h>

#include <kern/thread.h>
#include <kern/host.h>

#include <machine/limits.h>

#include <net/if.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfs_gss.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_lock.h>

#include <mach/host_priv.h>
#include <mach/mig_errors.h>
#include <mach/host_special_ports.h>
#include <lockd/lockd_mach.h>

extern void ipc_port_release_send(ipc_port_t);

/*
 * pending lock request messages are kept in this queue which is
 * kept sorted by transaction ID (xid).
 */
static uint64_t nfs_lockxid = 0;
static LOCKD_MSG_QUEUE nfs_pendlockq;

/* list of mounts that are (potentially) making lockd requests */
TAILQ_HEAD(nfs_lockd_mount_list, nfsmount) nfs_lockd_mount_list;
static int nfs_lockd_mounts = 0;

static lck_grp_t *nfs_lock_lck_grp;
static lck_mtx_t *nfs_lock_mutex;
static int nfs_lockd_request_sent = 0;

void nfs_lockdmsg_enqueue(LOCKD_MSG_REQUEST *);
void nfs_lockdmsg_dequeue(LOCKD_MSG_REQUEST *);
int nfs_lockdmsg_compare_to_answer(LOCKD_MSG_REQUEST *, struct lockd_ans *);
LOCKD_MSG_REQUEST *nfs_lockdmsg_find_by_answer(struct lockd_ans *);
LOCKD_MSG_REQUEST *nfs_lockdmsg_find_by_xid(uint64_t);
uint64_t nfs_lockxid_get(void);
int nfs_lockd_send_request(LOCKD_MSG *, int);

/*
 * initialize global nfs lock state
 */
void
nfs_lockinit(void)
{
	TAILQ_INIT(&nfs_pendlockq);
	TAILQ_INIT(&nfs_lockd_mount_list);

	nfs_lock_lck_grp = lck_grp_alloc_init("nfs_lock", LCK_GRP_ATTR_NULL);
	nfs_lock_mutex = lck_mtx_alloc_init(nfs_lock_lck_grp, LCK_ATTR_NULL);
}

/*
 * Register a mount as (potentially) making lockd requests.
 */
void
nfs_lockd_mount_register(struct nfsmount *nmp)
{
	lck_mtx_lock(nfs_lock_mutex);
	TAILQ_INSERT_HEAD(&nfs_lockd_mount_list, nmp, nm_ldlink);
	nfs_lockd_mounts++;
	lck_mtx_unlock(nfs_lock_mutex);
}

/*
 * Unregister a mount as (potentially) making lockd requests.
 *
 * When the lockd mount count drops to zero, then send a shutdown request to
 * lockd if we've sent any requests to it.
 */
void
nfs_lockd_mount_unregister(struct nfsmount *nmp)
{
	int send_shutdown;
	mach_port_t lockd_port = IPC_PORT_NULL;
	kern_return_t kr;

	lck_mtx_lock(nfs_lock_mutex);
	if (nmp->nm_ldlink.tqe_next == NFSNOLIST) {
		lck_mtx_unlock(nfs_lock_mutex);
		return;
	}

	TAILQ_REMOVE(&nfs_lockd_mount_list, nmp, nm_ldlink);
	nmp->nm_ldlink.tqe_next = NFSNOLIST;

	nfs_lockd_mounts--;

	/* send a shutdown request if there are no more lockd mounts */
	send_shutdown = ((nfs_lockd_mounts == 0) && nfs_lockd_request_sent);
	if (send_shutdown) {
		nfs_lockd_request_sent = 0;
	}

	lck_mtx_unlock(nfs_lock_mutex);

	if (!send_shutdown) {
		return;
	}

	/*
	 * Let lockd know that it is no longer needed for any NFS mounts
	 */
	kr = host_get_lockd_port(host_priv_self(), &lockd_port);
	if ((kr != KERN_SUCCESS) || !IPC_PORT_VALID(lockd_port)) {
		printf("nfs_lockd_mount_change: shutdown couldn't get port, kr %d, port %s\n",
		    kr, (lockd_port == IPC_PORT_NULL) ? "NULL" :
		    (lockd_port == IPC_PORT_DEAD) ? "DEAD" : "VALID");
		return;
	}

	kr = lockd_shutdown(lockd_port);
	if (kr != KERN_SUCCESS) {
		printf("nfs_lockd_mount_change: shutdown %d\n", kr);
	}

	ipc_port_release_send(lockd_port);
}

/*
 * insert a lock request message into the pending queue
 * (nfs_lock_mutex must be held)
 */
void
nfs_lockdmsg_enqueue(LOCKD_MSG_REQUEST *msgreq)
{
	LOCKD_MSG_REQUEST *mr;

	mr = TAILQ_LAST(&nfs_pendlockq, nfs_lock_msg_queue);
	if (!mr || (msgreq->lmr_msg.lm_xid > mr->lmr_msg.lm_xid)) {
		/* fast path: empty queue or new largest xid */
		TAILQ_INSERT_TAIL(&nfs_pendlockq, msgreq, lmr_next);
		return;
	}
	/* slow path: need to walk list to find insertion point */
	while (mr && (msgreq->lmr_msg.lm_xid < mr->lmr_msg.lm_xid)) {
		mr = TAILQ_PREV(mr, nfs_lock_msg_queue, lmr_next);
	}
	if (mr) {
		TAILQ_INSERT_AFTER(&nfs_pendlockq, mr, msgreq, lmr_next);
	} else {
		TAILQ_INSERT_HEAD(&nfs_pendlockq, msgreq, lmr_next);
	}
}
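
/*
 * Illustration (added; not from the original source): the invariant the
 * enqueue above maintains.  If requests with xids 5, 7, and 6 are enqueued
 * in that order, the tail-first walk places 6 between 5 and 7:
 *
 *	enqueue(5):  head -> [5]        (fast path: empty queue)
 *	enqueue(7):  head -> [5][7]     (fast path: new largest xid)
 *	enqueue(6):  head -> [5][6][7]  (slow path: insert after 5)
 *
 * Keeping nfs_pendlockq in ascending xid order is what lets
 * nfs_lockdmsg_find_by_xid() below give up early once it has walked past
 * the xid it's looking for.
 */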

/*
 * remove a lock request message from the pending queue
 * (nfs_lock_mutex must be held)
 */
void
nfs_lockdmsg_dequeue(LOCKD_MSG_REQUEST *msgreq)
{
	TAILQ_REMOVE(&nfs_pendlockq, msgreq, lmr_next);
}

/*
 * find a pending lock request message by xid
 *
 * We search from the head of the list assuming that the message we're
 * looking for is for an older request (because we have an answer to it).
 * This assumes that lock requests will be answered primarily in FIFO order.
 * However, this may not be the case if there are blocked requests.  We may
 * want to move blocked requests to a separate queue (but that'll complicate
 * duplicate xid checking).
 *
 * (nfs_lock_mutex must be held)
 */
LOCKD_MSG_REQUEST *
nfs_lockdmsg_find_by_xid(uint64_t lockxid)
{
	LOCKD_MSG_REQUEST *mr;

	TAILQ_FOREACH(mr, &nfs_pendlockq, lmr_next) {
		if (mr->lmr_msg.lm_xid == lockxid) {
			return mr;
		}
		if (mr->lmr_msg.lm_xid > lockxid) {
			return NULL;
		}
	}
	return mr;
}

/*
 * Because we can't depend on nlm_granted messages containing the same
 * cookie we sent with the original lock request, we need code to test
 * if an nlm_granted answer matches the lock request.  We also need code
 * that can find a lockd message based solely on the nlm_granted answer.
 */

/*
 * compare lockd message to answer
 *
 * returns 0 on equality and 1 if different
 */
int
nfs_lockdmsg_compare_to_answer(LOCKD_MSG_REQUEST *msgreq, struct lockd_ans *ansp)
{
	if (!(ansp->la_flags & LOCKD_ANS_LOCK_INFO)) {
		return 1;
	}
	if (msgreq->lmr_msg.lm_fl.l_pid != ansp->la_pid) {
		return 1;
	}
	if (msgreq->lmr_msg.lm_fl.l_start != ansp->la_start) {
		return 1;
	}
	if (msgreq->lmr_msg.lm_fl.l_len != ansp->la_len) {
		return 1;
	}
	if (msgreq->lmr_msg.lm_fh_len != ansp->la_fh_len) {
		return 1;
	}
	if (bcmp(msgreq->lmr_msg.lm_fh, ansp->la_fh, ansp->la_fh_len)) {
		return 1;
	}
	return 0;
}

/*
 * find a pending lock request message based on the lock info provided
 * in the lockd_ans/nlm_granted data.  We need this because we can't
 * depend on nlm_granted messages containing the same cookie we sent
 * with the original lock request.
 *
 * We search from the head of the list assuming that the message we're
 * looking for is for an older request (because we have an answer to it).
 * This assumes that lock requests will be answered primarily in FIFO order.
 * However, this may not be the case if there are blocked requests.  We may
 * want to move blocked requests to a separate queue (but that'll complicate
 * duplicate xid checking).
 *
 * (nfs_lock_mutex must be held)
 */
LOCKD_MSG_REQUEST *
nfs_lockdmsg_find_by_answer(struct lockd_ans *ansp)
{
	LOCKD_MSG_REQUEST *mr;

	if (!(ansp->la_flags & LOCKD_ANS_LOCK_INFO)) {
		return NULL;
	}
	TAILQ_FOREACH(mr, &nfs_pendlockq, lmr_next) {
		if (!nfs_lockdmsg_compare_to_answer(mr, ansp)) {
			break;
		}
	}
	return mr;
}

/*
 * return the next unique lock request transaction ID
 * (nfs_lock_mutex must be held)
 */
uint64_t
nfs_lockxid_get(void)
{
	LOCKD_MSG_REQUEST *mr;

	/* derive initial lock xid from system time */
	if (!nfs_lockxid) {
		/*
		 * Note: it's OK if this code inits nfs_lockxid to 0 (for example,
		 * due to a broken clock) because we immediately increment it
		 * and we guarantee to never use xid 0.  So, nfs_lockxid should only
		 * ever be 0 the first time this function is called.
		 */
		struct timeval tv;
		microtime(&tv);
		nfs_lockxid = (uint64_t)tv.tv_sec << 12;
	}

	/* make sure we get a unique xid */
	do {
		/* Skip zero xid if it should ever happen. */
		if (++nfs_lockxid == 0) {
			nfs_lockxid++;
		}
		if (!(mr = TAILQ_LAST(&nfs_pendlockq, nfs_lock_msg_queue)) ||
		    (mr->lmr_msg.lm_xid < nfs_lockxid)) {
			/* fast path: empty queue or new largest xid */
			break;
		}
		/* check if xid is already in use */
	} while (nfs_lockdmsg_find_by_xid(nfs_lockxid));

	return nfs_lockxid;
}
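
/*
 * Worked example (added for illustration): seeding nfs_lockxid with
 * tv_sec << 12 puts the time in seconds in the upper bits and leaves the
 * low 12 bits acting as a per-second counter.  If tv.tv_sec were 1000, the
 * first xid handed out would be (1000 << 12) + 1 = 4096001, and about 4096
 * xids can be issued before the counter carries into the seconds field,
 * which is harmless since only uniqueness (and never using 0) matters.
 */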

#define MACH_MAX_TRIES 3

int
nfs_lockd_send_request(LOCKD_MSG *msg, int interruptable)
{
	kern_return_t kr;
	int retries = 0;
	mach_port_t lockd_port = IPC_PORT_NULL;

	kr = host_get_lockd_port(host_priv_self(), &lockd_port);
	if (kr != KERN_SUCCESS || !IPC_PORT_VALID(lockd_port)) {
		return ENOTSUP;
	}

	do {
		/* In the kernel all mach messaging is interruptable */
		do {
			kr = lockd_request(
				lockd_port,
				msg->lm_version,
				msg->lm_flags,
				msg->lm_xid,
				msg->lm_fl.l_start,
				msg->lm_fl.l_len,
				msg->lm_fl.l_pid,
				msg->lm_fl.l_type,
				msg->lm_fl.l_whence,
				(uint32_t *)&msg->lm_addr,
				(uint32_t *)&msg->lm_cred,
				msg->lm_fh_len,
				msg->lm_fh);
			if (kr != KERN_SUCCESS) {
				printf("lockd_request received %d!\n", kr);
			}
		} while (!interruptable && kr == MACH_SEND_INTERRUPTED);
	} while (kr == MIG_SERVER_DIED && retries++ < MACH_MAX_TRIES);

	ipc_port_release_send(lockd_port);
	switch (kr) {
	case MACH_SEND_INTERRUPTED:
		return EINTR;
	default:
		/*
		 * Other MACH or MIG errors we will retry. Eventually
		 * we will call nfs_down and allow the user to disable
		 * locking.
		 */
		return EAGAIN;
	}
}
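
/*
 * Illustration (added; not part of the original source): how the nested
 * retry loops and the switch above combine.
 *
 *	kr == MACH_SEND_INTERRUPTED, !interruptable     -> resend (inner loop)
 *	kr == MIG_SERVER_DIED, retries < MACH_MAX_TRIES -> resend (outer loop)
 *	kr == MACH_SEND_INTERRUPTED, interruptable      -> EINTR to the caller
 *	anything else (including KERN_SUCCESS)          -> EAGAIN; the caller
 *	treats EAGAIN as "sent" and waits for lockd's asynchronous answer.
 */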

/*
 * NFS advisory byte-level locks (client)
 */
int
nfs3_lockd_request(
	nfsnode_t np,
	int type,
	LOCKD_MSG_REQUEST *msgreq,
	int flags,
	thread_t thd)
{
	LOCKD_MSG *msg = &msgreq->lmr_msg;
	int error, error2;
	int interruptable, slpflag;
	struct nfsmount *nmp;
	struct timeval now;
	int timeo, wentdown = 0;
	long starttime, endtime, lastmsg;
	struct timespec ts;
	struct sockaddr *saddr;

	nmp = NFSTONMP(np);
	if (!nmp || !nmp->nm_saddr) {
		return ENXIO;
	}

	lck_mtx_lock(&nmp->nm_lock);
	saddr = nmp->nm_saddr;
	bcopy(saddr, &msg->lm_addr, min(sizeof msg->lm_addr, saddr->sa_len));
	if (nmp->nm_vers == NFS_VER3) {
		msg->lm_flags |= LOCKD_MSG_NFSV3;
	}

	if (nmp->nm_sotype != SOCK_DGRAM) {
		msg->lm_flags |= LOCKD_MSG_TCP;
	}

	microuptime(&now);
	starttime = now.tv_sec;
	lastmsg = now.tv_sec - ((nmp->nm_tprintf_delay) - (nmp->nm_tprintf_initial_delay));
	interruptable = NMFLAG(nmp, INTR);
	lck_mtx_unlock(&nmp->nm_lock);

	lck_mtx_lock(nfs_lock_mutex);

	/* allocate unique xid */
	msg->lm_xid = nfs_lockxid_get();
	nfs_lockdmsg_enqueue(msgreq);

	timeo = 4;

	for (;;) {
		nfs_lockd_request_sent = 1;

		/* need to drop nfs_lock_mutex while calling nfs_lockd_send_request() */
		lck_mtx_unlock(nfs_lock_mutex);
		error = nfs_lockd_send_request(msg, interruptable);
		lck_mtx_lock(nfs_lock_mutex);
		if (error && error != EAGAIN) {
			break;
		}

		/*
		 * Always wait for an answer. Not waiting for unlocks could
		 * cause a lock to be left if the unlock request gets dropped.
		 */

		/*
		 * Retry if it takes too long to get a response.
		 *
		 * The timeout numbers were picked out of thin air... they start
		 * at 4 and double each timeout with a max of 30 seconds.
		 *
		 * In order to maintain responsiveness, we pass a small timeout
		 * to msleep and calculate the timeouts ourselves.  This allows
		 * us to pick up on mount changes quicker.
		 */
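
		/*
		 * Worked example (added): with timeo starting at 4 and doubling
		 * after each timeout, capped at 30, successive waits run
		 *
		 *	4, 8, 16, 30, 30, ... seconds
		 *
		 * while msleep() itself wakes every 2 seconds (ts below) so that
		 * mount state changes are noticed promptly.
		 */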
wait_for_granted:
		error = EWOULDBLOCK;
		slpflag = (interruptable && (type != F_UNLCK)) ? PCATCH : 0;
		ts.tv_sec = 2;
		ts.tv_nsec = 0;
		microuptime(&now);
		endtime = now.tv_sec + timeo;
		while (now.tv_sec < endtime) {
			error = error2 = 0;
			if (!msgreq->lmr_answered) {
				error = msleep(msgreq, nfs_lock_mutex, slpflag | PUSER, "lockd", &ts);
				slpflag = 0;
			}
			if (msgreq->lmr_answered) {
				/*
				 * Note: it's possible to have a lock granted at
				 * essentially the same time that we get interrupted.
				 * Since the lock may be granted, we can't return an
				 * error from this request or we might not unlock the
				 * lock that's been granted.
				 */
				nmp = NFSTONMP(np);
				if ((msgreq->lmr_errno == ENOTSUP) && nmp &&
				    (nmp->nm_state & NFSSTA_LOCKSWORK)) {
					/*
					 * We have evidence that locks work, yet lockd
					 * returned ENOTSUP.  This is probably because
					 * it was unable to contact the server's lockd
					 * to send it the request.
					 *
					 * Because we know locks work, we'll consider
					 * this failure to be a timeout.
					 */
					error = EWOULDBLOCK;
				} else {
					error = 0;
				}
				break;
			}
			if (error != EWOULDBLOCK) {
				break;
			}
			/* check that we still have our mount... */
			/* ...and that we still support locks */
			/* ...and that there isn't a recovery pending */
			nmp = NFSTONMP(np);
			if ((error2 = nfs_sigintr(nmp, NULL, NULL, 0))) {
				error = error2;
				if (type == F_UNLCK) {
					printf("nfs3_lockd_request: aborting unlock request, error %d\n", error);
				}
				break;
			}
			lck_mtx_lock(&nmp->nm_lock);
			if (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED) {
				lck_mtx_unlock(&nmp->nm_lock);
				break;
			}
			if ((nmp->nm_state & NFSSTA_RECOVER) && !(flags & R_RECOVER)) {
				/* recovery pending... return an error that'll get this operation restarted */
				error = NFSERR_GRACE;
				lck_mtx_unlock(&nmp->nm_lock);
				break;
			}
			interruptable = NMFLAG(nmp, INTR);
			lck_mtx_unlock(&nmp->nm_lock);
			microuptime(&now);
		}
		if (error) {
			/* check that we still have our mount... */
			nmp = NFSTONMP(np);
			if ((error2 = nfs_sigintr(nmp, NULL, NULL, 0))) {
				error = error2;
				if (error2 != EINTR) {
					if (type == F_UNLCK) {
						printf("nfs3_lockd_request: aborting unlock request, error %d\n", error);
					}
					break;
				}
			}
			/* ...and that we still support locks */
			lck_mtx_lock(&nmp->nm_lock);
			if (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED) {
				if (error == EWOULDBLOCK) {
					error = ENOTSUP;
				}
				lck_mtx_unlock(&nmp->nm_lock);
				break;
			}
			/* ...and that there isn't a recovery pending */
			if ((error == EWOULDBLOCK) && (nmp->nm_state & NFSSTA_RECOVER) && !(flags & R_RECOVER)) {
				/* recovery pending... return to allow recovery to occur */
				error = NFSERR_DENIED;
				lck_mtx_unlock(&nmp->nm_lock);
				break;
			}
			interruptable = NMFLAG(nmp, INTR);
			if ((error != EWOULDBLOCK) ||
			    ((nmp->nm_state & NFSSTA_RECOVER) && !(flags & R_RECOVER)) ||
			    ((flags & R_RECOVER) && ((now.tv_sec - starttime) > 30))) {
				if ((error == EWOULDBLOCK) && (flags & R_RECOVER)) {
					/* give up if this is for recovery and taking too long */
					error = ETIMEDOUT;
				} else if ((nmp->nm_state & NFSSTA_RECOVER) && !(flags & R_RECOVER)) {
					/* recovery pending... return an error that'll get this operation restarted */
					error = NFSERR_GRACE;
				}
				lck_mtx_unlock(&nmp->nm_lock);
				/*
				 * We're going to bail on this request.
				 * If we were a blocked lock request, send a cancel.
				 */
				if ((msgreq->lmr_errno == EINPROGRESS) &&
				    !(msg->lm_flags & LOCKD_MSG_CANCEL)) {
					/* set this request up as a cancel */
					msg->lm_flags |= LOCKD_MSG_CANCEL;
					nfs_lockdmsg_dequeue(msgreq);
					msg->lm_xid = nfs_lockxid_get();
					nfs_lockdmsg_enqueue(msgreq);
					msgreq->lmr_saved_errno = error;
					msgreq->lmr_errno = 0;
					msgreq->lmr_answered = 0;
					/* reset timeout */
					timeo = 2;
					/* send cancel request */
					continue;
				}
				break;
			}

			/* warn if we're not getting any response */
			microuptime(&now);
			if ((msgreq->lmr_errno != EINPROGRESS) &&
			    !(msg->lm_flags & LOCKD_MSG_DENIED_GRACE) &&
			    (nmp->nm_tprintf_initial_delay != 0) &&
			    ((lastmsg + nmp->nm_tprintf_delay) < now.tv_sec)) {
				lck_mtx_unlock(&nmp->nm_lock);
				lastmsg = now.tv_sec;
				nfs_down(nmp, thd, 0, NFSSTA_LOCKTIMEO, "lockd not responding", 1);
				wentdown = 1;
			} else {
				lck_mtx_unlock(&nmp->nm_lock);
			}

			if (msgreq->lmr_errno == EINPROGRESS) {
				/*
				 * We've got a blocked lock request that we are
				 * going to retry.  First, we'll want to try to
				 * send a cancel for the previous request.
				 *
				 * Clear errno so if we don't get a response
				 * to the resend we'll call nfs_down().
				 * Also reset timeout because we'll expect a
				 * quick response to the cancel/resend (even if
				 * it is NLM_BLOCKED).
				 */
				msg->lm_flags |= LOCKD_MSG_CANCEL;
				nfs_lockdmsg_dequeue(msgreq);
				msg->lm_xid = nfs_lockxid_get();
				nfs_lockdmsg_enqueue(msgreq);
				msgreq->lmr_saved_errno = msgreq->lmr_errno;
				msgreq->lmr_errno = 0;
				msgreq->lmr_answered = 0;
				timeo = 2;
				/* send cancel then resend request */
				continue;
			}

			/*
			 * We timed out, so we will resend the request.
			 */
			if (!(flags & R_RECOVER)) {
				timeo *= 2;
			}
			if (timeo > 30) {
				timeo = 30;
			}
			/* resend request */
			continue;
		}

		/* we got a response, so the server's lockd is OK */
		nfs_up(NFSTONMP(np), thd, NFSSTA_LOCKTIMEO,
		    wentdown ? "lockd alive again" : NULL);
		wentdown = 0;

		if (msgreq->lmr_answered && (msg->lm_flags & LOCKD_MSG_DENIED_GRACE)) {
			/*
			 * The lock request was denied because the server lockd is
			 * still in its grace period.  So, we need to try the
			 * request again in a little bit.  Return the GRACE error so
			 * the higher levels can perform the retry.
			 */
			msgreq->lmr_saved_errno = msgreq->lmr_errno = error = NFSERR_GRACE;
		}

		if (msgreq->lmr_errno == EINPROGRESS) {
			/* got NLM_BLOCKED response */
			/* need to wait for NLM_GRANTED */
			timeo = 30;
			msgreq->lmr_answered = 0;
			goto wait_for_granted;
		}

		if ((msg->lm_flags & LOCKD_MSG_CANCEL) &&
		    (msgreq->lmr_saved_errno == EINPROGRESS)) {
			/*
			 * We just got a successful reply to the
			 * cancel of the previous blocked lock request.
			 * Now, go ahead and return a DENIED error so the
			 * higher levels can resend the request.
			 */
			msg->lm_flags &= ~LOCKD_MSG_CANCEL;
			error = NFSERR_DENIED;
			/* Will dequeue msgreq after the following break at the end of this routine */
			break;
		}

		/*
		 * If the blocked lock request was cancelled, restore
		 * the error condition from when we originally bailed
		 * on the request.
		 */
		if (msg->lm_flags & LOCKD_MSG_CANCEL) {
			msg->lm_flags &= ~LOCKD_MSG_CANCEL;
			error = msgreq->lmr_saved_errno;
		} else {
			error = msgreq->lmr_errno;
		}

		nmp = NFSTONMP(np);
		if ((error == ENOTSUP) && nmp && !(nmp->nm_state & NFSSTA_LOCKSWORK)) {
			/*
			 * We have NO evidence that locks work and lockd
			 * returned ENOTSUP.  Let's take this as a hint
			 * that locks aren't supported and disable them
			 * for this mount.
			 */
			nfs_lockdmsg_dequeue(msgreq);
			lck_mtx_unlock(nfs_lock_mutex);
			lck_mtx_lock(&nmp->nm_lock);
			if (nmp->nm_lockmode == NFS_LOCK_MODE_ENABLED) {
				nmp->nm_lockmode = NFS_LOCK_MODE_DISABLED;
				nfs_lockd_mount_unregister(nmp);
			}
			nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
			lck_mtx_unlock(&nmp->nm_lock);
			printf("lockd returned ENOTSUP, disabling locks for nfs server: %s\n",
			    vfs_statfs(nmp->nm_mountp)->f_mntfromname);
			return error;
		}
		if (!error) {
			/* record that NFS file locking has worked on this mount */
			if (nmp) {
				lck_mtx_lock(&nmp->nm_lock);
				if (!(nmp->nm_state & NFSSTA_LOCKSWORK)) {
					nmp->nm_state |= NFSSTA_LOCKSWORK;
				}
				lck_mtx_unlock(&nmp->nm_lock);
			}
		}
		break;
	}

	nfs_lockdmsg_dequeue(msgreq);

	lck_mtx_unlock(nfs_lock_mutex);

	return error;
}
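
/*
 * Summary sketch (added for clarity; not in the original source): the request
 * lifecycle implemented above, assuming answers arrive via nfslockdans() and
 * wake the msleep() in the wait loop.
 *
 *	enqueue(new xid) -> send -> wait (timeo 4, 8, 16, 30, ...)
 *	    answered            -> done (or treated as a timeout if ENOTSUP
 *	                           arrives while we know locks work)
 *	    NLM_BLOCKED         -> wait again (timeo 30) for NLM_GRANTED
 *	    timeout/interrupted -> resend, or cancel (new xid, timeo 2)
 *	finally: dequeue(msgreq) and return error
 */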

/*
 * Send an NLM LOCK message to the server
 */
int
nfs3_setlock_rpc(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	struct nfs_file_lock *nflp,
	int reclaim,
	int flags,
	thread_t thd,
	kauth_cred_t cred)
{
	struct nfs_lock_owner *nlop = nflp->nfl_owner;
	struct nfsmount *nmp;
	int error;
	LOCKD_MSG_REQUEST msgreq;
	LOCKD_MSG *msg;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	if (!nlop->nlo_open_owner) {
		nfs_open_owner_ref(nofp->nof_owner);
		nlop->nlo_open_owner = nofp->nof_owner;
	}
	if ((error = nfs_lock_owner_set_busy(nlop, thd))) {
		return error;
	}

	/* set up lock message request structure */
	bzero(&msgreq, sizeof(msgreq));
	msg = &msgreq.lmr_msg;
	msg->lm_version = LOCKD_MSG_VERSION;
	if ((nflp->nfl_flags & NFS_FILE_LOCK_WAIT) && !reclaim) {
		msg->lm_flags |= LOCKD_MSG_BLOCK;
	}
	if (reclaim) {
		msg->lm_flags |= LOCKD_MSG_RECLAIM;
	}
	msg->lm_fh_len = (nmp->nm_vers == NFS_VER2) ? NFSX_V2FH : np->n_fhsize;
	bcopy(np->n_fhp, msg->lm_fh, msg->lm_fh_len);
	cru2x(cred, &msg->lm_cred);

	msg->lm_fl.l_whence = SEEK_SET;
	msg->lm_fl.l_start = nflp->nfl_start;
	msg->lm_fl.l_len = NFS_FLOCK_LENGTH(nflp->nfl_start, nflp->nfl_end);
	msg->lm_fl.l_type = nflp->nfl_type;
	msg->lm_fl.l_pid = nlop->nlo_pid;

	error = nfs3_lockd_request(np, 0, &msgreq, flags, thd);

	nfs_lock_owner_clear_busy(nlop);
	return error;
}
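
/*
 * Aside (added): NFS_FLOCK_LENGTH() above converts the client's inclusive
 * [start, end] byte range into the POSIX-style length lockd expects; e.g.
 * a lock on bytes 100-199 goes out as l_start = 100, l_len = 100.  (Assumed
 * from the NFS client headers: an end of UINT64_MAX, meaning lock-to-EOF,
 * maps to l_len = 0.)
 */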

/*
 * Send an NLM UNLOCK message to the server
 */
int
nfs3_unlock_rpc(
	nfsnode_t np,
	struct nfs_lock_owner *nlop,
	__unused int type,
	uint64_t start,
	uint64_t end,
	int flags,
	thread_t thd,
	kauth_cred_t cred)
{
	struct nfsmount *nmp;
	LOCKD_MSG_REQUEST msgreq;
	LOCKD_MSG *msg;

	nmp = NFSTONMP(np);
	if (!nmp) {
		return ENXIO;
	}

	/* set up lock message request structure */
	bzero(&msgreq, sizeof(msgreq));
	msg = &msgreq.lmr_msg;
	msg->lm_version = LOCKD_MSG_VERSION;
	msg->lm_fh_len = (nmp->nm_vers == NFS_VER2) ? NFSX_V2FH : np->n_fhsize;
	bcopy(np->n_fhp, msg->lm_fh, msg->lm_fh_len);
	cru2x(cred, &msg->lm_cred);

	msg->lm_fl.l_whence = SEEK_SET;
	msg->lm_fl.l_start = start;
	msg->lm_fl.l_len = NFS_FLOCK_LENGTH(start, end);
	msg->lm_fl.l_type = F_UNLCK;
	msg->lm_fl.l_pid = nlop->nlo_pid;

	return nfs3_lockd_request(np, F_UNLCK, &msgreq, flags, thd);
}

/*
 * Send an NLM LOCK TEST message to the server
 */
int
nfs3_getlock_rpc(
	nfsnode_t np,
	struct nfs_lock_owner *nlop,
	struct flock *fl,
	uint64_t start,
	uint64_t end,
	vfs_context_t ctx)
{
	struct nfsmount *nmp;
	int error;
	LOCKD_MSG_REQUEST msgreq;
	LOCKD_MSG *msg;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	/* set up lock message request structure */
	bzero(&msgreq, sizeof(msgreq));
	msg = &msgreq.lmr_msg;
	msg->lm_version = LOCKD_MSG_VERSION;
	msg->lm_flags |= LOCKD_MSG_TEST;
	msg->lm_fh_len = (nmp->nm_vers == NFS_VER2) ? NFSX_V2FH : np->n_fhsize;
	bcopy(np->n_fhp, msg->lm_fh, msg->lm_fh_len);
	cru2x(vfs_context_ucred(ctx), &msg->lm_cred);

	msg->lm_fl.l_whence = SEEK_SET;
	msg->lm_fl.l_start = start;
	msg->lm_fl.l_len = NFS_FLOCK_LENGTH(start, end);
	msg->lm_fl.l_type = fl->l_type;
	msg->lm_fl.l_pid = nlop->nlo_pid;

	error = nfs3_lockd_request(np, 0, &msgreq, 0, vfs_context_thread(ctx));

	if (!error && (msg->lm_flags & LOCKD_MSG_TEST) && !msgreq.lmr_errno) {
		if (msg->lm_fl.l_type != F_UNLCK) {
			fl->l_type = msg->lm_fl.l_type;
			fl->l_pid = msg->lm_fl.l_pid;
			fl->l_start = msg->lm_fl.l_start;
			fl->l_len = msg->lm_fl.l_len;
			fl->l_whence = SEEK_SET;
		} else {
			fl->l_type = F_UNLCK;
		}
	}

	return error;
}
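
/*
 * Usage note (added): this is the backend for an F_GETLK-style probe.  The
 * caller sets fl->l_type to the lock type it wants to test; on success fl
 * either describes a conflicting holder (type, pid, start, len) or comes
 * back with l_type == F_UNLCK if the range is free.
 */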

/*
 * NFS advisory byte-level locks answer from the lock daemon.
 */
int
nfslockdans(proc_t p, struct lockd_ans *ansp)
{
	LOCKD_MSG_REQUEST *msgreq;
	int error;

	/* Let root make this call. */
	error = proc_suser(p);
	if (error) {
		return error;
	}

	/* the version should match, or we're out of sync */
	if (ansp->la_version != LOCKD_ANS_VERSION) {
		return EINVAL;
	}

	lck_mtx_lock(nfs_lock_mutex);

	/* try to find the lockd message by transaction id (cookie) */
	msgreq = nfs_lockdmsg_find_by_xid(ansp->la_xid);
	if (ansp->la_flags & LOCKD_ANS_GRANTED) {
		/*
		 * We can't depend on the granted message having our cookie,
		 * so we check the answer against the lockd message found.
		 * If no message was found or it doesn't match the answer,
		 * we look for the lockd message by the answer's lock info.
		 */
		if (!msgreq || nfs_lockdmsg_compare_to_answer(msgreq, ansp)) {
			msgreq = nfs_lockdmsg_find_by_answer(ansp);
		}
		/*
		 * We need to make sure this request isn't being cancelled.
		 * If it is, we don't want to accept the granted message.
		 */
		if (msgreq && (msgreq->lmr_msg.lm_flags & LOCKD_MSG_CANCEL)) {
			msgreq = NULL;
		}
	}
	if (!msgreq) {
		lck_mtx_unlock(nfs_lock_mutex);
		return EPIPE;
	}

	msgreq->lmr_errno = ansp->la_errno;
	if ((msgreq->lmr_msg.lm_flags & LOCKD_MSG_TEST) && msgreq->lmr_errno == 0) {
		if (ansp->la_flags & LOCKD_ANS_LOCK_INFO) {
			if (ansp->la_flags & LOCKD_ANS_LOCK_EXCL) {
				msgreq->lmr_msg.lm_fl.l_type = F_WRLCK;
			} else {
				msgreq->lmr_msg.lm_fl.l_type = F_RDLCK;
			}
			msgreq->lmr_msg.lm_fl.l_pid = ansp->la_pid;
			msgreq->lmr_msg.lm_fl.l_start = ansp->la_start;
			msgreq->lmr_msg.lm_fl.l_len = ansp->la_len;
		} else {
			msgreq->lmr_msg.lm_fl.l_type = F_UNLCK;
		}
	}
	if (ansp->la_flags & LOCKD_ANS_DENIED_GRACE) {
		msgreq->lmr_msg.lm_flags |= LOCKD_MSG_DENIED_GRACE;
	}

	msgreq->lmr_answered = 1;
	lck_mtx_unlock(nfs_lock_mutex);
	wakeup(msgreq);

	return 0;
}
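
/*
 * Flow sketch (added): how an answer pairs up with a pending request above.
 *
 *	answer arrives -> nfs_lockdmsg_find_by_xid(la_xid)
 *	    granted?  -> verify with nfs_lockdmsg_compare_to_answer(); on a
 *	                 mismatch fall back to nfs_lockdmsg_find_by_answer(),
 *	                 since grants may not carry our cookie
 *	    request being cancelled? -> ignore the grant (no match)
 *	    no match  -> EPIPE
 *	    match     -> record errno/lock info, mark answered, wakeup()
 */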

/*
 * NFS host restart notification from the lock daemon.
 *
 * Used to initiate reclaiming of held locks when a server we
 * have mounted reboots.
 */
int
nfslockdnotify(proc_t p, user_addr_t argp)
{
	int error, i, headsize;
	struct lockd_notify ln;
	struct nfsmount *nmp;
	struct sockaddr *saddr;

	/* Let root make this call. */
	error = proc_suser(p);
	if (error) {
		return error;
	}

	headsize = (char*)&ln.ln_addr[0] - (char*)&ln.ln_version;
	error = copyin(argp, &ln, headsize);
	if (error) {
		return error;
	}
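	/*
	 * Note (added): headsize above is effectively
	 * offsetof(struct lockd_notify, ln_addr), i.e. the size of the fixed
	 * header that precedes the variable-length address array, so the
	 * copyin fetches only the header.  (The exact field layout of
	 * struct lockd_notify is assumed from the lockd headers.)
	 */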
	if (ln.ln_version != LOCKD_NOTIFY_VERSION) {
		return EINVAL;
	}
	if ((ln.ln_addrcount < 1) || (ln.ln_addrcount > 128)) {
		return EINVAL;
	}
	argp += headsize;
	saddr = (struct sockaddr *)&ln.ln_addr[0];

	lck_mtx_lock(nfs_lock_mutex);

	for (i = 0; i < ln.ln_addrcount; i++) {
		error = copyin(argp, &ln.ln_addr[0], sizeof(ln.ln_addr[0]));
		if (error) {
			break;
		}
		argp += sizeof(ln.ln_addr[0]);
		/* scan lockd mount list for match to this address */
		TAILQ_FOREACH(nmp, &nfs_lockd_mount_list, nm_ldlink) {
			/* check if address matches this mount's server address */
			if (!nmp->nm_saddr || nfs_sockaddr_cmp(saddr, nmp->nm_saddr)) {
				continue;
			}
			/* We have a match!  Mark it as needing recovery. */
			lck_mtx_lock(&nmp->nm_lock);
			nfs_need_recover(nmp, 0);
			lck_mtx_unlock(&nmp->nm_lock);
		}
	}

	lck_mtx_unlock(nfs_lock_mutex);

	return error;
}

#endif /* CONFIG_NFS_CLIENT */