1 /* $NetBSD: lockd_lock.c,v 1.5 2000/11/21 03:47:41 enami Exp $ */
2 /* $FreeBSD: src/usr.sbin/rpc.lockd/lockd_lock.c,v 1.10 2002/03/22 19:57:09 alfred Exp $ */
5 * Copyright (c) 2001 Andrew P. Lentvorski, Jr.
6 * Copyright (c) 2000 Manuel Bouyer.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52 #include <sys/types.h>
54 #include <sys/socket.h>
55 #include <sys/param.h>
56 #include <sys/mount.h>
58 #include <rpcsvc/sm_inter.h>
59 #include <rpcsvc/nlm_prot.h>
62 #include "lockd_lock.h"
64 #define MAXOBJECTSIZE 64
65 #define MAXBUFFERSIZE 1024
68 * SM_MAXSTRLEN is usually 1024. This means that lock requests and
69 * host name monitoring entries are *MUCH* larger than they should be
73 * A set of utilities for managing file locking
75 * XXX: All locks are in a linked list, a better structure should be used
76 * to improve search/access efficiency.
79 /* struct describing a lock */
81 LIST_ENTRY(file_lock
) nfslocklist
;
82 netobj filehandle
; /* NFS filehandle */
83 struct sockaddr
*addr
;
84 struct nlm4_holder client
; /* lock holder */
85 u_int64_t granted_cookie
;
86 char client_name
[SM_MAXSTRLEN
];
87 int nsm_status
; /* status from the remote lock manager */
88 int status
; /* lock status, see below */
89 int flags
; /* lock flags, see lockd_lock.h */
90 int blocking
; /* blocking lock or not */
91 pid_t locker
; /* pid of the child process trying to get the lock */
92 int fd
; /* file descriptor for this lock */
95 LIST_HEAD(nfslocklist_head
, file_lock
);
96 struct nfslocklist_head nfslocklist_head
= LIST_HEAD_INITIALIZER(nfslocklist_head
);
98 LIST_HEAD(blockedlocklist_head
, file_lock
);
99 struct blockedlocklist_head blockedlocklist_head
= LIST_HEAD_INITIALIZER(blockedlocklist_head
);
101 /* struct describing a share reservation */
103 LIST_ENTRY(file_share
) nfssharelist
;
104 netobj oh
; /* share holder */
105 char client_name
[SM_MAXSTRLEN
];
109 LIST_HEAD(nfssharelist_head
, file_share
);
111 /* Struct describing a file with share reservations */
113 LIST_ENTRY(sharefile
) sharefilelist
;
114 netobj filehandle
; /* Local access filehandle */
115 int fd
; /* file descriptor: remains open until no more shares */
117 struct nfssharelist_head sharelist_head
;
119 LIST_HEAD(nfssharefilelist_head
, sharefile
);
120 struct nfssharefilelist_head nfssharefilelist_head
= LIST_HEAD_INITIALIZER(nfssharefilelist_head
);
123 #define LKST_LOCKED 1 /* lock is locked */
124 /* XXX: Is this flag file specific or lock specific? */
125 #define LKST_WAITING 2 /* file is already locked by another host */
126 #define LKST_PROCESSING 3 /* child is trying to acquire the lock */
127 #define LKST_DYING 4 /* must die when we get news from the child */
129 /* struct describing a monitored host */
131 TAILQ_ENTRY(host
) hostlst
;
132 char name
[SM_MAXSTRLEN
];
136 /* list of hosts we monitor */
137 TAILQ_HEAD(hostlst_head
, host
);
138 struct hostlst_head hostlst_head
= TAILQ_HEAD_INITIALIZER(hostlst_head
);
139 struct hostlst_head hostlst_unref
= TAILQ_HEAD_INITIALIZER(hostlst_unref
);
141 int host_expire
= 60; /* seconds */
143 u_int64_t send_granted_cookie
= 0;
146 * File monitoring handlers
147 * XXX: These might be able to be removed when kevent support
148 * is placed into the hardware lock/unlock routines. (ie.
149 * let the kernel do all the file monitoring)
152 /* Struct describing a monitored file */
154 LIST_ENTRY(monfile
) monfilelist
;
155 netobj filehandle
; /* Local access filehandle */
156 int fd
; /* file descriptor: remains open until unlock! */
161 /* List of files we monitor */
162 LIST_HEAD(monfilelist_head
, monfile
);
163 struct monfilelist_head monfilelist_head
= LIST_HEAD_INITIALIZER(monfilelist_head
);
165 static int debugdelay
= 0;
167 enum nfslock_status
{ NFS_GRANTED
= 0, NFS_GRANTED_DUPLICATE
,
168 NFS_DENIED
, NFS_DENIED_NOLOCK
,
171 enum hwlock_status
{ HW_GRANTED
= 0, HW_GRANTED_DUPLICATE
,
172 HW_DENIED
, HW_DENIED_NOLOCK
,
173 HW_STALEFH
, HW_READONLY
, HW_RESERR
};
175 enum partialfilelock_status
{ PFL_GRANTED
=0, PFL_GRANTED_DUPLICATE
, PFL_DENIED
,
176 PFL_NFSDENIED
, PFL_NFSBLOCKED
, PFL_NFSDENIED_NOLOCK
, PFL_NFSRESERR
,
177 PFL_HWDENIED
, PFL_HWBLOCKED
, PFL_HWDENIED_NOLOCK
, PFL_HWRESERR
};
179 enum LFLAGS
{LEDGE_LEFT
, LEDGE_LBOUNDARY
, LEDGE_INSIDE
, LEDGE_RBOUNDARY
, LEDGE_RIGHT
};
180 enum RFLAGS
{REDGE_LEFT
, REDGE_LBOUNDARY
, REDGE_INSIDE
, REDGE_RBOUNDARY
, REDGE_RIGHT
};
181 /* XXX: WARNING! I HAVE OVERLOADED THIS STATUS ENUM! SPLIT IT APART INTO TWO */
182 enum split_status
{SPL_DISJOINT
=0, SPL_LOCK1
=1, SPL_LOCK2
=2, SPL_CONTAINED
=4, SPL_RESERR
=8};
184 enum partialfilelock_status
lock_partialfilelock(struct file_lock
*fl
);
186 int send_granted(struct file_lock
*fl
, int opcode
);
188 void sigunlock(void);
189 void destroy_lock_host(struct host
*ihp
);
190 void monitor_lock_host(const char *hostname
);
191 void unmonitor_lock_host(const char *hostname
);
193 void copy_nlm4_lock_to_nlm4_holder(const struct nlm4_lock
*src
,
194 const bool_t exclusive
, struct nlm4_holder
*dest
);
195 struct file_lock
* allocate_file_lock(const netobj
*lockowner
,
196 const netobj
*filehandle
);
197 void deallocate_file_lock(struct file_lock
*fl
);
198 void fill_file_lock(struct file_lock
*fl
,
199 struct sockaddr
*addr
, const bool_t exclusive
, const int32_t svid
,
200 const u_int64_t offset
, const u_int64_t len
, const char *caller_name
,
201 const int state
, const int status
, const int flags
, const int blocking
);
202 int regions_overlap(const u_int64_t start1
, const u_int64_t len1
,
203 const u_int64_t start2
, const u_int64_t len2
);;
204 enum split_status
region_compare(const u_int64_t starte
, const u_int64_t lene
,
205 const u_int64_t startu
, const u_int64_t lenu
,
206 u_int64_t
*start1
, u_int64_t
*len1
, u_int64_t
*start2
, u_int64_t
*len2
);
207 int same_netobj(const netobj
*n0
, const netobj
*n1
);
208 int same_filelock_identity(const struct file_lock
*fl0
,
209 const struct file_lock
*fl2
);
211 static void debuglog(char const *fmt
, ...);
212 void dump_static_object(const unsigned char* object
, const int sizeof_object
,
213 unsigned char* hbuff
, const int sizeof_hbuff
,
214 unsigned char* cbuff
, const int sizeof_cbuff
);
215 void dump_netobj(const struct netobj
*nobj
);
216 void dump_filelock(const struct file_lock
*fl
);
217 struct file_lock
* get_lock_matching_unlock(const struct file_lock
*fl
);
218 enum nfslock_status
test_nfslock(const struct file_lock
*fl
,
219 struct file_lock
**conflicting_fl
);
220 enum nfslock_status
lock_nfslock(struct file_lock
*fl
);
221 enum nfslock_status
delete_nfslock(struct file_lock
*fl
);
222 enum nfslock_status
unlock_nfslock(const struct file_lock
*fl
,
223 struct file_lock
**released_lock
, struct file_lock
**left_lock
,
224 struct file_lock
**right_lock
);
225 enum hwlock_status
lock_hwlock(struct file_lock
*fl
);
226 enum split_status
split_nfslock(const struct file_lock
*exist_lock
,
227 const struct file_lock
*unlock_lock
, struct file_lock
**left_lock
,
228 struct file_lock
**right_lock
);
229 void add_blockingfilelock(struct file_lock
*fl
);
230 enum hwlock_status
unlock_hwlock(const struct file_lock
*fl
);
231 enum hwlock_status
test_hwlock(const struct file_lock
*fl
,
232 struct file_lock
**conflicting_fl
);
233 void remove_blockingfilelock(struct file_lock
*fl
);
234 void clear_blockingfilelock(const char *hostname
);
235 void retry_blockingfilelocklist(netobj
*fh
);
236 enum partialfilelock_status
unlock_partialfilelock(
237 const struct file_lock
*fl
);
238 void clear_partialfilelock(const char *hostname
);
239 enum partialfilelock_status
test_partialfilelock(
240 const struct file_lock
*fl
, struct file_lock
**conflicting_fl
);
241 enum nlm_stats
do_test(struct file_lock
*fl
,
242 struct file_lock
**conflicting_fl
);
243 enum nlm_stats
do_unlock(struct file_lock
*fl
);
244 enum nlm_stats
do_lock(struct file_lock
*fl
);
245 void do_clear(const char *hostname
);
249 debuglog(char const *fmt
, ...)
253 if (debug_level
< 1) {
260 vsyslog(LOG_DEBUG
, fmt
, ap
);
265 dump_static_object(object
, size_object
, hbuff
, size_hbuff
, cbuff
, size_cbuff
)
266 const unsigned char *object
;
267 const int size_object
;
268 unsigned char *hbuff
;
269 const int size_hbuff
;
270 unsigned char *cbuff
;
271 const int size_cbuff
;
275 if (debug_level
< 2) {
279 objectsize
= size_object
;
281 if (objectsize
== 0) {
282 debuglog("object is size 0\n");
284 if (objectsize
> MAXOBJECTSIZE
) {
285 debuglog("Object of size %d being clamped"
286 "to size %d\n", objectsize
, MAXOBJECTSIZE
);
287 objectsize
= MAXOBJECTSIZE
;
291 if (size_hbuff
< objectsize
*2+1) {
292 debuglog("Hbuff not large enough."
295 for(i
=0;i
<objectsize
;i
++) {
296 sprintf(hbuff
+i
*2,"%02x",*(object
+i
));
303 if (size_cbuff
< objectsize
+1) {
304 debuglog("Cbuff not large enough."
308 for(i
=0;i
<objectsize
;i
++) {
309 if (*(object
+i
) >= 32 && *(object
+i
) <= 127) {
310 *(cbuff
+i
) = *(object
+i
);
321 dump_netobj(const struct netobj
*nobj
)
323 char hbuff
[MAXBUFFERSIZE
*2];
324 char cbuff
[MAXBUFFERSIZE
];
326 if (debug_level
< 2) {
331 debuglog("Null netobj pointer\n");
333 else if (nobj
->n_len
== 0) {
334 debuglog("Size zero netobj\n");
336 dump_static_object(nobj
->n_bytes
, nobj
->n_len
,
337 hbuff
, sizeof(hbuff
), cbuff
, sizeof(cbuff
));
338 debuglog("netobj: len: %d data: %s ::: %s\n",
339 nobj
->n_len
, hbuff
, cbuff
);
343 /* #define DUMP_FILELOCK_VERBOSE */
345 dump_filelock(const struct file_lock
*fl
)
347 #ifdef DUMP_FILELOCK_VERBOSE
348 char hbuff
[MAXBUFFERSIZE
*2];
349 char cbuff
[MAXBUFFERSIZE
];
352 if (debug_level
< 2) {
357 debuglog("Dumping file lock structure @ %p\n", fl
);
359 #ifdef DUMP_FILELOCK_VERBOSE
360 dump_static_object((unsigned char *)&fl
->filehandle
.n_bytes
,
361 fl
->filehandle
.n_len
, hbuff
, sizeof(hbuff
),
362 cbuff
, sizeof(cbuff
));
363 debuglog("Filehandle: %8s ::: %8s\n", hbuff
, cbuff
);
366 debuglog("Dumping nlm4_holder:\n"
367 "exc: %x svid: %x offset:len %llx:%llx\n",
368 fl
->client
.exclusive
, fl
->client
.svid
,
369 fl
->client
.l_offset
, fl
->client
.l_len
);
371 #ifdef DUMP_FILELOCK_VERBOSE
372 debuglog("Dumping client identity:\n");
373 dump_netobj(&fl
->client
.oh
);
375 debuglog("nsm: %d status: %d flags: %d locker: %d"
376 " fd: %d\n", fl
->nsm_status
, fl
->status
,
377 fl
->flags
, fl
->locker
, fl
->fd
);
380 debuglog("NULL file lock structure\n");
385 copy_nlm4_lock_to_nlm4_holder(src
, exclusive
, dest
)
386 const struct nlm4_lock
*src
;
387 const bool_t exclusive
;
388 struct nlm4_holder
*dest
;
391 dest
->exclusive
= exclusive
;
392 dest
->oh
.n_len
= src
->oh
.n_len
;
393 dest
->oh
.n_bytes
= src
->oh
.n_bytes
;
394 dest
->svid
= src
->svid
;
395 dest
->l_offset
= src
->l_offset
;
396 dest
->l_len
= src
->l_len
;
401 * allocate_file_lock: Create a lock with the given parameters
405 allocate_file_lock(const netobj
*lockowner
, const netobj
*filehandle
)
407 struct file_lock
*newfl
;
409 newfl
= malloc(sizeof(struct file_lock
));
413 bzero(newfl
, sizeof(newfl
));
415 newfl
->client
.oh
.n_bytes
= malloc(lockowner
->n_len
);
416 if (newfl
->client
.oh
.n_bytes
== NULL
) {
420 newfl
->client
.oh
.n_len
= lockowner
->n_len
;
421 bcopy(lockowner
->n_bytes
, newfl
->client
.oh
.n_bytes
, lockowner
->n_len
);
423 newfl
->filehandle
.n_bytes
= malloc(filehandle
->n_len
);
424 if (newfl
->filehandle
.n_bytes
== NULL
) {
425 free(newfl
->client
.oh
.n_bytes
);
429 newfl
->filehandle
.n_len
= filehandle
->n_len
;
430 bcopy(filehandle
->n_bytes
, newfl
->filehandle
.n_bytes
, filehandle
->n_len
);
436 * fill_file_lock: Force creation of a valid file lock
439 fill_file_lock(struct file_lock
*fl
,
440 struct sockaddr
*addr
, const bool_t exclusive
, const int32_t svid
,
441 const u_int64_t offset
, const u_int64_t len
, const char *caller_name
,
442 const int state
, const int status
, const int flags
, const int blocking
)
446 fl
->client
.exclusive
= exclusive
;
447 fl
->client
.svid
= svid
;
448 fl
->client
.l_offset
= offset
;
449 fl
->client
.l_len
= len
;
451 strncpy(fl
->client_name
, caller_name
, SM_MAXSTRLEN
);
453 fl
->nsm_status
= state
;
456 fl
->blocking
= blocking
;
460 * deallocate_file_lock: Free all storage associated with a file lock
463 deallocate_file_lock(struct file_lock
*fl
)
465 free(fl
->client
.oh
.n_bytes
);
466 free(fl
->filehandle
.n_bytes
);
471 * regions_overlap(): This function examines the two provided regions for
475 regions_overlap(start1
, len1
, start2
, len2
)
476 const u_int64_t start1
, len1
, start2
, len2
;
478 u_int64_t d1
,d2
,d3
,d4
;
479 enum split_status result
;
481 debuglog("Entering region overlap with vals: %llu:%llu--%llu:%llu\n",
482 start1
, len1
, start2
, len2
);
484 result
= region_compare(start1
, len1
, start2
, len2
,
487 debuglog("Exiting region overlap with val: %d\n",result
);
489 if (result
== SPL_DISJOINT
) {
499 * region_compare(): Examine lock regions and split appropriately
501 * XXX: Fix 64 bit overflow problems
502 * XXX: Check to make sure I got *ALL* the cases.
503 * XXX: This DESPERATELY needs a regression test.
506 region_compare(starte
, lene
, startu
, lenu
,
507 start1
, len1
, start2
, len2
)
508 const u_int64_t starte
, lene
, startu
, lenu
;
509 u_int64_t
*start1
, *len1
, *start2
, *len2
;
512 * Please pay attention to the sequential exclusions
513 * of the if statements!!!
517 enum split_status retval
;
519 retval
= SPL_DISJOINT
;
521 if (lene
== 0 && lenu
== 0) {
522 /* Examine left edge of locker */
523 if (startu
< starte
) {
525 } else if (startu
== starte
) {
526 lflags
= LEDGE_LBOUNDARY
;
528 lflags
= LEDGE_INSIDE
;
531 rflags
= REDGE_RBOUNDARY
; /* Both are infinite */
533 if (lflags
== LEDGE_INSIDE
) {
535 *len1
= startu
- starte
;
538 if (lflags
== LEDGE_LEFT
|| lflags
== LEDGE_LBOUNDARY
) {
539 retval
= SPL_CONTAINED
;
543 } else if (lene
== 0 && lenu
!= 0) {
544 /* Established lock is infinite */
545 /* Examine left edge of unlocker */
546 if (startu
< starte
) {
548 } else if (startu
== starte
) {
549 lflags
= LEDGE_LBOUNDARY
;
550 } else if (startu
> starte
) {
551 lflags
= LEDGE_INSIDE
;
554 /* Examine right edge of unlocker */
555 if (startu
+ lenu
< starte
) {
556 /* Right edge of unlocker left of established lock */
559 } else if (startu
+ lenu
== starte
) {
560 /* Right edge of unlocker on start of established lock */
561 rflags
= REDGE_LBOUNDARY
;
563 } else { /* Infinity is right of finity */
564 /* Right edge of unlocker inside established lock */
565 rflags
= REDGE_INSIDE
;
568 if (lflags
== LEDGE_INSIDE
) {
570 *len1
= startu
- starte
;
574 if (rflags
== REDGE_INSIDE
) {
575 /* Create right lock */
576 *start2
= startu
+lenu
;
580 } else if (lene
!= 0 && lenu
== 0) {
581 /* Unlocker is infinite */
582 /* Examine left edge of unlocker */
583 if (startu
< starte
) {
585 retval
= SPL_CONTAINED
;
587 } else if (startu
== starte
) {
588 lflags
= LEDGE_LBOUNDARY
;
589 retval
= SPL_CONTAINED
;
591 } else if ((startu
> starte
) && (startu
< starte
+ lene
- 1)) {
592 lflags
= LEDGE_INSIDE
;
593 } else if (startu
== starte
+ lene
- 1) {
594 lflags
= LEDGE_RBOUNDARY
;
595 } else { /* startu > starte + lene -1 */
596 lflags
= LEDGE_RIGHT
;
600 rflags
= REDGE_RIGHT
; /* Infinity is right of finity */
602 if (lflags
== LEDGE_INSIDE
|| lflags
== LEDGE_RBOUNDARY
) {
604 *len1
= startu
- starte
;
610 /* Both locks are finite */
612 /* Examine left edge of unlocker */
613 if (startu
< starte
) {
615 } else if (startu
== starte
) {
616 lflags
= LEDGE_LBOUNDARY
;
617 } else if ((startu
> starte
) && (startu
< starte
+ lene
- 1)) {
618 lflags
= LEDGE_INSIDE
;
619 } else if (startu
== starte
+ lene
- 1) {
620 lflags
= LEDGE_RBOUNDARY
;
621 } else { /* startu > starte + lene -1 */
622 lflags
= LEDGE_RIGHT
;
626 /* Examine right edge of unlocker */
627 if (startu
+ lenu
< starte
) {
628 /* Right edge of unlocker left of established lock */
631 } else if (startu
+ lenu
== starte
) {
632 /* Right edge of unlocker on start of established lock */
633 rflags
= REDGE_LBOUNDARY
;
635 } else if (startu
+ lenu
< starte
+ lene
) {
636 /* Right edge of unlocker inside established lock */
637 rflags
= REDGE_INSIDE
;
638 } else if (startu
+ lenu
== starte
+ lene
) {
639 /* Right edge of unlocker on right edge of established lock */
640 rflags
= REDGE_RBOUNDARY
;
641 } else { /* startu + lenu > starte + lene */
642 /* Right edge of unlocker is right of established lock */
643 rflags
= REDGE_RIGHT
;
646 if (lflags
== LEDGE_INSIDE
|| lflags
== LEDGE_RBOUNDARY
) {
647 /* Create left lock */
649 *len1
= (startu
- starte
);
653 if (rflags
== REDGE_INSIDE
) {
654 /* Create right lock */
655 *start2
= startu
+lenu
;
656 *len2
= starte
+lene
-(startu
+lenu
);
660 if ((lflags
== LEDGE_LEFT
|| lflags
== LEDGE_LBOUNDARY
) &&
661 (rflags
== REDGE_RBOUNDARY
|| rflags
== REDGE_RIGHT
)) {
662 retval
= SPL_CONTAINED
;
670 * same_netobj: Compares the appropriate bits of a netobj for identity
673 same_netobj(const netobj
*n0
, const netobj
*n1
)
679 debuglog("Entering netobj identity check\n");
681 if (n0
->n_len
== n1
->n_len
) {
682 debuglog("Preliminary length check passed\n");
683 retval
= !bcmp(n0
->n_bytes
, n1
->n_bytes
, n0
->n_len
);
684 debuglog("netobj %smatch\n", retval
? "" : "mis");
691 * same_filelock_identity: Compares the appropriate bits of a file_lock
694 same_filelock_identity(fl0
, fl1
)
695 const struct file_lock
*fl0
, *fl1
;
701 debuglog("Checking filelock identity\n");
704 * Check process ids and host information.
706 retval
= (fl0
->client
.svid
== fl1
->client
.svid
&&
707 same_netobj(&(fl0
->client
.oh
), &(fl1
->client
.oh
)));
709 debuglog("Exiting checking filelock identity: retval: %d\n",retval
);
715 * Below here are routines associated with manipulating the NFS
720 * get_lock_matching_unlock: Return a lock which matches the given unlock lock
722 * XXX: It is a shame that this duplicates so much code from test_nfslock.
725 get_lock_matching_unlock(const struct file_lock
*fl
)
727 struct file_lock
*ifl
; /* Iterator */
729 debuglog("Entering lock_matching_unlock\n");
730 debuglog("********Dump of fl*****************\n");
733 LIST_FOREACH(ifl
, &nfslocklist_head
, nfslocklist
) {
734 debuglog("Pointer to file lock: %p\n",ifl
);
736 debuglog("****Dump of ifl****\n");
738 debuglog("*******************\n");
741 * XXX: It is conceivable that someone could use the NLM RPC
742 * system to directly access filehandles. This may be a
743 * security hazard as the filehandle code may bypass normal
744 * file access controls
746 if (fl
->filehandle
.n_len
!= ifl
->filehandle
.n_len
)
748 if (bcmp(fl
->filehandle
.n_bytes
, ifl
->filehandle
.n_bytes
,
749 fl
->filehandle
.n_len
))
752 debuglog("matching_unlock: Filehandles match, "
753 "checking regions\n");
755 /* Filehandles match, check for region overlap */
756 if (!regions_overlap(fl
->client
.l_offset
, fl
->client
.l_len
,
757 ifl
->client
.l_offset
, ifl
->client
.l_len
))
760 debuglog("matching_unlock: Region overlap"
761 " found %llu : %llu -- %llu : %llu\n",
762 fl
->client
.l_offset
,fl
->client
.l_len
,
763 ifl
->client
.l_offset
,ifl
->client
.l_len
);
765 /* Regions overlap, check the identity */
766 if (!same_filelock_identity(fl
,ifl
))
769 debuglog("matching_unlock: Duplicate lock id. Granting\n");
773 debuglog("Exiting lock_matching_unlock\n");
779 * test_nfslock: check for NFS lock in lock list
781 * This routine makes the following assumptions:
782 * 1) Nothing will adjust the lock list during a lookup
784 * This routine has an interesting quirk which bit me hard.
785 * The conflicting_fl is the pointer to the conflicting lock.
786 * However, to modify the "*pointer* to the conflicting lock" rather
787 * that the "conflicting lock itself" one must pass in a "pointer to
788 * the pointer of the conflicting lock". Gross.
792 test_nfslock(const struct file_lock
*fl
, struct file_lock
**conflicting_fl
)
794 struct file_lock
*ifl
; /* Iterator */
795 enum nfslock_status retval
;
797 debuglog("Entering test_nfslock\n");
799 retval
= NFS_GRANTED
;
800 (*conflicting_fl
) = NULL
;
802 debuglog("Entering lock search loop\n");
804 debuglog("***********************************\n");
805 debuglog("Dumping match filelock\n");
806 debuglog("***********************************\n");
808 debuglog("***********************************\n");
810 LIST_FOREACH(ifl
, &nfslocklist_head
, nfslocklist
) {
811 if (retval
== NFS_DENIED
)
814 debuglog("Top of lock loop\n");
815 debuglog("Pointer to file lock: %p\n",ifl
);
817 debuglog("***********************************\n");
818 debuglog("Dumping test filelock\n");
819 debuglog("***********************************\n");
821 debuglog("***********************************\n");
824 * XXX: It is conceivable that someone could use the NLM RPC
825 * system to directly access filehandles. This may be a
826 * security hazard as the filehandle code may bypass normal
827 * file access controls
829 if (fl
->filehandle
.n_len
!= ifl
->filehandle
.n_len
)
831 if (bcmp(fl
->filehandle
.n_bytes
, ifl
->filehandle
.n_bytes
,
832 fl
->filehandle
.n_len
))
835 debuglog("test_nfslock: filehandle match found\n");
837 /* Filehandles match, check for region overlap */
838 if (!regions_overlap(fl
->client
.l_offset
, fl
->client
.l_len
,
839 ifl
->client
.l_offset
, ifl
->client
.l_len
))
842 debuglog("test_nfslock: Region overlap found"
843 " %llu : %llu -- %llu : %llu\n",
844 fl
->client
.l_offset
,fl
->client
.l_len
,
845 ifl
->client
.l_offset
,ifl
->client
.l_len
);
847 /* Regions overlap, check the exclusivity */
848 if (!(fl
->client
.exclusive
|| ifl
->client
.exclusive
))
851 debuglog("test_nfslock: Exclusivity failure: %d %d\n",
852 fl
->client
.exclusive
,
853 ifl
->client
.exclusive
);
855 if (same_filelock_identity(fl
,ifl
)) {
856 debuglog("test_nfslock: Duplicate id. Granting\n");
857 (*conflicting_fl
) = ifl
;
858 retval
= NFS_GRANTED_DUPLICATE
;
860 /* locking attempt fails */
861 debuglog("test_nfslock: Lock attempt failed\n");
862 debuglog("Desired lock\n");
864 debuglog("Conflicting lock\n");
866 (*conflicting_fl
) = ifl
;
871 debuglog("Dumping file locks\n");
872 debuglog("Exiting test_nfslock\n");
878 * lock_nfslock: attempt to create a lock in the NFS lock list
880 * This routine tests whether the lock will be granted and then adds
881 * the entry to the lock list if so.
883 * Argument fl gets modified as its list housekeeping entries get modified
884 * upon insertion into the NFS lock list
886 * This routine makes several assumptions:
887 * 1) It is perfectly happy to grant a duplicate lock from the same pid.
888 * While this seems to be intuitively wrong, it is required for proper
889 * Posix semantics during unlock. It is absolutely imperative to not
890 * unlock the main lock before the two child locks are established. Thus,
891 * one has be be able to create duplicate locks over an existing lock
892 * 2) It currently accepts duplicate locks from the same id,pid
896 lock_nfslock(struct file_lock
*fl
)
898 enum nfslock_status retval
;
899 struct file_lock
*dummy_fl
;
903 debuglog("Entering lock_nfslock...\n");
905 retval
= test_nfslock(fl
,&dummy_fl
);
907 if (retval
== NFS_GRANTED
|| retval
== NFS_GRANTED_DUPLICATE
) {
908 debuglog("Inserting lock...\n");
910 LIST_INSERT_HEAD(&nfslocklist_head
, fl
, nfslocklist
);
913 debuglog("Exiting lock_nfslock...\n");
919 * delete_nfslock: delete an NFS lock list entry
921 * This routine is used to delete a lock out of the NFS lock list
922 * without regard to status, underlying locks, regions or anything else
924 * Note that this routine *does not deallocate memory* of the lock.
925 * It just disconnects it from the list. The lock can then be used
926 * by other routines without fear of trashing the list.
930 delete_nfslock(struct file_lock
*fl
)
933 LIST_REMOVE(fl
, nfslocklist
);
935 return (NFS_GRANTED
);
939 split_nfslock(exist_lock
, unlock_lock
, left_lock
, right_lock
)
940 const struct file_lock
*exist_lock
, *unlock_lock
;
941 struct file_lock
**left_lock
, **right_lock
;
943 u_int64_t start1
, len1
, start2
, len2
;
944 enum split_status spstatus
;
946 spstatus
= region_compare(exist_lock
->client
.l_offset
, exist_lock
->client
.l_len
,
947 unlock_lock
->client
.l_offset
, unlock_lock
->client
.l_len
,
948 &start1
, &len1
, &start2
, &len2
);
950 if ((spstatus
& SPL_LOCK1
) != 0) {
951 *left_lock
= allocate_file_lock(&exist_lock
->client
.oh
, &exist_lock
->filehandle
);
952 if (*left_lock
== NULL
) {
953 debuglog("Unable to allocate resource for split 1\n");
957 fill_file_lock(*left_lock
,
959 exist_lock
->client
.exclusive
, exist_lock
->client
.svid
,
961 exist_lock
->client_name
, exist_lock
->nsm_status
,
962 exist_lock
->status
, exist_lock
->flags
, exist_lock
->blocking
);
965 if ((spstatus
& SPL_LOCK2
) != 0) {
966 *right_lock
= allocate_file_lock(&exist_lock
->client
.oh
, &exist_lock
->filehandle
);
967 if (*right_lock
== NULL
) {
968 debuglog("Unable to allocate resource for split 1\n");
969 if (*left_lock
!= NULL
) {
970 deallocate_file_lock(*left_lock
);
975 fill_file_lock(*right_lock
,
977 exist_lock
->client
.exclusive
, exist_lock
->client
.svid
,
979 exist_lock
->client_name
, exist_lock
->nsm_status
,
980 exist_lock
->status
, exist_lock
->flags
, exist_lock
->blocking
);
987 unlock_nfslock(fl
, released_lock
, left_lock
, right_lock
)
988 const struct file_lock
*fl
;
989 struct file_lock
**released_lock
;
990 struct file_lock
**left_lock
;
991 struct file_lock
**right_lock
;
993 struct file_lock
*mfl
; /* Matching file lock */
994 enum nfslock_status retval
;
995 enum split_status spstatus
;
997 debuglog("Entering unlock_nfslock\n");
999 *released_lock
= NULL
;
1003 retval
= NFS_DENIED_NOLOCK
;
1005 printf("Attempting to match lock...\n");
1006 mfl
= get_lock_matching_unlock(fl
);
1009 debuglog("Unlock matched. Querying for split\n");
1011 spstatus
= split_nfslock(mfl
, fl
, left_lock
, right_lock
);
1013 debuglog("Split returned %d %p %p %p %p\n",spstatus
,mfl
,fl
,*left_lock
,*right_lock
);
1014 debuglog("********Split dumps********");
1017 dump_filelock(*left_lock
);
1018 dump_filelock(*right_lock
);
1019 debuglog("********End Split dumps********");
1021 if (spstatus
== SPL_RESERR
) {
1022 if (*left_lock
!= NULL
) {
1023 deallocate_file_lock(*left_lock
);
1027 if (*right_lock
!= NULL
) {
1028 deallocate_file_lock(*right_lock
);
1035 /* Insert new locks from split if required */
1036 if (*left_lock
!= NULL
) {
1037 debuglog("Split left activated\n");
1038 LIST_INSERT_HEAD(&nfslocklist_head
, *left_lock
, nfslocklist
);
1041 if (*right_lock
!= NULL
) {
1042 debuglog("Split right activated\n");
1043 LIST_INSERT_HEAD(&nfslocklist_head
, *right_lock
, nfslocklist
);
1046 /* Unlock the lock since it matches identity */
1047 LIST_REMOVE(mfl
, nfslocklist
);
1048 *released_lock
= mfl
;
1049 retval
= NFS_GRANTED
;
1052 debuglog("Exiting unlock_nfslock\n");
1058 * Below here are the routines for manipulating the file lock directly
1059 * on the disk hardware itself
1062 lock_hwlock(struct file_lock
*fl
)
1064 struct monfile
*imf
,*nmf
;
1065 int lflags
, flerror
;
1067 /* Scan to see if filehandle already present */
1068 LIST_FOREACH(imf
, &monfilelist_head
, monfilelist
) {
1069 if ((fl
->filehandle
.n_len
== imf
->filehandle
.n_len
) &&
1070 (bcmp(fl
->filehandle
.n_bytes
, imf
->filehandle
.n_bytes
,
1071 fl
->filehandle
.n_len
) == 0)) {
1072 /* imf is the correct filehandle */
1078 * Filehandle already exists (we control the file)
1079 * *AND* NFS has already cleared the lock for availability
1080 * Grant it and bump the refcount.
1084 return (HW_GRANTED
);
1087 /* No filehandle found, create and go */
1088 nmf
= malloc(sizeof(struct monfile
));
1090 debuglog("hwlock resource allocation failure\n");
1093 nmf
->filehandle
.n_bytes
= malloc(fl
->filehandle
.n_len
);
1095 debuglog("hwlock resource allocation failure\n");
1100 /* XXX: Is O_RDWR always the correct mode? */
1101 nmf
->fd
= fhopen((fhandle_t
*)fl
->filehandle
.n_bytes
, O_RDWR
);
1103 debuglog("fhopen failed (from %16s): %32s\n",
1104 fl
->client_name
, strerror(errno
));
1108 return (HW_STALEFH
);
1110 return (HW_READONLY
);
1116 /* File opened correctly, fill the monitor struct */
1117 nmf
->filehandle
.n_len
= fl
->filehandle
.n_len
;
1118 bcopy(fl
->filehandle
.n_bytes
, nmf
->filehandle
.n_bytes
, fl
->filehandle
.n_len
);
1120 nmf
->exclusive
= fl
->client
.exclusive
;
1122 lflags
= (nmf
->exclusive
== 1) ?
1123 (LOCK_EX
| LOCK_NB
) : (LOCK_SH
| LOCK_NB
);
1125 flerror
= flock(nmf
->fd
, lflags
);
1128 debuglog("flock failed (from %16s): %32s\n",
1129 fl
->client_name
, strerror(errno
));
1136 return (HW_STALEFH
);
1138 return (HW_READONLY
);
1145 /* File opened and locked */
1146 LIST_INSERT_HEAD(&monfilelist_head
, nmf
, monfilelist
);
1148 debuglog("flock succeeded (from %16s)\n", fl
->client_name
);
1149 return (HW_GRANTED
);
1153 unlock_hwlock(const struct file_lock
*fl
)
1155 struct monfile
*imf
;
1157 debuglog("Entering unlock_hwlock\n");
1158 debuglog("Entering loop interation\n");
1160 /* Scan to see if filehandle already present */
1161 LIST_FOREACH(imf
, &monfilelist_head
, monfilelist
) {
1162 if ((fl
->filehandle
.n_len
== imf
->filehandle
.n_len
) &&
1163 (bcmp(fl
->filehandle
.n_bytes
, imf
->filehandle
.n_bytes
,
1164 fl
->filehandle
.n_len
) == 0)) {
1165 /* imf is the correct filehandle */
1170 debuglog("Completed iteration. Proceeding\n");
1174 debuglog("Exiting unlock_hwlock (HW_DENIED_NOLOCK)\n");
1175 return (HW_DENIED_NOLOCK
);
1181 if (imf
->refcount
< 0) {
1182 debuglog("Negative hardware reference count\n");
1185 if (imf
->refcount
<= 0) {
1187 LIST_REMOVE(imf
, monfilelist
);
1190 debuglog("Exiting unlock_hwlock (HW_GRANTED)\n");
1191 return (HW_GRANTED
);
1195 test_hwlock(fl
, conflicting_fl
)
1196 const struct file_lock
*fl __unused
;
1197 struct file_lock
**conflicting_fl __unused
;
1201 * XXX: lock tests on hardware are not required until
1202 * true partial file testing is done on the underlying file
1210 * Below here are routines for manipulating blocked lock requests
1211 * They should only be called from the XXX_partialfilelock routines
1212 * if at all possible
1216 add_blockingfilelock(struct file_lock
*fl
)
1218 struct file_lock
*ifl
, *nfl
;
1220 debuglog("Entering add_blockingfilelock\n");
1223 * Check for a duplicate lock request.
1224 * If found, deallocate the older request.
1226 ifl
= LIST_FIRST(&blockedlocklist_head
);
1227 for (; ifl
!= NULL
; ifl
= nfl
) {
1228 debuglog("Pointer to file lock: %p\n",ifl
);
1229 debuglog("****Dump of ifl****\n");
1231 debuglog("*******************\n");
1233 nfl
= LIST_NEXT(ifl
, nfslocklist
);
1235 if (fl
->filehandle
.n_len
!= ifl
->filehandle
.n_len
)
1237 if (bcmp(fl
->filehandle
.n_bytes
, ifl
->filehandle
.n_bytes
,
1238 fl
->filehandle
.n_len
))
1241 /* Filehandles match, check region */
1242 if ((fl
->client
.l_offset
!= ifl
->client
.l_offset
) ||
1243 (fl
->client
.l_len
!= ifl
->client
.l_len
))
1246 /* Regions match, check the identity */
1247 if (!same_filelock_identity(fl
,ifl
))
1250 debuglog("add_blockingfilelock: removing duplicate lock request.\n");
1251 remove_blockingfilelock(ifl
);
1252 deallocate_file_lock(ifl
);
1257 * Clear the blocking flag so that it can be reused without
1258 * adding it to the blocking queue a second time
1262 LIST_INSERT_HEAD(&blockedlocklist_head
, fl
, nfslocklist
);
1264 debuglog("Exiting add_blockingfilelock\n");
1268 remove_blockingfilelock(struct file_lock
*fl
)
1271 debuglog("Entering remove_blockingfilelock\n");
1273 LIST_REMOVE(fl
, nfslocklist
);
1275 debuglog("Exiting remove_blockingfilelock\n");
/*
 * clear_blockingfilelock: drop every queued (blocked) lock request that
 * was submitted by the given host (matched by client_name).
 * Matching entries are removed from the blocked list and freed.
 * NOTE(review): this fragment appears truncated by extraction — closing
 * braces and some statements are missing from this view.
 */
1279 clear_blockingfilelock(const char *hostname
)
1281 struct file_lock
*ifl
,*nfl
;
1284 * Normally, LIST_FOREACH is called for, but since
1285 * the current element *is* the iterator, deleting it
1286 * would mess up the iteration. Thus, a next element
1287 * must be used explicitly
/* Walk with an explicit saved "next" pointer since ifl may be freed. */
1290 ifl
= LIST_FIRST(&blockedlocklist_head
);
1292 while (ifl
!= NULL
) {
1293 nfl
= LIST_NEXT(ifl
, nfslocklist
);
/* Compare the requesting host's name against this entry's client. */
1295 if (strncmp(hostname
, ifl
->client_name
, SM_MAXSTRLEN
) == 0) {
1296 remove_blockingfilelock(ifl
);
1297 deallocate_file_lock(ifl
);
1304 int need_retry_blocked_locks
= 0; /* need to call retry_blockingfilelocklist() */
1307 retry_blockingfilelocklist(netobj
*fh
)
1310 * If fh is given, then retry just the locks with the
1311 * same filehandle in the blocked list.
1312 * Otherwise, simply retry all locks in the blocked list.
1314 struct file_lock
*ifl
, *nfl
, *pfl
; /* Iterator */
1315 enum partialfilelock_status pflstatus
;
1318 debuglog("Entering retry_blockingfilelocklist\n");
1320 need_retry_blocked_locks
= 0;
1323 ifl
= LIST_FIRST(&blockedlocklist_head
);
1324 debuglog("Iterator choice %p\n",ifl
);
1326 while (ifl
!= NULL
) {
1328 * SUBTLE BUG: The next element must be worked out before the
1329 * current element has been moved
1331 nfl
= LIST_NEXT(ifl
, nfslocklist
);
1332 debuglog("Iterator choice %p\n",ifl
);
1333 debuglog("Prev iterator choice %p\n",pfl
);
1334 debuglog("Next iterator choice %p\n",nfl
);
1336 /* if given a filehandle, only retry locks for the same filehandle */
1337 if (fh
&& !same_netobj(fh
, &ifl
->filehandle
)) {
1343 * SUBTLE BUG: The file_lock must be removed from the
1344 * old list so that its list pointers get disconnected
1345 * before being allowed to participate in the new list
1346 * which will automatically add it in if necessary.
1349 LIST_REMOVE(ifl
, nfslocklist
);
1350 pflstatus
= lock_partialfilelock(ifl
);
1352 if (pflstatus
== PFL_GRANTED
|| pflstatus
== PFL_GRANTED_DUPLICATE
) {
1353 debuglog("Granted blocked lock\n");
1354 /* lock granted and is now being used */
1355 rv
= send_granted(ifl
, 0);
1358 * Uh oh... the NLM_GRANTED message failed.
1359 * About the only thing we can do is drop the lock.
1360 * Note: this could be bad if the error was only
1361 * transient. Hopefully, if the client is still
1362 * waiting for the lock, they will resend the request.
1365 /* ifl is NO LONGER VALID AT THIS POINT */
1368 /* Reinsert lock back into same place in blocked list */
1369 debuglog("Replacing blocked lock\n");
1371 LIST_INSERT_AFTER(pfl
, ifl
, nfslocklist
);
1373 /* ifl is the only elem. in the list */
1374 LIST_INSERT_HEAD(&blockedlocklist_head
, ifl
, nfslocklist
);
1377 if (pflstatus
== PFL_GRANTED
|| pflstatus
== PFL_GRANTED_DUPLICATE
) {
1378 /* If ifl was permanently removed from the list, (e.g the */
1379 /* lock was granted), pfl should remain where it's at. */
1381 /* If ifl was left in the list, (e.g it was reinserted back */
1382 /* in place), pfl should simply be moved forward to be ifl */
1385 /* Valid increment behavior regardless of state of ifl */
1389 debuglog("Exiting retry_blockingfilelocklist\n");
1393 * Below here are routines associated with manipulating all
1394 * aspects of the partial file locking system (list, hardware, etc.)
1398 * Please note that lock monitoring must be done at this level which
1399 * keeps track of *individual* lock requests on lock and unlock
1401 * XXX: Split unlocking is going to make the unlock code miserable
1405 * lock_partialfilelock:
1407 * Argument fl gets modified as its list housekeeping entries get modified
1408 * upon insertion into the NFS lock list
1410 * This routine makes several assumptions:
1411 * 1) It (will) pass locks through to flock to lock the entire underlying file
1412 * and then parcel out NFS locks if it gets control of the file.
1413 * This matches the old rpc.lockd file semantics (except where it
1414 * is now more correct). It is the safe solution, but will cause
1415 * overly restrictive blocking if someone is trying to use the
1416 * underlying files without using NFS. This appears to be an
1417 * acceptable tradeoff since most people use standalone NFS servers.
1418 * XXX: The right solution is probably kevent combined with fcntl
1420 * 2) Nothing modifies the lock lists between testing and granting
1421 * I have no idea whether this is a useful assumption or not
1424 enum partialfilelock_status
1425 lock_partialfilelock(struct file_lock
*fl
)
1427 enum partialfilelock_status retval
;
1428 enum nfslock_status lnlstatus
;
1429 enum hwlock_status hwstatus
;
1431 debuglog("Entering lock_partialfilelock\n");
1433 retval
= PFL_DENIED
;
1436 * Execute the NFS lock first, if possible, as it is significantly
1437 * easier and less expensive to undo than the filesystem lock
1440 lnlstatus
= lock_nfslock(fl
);
1442 switch (lnlstatus
) {
1444 case NFS_GRANTED_DUPLICATE
:
1446 * At this point, the NFS lock is allocated and active.
1447 * Remember to clean it up if the hardware lock fails
1449 hwstatus
= lock_hwlock(fl
);
1453 case HW_GRANTED_DUPLICATE
:
1454 debuglog("HW GRANTED\n");
1456 * XXX: Fixme: Check hwstatus for duplicate when
1457 * true partial file locking and accounting is
1458 * done on the hardware
1460 if (lnlstatus
== NFS_GRANTED_DUPLICATE
) {
1461 retval
= PFL_GRANTED_DUPLICATE
;
1463 retval
= PFL_GRANTED
;
1465 if (fl
->flags
& LOCK_MON
)
1466 monitor_lock_host(fl
->client_name
);
1469 debuglog("HW RESERR\n");
1470 retval
= PFL_HWRESERR
;
1473 debuglog("HW DENIED\n");
1474 retval
= PFL_HWDENIED
;
1477 debuglog("Unmatched hwstatus %d\n",hwstatus
);
1481 if (retval
!= PFL_GRANTED
&&
1482 retval
!= PFL_GRANTED_DUPLICATE
) {
1483 /* Clean up the NFS lock */
1484 debuglog("Deleting trial NFS lock\n");
1489 retval
= PFL_NFSDENIED
;
1492 retval
= PFL_NFSRESERR
;
1494 debuglog("Unmatched lnlstatus %d\n");
1495 retval
= PFL_NFSDENIED_NOLOCK
;
1500 * By the time fl reaches here, it is completely free again on
1501 * failure. The NFS lock done before attempting the
1502 * hardware lock has been backed out
1505 if (retval
== PFL_NFSDENIED
|| retval
== PFL_HWDENIED
) {
1506 /* One last chance to check the lock */
1507 if (fl
->blocking
== 1) {
1508 /* Queue the lock */
1509 debuglog("BLOCKING LOCK RECEIVED\n");
1510 retval
= (retval
== PFL_NFSDENIED
?
1511 PFL_NFSBLOCKED
: PFL_HWBLOCKED
);
1512 add_blockingfilelock(fl
);
1515 /* Leave retval alone, it's already correct */
1516 debuglog("Lock denied. Non-blocking failure\n");
1521 debuglog("Exiting lock_partialfilelock\n");
1527 * unlock_partialfilelock:
1529 * Given a file_lock, unlock all locks which match.
1531 * Note that a given lock might have to unlock ITSELF! See
1532 * clear_partialfilelock for example.
1535 enum partialfilelock_status
1536 unlock_partialfilelock(const struct file_lock
*fl
)
1538 struct file_lock
*lfl
,*rfl
,*releasedfl
,*selffl
;
1539 enum partialfilelock_status retval
;
1540 enum nfslock_status unlstatus
;
1541 enum hwlock_status unlhwstatus
, lhwstatus
;
1543 debuglog("Entering unlock_partialfilelock\n");
1549 retval
= PFL_DENIED
;
1552 * There are significant overlap and atomicity issues
1553 * with partially releasing a lock. For example, releasing
1554 * part of an NFS shared lock does *not* always release the
1555 * corresponding part of the file since there is only one
1556 * rpc.lockd UID but multiple users could be requesting it
1557 * from NFS. Also, an unlock request should never allow
1558 * another process to gain a lock on the remaining parts.
1559 * ie. Always apply the new locks before releasing the
1564 * Loop is required since multiple little locks
1565 * can be allocated and then deallocated with one
1568 * The loop is required to be here so that the nfs &
1569 * hw subsystems do not need to communicate with one
1574 debuglog("Value of releasedfl: %p\n",releasedfl
);
1575 /* lfl&rfl are created *AND* placed into the NFS lock list if required */
1576 unlstatus
= unlock_nfslock(fl
, &releasedfl
, &lfl
, &rfl
);
1577 debuglog("Value of releasedfl: %p\n",releasedfl
);
1580 /* XXX: This is grungy. It should be refactored to be cleaner */
1582 lhwstatus
= lock_hwlock(lfl
);
1583 if (lhwstatus
!= HW_GRANTED
&&
1584 lhwstatus
!= HW_GRANTED_DUPLICATE
) {
1585 debuglog("HW duplicate lock failure for left split\n");
1587 if (lfl
->flags
& LOCK_MON
)
1588 monitor_lock_host(lfl
->client_name
);
1592 lhwstatus
= lock_hwlock(rfl
);
1593 if (lhwstatus
!= HW_GRANTED
&&
1594 lhwstatus
!= HW_GRANTED_DUPLICATE
) {
1595 debuglog("HW duplicate lock failure for right split\n");
1597 if (rfl
->flags
& LOCK_MON
)
1598 monitor_lock_host(rfl
->client_name
);
1601 switch (unlstatus
) {
1603 /* Attempt to unlock on the hardware */
1604 debuglog("NFS unlock granted. Attempting hardware unlock\n");
1606 /* This call *MUST NOT* unlock the two newly allocated locks */
1607 unlhwstatus
= unlock_hwlock(fl
);
1608 debuglog("HW unlock returned with code %d\n",unlhwstatus
);
1610 switch (unlhwstatus
) {
1612 debuglog("HW unlock granted\n");
1613 if (releasedfl
->flags
& LOCK_MON
)
1614 unmonitor_lock_host(releasedfl
->client_name
);
1615 retval
= PFL_GRANTED
;
1617 case HW_DENIED_NOLOCK
:
1618 /* Huh?!?! This shouldn't happen */
1619 debuglog("HW unlock denied no lock\n");
1620 retval
= PFL_HWRESERR
;
1621 /* Break out of do-while */
1622 unlstatus
= NFS_RESERR
;
1625 debuglog("HW unlock failed\n");
1626 retval
= PFL_HWRESERR
;
1627 /* Break out of do-while */
1628 unlstatus
= NFS_RESERR
;
1632 debuglog("Exiting with status retval: %d\n",retval
);
1634 // XXX sending granted messages before unlock response
1635 // XXX causes unlock response to be corrupted?
1636 // XXX Workaround is to move this to nlm_prot_svc.c
1637 // XXX after the unlock response is sent.
1638 // retry_blockingfilelocklist();
1639 need_retry_blocked_locks
= 1;
1641 case NFS_DENIED_NOLOCK
:
1642 retval
= PFL_GRANTED
;
1643 debuglog("All locks cleaned out\n");
1646 retval
= PFL_NFSRESERR
;
1647 debuglog("NFS unlock failure\n");
1652 if (releasedfl
!= NULL
) {
1653 if (fl
== releasedfl
) {
1655 * XXX: YECHHH!!! Attempt to unlock self succeeded
1656 * but we can't deallocate the space yet. This is what
1657 * happens when you don't write malloc and free together
1659 debuglog("Attempt to unlock self\n");
1660 selffl
= releasedfl
;
1663 * XXX: this deallocation *still* needs to migrate closer
1664 * to the allocation code way up in get_lock or the allocation
1665 * code needs to migrate down (violation of "When you write
1666 * malloc you must write free")
1669 deallocate_file_lock(releasedfl
);
1673 } while (unlstatus
== NFS_GRANTED
);
1675 if (selffl
!= NULL
) {
1677 * This statement wipes out the incoming file lock (fl)
1678 * in spite of the fact that it is declared const
1680 debuglog("WARNING! Destroying incoming lock pointer\n");
1681 deallocate_file_lock(selffl
);
1684 debuglog("Exiting unlock_partialfilelock\n");
1690 * clear_partialfilelock
1692 * Normally called in response to statd state number change.
1693 * Wipe out all locks held by a host. As a bonus, the act of
1694 * doing so should automatically clear their statd entries and
1695 * unmonitor the host.
/*
 * clear_partialfilelock: wipe out all locks held by a host — first the
 * queued blocking requests, then every granted NFS lock whose
 * client_name matches.  Per the surrounding commentary, unlocking
 * should also clear statd entries and unmonitor the host.
 * NOTE(review): fragment truncated by extraction; trailing braces and
 * some statements are not visible here.
 */
1699 clear_partialfilelock(const char *hostname
)
1701 struct file_lock
*ifl
, *nfl
;
1703 /* Clear blocking file lock list */
1704 clear_blockingfilelock(hostname
);
1706 /* do all required unlocks */
1707 /* Note that unlock can smash the current pointer to a lock */
1710 * Normally, LIST_FOREACH is called for, but since
1711 * the current element *is* the iterator, deleting it
1712 * would mess up the iteration. Thus, a next element
1713 * must be used explicitly
/* Save the next pointer before the current entry can be destroyed. */
1716 ifl
= LIST_FIRST(&nfslocklist_head
);
1718 while (ifl
!= NULL
) {
1719 nfl
= LIST_NEXT(ifl
, nfslocklist
);
1721 if (strncmp(hostname
, ifl
->client_name
, SM_MAXSTRLEN
) == 0) {
1722 /* Unlock destroys ifl out from underneath */
1723 unlock_partialfilelock(ifl
);
1724 /* ifl is NO LONGER VALID AT THIS POINT */
1731 * test_partialfilelock:
/*
 * test_partialfilelock: check whether the requested lock could be
 * granted, without actually taking it.  Delegates to test_nfslock();
 * on conflict, *conflicting_fl is dumped for debugging.  Hardware-level
 * testing is not yet implemented (see the XXX below).
 * Returns PFL_GRANTED / PFL_GRANTED_DUPLICATE on success, PFL_NFSDENIED
 * on conflict.
 * NOTE(review): fragment truncated by extraction — the else-branch
 * structure and closing braces are missing from this view.
 */
1733 enum partialfilelock_status
1734 test_partialfilelock(const struct file_lock
*fl
,
1735 struct file_lock
**conflicting_fl
)
1737 enum partialfilelock_status retval
;
1738 enum nfslock_status teststatus
;
1740 debuglog("Entering testpartialfilelock...\n");
/* Default to denied until the NFS-level test says otherwise. */
1742 retval
= PFL_DENIED
;
1744 teststatus
= test_nfslock(fl
, conflicting_fl
);
1745 debuglog("test_partialfilelock: teststatus %d\n",teststatus
);
1747 if (teststatus
== NFS_GRANTED
|| teststatus
== NFS_GRANTED_DUPLICATE
) {
1748 /* XXX: Add the underlying filesystem locking code */
/* Map the NFS-level grant flavor onto the PFL result. */
1749 retval
= (teststatus
== NFS_GRANTED
) ?
1750 PFL_GRANTED
: PFL_GRANTED_DUPLICATE
;
1751 debuglog("Dumping locks...\n");
1753 dump_filelock(*conflicting_fl
);
1754 debuglog("Done dumping locks...\n");
1756 retval
= PFL_NFSDENIED
;
1757 debuglog("NFS test denied.\n");
1759 debuglog("Conflicting.\n");
1760 dump_filelock(*conflicting_fl
);
1763 debuglog("Exiting testpartialfilelock...\n");
1769 * Below here are routines associated with translating the partial file locking
1770 * codes into useful codes to send back to the NFS RPC messaging system
1774 * These routines translate the (relatively) useful return codes back onto
1775 * the few return codes which the nlm subsystem wishes to transmit
1779 do_test(struct file_lock
*fl
, struct file_lock
**conflicting_fl
)
1781 enum partialfilelock_status pfsret
;
1782 enum nlm_stats retval
;
1784 debuglog("Entering do_test...\n");
1786 pfsret
= test_partialfilelock(fl
,conflicting_fl
);
1790 debuglog("PFL test lock granted\n");
1792 dump_filelock(*conflicting_fl
);
1793 retval
= (fl
->flags
& LOCK_V4
) ? nlm4_granted
: nlm_granted
;
1795 case PFL_GRANTED_DUPLICATE
:
1796 debuglog("PFL test lock granted--duplicate id detected\n");
1798 dump_filelock(*conflicting_fl
);
1799 debuglog("Clearing conflicting_fl for call semantics\n");
1800 *conflicting_fl
= NULL
;
1801 retval
= (fl
->flags
& LOCK_V4
) ? nlm4_granted
: nlm_granted
;
1805 debuglog("PFL test lock denied\n");
1807 dump_filelock(*conflicting_fl
);
1808 retval
= (fl
->flags
& LOCK_V4
) ? nlm4_denied
: nlm_denied
;
1812 debuglog("PFL test lock resource fail\n");
1814 dump_filelock(*conflicting_fl
);
1815 retval
= (fl
->flags
& LOCK_V4
) ? nlm4_denied_nolocks
: nlm_denied_nolocks
;
1818 debuglog("PFL test lock *FAILED*\n");
1820 dump_filelock(*conflicting_fl
);
1821 retval
= (fl
->flags
& LOCK_V4
) ? nlm4_failed
: nlm_denied
;
1825 debuglog("Exiting do_test...\n");
1831 * do_lock: Try to acquire a lock
1833 * This routine makes a distinction between NLM versions. I am pretty
1834 * convinced that this should be abstracted out and bounced up a level
1838 do_lock(struct file_lock
*fl
)
1840 enum partialfilelock_status pfsret
;
1841 enum nlm_stats retval
;
1843 debuglog("Entering do_lock...\n");
1845 pfsret
= lock_partialfilelock(fl
);
1849 debuglog("PFL lock granted");
1851 retval
= (fl
->flags
& LOCK_V4
) ? nlm4_granted
: nlm_granted
;
1853 case PFL_GRANTED_DUPLICATE
:
1854 debuglog("PFL lock granted--duplicate id detected");
1856 retval
= (fl
->flags
& LOCK_V4
) ? nlm4_granted
: nlm_granted
;
1860 debuglog("PFL_NFS lock denied");
1862 retval
= (fl
->flags
& LOCK_V4
) ? nlm4_denied
: nlm_denied
;
1864 case PFL_NFSBLOCKED
:
1866 debuglog("PFL_NFS blocking lock denied. Queued.\n");
1868 retval
= (fl
->flags
& LOCK_V4
) ? nlm4_blocked
: nlm_blocked
;
1872 debuglog("PFL lock resource alocation fail\n");
1874 retval
= (fl
->flags
& LOCK_V4
) ? nlm4_denied_nolocks
: nlm_denied_nolocks
;
1877 debuglog("PFL lock *FAILED*");
1879 retval
= (fl
->flags
& LOCK_V4
) ? nlm4_failed
: nlm_denied
;
1883 debuglog("Exiting do_lock...\n");
/*
 * do_unlock: translate unlock_partialfilelock()'s status into the
 * nlm_stats code to send back to the client, choosing the NLMv4 or
 * NLMv1 constant based on fl->flags & LOCK_V4.
 * Note the visible mapping: the *DENIED_NOLOCK cases still report
 * "granted" to the client.
 * NOTE(review): fragment truncated by extraction — the switch header,
 * several case labels, break statements, and the return are missing
 * from this view.
 */
1889 do_unlock(struct file_lock
*fl
)
1891 enum partialfilelock_status pfsret
;
1892 enum nlm_stats retval
;
1894 debuglog("Entering do_unlock...\n");
1895 pfsret
= unlock_partialfilelock(fl
);
1899 debuglog("PFL unlock granted");
1901 retval
= (fl
->flags
& LOCK_V4
) ? nlm4_granted
: nlm_granted
;
1905 debuglog("PFL_NFS unlock denied");
1907 retval
= (fl
->flags
& LOCK_V4
) ? nlm4_denied
: nlm_denied
;
1909 case PFL_NFSDENIED_NOLOCK
:
1910 case PFL_HWDENIED_NOLOCK
:
1911 debuglog("PFL_NFS no lock found\n");
/* No matching lock: reported as granted rather than an error. */
1912 retval
= (fl
->flags
& LOCK_V4
) ? nlm4_granted
: nlm_granted
;
1916 debuglog("PFL unlock resource failure");
1918 retval
= (fl
->flags
& LOCK_V4
) ? nlm4_denied_nolocks
: nlm_denied_nolocks
;
1921 debuglog("PFL unlock *FAILED*");
1923 retval
= (fl
->flags
& LOCK_V4
) ? nlm4_failed
: nlm_denied
;
1927 debuglog("Exiting do_unlock...\n");
1935 * This routine is non-existent because it doesn't have a return code.
1936 * It is here for completeness in case someone *does* need to do return
1937 * codes later. A decent compiler should optimize this away.
/*
 * do_clear: release every lock held by the named host.
 *
 * Thin pass-through kept so that all of the RPC-facing entry points
 * route through a do_* routine; it exists for interface symmetry with
 * do_lock/do_unlock and carries no return code.
 */
void
do_clear(const char *hostname)
{
	clear_partialfilelock(hostname);
}
1948 * The following routines are all called from the code which the
1953 * testlock(): inform the caller if the requested lock would be granted
1955 * returns NULL if lock would be granted
1956 * returns pointer to a conflicting nlm4_holder if not
1959 struct nlm4_holder
*
1960 testlock(struct nlm4_lock
*lock
, bool_t exclusive
, int flags __unused
)
1962 struct file_lock test_fl
, *conflicting_fl
;
1964 bzero(&test_fl
, sizeof(test_fl
));
1966 test_fl
.filehandle
.n_len
= lock
->fh
.n_len
;
1967 test_fl
.filehandle
.n_bytes
= lock
->fh
.n_bytes
;
1968 copy_nlm4_lock_to_nlm4_holder(lock
, exclusive
, &test_fl
.client
);
1971 do_test(&test_fl
, &conflicting_fl
);
1973 if (conflicting_fl
== NULL
) {
1974 debuglog("No conflicting lock found\n");
1978 debuglog("Found conflicting lock\n");
1979 dump_filelock(conflicting_fl
);
1981 return (&conflicting_fl
->client
);
1986 * getlock: try to acquire the lock.
1987 * If file is already locked and we can sleep, put the lock in the list with
1988 * status LKST_WAITING; it'll be processed later.
1989 * Otherwise try to lock. If we're allowed to block, fork a child which
1990 * will do the blocking lock.
1994 getlock(nlm4_lockargs
*lckarg
, struct svc_req
*rqstp
, const int flags
)
1996 struct file_lock
*newfl
;
1997 enum nlm_stats retval
;
1999 debuglog("Entering getlock...\n");
2001 if (grace_expired
== 0 && lckarg
->reclaim
== 0)
2002 return (flags
& LOCK_V4
) ?
2003 nlm4_denied_grace_period
: nlm_denied_grace_period
;
2005 /* allocate new file_lock for this request */
2006 newfl
= allocate_file_lock(&lckarg
->alock
.oh
, &lckarg
->alock
.fh
);
2007 if (newfl
== NULL
) {
2008 syslog(LOG_NOTICE
, "lock allocate failed: %s", strerror(errno
));
2010 return (flags
& LOCK_V4
) ?
2011 nlm4_denied_nolocks
: nlm_denied_nolocks
;
2014 if (lckarg
->alock
.fh
.n_len
!= sizeof(fhandle_t
)) {
2015 debuglog("recieved fhandle size %d, local size %d",
2016 lckarg
->alock
.fh
.n_len
, (int)sizeof(fhandle_t
));
2019 fill_file_lock(newfl
,
2020 (struct sockaddr
*)svc_getcaller(rqstp
->rq_xprt
),
2021 lckarg
->exclusive
, lckarg
->alock
.svid
, lckarg
->alock
.l_offset
,
2022 lckarg
->alock
.l_len
,
2023 lckarg
->alock
.caller_name
, lckarg
->state
, 0, flags
, lckarg
->block
);
2026 * newfl is now fully constructed and deallocate_file_lock
2027 * can now be used to delete it
2031 debuglog("Pointer to new lock is %p\n",newfl
);
2033 retval
= do_lock(newfl
);
2035 debuglog("Pointer to new lock is %p\n",newfl
);
2041 /* case nlm_granted: is the same as nlm4_granted */
2042 /* do_mon(lckarg->alock.caller_name); */
2045 /* case nlm_blocked: is the same as nlm4_blocked */
2046 /* do_mon(lckarg->alock.caller_name); */
2049 deallocate_file_lock(newfl
);
2053 debuglog("Exiting getlock...\n");
2059 /* unlock a filehandle */
/*
 * unlock: RPC-facing entry point to release locks matching the given
 * nlm4_lock.  Builds a zeroed stack file_lock, copies the filehandle
 * and region (via copy_nlm4_lock_to_nlm4_holder with exclusive=0), and
 * hands it to do_unlock().
 * NOTE(review): fragment truncated by extraction — the declaration of
 * err and the final return are not visible in this view.
 */
2061 unlock(nlm4_lock
*lock
, const int flags __unused
)
2063 struct file_lock fl
;
2068 debuglog("Entering unlock...\n");
/* Describe the region to unlock in a throwaway stack file_lock. */
2070 bzero(&fl
,sizeof(struct file_lock
));
2071 fl
.filehandle
.n_len
= lock
->fh
.n_len
;
2072 fl
.filehandle
.n_bytes
= lock
->fh
.n_bytes
;
2074 copy_nlm4_lock_to_nlm4_holder(lock
, 0, &fl
.client
);
2076 err
= do_unlock(&fl
);
2080 debuglog("Exiting unlock...\n");
2085 /* cancel a blocked lock request */
2087 cancellock(nlm4_cancargs
*args
, const int flags __unused
)
2089 struct file_lock
*ifl
, *nfl
;
2094 debuglog("Entering cancellock...\n");
2099 * scan blocked lock list for matching request and remove/destroy
2101 ifl
= LIST_FIRST(&blockedlocklist_head
);
2102 for ( ; ifl
!= NULL
; ifl
= nfl
) {
2103 nfl
= LIST_NEXT(ifl
, nfslocklist
);
2105 /* compare lock fh - filehandle */
2106 if (!same_netobj(&args
->alock
.fh
, &ifl
->filehandle
))
2109 /* compare lock caller_name - client_name */
2110 if (strncmp(args
->alock
.caller_name
, ifl
->client_name
, SM_MAXSTRLEN
))
2113 /* Note: don't compare cookie - client_cookie */
2114 /* The cookie may be specific to the cancel request */
2115 /* and not be the same as the one in the original lock request. */
2117 /* compare lock oh - client.oh */
2118 if (!same_netobj(&args
->alock
.oh
, &ifl
->client
.oh
))
2121 /* compare lock svid - client.svid */
2122 if (args
->alock
.svid
!= ifl
->client
.svid
)
2125 /* compare lock l_offset - client.l_offset */
2126 if (args
->alock
.l_offset
!= ifl
->client
.l_offset
)
2129 /* compare lock l_len - client.l_len */
2130 if (args
->alock
.l_len
!= ifl
->client
.l_len
)
2133 /* compare exclusive - client.exclusive */
2134 if (args
->exclusive
!= ifl
->client
.exclusive
)
2138 remove_blockingfilelock(ifl
);
2139 deallocate_file_lock(ifl
);
2146 debuglog("Exiting cancellock...\n");
2153 * XXX: The following monitor/unmonitor routines
2154 * have not been extensively tested (ie. no regression
2155 * script exists like for the locking sections)
2159 * Find a lock host on a queue. If found:
2161 * bump the access time,
2162 * dequeue it from the queue it was found on,
2163 * enqueue it at the front of the "in use" queue.
/*
 * get_lock_host: look up a monitored host by name on the given queue.
 * On a hit the entry is dequeued, its lastuse timestamp refreshed, and
 * it is re-queued at the head of the "in use" list (hostlst_head).
 * NOTE(review): fragment truncated by extraction — the declaration of
 * ihp, refcount bump, loop break, and return are not visible here.
 */
2166 get_lock_host(struct hostlst_head
*hd
, const char *hostname
)
2170 debuglog("get_lock_host %s\n", hostname
);
2171 TAILQ_FOREACH(ihp
, hd
, hostlst
) {
2172 if (strncmp(hostname
, ihp
->name
, SM_MAXSTRLEN
) == 0) {
/* Found: move the entry to the front of the in-use list. */
2173 TAILQ_REMOVE(hd
, ihp
, hostlst
);
2174 /* Host is already monitored, bump refcount */
2176 ihp
->lastuse
= currsec
;
2177 /* Host should only be in the monitor list once */
2178 TAILQ_INSERT_HEAD(&hostlst_head
, ihp
, hostlst
);
2182 debuglog("get_lock_host %s %s\n",
2183 ihp
== NULL
? "did not find" : "found", hostname
);
2188 * monitor_lock_host: monitor lock hosts locally with a ref count and
2192 monitor_lock_host(const char *hostname
)
2194 struct host
*ihp
, *nhp
;
2196 struct sm_stat_res sres
;
2197 int rpcret
, statflag
;
2202 debuglog("monitor_lock_host: %s\n", hostname
);
2203 ihp
= get_lock_host(&hostlst_head
, hostname
);
2205 ihp
= get_lock_host(&hostlst_unref
, hostname
);
2207 debuglog("Monitor_lock_host: %s (cached)\n", hostname
);
2211 debuglog("Monitor_lock_host: %s (not found, creating)\n", hostname
);
2212 /* Host is not yet monitored, add it */
2213 nhp
= malloc(sizeof(struct host
));
2216 debuglog("Unable to allocate entry for statd mon\n");
2220 /* Allocated new host entry, now fill the fields */
2221 strncpy(nhp
->name
, hostname
, SM_MAXSTRLEN
);
2223 nhp
->lastuse
= currsec
;
2224 debuglog("Locally Monitoring host %16s\n",hostname
);
2226 debuglog("Attempting to tell statd\n");
2228 bzero(&smon
,sizeof(smon
));
2230 smon
.mon_id
.mon_name
= nhp
->name
;
2231 smon
.mon_id
.my_id
.my_name
= "localhost\0";
2233 smon
.mon_id
.my_id
.my_prog
= NLM_PROG
;
2234 smon
.mon_id
.my_id
.my_vers
= NLM_SM
;
2235 smon
.mon_id
.my_id
.my_proc
= NLM_SM_NOTIFY
;
2237 rpcret
= callrpc("localhost", SM_PROG
, SM_VERS
, SM_MON
, xdr_mon
,
2238 &smon
, xdr_sm_stat_res
, &sres
);
2241 if (sres
.res_stat
== stat_fail
) {
2242 debuglog("Statd call failed\n");
2248 debuglog("Rpc call to statd failed with return value: %d\n",
2253 if (statflag
== 1) {
2254 TAILQ_INSERT_HEAD(&hostlst_head
, nhp
, hostlst
);
2261 * unmonitor_lock_host: clear monitor ref counts and inform statd when gone
/*
 * unmonitor_lock_host: drop one reference on a locally monitored host.
 * When the refcount reaches zero the entry is moved from the in-use
 * list to hostlst_unref; if expiry is disabled (host_expire <= 0) the
 * statd unmonitor happens immediately via destroy_lock_host().
 * NOTE(review): fragment truncated by extraction — the refcount
 * decrement, early returns, and closing braces are missing from view.
 */
2264 unmonitor_lock_host(const char *hostname
)
2268 TAILQ_FOREACH(ihp
, &hostlst_head
, hostlst
) {
2269 if (strncmp(hostname
, ihp
->name
, SM_MAXSTRLEN
) == 0) {
2270 /* Host is monitored, bump refcount */
2272 /* Host should only be in the monitor list once */
2278 debuglog("Could not find host %16s in mon list\n", hostname
);
2282 if (ihp
->refcnt
> 0)
/* A negative count indicates a ref/unref accounting bug. */
2285 if (ihp
->refcnt
< 0) {
2286 debuglog("Negative refcount!: %d\n", ihp
->refcnt
);
/* Last reference gone: park the host on the unreferenced list. */
2289 TAILQ_REMOVE(&hostlst_head
, ihp
, hostlst
);
2290 TAILQ_INSERT_HEAD(&hostlst_unref
, ihp
, hostlst
);
2291 if (host_expire
<= 0)
2292 destroy_lock_host(ihp
);
2296 destroy_lock_host(struct host
*ihp
)
2298 struct mon_id smon_id
;
2299 struct sm_stat smstat
;
2302 debuglog("Attempting to unmonitor host %16s\n", ihp
->name
);
2304 bzero(&smon_id
,sizeof(smon_id
));
2306 smon_id
.mon_name
= (char *)ihp
->name
;
2307 smon_id
.my_id
.my_name
= "localhost";
2308 smon_id
.my_id
.my_prog
= NLM_PROG
;
2309 smon_id
.my_id
.my_vers
= NLM_SM
;
2310 smon_id
.my_id
.my_proc
= NLM_SM_NOTIFY
;
2312 rpcret
= callrpc("localhost", SM_PROG
, SM_VERS
, SM_UNMON
, xdr_mon_id
,
2313 &smon_id
, xdr_sm_stat
, &smstat
);
2316 debuglog("Rpc call to unmonitor statd failed with "
2317 " return value: %d: %s", rpcret
, clnt_sperrno(rpcret
));
2319 debuglog("Succeeded unmonitoring %16s\n", ihp
->name
);
2322 TAILQ_REMOVE(&hostlst_unref
, ihp
, hostlst
);
2327 * returns 1 if there are hosts to expire or 0 if there are none.
/*
 * expire_lock_hosts: destroy unreferenced monitored hosts whose
 * lastuse is older than host_expire seconds, working from the tail
 * (oldest) of hostlst_unref.  Returns nonzero while unreferenced
 * hosts remain, so the caller knows to poll again.
 * NOTE(review): fragment truncated by extraction — the loop construct,
 * NULL check on ihp, and break are not visible in this view.
 */
2330 expire_lock_hosts(void)
2334 debuglog("expire_lock_hosts: called\n");
/* Oldest entry lives at the tail of the unreferenced list. */
2336 ihp
= TAILQ_LAST(&hostlst_unref
, hostlst_head
);
/* Stop once the oldest entry is still within the expiry window. */
2339 if (host_expire
> 0 && ihp
->lastuse
>= currsec
- host_expire
)
2341 debuglog("expire_lock_hosts: expiring %s %d %d %d\n",
2342 ihp
->name
, (int)ihp
->lastuse
,
2343 (int)currsec
, (int)currsec
- host_expire
);
2344 destroy_lock_host(ihp
);
2346 return (TAILQ_LAST(&hostlst_unref
, hostlst_head
) != NULL
);
2350 * notify: Clear all locks from a host if statd complains
2352 * XXX: This routine has not been thoroughly tested. However, neither
2353 * had the old one been. It used to compare the statd crash state counter
2354 * to the current lock state. The upshot of this was that it basically
2355 * cleared all locks from the specified host 99% of the time (with the
2356 * other 1% being a bug). Consequently, the assumption is that clearing
2357 * all locks from a host when notified by statd is acceptable.
2359 * Please note that this routine skips the usual level of redirection
2360 * through a do_* type routine. This introduces a possible level of
2361 * error and might better be written as do_notify and take this one out.
/*
 * notify: entry point invoked when statd reports a host state change.
 * Per the preceding commentary, all locks from the host are cleared
 * unconditionally.
 * NOTE(review): the function body between the two debuglog calls
 * (presumably the lock-clearing call) is missing from this extracted
 * view — confirm against the original source.
 */
2366 notify(const char *hostname
, const int state
)
2368 debuglog("notify from %s, new state %d", hostname
, state
);
2374 debuglog("Leaving notify\n");
2378 send_granted(fl
, opcode
)
2379 struct file_lock
*fl
;
2380 int opcode __unused
;
2384 struct timeval timeo
;
2386 static struct nlm_res retval
;
2387 static struct nlm4_res retval4
;
2389 debuglog("About to send granted on blocked lock\n");
2391 cli
= get_client(fl
->addr
,
2392 (fl
->flags
& LOCK_V4
) ? NLM_VERS4
: NLM_VERS
);
2394 syslog(LOG_NOTICE
, "failed to get CLIENT for %s",
2397 * We fail to notify remote that the lock has been granted.
2398 * The client will timeout and retry, the lock will be
2399 * granted at this time.
2404 timeo
.tv_usec
= (fl
->flags
& LOCK_ASYNC
) ? 0 : 500000; /* 0.5s */
2406 fl
->granted_cookie
= ++send_granted_cookie
;
2407 if (!send_granted_cookie
)
2408 send_granted_cookie
++;
2410 if (fl
->flags
& LOCK_V4
) {
2411 static nlm4_testargs res
;
2412 res
.cookie
.n_len
= sizeof(fl
->granted_cookie
);
2413 res
.cookie
.n_bytes
= (char*)&fl
->granted_cookie
;
2414 res
.exclusive
= fl
->client
.exclusive
;
2415 res
.alock
.caller_name
= fl
->client_name
;
2416 res
.alock
.fh
.n_len
= fl
->filehandle
.n_len
;
2417 res
.alock
.fh
.n_bytes
= fl
->filehandle
.n_bytes
;
2418 res
.alock
.oh
= fl
->client
.oh
;
2419 res
.alock
.svid
= fl
->client
.svid
;
2420 res
.alock
.l_offset
= fl
->client
.l_offset
;
2421 res
.alock
.l_len
= fl
->client
.l_len
;
2422 debuglog("sending v4 reply%s",
2423 (fl
->flags
& LOCK_ASYNC
) ? " (async)":"");
2424 if (fl
->flags
& LOCK_ASYNC
) {
2425 rv
= clnt_call(cli
, NLM4_GRANTED_MSG
,
2426 xdr_nlm4_testargs
, &res
, xdr_void
, &dummy
, timeo
);
2428 rv
= clnt_call(cli
, NLM4_GRANTED
,
2429 xdr_nlm4_testargs
, &res
, xdr_nlm4_res
,
2433 static nlm_testargs res
;
2435 res
.cookie
.n_len
= sizeof(fl
->granted_cookie
);
2436 res
.cookie
.n_bytes
= (char*)&fl
->granted_cookie
;
2437 res
.exclusive
= fl
->client
.exclusive
;
2438 res
.alock
.caller_name
= fl
->client_name
;
2439 res
.alock
.fh
.n_len
= fl
->filehandle
.n_len
;
2440 res
.alock
.fh
.n_bytes
= fl
->filehandle
.n_bytes
;
2441 res
.alock
.oh
= fl
->client
.oh
;
2442 res
.alock
.svid
= fl
->client
.svid
;
2443 res
.alock
.l_offset
= fl
->client
.l_offset
;
2444 res
.alock
.l_len
= fl
->client
.l_len
;
2445 debuglog("sending v1 reply%s",
2446 (fl
->flags
& LOCK_ASYNC
) ? " (async)":"");
2447 if (fl
->flags
& LOCK_ASYNC
) {
2448 rv
= clnt_call(cli
, NLM_GRANTED_MSG
,
2449 xdr_nlm_testargs
, &res
, xdr_void
, &dummy
, timeo
);
2451 rv
= clnt_call(cli
, NLM_GRANTED
,
2452 xdr_nlm_testargs
, &res
, xdr_nlm_res
,
2456 if (debug_level
> 2)
2457 debuglog("clnt_call returns %d(%s) for granted",
2458 rv
, clnt_sperrno(rv
));
2460 if ((rv
!= RPC_SUCCESS
) &&
2461 !((fl
->flags
& LOCK_ASYNC
) && (rv
== RPC_TIMEDOUT
)))
2467 * granted_failed: remove a granted lock that wasn't successfully
2468 * accepted by the client
2471 granted_failed(nlm4_res
*arg
)
2474 struct file_lock
*ifl
;
2476 debuglog("Entering granted_failed, status %d\n", arg
->stat
.stat
);
2478 if (arg
->cookie
.n_len
!= sizeof(cookie
)) {
2479 debuglog("Exiting granted_failed: bogus cookie size %d\n",
2483 bcopy(arg
->cookie
.n_bytes
, &cookie
, sizeof(cookie
));
2484 debuglog("granted_failed, cookie 0x%llx\n", cookie
);
2486 LIST_FOREACH(ifl
, &nfslocklist_head
, nfslocklist
) {
2487 debuglog("Pointer to file lock: %p\n",ifl
);
2489 debuglog("****Dump of ifl****\n");
2492 if (ifl
->granted_cookie
!= cookie
)
2495 debuglog("granted_failed: cookie found\n");
2501 /* ifl is NO LONGER VALID AT THIS POINT */
2503 debuglog("granted_failed: cookie NOT FOUND\n");
2506 debuglog("Exiting granted_failed\n");
2510 * getshare: try to acquire a share reservation
2513 getshare(nlm_shareargs
*shrarg
, struct svc_req
*rqstp
, const int flags
)
2515 struct sharefile
*shrfile
;
2516 struct file_share
*sh
;
2518 debuglog("Entering getshare...\n");
2520 if (grace_expired
== 0 && shrarg
->reclaim
== 0) {
2521 debuglog("getshare denied - grace period\n");
2522 return (flags
& LOCK_V4
) ?
2523 nlm4_denied_grace_period
:
2524 nlm_denied_grace_period
;
2527 /* find file in list of share files */
2528 LIST_FOREACH(shrfile
, &nfssharefilelist_head
, sharefilelist
) {
2529 if ((shrarg
->share
.fh
.n_len
== shrfile
->filehandle
.n_len
) &&
2530 (bcmp(shrarg
->share
.fh
.n_bytes
, shrfile
->filehandle
.n_bytes
,
2531 shrarg
->share
.fh
.n_len
) == 0)) {
2532 /* shrfile is the correct file */
2537 /* if share file not found, create a new share file */
2540 fd
= fhopen((fhandle_t
*)shrarg
->share
.fh
.n_bytes
, O_RDONLY
);
2542 debuglog("fhopen failed (from %16s): %32s\n",
2543 shrarg
->share
.caller_name
, strerror(errno
));
2544 if ((flags
& LOCK_V4
) == 0)
2548 return nlm4_stale_fh
;
2553 shrfile
= malloc(sizeof(struct sharefile
));
2555 debuglog("getshare failed: can't allocate sharefile\n");
2557 return (flags
& LOCK_V4
) ? nlm4_failed
: nlm_denied
;
2559 shrfile
->filehandle
.n_len
= shrarg
->share
.fh
.n_len
;
2560 shrfile
->filehandle
.n_bytes
= malloc(shrarg
->share
.fh
.n_len
);
2561 if (!shrfile
->filehandle
.n_bytes
) {
2562 debuglog("getshare failed: can't allocate sharefile filehandle\n");
2565 return (flags
& LOCK_V4
) ? nlm4_failed
: nlm_denied
;
2567 bcopy(shrarg
->share
.fh
.n_bytes
, shrfile
->filehandle
.n_bytes
,
2568 shrarg
->share
.fh
.n_len
);
2570 shrfile
->refcount
= 0;
2571 shrfile
->sharelist_head
.lh_first
= NULL
;
2572 LIST_INSERT_HEAD(&nfssharefilelist_head
, shrfile
, sharefilelist
);
2575 /* compare request mode/access to current shares */
2576 LIST_FOREACH(sh
, &shrfile
->sharelist_head
, nfssharelist
) {
2577 /* if request host/owner matches a current share... */
2578 if ((strncmp(shrarg
->share
.caller_name
, sh
->client_name
, SM_MAXSTRLEN
) == 0) &&
2579 same_netobj(&shrarg
->share
.oh
, &sh
->oh
)) {
2580 /* ...then just update share mode/access */
2581 sh
->mode
= shrarg
->share
.mode
;
2582 sh
->access
= shrarg
->share
.access
;
2583 debuglog("getshare: updated existing share\n");
2586 if (((shrarg
->share
.mode
& sh
->access
) != 0) ||
2587 ((shrarg
->share
.access
& sh
->mode
) != 0)) {
2588 /* share request conflicts with existing share */
2589 debuglog("getshare: conflicts with existing share\n");
2594 /* create/init new share */
2595 sh
= malloc(sizeof(struct file_share
));
2597 debuglog("getshare failed: can't allocate share\n");
2598 if (!shrfile
->refcount
) {
2599 LIST_REMOVE(shrfile
, sharefilelist
);
2601 free(shrfile
->filehandle
.n_bytes
);
2604 return (flags
& LOCK_V4
) ? nlm4_failed
: nlm_denied
;
2606 sh
->oh
.n_len
= shrarg
->share
.oh
.n_len
;
2607 sh
->oh
.n_bytes
= malloc(sh
->oh
.n_len
);
2608 if (!sh
->oh
.n_bytes
) {
2609 debuglog("getshare failed: can't allocate share owner handle\n");
2611 if (!shrfile
->refcount
) {
2612 LIST_REMOVE(shrfile
, sharefilelist
);
2614 free(shrfile
->filehandle
.n_bytes
);
2617 return (flags
& LOCK_V4
) ? nlm4_failed
: nlm_denied
;
2619 strncpy(sh
->client_name
, shrarg
->share
.caller_name
, SM_MAXSTRLEN
);
2620 sh
->mode
= shrarg
->share
.mode
;
2621 sh
->access
= shrarg
->share
.access
;
2623 /* insert new share into file's share list */
2624 LIST_INSERT_HEAD(&shrfile
->sharelist_head
, sh
, nfssharelist
);
2625 shrfile
->refcount
++;
2627 debuglog("Exiting getshare...\n");
2633 /* remove a share reservation */
2635 unshare(nlm_shareargs
*shrarg
, struct svc_req
*rqstp
)
2637 struct sharefile
*shrfile
;
2638 struct file_share
*sh
;
2640 debuglog("Entering unshare...\n");
2642 /* find file in list of share files */
2643 LIST_FOREACH(shrfile
, &nfssharefilelist_head
, sharefilelist
) {
2644 if ((shrarg
->share
.fh
.n_len
== shrfile
->filehandle
.n_len
) &&
2645 (bcmp(shrarg
->share
.fh
.n_bytes
, shrfile
->filehandle
.n_bytes
,
2646 shrarg
->share
.fh
.n_len
) == 0)) {
2647 /* shrfile is the correct file */
2652 /* if share file not found, return success (per spec) */
2654 debuglog("unshare: no such share file\n");
2659 LIST_FOREACH(sh
, &shrfile
->sharelist_head
, nfssharelist
) {
2660 /* if request host/owner matches a current share... */
2661 if ((strncmp(shrarg
->share
.caller_name
, sh
->client_name
, SM_MAXSTRLEN
) == 0) &&
2662 same_netobj(&shrarg
->share
.oh
, &sh
->oh
))
2666 /* if share not found, return success (per spec) */
2668 debuglog("unshare: no such share\n");
2672 /* remove share from file and deallocate */
2673 shrfile
->refcount
--;
2674 LIST_REMOVE(sh
, nfssharelist
);
2675 free(sh
->oh
.n_bytes
);
2678 /* if file has no more shares, deallocate share file */
2679 if (!shrfile
->refcount
) {
2680 debuglog("unshare: file has no more shares\n");
2681 LIST_REMOVE(shrfile
, sharefilelist
);
2683 free(shrfile
->filehandle
.n_bytes
);
2687 debuglog("Exiting unshare...\n");
2695 * Wipe out all non-monitored locks and shares held by a host.
2699 do_free_all(const char *hostname
)
2701 struct file_lock
*ifl
, *nfl
;
2702 struct sharefile
*shrfile
, *nshrfile
;
2703 struct file_share
*ifs
, *nfs
;
2705 /* clear non-monitored blocking file locks */
2706 ifl
= LIST_FIRST(&blockedlocklist_head
);
2707 while (ifl
!= NULL
) {
2708 nfl
= LIST_NEXT(ifl
, nfslocklist
);
2710 if (((ifl
->flags
& LOCK_MON
) == 0) &&
2711 (strncmp(hostname
, ifl
->client_name
, SM_MAXSTRLEN
) == 0)) {
2712 remove_blockingfilelock(ifl
);
2713 deallocate_file_lock(ifl
);
2719 /* clear non-monitored file locks */
2720 ifl
= LIST_FIRST(&nfslocklist_head
);
2721 while (ifl
!= NULL
) {
2722 nfl
= LIST_NEXT(ifl
, nfslocklist
);
2724 if (((ifl
->flags
& LOCK_MON
) == 0) &&
2725 (strncmp(hostname
, ifl
->client_name
, SM_MAXSTRLEN
) == 0)) {
2726 /* Unlock destroys ifl out from underneath */
2727 unlock_partialfilelock(ifl
);
2728 /* ifl is NO LONGER VALID AT THIS POINT */
2735 shrfile
= LIST_FIRST(&nfssharefilelist_head
);
2736 while (shrfile
!= NULL
) {
2737 nshrfile
= LIST_NEXT(shrfile
, sharefilelist
);
2739 ifs
= LIST_FIRST(&shrfile
->sharelist_head
);
2740 while (ifs
!= NULL
) {
2741 nfs
= LIST_NEXT(ifs
, nfssharelist
);
2743 if (strncmp(hostname
, ifs
->client_name
, SM_MAXSTRLEN
) == 0) {
2744 shrfile
->refcount
--;
2745 LIST_REMOVE(ifs
, nfssharelist
);
2746 free(ifs
->oh
.n_bytes
);
2753 if (!shrfile
->refcount
) {
2754 LIST_REMOVE(shrfile
, sharefilelist
);
2756 free(shrfile
->filehandle
.n_bytes
);
/*
 * Routines below here have not been modified in the overhaul.
 */

/*
 * XXX: Are these two routines still required since lockd is not spawning off
 * children to service locks anymore?  Presumably they were originally
 * put in place to prevent one child from changing the lock list out
 * from under another one.
 */
2783 sigemptyset(&block
);
2784 sigaddset(&block
, SIGCHLD
);
2786 if (sigprocmask(SIG_BLOCK
, &block
, NULL
) < 0) {
2787 syslog(LOG_WARNING
, "siglock failed: %s", strerror(errno
));
2796 sigemptyset(&block
);
2797 sigaddset(&block
, SIGCHLD
);
2799 if (sigprocmask(SIG_UNBLOCK
, &block
, NULL
) < 0) {
2800 syslog(LOG_WARNING
, "sigunlock failed: %s", strerror(errno
));