]> git.saurik.com Git - apple/network_cmds.git/blame - rpc_lockd.tproj/lockd_lock.c
network_cmds-176.3.1.tar.gz
[apple/network_cmds.git] / rpc_lockd.tproj / lockd_lock.c
CommitLineData
ac2f15b3
A
1/* $NetBSD: lockd_lock.c,v 1.5 2000/11/21 03:47:41 enami Exp $ */
2/* $FreeBSD: src/usr.sbin/rpc.lockd/lockd_lock.c,v 1.10 2002/03/22 19:57:09 alfred Exp $ */
3
4/*
5 * Copyright (c) 2001 Andrew P. Lentvorski, Jr.
6 * Copyright (c) 2000 Manuel Bouyer.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 */
37
38#define LOCKD_DEBUG
39
40#include <stdio.h>
41#ifdef LOCKD_DEBUG
42#include <stdarg.h>
43#endif
44#include <stdlib.h>
45#include <unistd.h>
46#include <fcntl.h>
47#include <syslog.h>
48#include <errno.h>
49#include <string.h>
50#include <signal.h>
51#include <rpc/rpc.h>
52#include <sys/types.h>
53#include <sys/stat.h>
54#include <sys/socket.h>
55#include <sys/param.h>
56#include <sys/mount.h>
57#include <sys/wait.h>
58#include <rpcsvc/sm_inter.h>
59#include <rpcsvc/nlm_prot.h>
60
61#include "lockd.h"
62#include "lockd_lock.h"
63
64#define MAXOBJECTSIZE 64
65#define MAXBUFFERSIZE 1024
66
67/*
68 * SM_MAXSTRLEN is usually 1024. This means that lock requests and
69 * host name monitoring entries are *MUCH* larger than they should be
70 */
71
72/*
73 * A set of utilities for managing file locking
74 *
75 * XXX: All locks are in a linked list, a better structure should be used
76 * to improve search/access efficiency.
77 */
78
79/* struct describing a lock */
80struct file_lock {
81 LIST_ENTRY(file_lock) nfslocklist;
82 netobj filehandle; /* NFS filehandle */
83 struct sockaddr *addr;
84 struct nlm4_holder client; /* lock holder */
7902cf7e 85 u_int64_t granted_cookie;
ac2f15b3
A
86 char client_name[SM_MAXSTRLEN];
87 int nsm_status; /* status from the remote lock manager */
88 int status; /* lock status, see below */
89 int flags; /* lock flags, see lockd_lock.h */
90 int blocking; /* blocking lock or not */
91 pid_t locker; /* pid of the child process trying to get the lock */
92 int fd; /* file descriptor for this lock */
93};
94
95LIST_HEAD(nfslocklist_head, file_lock);
96struct nfslocklist_head nfslocklist_head = LIST_HEAD_INITIALIZER(nfslocklist_head);
97
98LIST_HEAD(blockedlocklist_head, file_lock);
99struct blockedlocklist_head blockedlocklist_head = LIST_HEAD_INITIALIZER(blockedlocklist_head);
100
101/* struct describing a share reservation */
102struct file_share {
103 LIST_ENTRY(file_share) nfssharelist;
104 netobj oh; /* share holder */
105 char client_name[SM_MAXSTRLEN];
106 short mode;
107 short access;
108};
109LIST_HEAD(nfssharelist_head, file_share);
110
111/* Struct describing a file with share reservations */
112struct sharefile {
113 LIST_ENTRY(sharefile) sharefilelist;
114 netobj filehandle; /* Local access filehandle */
115 int fd; /* file descriptor: remains open until no more shares */
116 int refcount;
117 struct nfssharelist_head sharelist_head;
118};
119LIST_HEAD(nfssharefilelist_head, sharefile);
120struct nfssharefilelist_head nfssharefilelist_head = LIST_HEAD_INITIALIZER(nfssharefilelist_head);
121
122/* lock status */
123#define LKST_LOCKED 1 /* lock is locked */
124/* XXX: Is this flag file specific or lock specific? */
125#define LKST_WAITING 2 /* file is already locked by another host */
126#define LKST_PROCESSING 3 /* child is trying to acquire the lock */
127#define LKST_DYING 4 /* must die when we get news from the child */
128
129/* struct describing a monitored host */
130struct host {
7902cf7e 131 TAILQ_ENTRY(host) hostlst;
ac2f15b3
A
132 char name[SM_MAXSTRLEN];
133 int refcnt;
7902cf7e 134 time_t lastuse;
ac2f15b3
A
135};
136/* list of hosts we monitor */
7902cf7e
A
137TAILQ_HEAD(hostlst_head, host);
138struct hostlst_head hostlst_head = TAILQ_HEAD_INITIALIZER(hostlst_head);
139struct hostlst_head hostlst_unref = TAILQ_HEAD_INITIALIZER(hostlst_unref);
140
141int host_expire = 60; /* seconds */
142time_t currsec;
143u_int64_t send_granted_cookie = 0;
ac2f15b3
A
144
145/*
146 * File monitoring handlers
147 * XXX: These might be able to be removed when kevent support
148 * is placed into the hardware lock/unlock routines. (ie.
149 * let the kernel do all the file monitoring)
150 */
151
152/* Struct describing a monitored file */
153struct monfile {
154 LIST_ENTRY(monfile) monfilelist;
155 netobj filehandle; /* Local access filehandle */
156 int fd; /* file descriptor: remains open until unlock! */
157 int refcount;
158 int exclusive;
159};
160
161/* List of files we monitor */
162LIST_HEAD(monfilelist_head, monfile);
163struct monfilelist_head monfilelist_head = LIST_HEAD_INITIALIZER(monfilelist_head);
164
165static int debugdelay = 0;
166
167enum nfslock_status { NFS_GRANTED = 0, NFS_GRANTED_DUPLICATE,
168 NFS_DENIED, NFS_DENIED_NOLOCK,
169 NFS_RESERR };
170
171enum hwlock_status { HW_GRANTED = 0, HW_GRANTED_DUPLICATE,
172 HW_DENIED, HW_DENIED_NOLOCK,
173 HW_STALEFH, HW_READONLY, HW_RESERR };
174
175enum partialfilelock_status { PFL_GRANTED=0, PFL_GRANTED_DUPLICATE, PFL_DENIED,
176 PFL_NFSDENIED, PFL_NFSBLOCKED, PFL_NFSDENIED_NOLOCK, PFL_NFSRESERR,
177 PFL_HWDENIED, PFL_HWBLOCKED, PFL_HWDENIED_NOLOCK, PFL_HWRESERR};
178
179enum LFLAGS {LEDGE_LEFT, LEDGE_LBOUNDARY, LEDGE_INSIDE, LEDGE_RBOUNDARY, LEDGE_RIGHT};
180enum RFLAGS {REDGE_LEFT, REDGE_LBOUNDARY, REDGE_INSIDE, REDGE_RBOUNDARY, REDGE_RIGHT};
181/* XXX: WARNING! I HAVE OVERLOADED THIS STATUS ENUM! SPLIT IT APART INTO TWO */
182enum split_status {SPL_DISJOINT=0, SPL_LOCK1=1, SPL_LOCK2=2, SPL_CONTAINED=4, SPL_RESERR=8};
183
184enum partialfilelock_status lock_partialfilelock(struct file_lock *fl);
185
7902cf7e 186int send_granted(struct file_lock *fl, int opcode);
ac2f15b3
A
187void siglock(void);
188void sigunlock(void);
7902cf7e 189void destroy_lock_host(struct host *ihp);
ac2f15b3
A
190void monitor_lock_host(const char *hostname);
191void unmonitor_lock_host(const char *hostname);
192
193void copy_nlm4_lock_to_nlm4_holder(const struct nlm4_lock *src,
194 const bool_t exclusive, struct nlm4_holder *dest);
195struct file_lock * allocate_file_lock(const netobj *lockowner,
7902cf7e 196 const netobj *filehandle);
ac2f15b3
A
197void deallocate_file_lock(struct file_lock *fl);
198void fill_file_lock(struct file_lock *fl,
199 struct sockaddr *addr, const bool_t exclusive, const int32_t svid,
200 const u_int64_t offset, const u_int64_t len, const char *caller_name,
201 const int state, const int status, const int flags, const int blocking);
202int regions_overlap(const u_int64_t start1, const u_int64_t len1,
203 const u_int64_t start2, const u_int64_t len2);;
204enum split_status region_compare(const u_int64_t starte, const u_int64_t lene,
205 const u_int64_t startu, const u_int64_t lenu,
206 u_int64_t *start1, u_int64_t *len1, u_int64_t *start2, u_int64_t *len2);
207int same_netobj(const netobj *n0, const netobj *n1);
208int same_filelock_identity(const struct file_lock *fl0,
209 const struct file_lock *fl2);
210
211static void debuglog(char const *fmt, ...);
212void dump_static_object(const unsigned char* object, const int sizeof_object,
213 unsigned char* hbuff, const int sizeof_hbuff,
214 unsigned char* cbuff, const int sizeof_cbuff);
215void dump_netobj(const struct netobj *nobj);
216void dump_filelock(const struct file_lock *fl);
217struct file_lock * get_lock_matching_unlock(const struct file_lock *fl);
218enum nfslock_status test_nfslock(const struct file_lock *fl,
219 struct file_lock **conflicting_fl);
220enum nfslock_status lock_nfslock(struct file_lock *fl);
221enum nfslock_status delete_nfslock(struct file_lock *fl);
222enum nfslock_status unlock_nfslock(const struct file_lock *fl,
223 struct file_lock **released_lock, struct file_lock **left_lock,
224 struct file_lock **right_lock);
225enum hwlock_status lock_hwlock(struct file_lock *fl);
226enum split_status split_nfslock(const struct file_lock *exist_lock,
227 const struct file_lock *unlock_lock, struct file_lock **left_lock,
228 struct file_lock **right_lock);
229void add_blockingfilelock(struct file_lock *fl);
230enum hwlock_status unlock_hwlock(const struct file_lock *fl);
231enum hwlock_status test_hwlock(const struct file_lock *fl,
232 struct file_lock **conflicting_fl);
233void remove_blockingfilelock(struct file_lock *fl);
234void clear_blockingfilelock(const char *hostname);
7902cf7e 235void retry_blockingfilelocklist(netobj *fh);
ac2f15b3
A
236enum partialfilelock_status unlock_partialfilelock(
237 const struct file_lock *fl);
238void clear_partialfilelock(const char *hostname);
239enum partialfilelock_status test_partialfilelock(
240 const struct file_lock *fl, struct file_lock **conflicting_fl);
241enum nlm_stats do_test(struct file_lock *fl,
242 struct file_lock **conflicting_fl);
243enum nlm_stats do_unlock(struct file_lock *fl);
244enum nlm_stats do_lock(struct file_lock *fl);
245void do_clear(const char *hostname);
246
247
248void
249debuglog(char const *fmt, ...)
250{
251 va_list ap;
252
253 if (debug_level < 1) {
254 return;
255 }
256
257 sleep(debugdelay);
258
259 va_start(ap, fmt);
260 vsyslog(LOG_DEBUG, fmt, ap);
261 va_end(ap);
262}
263
264void
265dump_static_object(object, size_object, hbuff, size_hbuff, cbuff, size_cbuff)
266 const unsigned char *object;
267 const int size_object;
268 unsigned char *hbuff;
269 const int size_hbuff;
270 unsigned char *cbuff;
271 const int size_cbuff;
272{
273 int i, objectsize;
274
275 if (debug_level < 2) {
276 return;
277 }
278
279 objectsize = size_object;
280
281 if (objectsize == 0) {
282 debuglog("object is size 0\n");
283 } else {
284 if (objectsize > MAXOBJECTSIZE) {
285 debuglog("Object of size %d being clamped"
286 "to size %d\n", objectsize, MAXOBJECTSIZE);
287 objectsize = MAXOBJECTSIZE;
288 }
289
290 if (hbuff != NULL) {
291 if (size_hbuff < objectsize*2+1) {
292 debuglog("Hbuff not large enough."
293 " Increase size\n");
294 } else {
295 for(i=0;i<objectsize;i++) {
296 sprintf(hbuff+i*2,"%02x",*(object+i));
297 }
298 *(hbuff+i*2) = '\0';
299 }
300 }
301
302 if (cbuff != NULL) {
303 if (size_cbuff < objectsize+1) {
304 debuglog("Cbuff not large enough."
305 " Increase Size\n");
306 }
307
308 for(i=0;i<objectsize;i++) {
309 if (*(object+i) >= 32 && *(object+i) <= 127) {
310 *(cbuff+i) = *(object+i);
311 } else {
312 *(cbuff+i) = '.';
313 }
314 }
315 *(cbuff+i) = '\0';
316 }
317 }
318}
319
320void
321dump_netobj(const struct netobj *nobj)
322{
323 char hbuff[MAXBUFFERSIZE*2];
324 char cbuff[MAXBUFFERSIZE];
325
326 if (debug_level < 2) {
327 return;
328 }
329
330 if (nobj == NULL) {
331 debuglog("Null netobj pointer\n");
332 }
333 else if (nobj->n_len == 0) {
334 debuglog("Size zero netobj\n");
335 } else {
336 dump_static_object(nobj->n_bytes, nobj->n_len,
337 hbuff, sizeof(hbuff), cbuff, sizeof(cbuff));
338 debuglog("netobj: len: %d data: %s ::: %s\n",
339 nobj->n_len, hbuff, cbuff);
340 }
341}
342
343/* #define DUMP_FILELOCK_VERBOSE */
344void
345dump_filelock(const struct file_lock *fl)
346{
347#ifdef DUMP_FILELOCK_VERBOSE
348 char hbuff[MAXBUFFERSIZE*2];
349 char cbuff[MAXBUFFERSIZE];
350#endif
351
352 if (debug_level < 2) {
353 return;
354 }
355
356 if (fl != NULL) {
357 debuglog("Dumping file lock structure @ %p\n", fl);
358
359#ifdef DUMP_FILELOCK_VERBOSE
360 dump_static_object((unsigned char *)&fl->filehandle.n_bytes,
361 fl->filehandle.n_len, hbuff, sizeof(hbuff),
362 cbuff, sizeof(cbuff));
363 debuglog("Filehandle: %8s ::: %8s\n", hbuff, cbuff);
364#endif
365
366 debuglog("Dumping nlm4_holder:\n"
367 "exc: %x svid: %x offset:len %llx:%llx\n",
368 fl->client.exclusive, fl->client.svid,
369 fl->client.l_offset, fl->client.l_len);
370
371#ifdef DUMP_FILELOCK_VERBOSE
372 debuglog("Dumping client identity:\n");
373 dump_netobj(&fl->client.oh);
374
ac2f15b3
A
375 debuglog("nsm: %d status: %d flags: %d locker: %d"
376 " fd: %d\n", fl->nsm_status, fl->status,
377 fl->flags, fl->locker, fl->fd);
378#endif
379 } else {
380 debuglog("NULL file lock structure\n");
381 }
382}
383
384void
385copy_nlm4_lock_to_nlm4_holder(src, exclusive, dest)
386 const struct nlm4_lock *src;
387 const bool_t exclusive;
388 struct nlm4_holder *dest;
389{
390
391 dest->exclusive = exclusive;
392 dest->oh.n_len = src->oh.n_len;
393 dest->oh.n_bytes = src->oh.n_bytes;
394 dest->svid = src->svid;
395 dest->l_offset = src->l_offset;
396 dest->l_len = src->l_len;
397}
398
399
400/*
401 * allocate_file_lock: Create a lock with the given parameters
402 */
403
404struct file_lock *
7902cf7e 405allocate_file_lock(const netobj *lockowner, const netobj *filehandle)
ac2f15b3
A
406{
407 struct file_lock *newfl;
408
409 newfl = malloc(sizeof(struct file_lock));
410 if (newfl == NULL) {
411 return NULL;
412 }
413 bzero(newfl, sizeof(newfl));
414
415 newfl->client.oh.n_bytes = malloc(lockowner->n_len);
416 if (newfl->client.oh.n_bytes == NULL) {
417 free(newfl);
418 return NULL;
419 }
420 newfl->client.oh.n_len = lockowner->n_len;
421 bcopy(lockowner->n_bytes, newfl->client.oh.n_bytes, lockowner->n_len);
422
ac2f15b3
A
423 newfl->filehandle.n_bytes = malloc(filehandle->n_len);
424 if (newfl->filehandle.n_bytes == NULL) {
ac2f15b3
A
425 free(newfl->client.oh.n_bytes);
426 free(newfl);
427 return NULL;
428 }
429 newfl->filehandle.n_len = filehandle->n_len;
430 bcopy(filehandle->n_bytes, newfl->filehandle.n_bytes, filehandle->n_len);
431
432 return newfl;
433}
434
435/*
436 * fill_file_lock: Force creation of a valid file lock
437 */
438void
439fill_file_lock(struct file_lock *fl,
440 struct sockaddr *addr, const bool_t exclusive, const int32_t svid,
441 const u_int64_t offset, const u_int64_t len, const char *caller_name,
442 const int state, const int status, const int flags, const int blocking)
443{
444 fl->addr = addr;
445
446 fl->client.exclusive = exclusive;
447 fl->client.svid = svid;
448 fl->client.l_offset = offset;
449 fl->client.l_len = len;
450
451 strncpy(fl->client_name, caller_name, SM_MAXSTRLEN);
452
453 fl->nsm_status = state;
454 fl->status = status;
455 fl->flags = flags;
456 fl->blocking = blocking;
457}
458
459/*
460 * deallocate_file_lock: Free all storage associated with a file lock
461 */
462void
463deallocate_file_lock(struct file_lock *fl)
464{
465 free(fl->client.oh.n_bytes);
ac2f15b3
A
466 free(fl->filehandle.n_bytes);
467 free(fl);
468}
469
470/*
471 * regions_overlap(): This function examines the two provided regions for
472 * overlap.
473 */
474int
475regions_overlap(start1, len1, start2, len2)
476 const u_int64_t start1, len1, start2, len2;
477{
478 u_int64_t d1,d2,d3,d4;
479 enum split_status result;
480
481 debuglog("Entering region overlap with vals: %llu:%llu--%llu:%llu\n",
482 start1, len1, start2, len2);
483
484 result = region_compare(start1, len1, start2, len2,
485 &d1, &d2, &d3, &d4);
486
487 debuglog("Exiting region overlap with val: %d\n",result);
488
489 if (result == SPL_DISJOINT) {
490 return 0;
491 } else {
492 return 1;
493 }
494
495 return (result);
496}
497
498/*
499 * region_compare(): Examine lock regions and split appropriately
500 *
501 * XXX: Fix 64 bit overflow problems
502 * XXX: Check to make sure I got *ALL* the cases.
503 * XXX: This DESPERATELY needs a regression test.
504 */
enum split_status
region_compare(starte, lene, startu, lenu,
    start1, len1, start2, len2)
	const u_int64_t starte, lene, startu, lenu;
	u_int64_t *start1, *len1, *start2, *len2;
{
	/*
	 * Compare an established lock region (starte/lene) against an
	 * unlock region (startu/lenu).  A length of 0 means the region
	 * extends to infinity (end of file).  When the unlock splits
	 * the established lock, the surviving left remnant is written
	 * to *start1/*len1 and the right remnant to *start2/*len2.
	 * The return value is a bitmask: SPL_LOCK1 and/or SPL_LOCK2
	 * when remnants exist, SPL_CONTAINED when the unlock fully
	 * covers the lock, or SPL_DISJOINT when they do not touch.
	 *
	 * Please pay attention to the sequential exclusions
	 * of the if statements!!!
	 */
	enum LFLAGS lflags;	/* where the unlocker's left edge falls */
	enum RFLAGS rflags;	/* where the unlocker's right edge falls */
	enum split_status retval;

	retval = SPL_DISJOINT;

	if (lene == 0 && lenu == 0) {
		/* Both regions are infinite to the right. */
		/* Examine left edge of locker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
		} else {
			lflags = LEDGE_INSIDE;
		}

		rflags = REDGE_RBOUNDARY; /* Both are infinite */

		if (lflags == LEDGE_INSIDE) {
			/* Left remnant: [starte, startu) survives. */
			*start1 = starte;
			*len1 = startu - starte;
		}

		if (lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) {
			retval = SPL_CONTAINED;
		} else {
			retval = SPL_LOCK1;
		}
	} else if (lene == 0 && lenu != 0) {
		/* Established lock is infinite */
		/* Examine left edge of unlocker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
		} else if (startu > starte) {
			lflags = LEDGE_INSIDE;
		}

		/* Examine right edge of unlocker */
		if (startu + lenu < starte) {
			/* Right edge of unlocker left of established lock */
			rflags = REDGE_LEFT;
			return SPL_DISJOINT;
		} else if (startu + lenu == starte) {
			/* Right edge of unlocker on start of established lock */
			rflags = REDGE_LBOUNDARY;
			return SPL_DISJOINT;
		} else { /* Infinity is right of finity */
			/* Right edge of unlocker inside established lock */
			rflags = REDGE_INSIDE;
		}

		if (lflags == LEDGE_INSIDE) {
			/* Left remnant before the unlocked span. */
			*start1 = starte;
			*len1 = startu - starte;
			retval |= SPL_LOCK1;
		}

		if (rflags == REDGE_INSIDE) {
			/* Create right lock: remnant is itself infinite. */
			*start2 = startu+lenu;
			*len2 = 0;
			retval |= SPL_LOCK2;
		}
	} else if (lene != 0 && lenu == 0) {
		/* Unlocker is infinite */
		/* Examine left edge of unlocker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
			retval = SPL_CONTAINED;
			return retval;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
			retval = SPL_CONTAINED;
			return retval;
		} else if ((startu > starte) && (startu < starte + lene - 1)) {
			lflags = LEDGE_INSIDE;
		} else if (startu == starte + lene - 1) {
			lflags = LEDGE_RBOUNDARY;
		} else { /* startu > starte + lene -1 */
			lflags = LEDGE_RIGHT;
			return SPL_DISJOINT;
		}

		rflags = REDGE_RIGHT; /* Infinity is right of finity */

		if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
			/* Only a left remnant can survive an infinite unlock. */
			*start1 = starte;
			*len1 = startu - starte;
			retval |= SPL_LOCK1;
			return retval;
		}

	} else {
		/* Both locks are finite */

		/* Examine left edge of unlocker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
		} else if ((startu > starte) && (startu < starte + lene - 1)) {
			lflags = LEDGE_INSIDE;
		} else if (startu == starte + lene - 1) {
			lflags = LEDGE_RBOUNDARY;
		} else { /* startu > starte + lene -1 */
			lflags = LEDGE_RIGHT;
			return SPL_DISJOINT;
		}

		/* Examine right edge of unlocker */
		if (startu + lenu < starte) {
			/* Right edge of unlocker left of established lock */
			rflags = REDGE_LEFT;
			return SPL_DISJOINT;
		} else if (startu + lenu == starte) {
			/* Right edge of unlocker on start of established lock */
			rflags = REDGE_LBOUNDARY;
			return SPL_DISJOINT;
		} else if (startu + lenu < starte + lene) {
			/* Right edge of unlocker inside established lock */
			rflags = REDGE_INSIDE;
		} else if (startu + lenu == starte + lene) {
			/* Right edge of unlocker on right edge of established lock */
			rflags = REDGE_RBOUNDARY;
		} else { /* startu + lenu > starte + lene */
			/* Right edge of unlocker is right of established lock */
			rflags = REDGE_RIGHT;
		}

		if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
			/* Create left lock */
			*start1 = starte;
			*len1 = (startu - starte);
			retval |= SPL_LOCK1;
		}

		if (rflags == REDGE_INSIDE) {
			/* Create right lock */
			*start2 = startu+lenu;
			*len2 = starte+lene-(startu+lenu);
			retval |= SPL_LOCK2;
		}

		if ((lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) &&
		    (rflags == REDGE_RBOUNDARY || rflags == REDGE_RIGHT)) {
			/* Unlock covers the whole lock from both sides. */
			retval = SPL_CONTAINED;
		}
	}

	return retval;
}
668
669/*
670 * same_netobj: Compares the appropriate bits of a netobj for identity
671 */
672int
673same_netobj(const netobj *n0, const netobj *n1)
674{
675 int retval;
676
677 retval = 0;
678
679 debuglog("Entering netobj identity check\n");
680
681 if (n0->n_len == n1->n_len) {
682 debuglog("Preliminary length check passed\n");
683 retval = !bcmp(n0->n_bytes, n1->n_bytes, n0->n_len);
684 debuglog("netobj %smatch\n", retval ? "" : "mis");
685 }
686
687 return (retval);
688}
689
690/*
691 * same_filelock_identity: Compares the appropriate bits of a file_lock
692 */
693int
694same_filelock_identity(fl0, fl1)
695 const struct file_lock *fl0, *fl1;
696{
697 int retval;
698
699 retval = 0;
700
701 debuglog("Checking filelock identity\n");
702
703 /*
704 * Check process ids and host information.
705 */
706 retval = (fl0->client.svid == fl1->client.svid &&
707 same_netobj(&(fl0->client.oh), &(fl1->client.oh)));
708
709 debuglog("Exiting checking filelock identity: retval: %d\n",retval);
710
711 return (retval);
712}
713
714/*
715 * Below here are routines associated with manipulating the NFS
716 * lock list.
717 */
718
719/*
720 * get_lock_matching_unlock: Return a lock which matches the given unlock lock
7902cf7e 721 * or NULL otherwise
ac2f15b3
A
722 * XXX: It is a shame that this duplicates so much code from test_nfslock.
723 */
/*
 * get_lock_matching_unlock: walk the NFS lock list and return the
 * first lock with the same filehandle, an overlapping region, and the
 * same owner identity as fl; NULL when nothing matches.  The returned
 * pointer is an entry still on the list — not a copy.
 */
struct file_lock *
get_lock_matching_unlock(const struct file_lock *fl)
{
	struct file_lock *ifl; /* Iterator */

	debuglog("Entering lock_matching_unlock\n");
	debuglog("********Dump of fl*****************\n");
	dump_filelock(fl);

	LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
		debuglog("Pointer to file lock: %p\n",ifl);

		debuglog("****Dump of ifl****\n");
		dump_filelock(ifl);
		debuglog("*******************\n");

		/*
		 * XXX: It is conceivable that someone could use the NLM RPC
		 * system to directly access filehandles.  This may be a
		 * security hazard as the filehandle code may bypass normal
		 * file access controls
		 */
		/* Cheap length check before the byte compare. */
		if (fl->filehandle.n_len != ifl->filehandle.n_len)
			continue;
		if (bcmp(fl->filehandle.n_bytes, ifl->filehandle.n_bytes,
			fl->filehandle.n_len))
			continue;

		debuglog("matching_unlock: Filehandles match, "
		    "checking regions\n");

		/* Filehandles match, check for region overlap */
		if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
			ifl->client.l_offset, ifl->client.l_len))
			continue;

		debuglog("matching_unlock: Region overlap"
		    " found %llu : %llu -- %llu : %llu\n",
		    fl->client.l_offset,fl->client.l_len,
		    ifl->client.l_offset,ifl->client.l_len);

		/* Regions overlap, check the identity */
		if (!same_filelock_identity(fl,ifl))
			continue;

		debuglog("matching_unlock: Duplicate lock id.  Granting\n");
		return (ifl);
	}

	debuglog("Exiting lock_matching_unlock\n");

	return (NULL);
}
777
778/*
779 * test_nfslock: check for NFS lock in lock list
780 *
781 * This routine makes the following assumptions:
782 * 1) Nothing will adjust the lock list during a lookup
783 *
784 * This routine has an intersting quirk which bit me hard.
785 * The conflicting_fl is the pointer to the conflicting lock.
786 * However, to modify the "*pointer* to the conflicting lock" rather
787 * that the "conflicting lock itself" one must pass in a "pointer to
788 * the pointer of the conflicting lock". Gross.
789 */
790
/*
 * test_nfslock: scan the NFS lock list for a conflict with fl without
 * modifying the list.  Returns NFS_GRANTED when nothing conflicts,
 * NFS_GRANTED_DUPLICATE when an identical owner already holds an
 * overlapping lock, or NFS_DENIED on a genuine conflict.  In the
 * latter two cases *conflicting_fl points at the list entry found.
 */
enum nfslock_status
test_nfslock(const struct file_lock *fl, struct file_lock **conflicting_fl)
{
	struct file_lock *ifl; /* Iterator */
	enum nfslock_status retval;

	debuglog("Entering test_nfslock\n");

	retval = NFS_GRANTED;
	(*conflicting_fl) = NULL;

	debuglog("Entering lock search loop\n");

	debuglog("***********************************\n");
	debuglog("Dumping match filelock\n");
	debuglog("***********************************\n");
	dump_filelock(fl);
	debuglog("***********************************\n");

	LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
		/* A hard denial is final; stop scanning. */
		if (retval == NFS_DENIED)
			break;

		debuglog("Top of lock loop\n");
		debuglog("Pointer to file lock: %p\n",ifl);

		debuglog("***********************************\n");
		debuglog("Dumping test filelock\n");
		debuglog("***********************************\n");
		dump_filelock(ifl);
		debuglog("***********************************\n");

		/*
		 * XXX: It is conceivable that someone could use the NLM RPC
		 * system to directly access filehandles.  This may be a
		 * security hazard as the filehandle code may bypass normal
		 * file access controls
		 */
		if (fl->filehandle.n_len != ifl->filehandle.n_len)
			continue;
		if (bcmp(fl->filehandle.n_bytes, ifl->filehandle.n_bytes,
			fl->filehandle.n_len))
			continue;

		debuglog("test_nfslock: filehandle match found\n");

		/* Filehandles match, check for region overlap */
		if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
			ifl->client.l_offset, ifl->client.l_len))
			continue;

		debuglog("test_nfslock: Region overlap found"
		    " %llu : %llu -- %llu : %llu\n",
		    fl->client.l_offset,fl->client.l_len,
		    ifl->client.l_offset,ifl->client.l_len);

		/* Regions overlap, check the exclusivity */
		/* Two shared locks never conflict. */
		if (!(fl->client.exclusive || ifl->client.exclusive))
			continue;

		debuglog("test_nfslock: Exclusivity failure: %d %d\n",
		    fl->client.exclusive,
		    ifl->client.exclusive);

		if (same_filelock_identity(fl,ifl)) {
			/* Same owner may re-lock its own region. */
			debuglog("test_nfslock: Duplicate id.  Granting\n");
			(*conflicting_fl) = ifl;
			retval = NFS_GRANTED_DUPLICATE;
		} else {
			/* locking attempt fails */
			debuglog("test_nfslock: Lock attempt failed\n");
			debuglog("Desired lock\n");
			dump_filelock(fl);
			debuglog("Conflicting lock\n");
			dump_filelock(ifl);
			(*conflicting_fl) = ifl;
			retval = NFS_DENIED;
		}
	}

	debuglog("Dumping file locks\n");
	debuglog("Exiting test_nfslock\n");

	return (retval);
}
876
877/*
878 * lock_nfslock: attempt to create a lock in the NFS lock list
879 *
880 * This routine tests whether the lock will be granted and then adds
881 * the entry to the lock list if so.
882 *
883 * Argument fl gets modified as its list housekeeping entries get modified
884 * upon insertion into the NFS lock list
885 *
886 * This routine makes several assumptions:
887 * 1) It is perfectly happy to grant a duplicate lock from the same pid.
888 * While this seems to be intuitively wrong, it is required for proper
889 * Posix semantics during unlock. It is absolutely imperative to not
890 * unlock the main lock before the two child locks are established. Thus,
891 * one has be be able to create duplicate locks over an existing lock
892 * 2) It currently accepts duplicate locks from the same id,pid
893 */
894
895enum nfslock_status
896lock_nfslock(struct file_lock *fl)
897{
898 enum nfslock_status retval;
899 struct file_lock *dummy_fl;
900
901 dummy_fl = NULL;
902
903 debuglog("Entering lock_nfslock...\n");
904
905 retval = test_nfslock(fl,&dummy_fl);
906
907 if (retval == NFS_GRANTED || retval == NFS_GRANTED_DUPLICATE) {
908 debuglog("Inserting lock...\n");
909 dump_filelock(fl);
910 LIST_INSERT_HEAD(&nfslocklist_head, fl, nfslocklist);
911 }
912
913 debuglog("Exiting lock_nfslock...\n");
914
915 return (retval);
916}
917
918/*
919 * delete_nfslock: delete an NFS lock list entry
920 *
921 * This routine is used to delete a lock out of the NFS lock list
922 * without regard to status, underlying locks, regions or anything else
923 *
924 * Note that this routine *does not deallocate memory* of the lock.
925 * It just disconnects it from the list. The lock can then be used
926 * by other routines without fear of trashing the list.
927 */
928
929enum nfslock_status
930delete_nfslock(struct file_lock *fl)
931{
932
933 LIST_REMOVE(fl, nfslocklist);
934
935 return (NFS_GRANTED);
936}
937
938enum split_status
939split_nfslock(exist_lock, unlock_lock, left_lock, right_lock)
940 const struct file_lock *exist_lock, *unlock_lock;
941 struct file_lock **left_lock, **right_lock;
942{
943 u_int64_t start1, len1, start2, len2;
944 enum split_status spstatus;
945
946 spstatus = region_compare(exist_lock->client.l_offset, exist_lock->client.l_len,
947 unlock_lock->client.l_offset, unlock_lock->client.l_len,
948 &start1, &len1, &start2, &len2);
949
950 if ((spstatus & SPL_LOCK1) != 0) {
7902cf7e 951 *left_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->filehandle);
ac2f15b3
A
952 if (*left_lock == NULL) {
953 debuglog("Unable to allocate resource for split 1\n");
954 return SPL_RESERR;
955 }
956
957 fill_file_lock(*left_lock,
958 exist_lock->addr,
959 exist_lock->client.exclusive, exist_lock->client.svid,
960 start1, len1,
961 exist_lock->client_name, exist_lock->nsm_status,
962 exist_lock->status, exist_lock->flags, exist_lock->blocking);
963 }
964
965 if ((spstatus & SPL_LOCK2) != 0) {
7902cf7e 966 *right_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->filehandle);
ac2f15b3
A
967 if (*right_lock == NULL) {
968 debuglog("Unable to allocate resource for split 1\n");
969 if (*left_lock != NULL) {
970 deallocate_file_lock(*left_lock);
971 }
972 return SPL_RESERR;
973 }
974
975 fill_file_lock(*right_lock,
976 exist_lock->addr,
977 exist_lock->client.exclusive, exist_lock->client.svid,
978 start2, len2,
979 exist_lock->client_name, exist_lock->nsm_status,
980 exist_lock->status, exist_lock->flags, exist_lock->blocking);
981 }
982
983 return spstatus;
984}
985
986enum nfslock_status
987unlock_nfslock(fl, released_lock, left_lock, right_lock)
988 const struct file_lock *fl;
989 struct file_lock **released_lock;
990 struct file_lock **left_lock;
991 struct file_lock **right_lock;
992{
993 struct file_lock *mfl; /* Matching file lock */
994 enum nfslock_status retval;
995 enum split_status spstatus;
996
997 debuglog("Entering unlock_nfslock\n");
998
999 *released_lock = NULL;
1000 *left_lock = NULL;
1001 *right_lock = NULL;
1002
1003 retval = NFS_DENIED_NOLOCK;
1004
1005 printf("Attempting to match lock...\n");
1006 mfl = get_lock_matching_unlock(fl);
1007
1008 if (mfl != NULL) {
1009 debuglog("Unlock matched. Querying for split\n");
1010
1011 spstatus = split_nfslock(mfl, fl, left_lock, right_lock);
1012
1013 debuglog("Split returned %d %p %p %p %p\n",spstatus,mfl,fl,*left_lock,*right_lock);
1014 debuglog("********Split dumps********");
1015 dump_filelock(mfl);
1016 dump_filelock(fl);
1017 dump_filelock(*left_lock);
1018 dump_filelock(*right_lock);
1019 debuglog("********End Split dumps********");
1020
1021 if (spstatus == SPL_RESERR) {
1022 if (*left_lock != NULL) {
1023 deallocate_file_lock(*left_lock);
1024 *left_lock = NULL;
1025 }
1026
1027 if (*right_lock != NULL) {
1028 deallocate_file_lock(*right_lock);
1029 *right_lock = NULL;
1030 }
1031
1032 return NFS_RESERR;
1033 }
1034
1035 /* Insert new locks from split if required */
1036 if (*left_lock != NULL) {
1037 debuglog("Split left activated\n");
1038 LIST_INSERT_HEAD(&nfslocklist_head, *left_lock, nfslocklist);
1039 }
1040
1041 if (*right_lock != NULL) {
1042 debuglog("Split right activated\n");
1043 LIST_INSERT_HEAD(&nfslocklist_head, *right_lock, nfslocklist);
1044 }
1045
1046 /* Unlock the lock since it matches identity */
1047 LIST_REMOVE(mfl, nfslocklist);
1048 *released_lock = mfl;
1049 retval = NFS_GRANTED;
1050 }
1051
1052 debuglog("Exiting unlock_nfslock\n");
1053
1054 return retval;
1055}
1056
1057/*
1058 * Below here are the routines for manipulating the file lock directly
1059 * on the disk hardware itself
1060 */
1061enum hwlock_status
1062lock_hwlock(struct file_lock *fl)
1063{
1064 struct monfile *imf,*nmf;
1065 int lflags, flerror;
1066
1067 /* Scan to see if filehandle already present */
1068 LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
1069 if ((fl->filehandle.n_len == imf->filehandle.n_len) &&
1070 (bcmp(fl->filehandle.n_bytes, imf->filehandle.n_bytes,
1071 fl->filehandle.n_len) == 0)) {
1072 /* imf is the correct filehandle */
1073 break;
1074 }
1075 }
1076
1077 /*
1078 * Filehandle already exists (we control the file)
1079 * *AND* NFS has already cleared the lock for availability
1080 * Grant it and bump the refcount.
1081 */
1082 if (imf != NULL) {
1083 ++(imf->refcount);
1084 return (HW_GRANTED);
1085 }
1086
1087 /* No filehandle found, create and go */
1088 nmf = malloc(sizeof(struct monfile));
1089 if (nmf == NULL) {
1090 debuglog("hwlock resource allocation failure\n");
1091 return (HW_RESERR);
1092 }
1093 nmf->filehandle.n_bytes = malloc(fl->filehandle.n_len);
1094 if (nmf == NULL) {
1095 debuglog("hwlock resource allocation failure\n");
1096 free(nmf);
1097 return (HW_RESERR);
1098 }
1099
1100 /* XXX: Is O_RDWR always the correct mode? */
1101 nmf->fd = fhopen((fhandle_t *)fl->filehandle.n_bytes, O_RDWR);
1102 if (nmf->fd < 0) {
1103 debuglog("fhopen failed (from %16s): %32s\n",
1104 fl->client_name, strerror(errno));
1105 free(nmf);
1106 switch (errno) {
1107 case ESTALE:
1108 return (HW_STALEFH);
1109 case EROFS:
1110 return (HW_READONLY);
1111 default:
1112 return (HW_RESERR);
1113 }
1114 }
1115
1116 /* File opened correctly, fill the monitor struct */
1117 nmf->filehandle.n_len = fl->filehandle.n_len;
1118 bcopy(fl->filehandle.n_bytes, nmf->filehandle.n_bytes, fl->filehandle.n_len);
1119 nmf->refcount = 1;
1120 nmf->exclusive = fl->client.exclusive;
1121
1122 lflags = (nmf->exclusive == 1) ?
1123 (LOCK_EX | LOCK_NB) : (LOCK_SH | LOCK_NB);
1124
1125 flerror = flock(nmf->fd, lflags);
1126
1127 if (flerror != 0) {
1128 debuglog("flock failed (from %16s): %32s\n",
1129 fl->client_name, strerror(errno));
1130 close(nmf->fd);
1131 free(nmf);
1132 switch (errno) {
1133 case EAGAIN:
1134 return (HW_DENIED);
1135 case ESTALE:
1136 return (HW_STALEFH);
1137 case EROFS:
1138 return (HW_READONLY);
1139 default:
1140 return (HW_RESERR);
1141 break;
1142 }
1143 }
1144
1145 /* File opened and locked */
1146 LIST_INSERT_HEAD(&monfilelist_head, nmf, monfilelist);
1147
1148 debuglog("flock succeeded (from %16s)\n", fl->client_name);
1149 return (HW_GRANTED);
1150}
1151
1152enum hwlock_status
1153unlock_hwlock(const struct file_lock *fl)
1154{
1155 struct monfile *imf;
1156
1157 debuglog("Entering unlock_hwlock\n");
1158 debuglog("Entering loop interation\n");
1159
1160 /* Scan to see if filehandle already present */
1161 LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
1162 if ((fl->filehandle.n_len == imf->filehandle.n_len) &&
1163 (bcmp(fl->filehandle.n_bytes, imf->filehandle.n_bytes,
1164 fl->filehandle.n_len) == 0)) {
1165 /* imf is the correct filehandle */
1166 break;
1167 }
1168 }
1169
1170 debuglog("Completed iteration. Proceeding\n");
1171
1172 if (imf == NULL) {
1173 /* No lock found */
1174 debuglog("Exiting unlock_hwlock (HW_DENIED_NOLOCK)\n");
1175 return (HW_DENIED_NOLOCK);
1176 }
1177
1178 /* Lock found */
1179 --imf->refcount;
1180
1181 if (imf->refcount < 0) {
1182 debuglog("Negative hardware reference count\n");
1183 }
1184
1185 if (imf->refcount <= 0) {
1186 close(imf->fd);
1187 LIST_REMOVE(imf, monfilelist);
1188 free(imf);
1189 }
1190 debuglog("Exiting unlock_hwlock (HW_GRANTED)\n");
1191 return (HW_GRANTED);
1192}
1193
/*
 * test_hwlock: test for a conflicting lock at the hardware level.
 * Unimplemented stub; always reports HW_RESERR and ignores both
 * arguments.
 */
enum hwlock_status
test_hwlock(fl, conflicting_fl)
	const struct file_lock *fl __unused;
	struct file_lock **conflicting_fl __unused;
{

	/*
	 * XXX: lock tests on hardware are not required until
	 * true partial file testing is done on the underlying file
	 */
	return (HW_RESERR);
}
1206
1207
1208
1209/*
1210 * Below here are routines for manipulating blocked lock requests
1211 * They should only be called from the XXX_partialfilelock routines
1212 * if at all possible
1213 */
1214
1215void
1216add_blockingfilelock(struct file_lock *fl)
1217{
7902cf7e 1218 struct file_lock *ifl, *nfl;
ac2f15b3
A
1219
1220 debuglog("Entering add_blockingfilelock\n");
1221
7902cf7e
A
1222 /*
1223 * Check for a duplicate lock request.
1224 * If found, deallocate the older request.
1225 */
1226 ifl = LIST_FIRST(&blockedlocklist_head);
1227 for (; ifl != NULL; ifl = nfl) {
1228 debuglog("Pointer to file lock: %p\n",ifl);
1229 debuglog("****Dump of ifl****\n");
1230 dump_filelock(ifl);
1231 debuglog("*******************\n");
1232
1233 nfl = LIST_NEXT(ifl, nfslocklist);
1234
1235 if (fl->filehandle.n_len != ifl->filehandle.n_len)
1236 continue;
1237 if (bcmp(fl->filehandle.n_bytes, ifl->filehandle.n_bytes,
1238 fl->filehandle.n_len))
1239 continue;
1240
1241 /* Filehandles match, check region */
1242 if ((fl->client.l_offset != ifl->client.l_offset) ||
1243 (fl->client.l_len != ifl->client.l_len))
1244 continue;
1245
1246 /* Regions match, check the identity */
1247 if (!same_filelock_identity(fl,ifl))
1248 continue;
1249
1250 debuglog("add_blockingfilelock: removing duplicate lock request.\n");
1251 remove_blockingfilelock(ifl);
1252 deallocate_file_lock(ifl);
1253 break;
1254 }
1255
ac2f15b3
A
1256 /*
1257 * Clear the blocking flag so that it can be reused without
1258 * adding it to the blocking queue a second time
1259 */
1260
1261 fl->blocking = 0;
1262 LIST_INSERT_HEAD(&blockedlocklist_head, fl, nfslocklist);
1263
1264 debuglog("Exiting add_blockingfilelock\n");
1265}
1266
/*
 * remove_blockingfilelock: unlink fl from the blocked-lock list.
 * The lock itself is not deallocated; the caller does that separately
 * when required.
 */
void
remove_blockingfilelock(struct file_lock *fl)
{

	debuglog("Entering remove_blockingfilelock\n");

	LIST_REMOVE(fl, nfslocklist);

	debuglog("Exiting remove_blockingfilelock\n");
}
1277
1278void
1279clear_blockingfilelock(const char *hostname)
1280{
1281 struct file_lock *ifl,*nfl;
1282
1283 /*
1284 * Normally, LIST_FOREACH is called for, but since
1285 * the current element *is* the iterator, deleting it
1286 * would mess up the iteration. Thus, a next element
1287 * must be used explicitly
1288 */
1289
1290 ifl = LIST_FIRST(&blockedlocklist_head);
1291
1292 while (ifl != NULL) {
1293 nfl = LIST_NEXT(ifl, nfslocklist);
1294
1295 if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
1296 remove_blockingfilelock(ifl);
1297 deallocate_file_lock(ifl);
1298 }
1299
1300 ifl = nfl;
1301 }
1302}
1303
7902cf7e
A
/*
 * Set when a blocked-lock retry pass has been deferred; cleared by
 * retry_blockingfilelocklist() when the pass actually runs.
 */
int need_retry_blocked_locks = 0; /* need to call retry_blockingfilelocklist() */
1305
ac2f15b3 1306void
7902cf7e 1307retry_blockingfilelocklist(netobj *fh)
ac2f15b3 1308{
7902cf7e
A
1309 /*
1310 * If fh is given, then retry just the locks with the
1311 * same filehandle in the blocked list.
1312 * Otherwise, simply retry all locks in the blocked list.
1313 */
ac2f15b3
A
1314 struct file_lock *ifl, *nfl, *pfl; /* Iterator */
1315 enum partialfilelock_status pflstatus;
7902cf7e 1316 int rv;
ac2f15b3
A
1317
1318 debuglog("Entering retry_blockingfilelocklist\n");
1319
7902cf7e
A
1320 need_retry_blocked_locks = 0;
1321
ac2f15b3
A
1322 pfl = NULL;
1323 ifl = LIST_FIRST(&blockedlocklist_head);
1324 debuglog("Iterator choice %p\n",ifl);
1325
1326 while (ifl != NULL) {
1327 /*
1328 * SUBTLE BUG: The next element must be worked out before the
1329 * current element has been moved
1330 */
1331 nfl = LIST_NEXT(ifl, nfslocklist);
1332 debuglog("Iterator choice %p\n",ifl);
1333 debuglog("Prev iterator choice %p\n",pfl);
1334 debuglog("Next iterator choice %p\n",nfl);
1335
7902cf7e
A
1336 /* if given a filehandle, only retry locks for the same filehandle */
1337 if (fh && !same_netobj(fh, &ifl->filehandle)) {
1338 ifl = nfl;
1339 continue;
1340 }
1341
ac2f15b3
A
1342 /*
1343 * SUBTLE BUG: The file_lock must be removed from the
1344 * old list so that it's list pointers get disconnected
1345 * before being allowed to participate in the new list
1346 * which will automatically add it in if necessary.
1347 */
1348
1349 LIST_REMOVE(ifl, nfslocklist);
1350 pflstatus = lock_partialfilelock(ifl);
1351
1352 if (pflstatus == PFL_GRANTED || pflstatus == PFL_GRANTED_DUPLICATE) {
1353 debuglog("Granted blocked lock\n");
1354 /* lock granted and is now being used */
7902cf7e
A
1355 rv = send_granted(ifl, 0);
1356 if (rv) {
1357 /*
1358 * Uh oh... the NLM_GRANTED message failed.
1359 * About the only thing we can do is drop the lock.
1360 * Note: this could be bad if the error was only
1361 * transient. Hopefully, if the client is still
1362 * waiting for the lock, they will resend the request.
1363 */
1364 do_unlock(ifl);
1365 /* ifl is NO LONGER VALID AT THIS POINT */
1366 }
ac2f15b3
A
1367 } else {
1368 /* Reinsert lock back into same place in blocked list */
1369 debuglog("Replacing blocked lock\n");
1370 if (pfl != NULL)
1371 LIST_INSERT_AFTER(pfl, ifl, nfslocklist);
1372 else
1373 /* ifl is the only elem. in the list */
1374 LIST_INSERT_HEAD(&blockedlocklist_head, ifl, nfslocklist);
1375 }
1376
7902cf7e
A
1377 if (pflstatus == PFL_GRANTED || pflstatus == PFL_GRANTED_DUPLICATE) {
1378 /* If ifl was permanently removed from the list, (e.g the */
1379 /* lock was granted), pfl should remain where it's at. */
1380 } else {
1381 /* If ifl was left in the list, (e.g it was reinserted back */
1382 /* in place), pfl should simply be moved forward to be ifl */
1383 pfl = ifl;
1384 }
ac2f15b3
A
1385 /* Valid increment behavior regardless of state of ifl */
1386 ifl = nfl;
ac2f15b3
A
1387 }
1388
1389 debuglog("Exiting retry_blockingfilelocklist\n");
1390}
1391
1392/*
1393 * Below here are routines associated with manipulating all
1394 * aspects of the partial file locking system (list, hardware, etc.)
1395 */
1396
1397/*
1398 * Please note that lock monitoring must be done at this level which
1399 * keeps track of *individual* lock requests on lock and unlock
1400 *
1401 * XXX: Split unlocking is going to make the unlock code miserable
1402 */
1403
1404/*
1405 * lock_partialfilelock:
1406 *
1407 * Argument fl gets modified as its list housekeeping entries get modified
1408 * upon insertion into the NFS lock list
1409 *
1410 * This routine makes several assumptions:
1411 * 1) It (will) pass locks through to flock to lock the entire underlying file
1412 * and then parcel out NFS locks if it gets control of the file.
1413 * This matches the old rpc.lockd file semantics (except where it
1414 * is now more correct). It is the safe solution, but will cause
1415 * overly restrictive blocking if someone is trying to use the
1416 * underlying files without using NFS. This appears to be an
1417 * acceptable tradeoff since most people use standalone NFS servers.
1418 * XXX: The right solution is probably kevent combined with fcntl
1419 *
1420 * 2) Nothing modifies the lock lists between testing and granting
1421 * I have no idea whether this is a useful assumption or not
1422 */
1423
1424enum partialfilelock_status
1425lock_partialfilelock(struct file_lock *fl)
1426{
1427 enum partialfilelock_status retval;
1428 enum nfslock_status lnlstatus;
1429 enum hwlock_status hwstatus;
1430
1431 debuglog("Entering lock_partialfilelock\n");
1432
1433 retval = PFL_DENIED;
1434
1435 /*
1436 * Execute the NFS lock first, if possible, as it is significantly
1437 * easier and less expensive to undo than the filesystem lock
1438 */
1439
1440 lnlstatus = lock_nfslock(fl);
1441
1442 switch (lnlstatus) {
1443 case NFS_GRANTED:
1444 case NFS_GRANTED_DUPLICATE:
1445 /*
1446 * At this point, the NFS lock is allocated and active.
1447 * Remember to clean it up if the hardware lock fails
1448 */
1449 hwstatus = lock_hwlock(fl);
1450
1451 switch (hwstatus) {
1452 case HW_GRANTED:
1453 case HW_GRANTED_DUPLICATE:
1454 debuglog("HW GRANTED\n");
1455 /*
1456 * XXX: Fixme: Check hwstatus for duplicate when
1457 * true partial file locking and accounting is
1458 * done on the hardware
1459 */
1460 if (lnlstatus == NFS_GRANTED_DUPLICATE) {
1461 retval = PFL_GRANTED_DUPLICATE;
1462 } else {
1463 retval = PFL_GRANTED;
1464 }
1465 if (fl->flags & LOCK_MON)
1466 monitor_lock_host(fl->client_name);
1467 break;
1468 case HW_RESERR:
1469 debuglog("HW RESERR\n");
1470 retval = PFL_HWRESERR;
1471 break;
1472 case HW_DENIED:
1473 debuglog("HW DENIED\n");
1474 retval = PFL_HWDENIED;
1475 break;
1476 default:
1477 debuglog("Unmatched hwstatus %d\n",hwstatus);
1478 break;
1479 }
1480
1481 if (retval != PFL_GRANTED &&
1482 retval != PFL_GRANTED_DUPLICATE) {
1483 /* Clean up the NFS lock */
1484 debuglog("Deleting trial NFS lock\n");
1485 delete_nfslock(fl);
1486 }
1487 break;
1488 case NFS_DENIED:
1489 retval = PFL_NFSDENIED;
1490 break;
1491 case NFS_RESERR:
1492 retval = PFL_NFSRESERR;
1493 default:
1494 debuglog("Unmatched lnlstatus %d\n");
1495 retval = PFL_NFSDENIED_NOLOCK;
1496 break;
1497 }
1498
1499 /*
1500 * By the time fl reaches here, it is completely free again on
1501 * failure. The NFS lock done before attempting the
1502 * hardware lock has been backed out
1503 */
1504
1505 if (retval == PFL_NFSDENIED || retval == PFL_HWDENIED) {
1506 /* Once last chance to check the lock */
1507 if (fl->blocking == 1) {
1508 /* Queue the lock */
1509 debuglog("BLOCKING LOCK RECEIVED\n");
1510 retval = (retval == PFL_NFSDENIED ?
1511 PFL_NFSBLOCKED : PFL_HWBLOCKED);
1512 add_blockingfilelock(fl);
1513 dump_filelock(fl);
1514 } else {
1515 /* Leave retval alone, it's already correct */
1516 debuglog("Lock denied. Non-blocking failure\n");
1517 dump_filelock(fl);
1518 }
1519 }
1520
1521 debuglog("Exiting lock_partialfilelock\n");
1522
1523 return retval;
1524}
1525
1526/*
1527 * unlock_partialfilelock:
1528 *
1529 * Given a file_lock, unlock all locks which match.
1530 *
1531 * Note that a given lock might have to unlock ITSELF! See
1532 * clear_partialfilelock for example.
1533 */
1534
/*
 * unlock_partialfilelock: release every NFS/hardware lock matching fl.
 *
 * Loops because one unlock region can consume several smaller NFS
 * locks.  Each iteration: split locks produced by unlock_nfslock() are
 * immediately hardware-locked (so no window exists in which another
 * process could grab the remainder), then the matched lock's hardware
 * reference is dropped and the released file_lock is deallocated.
 *
 * Special case: when fl itself is the matched lock (see
 * clear_partialfilelock), its deallocation is deferred to the end so
 * the loop never dereferences freed memory — note this destroys the
 * caller's nominally-const argument.
 *
 * Returns PFL_GRANTED when all matching locks were released (including
 * the "nothing left to unlock" case), PFL_NFSRESERR / PFL_HWRESERR on
 * subsystem failure.
 */
enum partialfilelock_status
unlock_partialfilelock(const struct file_lock *fl)
{
	struct file_lock *lfl,*rfl,*releasedfl,*selffl;
	enum partialfilelock_status retval;
	enum nfslock_status unlstatus;
	enum hwlock_status unlhwstatus, lhwstatus;

	debuglog("Entering unlock_partialfilelock\n");

	selffl = NULL;
	lfl = NULL;
	rfl = NULL;
	releasedfl = NULL;
	retval = PFL_DENIED;

	/*
	 * There are significant overlap and atomicity issues
	 * with partially releasing a lock.  For example, releasing
	 * part of an NFS shared lock does *not* always release the
	 * corresponding part of the file since there is only one
	 * rpc.lockd UID but multiple users could be requesting it
	 * from NFS.  Also, an unlock request should never allow
	 * another process to gain a lock on the remaining parts.
	 * ie. Always apply the new locks before releasing the
	 * old one
	 */

	/*
	 * Loop is required since multiple little locks
	 * can be allocated and then deallocated with one
	 * big unlock.
	 *
	 * The loop is required to be here so that the nfs &
	 * hw subsystems do not need to communicate with one
	 * one another
	 */

	do {
		debuglog("Value of releasedfl: %p\n",releasedfl);
		/* lfl&rfl are created *AND* placed into the NFS lock list if required */
		unlstatus = unlock_nfslock(fl, &releasedfl, &lfl, &rfl);
		debuglog("Value of releasedfl: %p\n",releasedfl);


		/* XXX: This is grungy.  It should be refactored to be cleaner */
		if (lfl != NULL) {
			lhwstatus = lock_hwlock(lfl);
			if (lhwstatus != HW_GRANTED &&
			    lhwstatus != HW_GRANTED_DUPLICATE) {
				debuglog("HW duplicate lock failure for left split\n");
			}
			if (lfl->flags & LOCK_MON)
				monitor_lock_host(lfl->client_name);
		}

		if (rfl != NULL) {
			lhwstatus = lock_hwlock(rfl);
			if (lhwstatus != HW_GRANTED &&
			    lhwstatus != HW_GRANTED_DUPLICATE) {
				debuglog("HW duplicate lock failure for right split\n");
			}
			if (rfl->flags & LOCK_MON)
				monitor_lock_host(rfl->client_name);
		}

		switch (unlstatus) {
		case NFS_GRANTED:
			/* Attempt to unlock on the hardware */
			debuglog("NFS unlock granted.  Attempting hardware unlock\n");

			/* This call *MUST NOT* unlock the two newly allocated locks */
			unlhwstatus = unlock_hwlock(fl);
			debuglog("HW unlock returned with code %d\n",unlhwstatus);

			switch (unlhwstatus) {
			case HW_GRANTED:
				debuglog("HW unlock granted\n");
				if (releasedfl->flags & LOCK_MON)
					unmonitor_lock_host(releasedfl->client_name);
				retval = PFL_GRANTED;
				break;
			case HW_DENIED_NOLOCK:
				/* Huh?!?!  This shouldn't happen */
				debuglog("HW unlock denied no lock\n");
				retval = PFL_HWRESERR;
				/* Break out of do-while */
				unlstatus = NFS_RESERR;
				break;
			default:
				debuglog("HW unlock failed\n");
				retval = PFL_HWRESERR;
				/* Break out of do-while */
				unlstatus = NFS_RESERR;
				break;
			}

			debuglog("Exiting with status retval: %d\n",retval);

			// XXX sending granted messages before unlock response
			// XXX causes unlock response to be corrupted?
			// XXX Workaround is to move this to nlm_prot_svc.c
			// XXX after the unlock response is sent.
			// retry_blockingfilelocklist();
			need_retry_blocked_locks = 1;

			break;
		case NFS_DENIED_NOLOCK:
			retval = PFL_GRANTED;
			debuglog("All locks cleaned out\n");
			break;
		default:
			retval = PFL_NFSRESERR;
			debuglog("NFS unlock failure\n");
			dump_filelock(fl);
			break;
		}

		if (releasedfl != NULL) {
			if (fl == releasedfl) {
				/*
				 * XXX: YECHHH!!! Attempt to unlock self succeeded
				 * but we can't deallocate the space yet.  This is what
				 * happens when you don't write malloc and free together
				 */
				debuglog("Attempt to unlock self\n");
				selffl = releasedfl;
			} else {
				/*
				 * XXX: this deallocation *still* needs to migrate closer
				 * to the allocation code way up in get_lock or the allocation
				 * code needs to migrate down (violation of "When you write
				 * malloc you must write free")
				 */

				deallocate_file_lock(releasedfl);
			}
		}

	} while (unlstatus == NFS_GRANTED);

	if (selffl != NULL) {
		/*
		 * This statement wipes out the incoming file lock (fl)
		 * in spite of the fact that it is declared const
		 */
		debuglog("WARNING!  Destroying incoming lock pointer\n");
		deallocate_file_lock(selffl);
	}

	debuglog("Exiting unlock_partialfilelock\n");

	return retval;
}
1688
1689/*
1690 * clear_partialfilelock
1691 *
1692 * Normally called in response to statd state number change.
1693 * Wipe out all locks held by a host. As a bonus, the act of
1694 * doing so should automatically clear their statd entries and
1695 * unmonitor the host.
1696 */
1697
1698void
1699clear_partialfilelock(const char *hostname)
1700{
1701 struct file_lock *ifl, *nfl;
1702
1703 /* Clear blocking file lock list */
1704 clear_blockingfilelock(hostname);
1705
1706 /* do all required unlocks */
1707 /* Note that unlock can smash the current pointer to a lock */
1708
1709 /*
1710 * Normally, LIST_FOREACH is called for, but since
1711 * the current element *is* the iterator, deleting it
1712 * would mess up the iteration. Thus, a next element
1713 * must be used explicitly
1714 */
1715
1716 ifl = LIST_FIRST(&nfslocklist_head);
1717
1718 while (ifl != NULL) {
1719 nfl = LIST_NEXT(ifl, nfslocklist);
1720
1721 if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
1722 /* Unlock destroys ifl out from underneath */
1723 unlock_partialfilelock(ifl);
1724 /* ifl is NO LONGER VALID AT THIS POINT */
1725 }
1726 ifl = nfl;
1727 }
1728}
1729
1730/*
1731 * test_partialfilelock:
1732 */
1733enum partialfilelock_status
1734test_partialfilelock(const struct file_lock *fl,
1735 struct file_lock **conflicting_fl)
1736{
1737 enum partialfilelock_status retval;
1738 enum nfslock_status teststatus;
1739
1740 debuglog("Entering testpartialfilelock...\n");
1741
1742 retval = PFL_DENIED;
1743
1744 teststatus = test_nfslock(fl, conflicting_fl);
1745 debuglog("test_partialfilelock: teststatus %d\n",teststatus);
1746
1747 if (teststatus == NFS_GRANTED || teststatus == NFS_GRANTED_DUPLICATE) {
1748 /* XXX: Add the underlying filesystem locking code */
1749 retval = (teststatus == NFS_GRANTED) ?
1750 PFL_GRANTED : PFL_GRANTED_DUPLICATE;
1751 debuglog("Dumping locks...\n");
1752 dump_filelock(fl);
1753 dump_filelock(*conflicting_fl);
1754 debuglog("Done dumping locks...\n");
1755 } else {
1756 retval = PFL_NFSDENIED;
1757 debuglog("NFS test denied.\n");
1758 dump_filelock(fl);
1759 debuglog("Conflicting.\n");
1760 dump_filelock(*conflicting_fl);
1761 }
1762
1763 debuglog("Exiting testpartialfilelock...\n");
1764
1765 return retval;
1766}
1767
1768/*
1769 * Below here are routines associated with translating the partial file locking
1770 * codes into useful codes to send back to the NFS RPC messaging system
1771 */
1772
1773/*
1774 * These routines translate the (relatively) useful return codes back onto
1775 * the few return codes which the nlm subsystems wishes to trasmit
1776 */
1777
1778enum nlm_stats
1779do_test(struct file_lock *fl, struct file_lock **conflicting_fl)
1780{
1781 enum partialfilelock_status pfsret;
1782 enum nlm_stats retval;
1783
1784 debuglog("Entering do_test...\n");
1785
1786 pfsret = test_partialfilelock(fl,conflicting_fl);
1787
1788 switch (pfsret) {
1789 case PFL_GRANTED:
1790 debuglog("PFL test lock granted\n");
1791 dump_filelock(fl);
1792 dump_filelock(*conflicting_fl);
1793 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1794 break;
1795 case PFL_GRANTED_DUPLICATE:
1796 debuglog("PFL test lock granted--duplicate id detected\n");
1797 dump_filelock(fl);
1798 dump_filelock(*conflicting_fl);
1799 debuglog("Clearing conflicting_fl for call semantics\n");
1800 *conflicting_fl = NULL;
1801 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1802 break;
1803 case PFL_NFSDENIED:
1804 case PFL_HWDENIED:
1805 debuglog("PFL test lock denied\n");
1806 dump_filelock(fl);
1807 dump_filelock(*conflicting_fl);
1808 retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
1809 break;
1810 case PFL_NFSRESERR:
1811 case PFL_HWRESERR:
1812 debuglog("PFL test lock resource fail\n");
1813 dump_filelock(fl);
1814 dump_filelock(*conflicting_fl);
1815 retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
1816 break;
1817 default:
1818 debuglog("PFL test lock *FAILED*\n");
1819 dump_filelock(fl);
1820 dump_filelock(*conflicting_fl);
1821 retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
1822 break;
1823 }
1824
1825 debuglog("Exiting do_test...\n");
1826
1827 return retval;
1828}
1829
1830/*
1831 * do_lock: Try to acquire a lock
1832 *
1833 * This routine makes a distinction between NLM versions. I am pretty
1834 * convinced that this should be abstracted out and bounced up a level
1835 */
1836
1837enum nlm_stats
1838do_lock(struct file_lock *fl)
1839{
1840 enum partialfilelock_status pfsret;
1841 enum nlm_stats retval;
1842
1843 debuglog("Entering do_lock...\n");
1844
1845 pfsret = lock_partialfilelock(fl);
1846
1847 switch (pfsret) {
1848 case PFL_GRANTED:
1849 debuglog("PFL lock granted");
1850 dump_filelock(fl);
1851 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1852 break;
1853 case PFL_GRANTED_DUPLICATE:
1854 debuglog("PFL lock granted--duplicate id detected");
1855 dump_filelock(fl);
1856 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1857 break;
1858 case PFL_NFSDENIED:
1859 case PFL_HWDENIED:
1860 debuglog("PFL_NFS lock denied");
1861 dump_filelock(fl);
1862 retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
1863 break;
1864 case PFL_NFSBLOCKED:
1865 case PFL_HWBLOCKED:
1866 debuglog("PFL_NFS blocking lock denied. Queued.\n");
1867 dump_filelock(fl);
1868 retval = (fl->flags & LOCK_V4) ? nlm4_blocked : nlm_blocked;
1869 break;
1870 case PFL_NFSRESERR:
1871 case PFL_HWRESERR:
1872 debuglog("PFL lock resource alocation fail\n");
1873 dump_filelock(fl);
1874 retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
1875 break;
1876 default:
1877 debuglog("PFL lock *FAILED*");
1878 dump_filelock(fl);
1879 retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
1880 break;
1881 }
1882
1883 debuglog("Exiting do_lock...\n");
1884
1885 return retval;
1886}
1887
1888enum nlm_stats
1889do_unlock(struct file_lock *fl)
1890{
1891 enum partialfilelock_status pfsret;
1892 enum nlm_stats retval;
1893
1894 debuglog("Entering do_unlock...\n");
1895 pfsret = unlock_partialfilelock(fl);
1896
1897 switch (pfsret) {
1898 case PFL_GRANTED:
1899 debuglog("PFL unlock granted");
1900 dump_filelock(fl);
1901 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1902 break;
1903 case PFL_NFSDENIED:
1904 case PFL_HWDENIED:
1905 debuglog("PFL_NFS unlock denied");
1906 dump_filelock(fl);
1907 retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
1908 break;
1909 case PFL_NFSDENIED_NOLOCK:
1910 case PFL_HWDENIED_NOLOCK:
1911 debuglog("PFL_NFS no lock found\n");
1912 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1913 break;
1914 case PFL_NFSRESERR:
1915 case PFL_HWRESERR:
1916 debuglog("PFL unlock resource failure");
1917 dump_filelock(fl);
1918 retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
1919 break;
1920 default:
1921 debuglog("PFL unlock *FAILED*");
1922 dump_filelock(fl);
1923 retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
1924 break;
1925 }
1926
1927 debuglog("Exiting do_unlock...\n");
1928
1929 return retval;
1930}
1931
/*
 * do_clear: wipe out all locks held by a host.
 *
 * Thin void wrapper around clear_partialfilelock(); it exists so that
 * a return code can be added later without touching callers.  A decent
 * compiler should optimize the indirection away.
 */

void
do_clear(const char *hostname)
{

	clear_partialfilelock(hostname);
}
1946
1947/*
1948 * The following routines are all called from the code which the
1949 * RPC layer invokes
1950 */
1951
1952/*
1953 * testlock(): inform the caller if the requested lock would be granted
1954 *
1955 * returns NULL if lock would granted
1956 * returns pointer to a conflicting nlm4_holder if not
1957 */
1958
1959struct nlm4_holder *
1960testlock(struct nlm4_lock *lock, bool_t exclusive, int flags __unused)
1961{
1962 struct file_lock test_fl, *conflicting_fl;
1963
1964 bzero(&test_fl, sizeof(test_fl));
1965
1966 test_fl.filehandle.n_len = lock->fh.n_len;
1967 test_fl.filehandle.n_bytes = lock->fh.n_bytes;
1968 copy_nlm4_lock_to_nlm4_holder(lock, exclusive, &test_fl.client);
1969
1970 siglock();
1971 do_test(&test_fl, &conflicting_fl);
1972
1973 if (conflicting_fl == NULL) {
1974 debuglog("No conflicting lock found\n");
1975 sigunlock();
1976 return NULL;
1977 } else {
1978 debuglog("Found conflicting lock\n");
1979 dump_filelock(conflicting_fl);
1980 sigunlock();
1981 return (&conflicting_fl->client);
1982 }
1983}
1984
1985/*
1986 * getlock: try to aquire the lock.
1987 * If file is already locked and we can sleep, put the lock in the list with
1988 * status LKST_WAITING; it'll be processed later.
1989 * Otherwise try to lock. If we're allowed to block, fork a child which
1990 * will do the blocking lock.
1991 */
1992
1993enum nlm_stats
1994getlock(nlm4_lockargs *lckarg, struct svc_req *rqstp, const int flags)
1995{
1996 struct file_lock *newfl;
1997 enum nlm_stats retval;
1998
1999 debuglog("Entering getlock...\n");
2000
2001 if (grace_expired == 0 && lckarg->reclaim == 0)
2002 return (flags & LOCK_V4) ?
2003 nlm4_denied_grace_period : nlm_denied_grace_period;
2004
2005 /* allocate new file_lock for this request */
7902cf7e 2006 newfl = allocate_file_lock(&lckarg->alock.oh, &lckarg->alock.fh);
ac2f15b3
A
2007 if (newfl == NULL) {
2008 syslog(LOG_NOTICE, "lock allocate failed: %s", strerror(errno));
2009 /* failed */
2010 return (flags & LOCK_V4) ?
2011 nlm4_denied_nolocks : nlm_denied_nolocks;
2012 }
2013
2014 if (lckarg->alock.fh.n_len != sizeof(fhandle_t)) {
2015 debuglog("recieved fhandle size %d, local size %d",
2016 lckarg->alock.fh.n_len, (int)sizeof(fhandle_t));
2017 }
2018
2019 fill_file_lock(newfl,
2020 (struct sockaddr *)svc_getcaller(rqstp->rq_xprt),
2021 lckarg->exclusive, lckarg->alock.svid, lckarg->alock.l_offset,
2022 lckarg->alock.l_len,
2023 lckarg->alock.caller_name, lckarg->state, 0, flags, lckarg->block);
2024
2025 /*
2026 * newfl is now fully constructed and deallocate_file_lock
2027 * can now be used to delete it
2028 */
2029
2030 siglock();
2031 debuglog("Pointer to new lock is %p\n",newfl);
2032
2033 retval = do_lock(newfl);
2034
2035 debuglog("Pointer to new lock is %p\n",newfl);
2036 sigunlock();
2037
2038 switch (retval)
2039 {
2040 case nlm4_granted:
2041 /* case nlm_granted: is the same as nlm4_granted */
2042 /* do_mon(lckarg->alock.caller_name); */
2043 break;
2044 case nlm4_blocked:
2045 /* case nlm_blocked: is the same as nlm4_blocked */
2046 /* do_mon(lckarg->alock.caller_name); */
2047 break;
2048 default:
2049 deallocate_file_lock(newfl);
2050 break;
2051 }
2052
2053 debuglog("Exiting getlock...\n");
2054
2055 return retval;
2056}
2057
2058
2059/* unlock a filehandle */
2060enum nlm_stats
2061unlock(nlm4_lock *lock, const int flags __unused)
2062{
2063 struct file_lock fl;
2064 enum nlm_stats err;
2065
2066 siglock();
2067
2068 debuglog("Entering unlock...\n");
2069
2070 bzero(&fl,sizeof(struct file_lock));
2071 fl.filehandle.n_len = lock->fh.n_len;
2072 fl.filehandle.n_bytes = lock->fh.n_bytes;
2073
2074 copy_nlm4_lock_to_nlm4_holder(lock, 0, &fl.client);
2075
2076 err = do_unlock(&fl);
2077
2078 sigunlock();
2079
2080 debuglog("Exiting unlock...\n");
2081
2082 return err;
2083}
2084
7902cf7e
A
2085/* cancel a blocked lock request */
2086enum nlm_stats
2087cancellock(nlm4_cancargs *args, const int flags __unused)
2088{
2089 struct file_lock *ifl, *nfl;
2090 enum nlm_stats err;
2091
2092 siglock();
2093
2094 debuglog("Entering cancellock...\n");
2095
2096 err = nlm_denied;
2097
2098 /*
2099 * scan blocked lock list for matching request and remove/destroy
2100 */
2101 ifl = LIST_FIRST(&blockedlocklist_head);
2102 for ( ; ifl != NULL; ifl = nfl) {
2103 nfl = LIST_NEXT(ifl, nfslocklist);
2104
2105 /* compare lock fh - filehandle */
2106 if (!same_netobj(&args->alock.fh, &ifl->filehandle))
2107 continue;
2108
2109 /* compare lock caller_name - client_name */
2110 if (strncmp(args->alock.caller_name, ifl->client_name, SM_MAXSTRLEN))
2111 continue;
2112
2113 /* Note: done't compare cookie - client_cookie */
2114 /* The cookie may be specific to the cancel request */
2115 /* and not be the same as the one in the original lock request. */
2116
2117 /* compare lock oh - client.oh */
2118 if (!same_netobj(&args->alock.oh, &ifl->client.oh))
2119 continue;
2120
2121 /* compare lock svid - client.svid */
2122 if (args->alock.svid != ifl->client.svid)
2123 continue;
2124
2125 /* compare lock l_offset - client.l_offset */
2126 if (args->alock.l_offset != ifl->client.l_offset)
2127 continue;
2128
2129 /* compare lock l_len - client.l_len */
2130 if (args->alock.l_len != ifl->client.l_len)
2131 continue;
2132
2133 /* compare exclusive - client.exclusive */
2134 if (args->exclusive != ifl->client.exclusive)
2135 continue;
2136
2137 /* got it */
2138 remove_blockingfilelock(ifl);
2139 deallocate_file_lock(ifl);
2140 err = nlm_granted;
2141 break;
2142 }
2143
2144 sigunlock();
2145
2146 debuglog("Exiting cancellock...\n");
2147
2148 return err;
2149}
2150
2151
ac2f15b3
A
/*
 * XXX: The following monitor/unmonitor routines
 * have not been extensively tested (i.e. no regression
 * script exists like the one for the locking sections).
 */
2157
7902cf7e
A
2158/*
2159 * Find a lock host on a queue. If found:
2160 * bump the ref,
2161 * bump the access time,
2162 * dequeue it from the queue it was found on,
2163 * enqueue it at the front of the "in use" queue.
2164 */
2165struct host *
2166get_lock_host(struct hostlst_head *hd, const char *hostname)
2167{
2168 struct host *ihp;
2169
2170 debuglog("get_lock_host %s\n", hostname);
2171 TAILQ_FOREACH(ihp, hd, hostlst) {
2172 if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) {
2173 TAILQ_REMOVE(hd, ihp, hostlst);
2174 /* Host is already monitored, bump refcount */
2175 ++ihp->refcnt;
2176 ihp->lastuse = currsec;
2177 /* Host should only be in the monitor list once */
2178 TAILQ_INSERT_HEAD(&hostlst_head, ihp, hostlst);
2179 break;
2180 }
2181 }
2182 debuglog("get_lock_host %s %s\n",
2183 ihp == NULL ? "did not find" : "found", hostname);
2184 return (ihp);
2185}
2186
ac2f15b3
A
2187/*
2188 * monitor_lock_host: monitor lock hosts locally with a ref count and
2189 * inform statd
2190 */
2191void
2192monitor_lock_host(const char *hostname)
2193{
2194 struct host *ihp, *nhp;
2195 struct mon smon;
2196 struct sm_stat_res sres;
2197 int rpcret, statflag;
7902cf7e 2198
ac2f15b3
A
2199 rpcret = 0;
2200 statflag = 0;
2201
7902cf7e
A
2202 debuglog("monitor_lock_host: %s\n", hostname);
2203 ihp = get_lock_host(&hostlst_head, hostname);
2204 if (ihp == NULL)
2205 ihp = get_lock_host(&hostlst_unref, hostname);
2206 if (ihp != NULL) {
2207 debuglog("Monitor_lock_host: %s (cached)\n", hostname);
2208 return;
ac2f15b3
A
2209 }
2210
7902cf7e 2211 debuglog("Monitor_lock_host: %s (not found, creating)\n", hostname);
ac2f15b3
A
2212 /* Host is not yet monitored, add it */
2213 nhp = malloc(sizeof(struct host));
7902cf7e 2214
ac2f15b3
A
2215 if (nhp == NULL) {
2216 debuglog("Unable to allocate entry for statd mon\n");
2217 return;
2218 }
2219
2220 /* Allocated new host entry, now fill the fields */
2221 strncpy(nhp->name, hostname, SM_MAXSTRLEN);
2222 nhp->refcnt = 1;
7902cf7e 2223 nhp->lastuse = currsec;
ac2f15b3 2224 debuglog("Locally Monitoring host %16s\n",hostname);
7902cf7e 2225
ac2f15b3 2226 debuglog("Attempting to tell statd\n");
7902cf7e 2227
ac2f15b3 2228 bzero(&smon,sizeof(smon));
7902cf7e 2229
ac2f15b3
A
2230 smon.mon_id.mon_name = nhp->name;
2231 smon.mon_id.my_id.my_name = "localhost\0";
2232
2233 smon.mon_id.my_id.my_prog = NLM_PROG;
2234 smon.mon_id.my_id.my_vers = NLM_SM;
2235 smon.mon_id.my_id.my_proc = NLM_SM_NOTIFY;
7902cf7e 2236
ac2f15b3
A
2237 rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_MON, xdr_mon,
2238 &smon, xdr_sm_stat_res, &sres);
7902cf7e 2239
ac2f15b3
A
2240 if (rpcret == 0) {
2241 if (sres.res_stat == stat_fail) {
2242 debuglog("Statd call failed\n");
2243 statflag = 0;
2244 } else {
2245 statflag = 1;
2246 }
2247 } else {
2248 debuglog("Rpc call to statd failed with return value: %d\n",
2249 rpcret);
2250 statflag = 0;
2251 }
7902cf7e 2252
ac2f15b3 2253 if (statflag == 1) {
7902cf7e 2254 TAILQ_INSERT_HEAD(&hostlst_head, nhp, hostlst);
ac2f15b3
A
2255 } else {
2256 free(nhp);
2257 }
ac2f15b3
A
2258}
2259
2260/*
2261 * unmonitor_lock_host: clear monitor ref counts and inform statd when gone
2262 */
2263void
2264unmonitor_lock_host(const char *hostname)
2265{
2266 struct host *ihp;
ac2f15b3 2267
7902cf7e 2268 TAILQ_FOREACH(ihp, &hostlst_head, hostlst) {
ac2f15b3
A
2269 if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) {
2270 /* Host is monitored, bump refcount */
2271 --ihp->refcnt;
2272 /* Host should only be in the monitor list once */
2273 break;
2274 }
2275 }
2276
2277 if (ihp == NULL) {
2278 debuglog("Could not find host %16s in mon list\n", hostname);
2279 return;
2280 }
2281
2282 if (ihp->refcnt > 0)
2283 return;
2284
2285 if (ihp->refcnt < 0) {
7902cf7e 2286 debuglog("Negative refcount!: %d\n", ihp->refcnt);
ac2f15b3
A
2287 }
2288
7902cf7e
A
2289 TAILQ_REMOVE(&hostlst_head, ihp, hostlst);
2290 TAILQ_INSERT_HEAD(&hostlst_unref, ihp, hostlst);
2291 if (host_expire <= 0)
2292 destroy_lock_host(ihp);
2293}
2294
2295void
2296destroy_lock_host(struct host *ihp)
2297{
2298 struct mon_id smon_id;
2299 struct sm_stat smstat;
2300 int rpcret;
2301
2302 debuglog("Attempting to unmonitor host %16s\n", ihp->name);
ac2f15b3
A
2303
2304 bzero(&smon_id,sizeof(smon_id));
2305
7902cf7e 2306 smon_id.mon_name = (char *)ihp->name;
ac2f15b3
A
2307 smon_id.my_id.my_name = "localhost";
2308 smon_id.my_id.my_prog = NLM_PROG;
2309 smon_id.my_id.my_vers = NLM_SM;
2310 smon_id.my_id.my_proc = NLM_SM_NOTIFY;
2311
7902cf7e
A
2312 rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_UNMON, xdr_mon_id,
2313 &smon_id, xdr_sm_stat, &smstat);
ac2f15b3
A
2314
2315 if (rpcret != 0) {
2316 debuglog("Rpc call to unmonitor statd failed with "
7902cf7e
A
2317 " return value: %d: %s", rpcret, clnt_sperrno(rpcret));
2318 } else {
2319 debuglog("Succeeded unmonitoring %16s\n", ihp->name);
ac2f15b3
A
2320 }
2321
7902cf7e 2322 TAILQ_REMOVE(&hostlst_unref, ihp, hostlst);
ac2f15b3
A
2323 free(ihp);
2324}
2325
7902cf7e
A
2326/*
2327 * returns 1 if there are hosts to expire or 0 if there are none.
2328 */
2329int
2330expire_lock_hosts(void)
2331{
2332 struct host *ihp;
2333
2334 debuglog("expire_lock_hosts: called\n");
2335 for ( ;; ) {
2336 ihp = TAILQ_LAST(&hostlst_unref, hostlst_head);
2337 if (ihp == NULL)
2338 break;
2339 if (host_expire > 0 && ihp->lastuse >= currsec - host_expire)
2340 break;
2341 debuglog("expire_lock_hosts: expiring %s %d %d %d\n",
2342 ihp->name, (int)ihp->lastuse,
2343 (int)currsec, (int)currsec - host_expire);
2344 destroy_lock_host(ihp);
2345 }
2346 return (TAILQ_LAST(&hostlst_unref, hostlst_head) != NULL);
2347}
2348
ac2f15b3
A
/*
 * notify: Clear all locks from a host if statd complains
 *
 * XXX: This routine has not been thoroughly tested.  However, neither
 * had the old one been.  It used to compare the statd crash state counter
 * to the current lock state.  The upshot of this was that it basically
 * cleared all locks from the specified host 99% of the time (with the
 * other 1% being a bug).  Consequently, the assumption is that clearing
 * all locks from a host when notified by statd is acceptable.
 *
 * Please note that this routine skips the usual level of redirection
 * through a do_* type routine.  This introduces a possible level of
 * error and might better be written as do_notify and take this one out.
 */
void
notify(const char *hostname, const int state)
{
	/* BUGFIX: entry message previously lacked a trailing newline. */
	debuglog("notify from %s, new state %d\n", hostname, state);

	siglock();
	do_clear(hostname);
	sigunlock();

	debuglog("Leaving notify\n");
}
2376
7902cf7e 2377int
ac2f15b3
A
2378send_granted(fl, opcode)
2379 struct file_lock *fl;
2380 int opcode __unused;
2381{
2382 CLIENT *cli;
2383 static char dummy;
2384 struct timeval timeo;
7902cf7e 2385 enum clnt_stat rv;
ac2f15b3
A
2386 static struct nlm_res retval;
2387 static struct nlm4_res retval4;
2388
2389 debuglog("About to send granted on blocked lock\n");
ac2f15b3
A
2390
2391 cli = get_client(fl->addr,
2392 (fl->flags & LOCK_V4) ? NLM_VERS4 : NLM_VERS);
2393 if (cli == NULL) {
2394 syslog(LOG_NOTICE, "failed to get CLIENT for %s",
2395 fl->client_name);
2396 /*
2397 * We fail to notify remote that the lock has been granted.
2398 * The client will timeout and retry, the lock will be
2399 * granted at this time.
2400 */
7902cf7e 2401 return -1;
ac2f15b3
A
2402 }
2403 timeo.tv_sec = 0;
2404 timeo.tv_usec = (fl->flags & LOCK_ASYNC) ? 0 : 500000; /* 0.5s */
2405
7902cf7e
A
2406 fl->granted_cookie = ++send_granted_cookie;
2407 if (!send_granted_cookie)
2408 send_granted_cookie++;
2409
ac2f15b3
A
2410 if (fl->flags & LOCK_V4) {
2411 static nlm4_testargs res;
7902cf7e
A
2412 res.cookie.n_len = sizeof(fl->granted_cookie);
2413 res.cookie.n_bytes = (char*)&fl->granted_cookie;
ac2f15b3
A
2414 res.exclusive = fl->client.exclusive;
2415 res.alock.caller_name = fl->client_name;
2416 res.alock.fh.n_len = fl->filehandle.n_len;
2417 res.alock.fh.n_bytes = fl->filehandle.n_bytes;
2418 res.alock.oh = fl->client.oh;
2419 res.alock.svid = fl->client.svid;
2420 res.alock.l_offset = fl->client.l_offset;
2421 res.alock.l_len = fl->client.l_len;
2422 debuglog("sending v4 reply%s",
2423 (fl->flags & LOCK_ASYNC) ? " (async)":"");
2424 if (fl->flags & LOCK_ASYNC) {
7902cf7e 2425 rv = clnt_call(cli, NLM4_GRANTED_MSG,
ac2f15b3
A
2426 xdr_nlm4_testargs, &res, xdr_void, &dummy, timeo);
2427 } else {
7902cf7e 2428 rv = clnt_call(cli, NLM4_GRANTED,
ac2f15b3
A
2429 xdr_nlm4_testargs, &res, xdr_nlm4_res,
2430 &retval4, timeo);
2431 }
2432 } else {
2433 static nlm_testargs res;
2434
7902cf7e
A
2435 res.cookie.n_len = sizeof(fl->granted_cookie);
2436 res.cookie.n_bytes = (char*)&fl->granted_cookie;
ac2f15b3
A
2437 res.exclusive = fl->client.exclusive;
2438 res.alock.caller_name = fl->client_name;
2439 res.alock.fh.n_len = fl->filehandle.n_len;
2440 res.alock.fh.n_bytes = fl->filehandle.n_bytes;
2441 res.alock.oh = fl->client.oh;
2442 res.alock.svid = fl->client.svid;
2443 res.alock.l_offset = fl->client.l_offset;
2444 res.alock.l_len = fl->client.l_len;
2445 debuglog("sending v1 reply%s",
2446 (fl->flags & LOCK_ASYNC) ? " (async)":"");
2447 if (fl->flags & LOCK_ASYNC) {
7902cf7e 2448 rv = clnt_call(cli, NLM_GRANTED_MSG,
ac2f15b3
A
2449 xdr_nlm_testargs, &res, xdr_void, &dummy, timeo);
2450 } else {
7902cf7e 2451 rv = clnt_call(cli, NLM_GRANTED,
ac2f15b3
A
2452 xdr_nlm_testargs, &res, xdr_nlm_res,
2453 &retval, timeo);
2454 }
2455 }
2456 if (debug_level > 2)
2457 debuglog("clnt_call returns %d(%s) for granted",
7902cf7e
A
2458 rv, clnt_sperrno(rv));
2459
2460 if ((rv != RPC_SUCCESS) &&
2461 !((fl->flags & LOCK_ASYNC) && (rv == RPC_TIMEDOUT)))
2462 return -1;
2463 return 0;
2464}
2465
2466/*
2467 * granted_failed: remove a granted lock that wasn't successfully
2468 * accepted by the client
2469 */
2470void
2471granted_failed(nlm4_res *arg)
2472{
2473 u_int64_t cookie;
2474 struct file_lock *ifl;
2475
2476 debuglog("Entering granted_failed, status %d\n", arg->stat.stat);
2477
2478 if (arg->cookie.n_len != sizeof(cookie)) {
2479 debuglog("Exiting granted_failed: bogus cookie size %d\n",
2480 arg->cookie.n_len);
2481 return;
2482 }
2483 bcopy(arg->cookie.n_bytes, &cookie, sizeof(cookie));
2484 debuglog("granted_failed, cookie 0x%llx\n", cookie);
2485
2486 LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
2487 debuglog("Pointer to file lock: %p\n",ifl);
2488
2489 debuglog("****Dump of ifl****\n");
2490 dump_filelock(ifl);
2491
2492 if (ifl->granted_cookie != cookie)
2493 continue;
2494
2495 debuglog("granted_failed: cookie found\n");
2496 break;
2497 }
2498
2499 if (ifl) {
2500 do_unlock(ifl);
2501 /* ifl is NO LONGER VALID AT THIS POINT */
2502 } else {
2503 debuglog("granted_failed: cookie NOT FOUND\n");
2504 }
ac2f15b3 2505
7902cf7e 2506 debuglog("Exiting granted_failed\n");
ac2f15b3
A
2507}
2508
2509/*
2510 * getshare: try to acquire a share reservation
2511 */
2512enum nlm_stats
2513getshare(nlm_shareargs *shrarg, struct svc_req *rqstp, const int flags)
2514{
2515 struct sharefile *shrfile;
2516 struct file_share *sh;
2517
2518 debuglog("Entering getshare...\n");
2519
2520 if (grace_expired == 0 && shrarg->reclaim == 0) {
2521 debuglog("getshare denied - grace period\n");
2522 return (flags & LOCK_V4) ?
2523 nlm4_denied_grace_period :
2524 nlm_denied_grace_period;
2525 }
2526
2527 /* find file in list of share files */
2528 LIST_FOREACH(shrfile, &nfssharefilelist_head, sharefilelist) {
2529 if ((shrarg->share.fh.n_len == shrfile->filehandle.n_len) &&
2530 (bcmp(shrarg->share.fh.n_bytes, shrfile->filehandle.n_bytes,
2531 shrarg->share.fh.n_len) == 0)) {
2532 /* shrfile is the correct file */
2533 break;
2534 }
2535 }
2536
2537 /* if share file not found, create a new share file */
2538 if (!shrfile) {
2539 int fd;
2540 fd = fhopen((fhandle_t *)shrarg->share.fh.n_bytes, O_RDONLY);
2541 if (fd < 0) {
2542 debuglog("fhopen failed (from %16s): %32s\n",
2543 shrarg->share.caller_name, strerror(errno));
2544 if ((flags & LOCK_V4) == 0)
2545 return nlm_denied;
2546 switch (errno) {
2547 case ESTALE:
2548 return nlm4_stale_fh;
2549 default:
2550 return nlm4_failed;
2551 }
2552 }
2553 shrfile = malloc(sizeof(struct sharefile));
2554 if (!shrfile) {
2555 debuglog("getshare failed: can't allocate sharefile\n");
2556 close(fd);
2557 return (flags & LOCK_V4) ? nlm4_failed : nlm_denied;
2558 }
2559 shrfile->filehandle.n_len = shrarg->share.fh.n_len;
2560 shrfile->filehandle.n_bytes = malloc(shrarg->share.fh.n_len);
2561 if (!shrfile->filehandle.n_bytes) {
2562 debuglog("getshare failed: can't allocate sharefile filehandle\n");
2563 free(shrfile);
2564 close(fd);
2565 return (flags & LOCK_V4) ? nlm4_failed : nlm_denied;
2566 }
2567 bcopy(shrarg->share.fh.n_bytes, shrfile->filehandle.n_bytes,
2568 shrarg->share.fh.n_len);
2569 shrfile->fd = fd;
2570 shrfile->refcount = 0;
2571 shrfile->sharelist_head.lh_first = NULL;
2572 LIST_INSERT_HEAD(&nfssharefilelist_head, shrfile, sharefilelist);
2573 }
2574
2575 /* compare request mode/access to current shares */
2576 LIST_FOREACH(sh, &shrfile->sharelist_head, nfssharelist) {
2577 /* if request host/owner matches a current share... */
2578 if ((strncmp(shrarg->share.caller_name, sh->client_name, SM_MAXSTRLEN) == 0) &&
2579 same_netobj(&shrarg->share.oh, &sh->oh)) {
2580 /* ...then just update share mode/access */
2581 sh->mode = shrarg->share.mode;
2582 sh->access = shrarg->share.access;
2583 debuglog("getshare: updated existing share\n");
2584 return nlm_granted;
2585 }
2586 if (((shrarg->share.mode & sh->access) != 0) ||
2587 ((shrarg->share.access & sh->mode) != 0)) {
2588 /* share request conflicts with existing share */
2589 debuglog("getshare: conflicts with existing share\n");
2590 return nlm_denied;
2591 }
2592 }
2593
2594 /* create/init new share */
2595 sh = malloc(sizeof(struct file_share));
2596 if (!sh) {
2597 debuglog("getshare failed: can't allocate share\n");
2598 if (!shrfile->refcount) {
2599 LIST_REMOVE(shrfile, sharefilelist);
2600 close(shrfile->fd);
2601 free(shrfile->filehandle.n_bytes);
2602 free(shrfile);
2603 }
2604 return (flags & LOCK_V4) ? nlm4_failed : nlm_denied;
2605 }
2606 sh->oh.n_len = shrarg->share.oh.n_len;
2607 sh->oh.n_bytes = malloc(sh->oh.n_len);
2608 if (!sh->oh.n_bytes) {
2609 debuglog("getshare failed: can't allocate share owner handle\n");
2610 free(sh);
2611 if (!shrfile->refcount) {
2612 LIST_REMOVE(shrfile, sharefilelist);
2613 close(shrfile->fd);
2614 free(shrfile->filehandle.n_bytes);
2615 free(shrfile);
2616 }
2617 return (flags & LOCK_V4) ? nlm4_failed : nlm_denied;
2618 }
2619 strncpy(sh->client_name, shrarg->share.caller_name, SM_MAXSTRLEN);
2620 sh->mode = shrarg->share.mode;
2621 sh->access = shrarg->share.access;
2622
2623 /* insert new share into file's share list */
2624 LIST_INSERT_HEAD(&shrfile->sharelist_head, sh, nfssharelist);
2625 shrfile->refcount++;
2626
2627 debuglog("Exiting getshare...\n");
2628
2629 return nlm_granted;
2630}
2631
2632
2633/* remove a share reservation */
2634enum nlm_stats
2635unshare(nlm_shareargs *shrarg, struct svc_req *rqstp)
2636{
2637 struct sharefile *shrfile;
2638 struct file_share *sh;
2639
2640 debuglog("Entering unshare...\n");
2641
2642 /* find file in list of share files */
2643 LIST_FOREACH(shrfile, &nfssharefilelist_head, sharefilelist) {
2644 if ((shrarg->share.fh.n_len == shrfile->filehandle.n_len) &&
2645 (bcmp(shrarg->share.fh.n_bytes, shrfile->filehandle.n_bytes,
2646 shrarg->share.fh.n_len) == 0)) {
2647 /* shrfile is the correct file */
2648 break;
2649 }
2650 }
2651
2652 /* if share file not found, return success (per spec) */
2653 if (!shrfile) {
2654 debuglog("unshare: no such share file\n");
2655 return nlm_granted;
2656 }
2657
2658 /* find share */
2659 LIST_FOREACH(sh, &shrfile->sharelist_head, nfssharelist) {
2660 /* if request host/owner matches a current share... */
2661 if ((strncmp(shrarg->share.caller_name, sh->client_name, SM_MAXSTRLEN) == 0) &&
2662 same_netobj(&shrarg->share.oh, &sh->oh))
2663 break;
2664 }
2665
2666 /* if share not found, return success (per spec) */
2667 if (!sh) {
2668 debuglog("unshare: no such share\n");
2669 return nlm_granted;
2670 }
2671
2672 /* remove share from file and deallocate */
2673 shrfile->refcount--;
2674 LIST_REMOVE(sh, nfssharelist);
2675 free(sh->oh.n_bytes);
2676 free(sh);
2677
2678 /* if file has no more shares, deallocate share file */
2679 if (!shrfile->refcount) {
2680 debuglog("unshare: file has no more shares\n");
2681 LIST_REMOVE(shrfile, sharefilelist);
2682 close(shrfile->fd);
2683 free(shrfile->filehandle.n_bytes);
2684 free(shrfile);
2685 }
2686
2687 debuglog("Exiting unshare...\n");
2688
2689 return nlm_granted;
2690}
2691
2692/*
2693 * do_free_all
2694 *
2695 * Wipe out all non-monitored locks and shares held by a host.
2696 */
2697
2698void
2699do_free_all(const char *hostname)
2700{
2701 struct file_lock *ifl, *nfl;
2702 struct sharefile *shrfile, *nshrfile;
2703 struct file_share *ifs, *nfs;
2704
2705 /* clear non-monitored blocking file locks */
2706 ifl = LIST_FIRST(&blockedlocklist_head);
2707 while (ifl != NULL) {
2708 nfl = LIST_NEXT(ifl, nfslocklist);
2709
2710 if (((ifl->flags & LOCK_MON) == 0) &&
2711 (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0)) {
2712 remove_blockingfilelock(ifl);
2713 deallocate_file_lock(ifl);
2714 }
2715
2716 ifl = nfl;
2717 }
2718
2719 /* clear non-monitored file locks */
2720 ifl = LIST_FIRST(&nfslocklist_head);
2721 while (ifl != NULL) {
2722 nfl = LIST_NEXT(ifl, nfslocklist);
2723
2724 if (((ifl->flags & LOCK_MON) == 0) &&
2725 (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0)) {
2726 /* Unlock destroys ifl out from underneath */
2727 unlock_partialfilelock(ifl);
2728 /* ifl is NO LONGER VALID AT THIS POINT */
2729 }
2730
2731 ifl = nfl;
2732 }
2733
2734 /* clear shares */
2735 shrfile = LIST_FIRST(&nfssharefilelist_head);
2736 while (shrfile != NULL) {
2737 nshrfile = LIST_NEXT(shrfile, sharefilelist);
2738
2739 ifs = LIST_FIRST(&shrfile->sharelist_head);
2740 while (ifs != NULL) {
2741 nfs = LIST_NEXT(ifs, nfssharelist);
2742
2743 if (strncmp(hostname, ifs->client_name, SM_MAXSTRLEN) == 0) {
2744 shrfile->refcount--;
2745 LIST_REMOVE(ifs, nfssharelist);
2746 free(ifs->oh.n_bytes);
2747 free(ifs);
2748 }
2749
2750 ifs = nfs;
2751 }
2752
2753 if (!shrfile->refcount) {
2754 LIST_REMOVE(shrfile, sharefilelist);
2755 close(shrfile->fd);
2756 free(shrfile->filehandle.n_bytes);
2757 free(shrfile);
2758 }
2759
2760 shrfile = nshrfile;
2761 }
2762
2763}
2764
2765
2766
2767/*
2768 * Routines below here have not been modified in the overhaul
2769 */
2770
2771/*
2772 * Are these two routines still required since lockd is not spawning off
2773 * children to service locks anymore? Presumably they were originally
2774 * put in place to prevent a one child from changing the lock list out
2775 * from under another one.
2776 */
2777
2778void
2779siglock(void)
2780{
2781 sigset_t block;
2782
2783 sigemptyset(&block);
2784 sigaddset(&block, SIGCHLD);
2785
2786 if (sigprocmask(SIG_BLOCK, &block, NULL) < 0) {
2787 syslog(LOG_WARNING, "siglock failed: %s", strerror(errno));
2788 }
2789}
2790
2791void
2792sigunlock(void)
2793{
2794 sigset_t block;
2795
2796 sigemptyset(&block);
2797 sigaddset(&block, SIGCHLD);
2798
2799 if (sigprocmask(SIG_UNBLOCK, &block, NULL) < 0) {
2800 syslog(LOG_WARNING, "sigunlock failed: %s", strerror(errno));
2801 }
2802}
2803
2804