/*	$NetBSD: lockd_lock.c,v 1.5 2000/11/21 03:47:41 enami Exp $	*/
/*	$FreeBSD: src/usr.sbin/rpc.lockd/lockd_lock.c,v 1.10 2002/03/22 19:57:09 alfred Exp $ */

/*
 * Copyright (c) 2001 Andrew P. Lentvorski, Jr.
 * Copyright (c) 2000 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#define LOCKD_DEBUG

#include <stdio.h>
#ifdef LOCKD_DEBUG
#include <stdarg.h>
#endif
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <syslog.h>
#include <errno.h>
#include <string.h>
#include <signal.h>
#include <rpc/rpc.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/wait.h>
#include <rpcsvc/sm_inter.h>
#include <rpcsvc/nlm_prot.h>

#include "lockd.h"
#include "lockd_lock.h"

#define MAXOBJECTSIZE 64
#define MAXBUFFERSIZE 1024

/*
 * A set of utilities for managing file locking
 *
 * XXX: All locks are in a linked list, a better structure should be used
 * to improve search/access efficiency.
 */

/* struct describing a lock */
struct file_lock {
	LIST_ENTRY(file_lock) nfslocklist;
	netobj filehandle;		/* NFS filehandle */
	struct sockaddr *addr;
	struct nlm4_holder client;	/* lock holder */
	u_int64_t granted_cookie;
	int nsm_status;			/* status from the remote lock manager */
	int status;			/* lock status, see below */
	int flags;			/* lock flags, see lockd_lock.h */
	int blocking;			/* blocking lock or not */
	char client_name[SM_MAXSTRLEN];	/* client_name is really variable length and must be last! */
};

LIST_HEAD(nfslocklist_head, file_lock);
struct nfslocklist_head nfslocklist_head = LIST_HEAD_INITIALIZER(nfslocklist_head);

LIST_HEAD(blockedlocklist_head, file_lock);
struct blockedlocklist_head blockedlocklist_head = LIST_HEAD_INITIALIZER(blockedlocklist_head);

/* struct describing a share reservation */
struct file_share {
	LIST_ENTRY(file_share) nfssharelist;
	netobj oh;		/* share holder */
	short mode;
	short access;
	char client_name[SM_MAXSTRLEN];	/* name is really variable length and must be last! */
};
LIST_HEAD(nfssharelist_head, file_share);

/* Struct describing a file with share reservations */
struct sharefile {
	LIST_ENTRY(sharefile) sharefilelist;
	netobj filehandle;	/* Local access filehandle */
	int fd;			/* file descriptor: remains open until no more shares */
	int refcount;
	struct nfssharelist_head sharelist_head;
};
LIST_HEAD(nfssharefilelist_head, sharefile);
struct nfssharefilelist_head nfssharefilelist_head = LIST_HEAD_INITIALIZER(nfssharefilelist_head);

/* lock status */
#define LKST_LOCKED	1	/* lock is locked */
/* XXX: Is this flag file specific or lock specific? */
#define LKST_WAITING	2	/* file is already locked by another host */
#define LKST_PROCESSING	3	/* child is trying to acquire the lock */
#define LKST_DYING	4	/* must die when we get news from the child */

/* struct describing a monitored host */
struct host {
	TAILQ_ENTRY(host) hostlst;
	int refcnt;
	time_t lastuse;
	struct sockaddr addr;
	char name[SM_MAXSTRLEN];	/* name is really variable length and must be last! */
};
/* list of hosts we monitor */
TAILQ_HEAD(hostlst_head, host);
struct hostlst_head hostlst_head = TAILQ_HEAD_INITIALIZER(hostlst_head);
struct hostlst_head hostlst_unref = TAILQ_HEAD_INITIALIZER(hostlst_unref);

int host_expire = 60;	/* seconds */
time_t currsec;
u_int64_t send_granted_cookie = 0;

/*
 * File monitoring handlers
 * XXX: These might be able to be removed when kevent support
 * is placed into the hardware lock/unlock routines.  (ie.
 * let the kernel do all the file monitoring)
 */

/* Struct describing a monitored file */
struct monfile {
	LIST_ENTRY(monfile) monfilelist;
	netobj filehandle;	/* Local access filehandle */
	int fd;			/* file descriptor: remains open until unlock! */
	int refcount;
	int exclusive;
};

/* List of files we monitor */
LIST_HEAD(monfilelist_head, monfile);
struct monfilelist_head monfilelist_head = LIST_HEAD_INITIALIZER(monfilelist_head);

static int debugdelay = 0;

enum nfslock_status { NFS_GRANTED = 0, NFS_GRANTED_DUPLICATE,
		      NFS_DENIED, NFS_DENIED_NOLOCK,
		      NFS_RESERR };

enum hwlock_status { HW_GRANTED = 0, HW_GRANTED_DUPLICATE,
		     HW_DENIED, HW_DENIED_NOLOCK,
		     HW_STALEFH, HW_READONLY, HW_RESERR };

enum partialfilelock_status { PFL_GRANTED=0, PFL_GRANTED_DUPLICATE, PFL_DENIED,
			      PFL_NFSDENIED, PFL_NFSBLOCKED, PFL_NFSDENIED_NOLOCK, PFL_NFSRESERR,
			      PFL_HWDENIED,  PFL_HWBLOCKED,  PFL_HWDENIED_NOLOCK, PFL_HWRESERR,
			      PFL_HWDENIED_STALEFH, PFL_HWDENIED_READONLY };

enum LFLAGS {LEDGE_LEFT, LEDGE_LBOUNDARY, LEDGE_INSIDE, LEDGE_RBOUNDARY, LEDGE_RIGHT};
enum RFLAGS {REDGE_LEFT, REDGE_LBOUNDARY, REDGE_INSIDE, REDGE_RBOUNDARY, REDGE_RIGHT};
/* XXX: WARNING! I HAVE OVERLOADED THIS STATUS ENUM!  SPLIT IT APART INTO TWO */
enum split_status {SPL_DISJOINT=0, SPL_LOCK1=1, SPL_LOCK2=2, SPL_CONTAINED=4, SPL_RESERR=8};

enum partialfilelock_status lock_partialfilelock(struct file_lock *fl);

int send_granted(struct file_lock *fl, int opcode);
void siglock(void);
void sigunlock(void);
void destroy_lock_host(struct host *ihp);
static void monitor_lock_host(const char *hostname, const struct sockaddr *addr);

void copy_nlm4_lock_to_nlm4_holder(const struct nlm4_lock *src,
    const bool_t exclusive, struct nlm4_holder *dest);
struct file_lock * allocate_file_lock(const netobj *lockowner,
    const netobj *filehandle, const struct sockaddr *addr,
    const char *caller_name);
void deallocate_file_lock(struct file_lock *fl);
void fill_file_lock(struct file_lock *fl,
    const bool_t exclusive, const int32_t svid,
    const u_int64_t offset, const u_int64_t len,
    const int state, const int status, const int flags, const int blocking);
int regions_overlap(const u_int64_t start1, const u_int64_t len1,
    const u_int64_t start2, const u_int64_t len2);
enum split_status region_compare(const u_int64_t starte, const u_int64_t lene,
    const u_int64_t startu, const u_int64_t lenu,
    u_int64_t *start1, u_int64_t *len1, u_int64_t *start2, u_int64_t *len2);
int same_netobj(const netobj *n0, const netobj *n1);
int same_filelock_identity(const struct file_lock *fl0,
    const struct file_lock *fl2);

static void debuglog(char const *fmt, ...);
void dump_static_object(const unsigned char* object, const int sizeof_object,
    unsigned char* hbuff, const int sizeof_hbuff,
    unsigned char* cbuff, const int sizeof_cbuff);
void dump_netobj(const struct netobj *nobj);
void dump_filelock(const struct file_lock *fl);
struct file_lock * get_lock_matching_unlock(const struct file_lock *fl);
enum nfslock_status test_nfslock(const struct file_lock *fl,
    struct file_lock **conflicting_fl);
enum nfslock_status lock_nfslock(struct file_lock *fl);
enum nfslock_status delete_nfslock(struct file_lock *fl);
enum nfslock_status unlock_nfslock(const struct file_lock *fl,
    struct file_lock **released_lock, struct file_lock **left_lock,
    struct file_lock **right_lock);
enum hwlock_status lock_hwlock(struct file_lock *fl);
enum split_status split_nfslock(const struct file_lock *exist_lock,
    const struct file_lock *unlock_lock, struct file_lock **left_lock,
    struct file_lock **right_lock);
void add_blockingfilelock(struct file_lock *fl);
enum hwlock_status unlock_hwlock(const struct file_lock *fl);
enum hwlock_status test_hwlock(const struct file_lock *fl,
    struct file_lock **conflicting_fl);
void remove_blockingfilelock(struct file_lock *fl);
void clear_blockingfilelock(const char *hostname);
void retry_blockingfilelocklist(netobj *fh);
enum partialfilelock_status unlock_partialfilelock(
    const struct file_lock *fl);
void clear_partialfilelock(const char *hostname);
enum partialfilelock_status test_partialfilelock(
    const struct file_lock *fl, struct file_lock **conflicting_fl);
enum nlm4_stats do_test(struct file_lock *fl, struct file_lock **conflicting_fl);
enum nlm4_stats do_unlock(struct file_lock *fl);
enum nlm4_stats do_lock(struct file_lock *fl);
void do_clear(const char *hostname);

void
debuglog(char const *fmt, ...)
{
	va_list ap;

	if (debug_level < 1) {
		return;
	}

	sleep(debugdelay);

	va_start(ap, fmt);
	vsyslog(LOG_DEBUG, fmt, ap);
	va_end(ap);
}

void
dump_static_object(object, size_object, hbuff, size_hbuff, cbuff, size_cbuff)
	const unsigned char *object;
	const int size_object;
	unsigned char *hbuff;
	const int size_hbuff;
	unsigned char *cbuff;
	const int size_cbuff;
{
	int i, objectsize;

	if (debug_level < 2) {
		return;
	}

	objectsize = size_object;

	if (objectsize == 0) {
		debuglog("object is size 0\n");
	} else {
		if (objectsize > MAXOBJECTSIZE) {
			debuglog("Object of size %d being clamped"
			    " to size %d\n", objectsize, MAXOBJECTSIZE);
			objectsize = MAXOBJECTSIZE;
		}

		if (hbuff != NULL) {
			if (size_hbuff < objectsize*2+1) {
				debuglog("Hbuff not large enough."
				    " Increase size\n");
			} else {
				for(i=0;i<objectsize;i++) {
					sprintf(hbuff+i*2,"%02x",*(object+i));
				}
				*(hbuff+i*2) = '\0';
			}
		}

		if (cbuff != NULL) {
			if (size_cbuff < objectsize+1) {
				debuglog("Cbuff not large enough."
				    " Increase Size\n");
			} else {
				/* Only fill cbuff when it is big enough */
				for(i=0;i<objectsize;i++) {
					if (*(object+i) >= 32 && *(object+i) <= 127) {
						*(cbuff+i) = *(object+i);
					} else {
						*(cbuff+i) = '.';
					}
				}
				*(cbuff+i) = '\0';
			}
		}
	}
}

void
dump_netobj(const struct netobj *nobj)
{
	char hbuff[MAXBUFFERSIZE*2];
	char cbuff[MAXBUFFERSIZE];

	if (debug_level < 2) {
		return;
	}

	if (nobj == NULL) {
		debuglog("Null netobj pointer\n");
	}
	else if (nobj->n_len == 0) {
		debuglog("Size zero netobj\n");
	} else {
		dump_static_object(nobj->n_bytes, nobj->n_len,
		    hbuff, sizeof(hbuff), cbuff, sizeof(cbuff));
		debuglog("netobj: len: %d  data: %s ::: %s\n",
		    nobj->n_len, hbuff, cbuff);
	}
}

/* #define DUMP_FILELOCK_VERBOSE */
void
dump_filelock(const struct file_lock *fl)
{
#ifdef DUMP_FILELOCK_VERBOSE
	char hbuff[MAXBUFFERSIZE*2];
	char cbuff[MAXBUFFERSIZE];
#endif

	if (debug_level < 2) {
		return;
	}

	if (fl != NULL) {
		debuglog("Dumping file lock structure @ %p\n", fl);

#ifdef DUMP_FILELOCK_VERBOSE
		dump_static_object((unsigned char *)&fl->filehandle.n_bytes,
		    fl->filehandle.n_len, hbuff, sizeof(hbuff),
		    cbuff, sizeof(cbuff));
		debuglog("Filehandle: %8s ::: %8s\n", hbuff, cbuff);
#endif

		debuglog("Dumping nlm4_holder:\n"
		    "exc: %x  svid: %x  offset:len %llx:%llx\n",
		    fl->client.exclusive, fl->client.svid,
		    fl->client.l_offset, fl->client.l_len);

#ifdef DUMP_FILELOCK_VERBOSE
		debuglog("Dumping client identity:\n");
		dump_netobj(&fl->client.oh);

		debuglog("nsm: %d  status: %d  flags: %d  locker: %d"
		    "  fd: %d\n", fl->nsm_status, fl->status,
		    fl->flags, fl->locker, fl->fd);
#endif
	} else {
		debuglog("NULL file lock structure\n");
	}
}

void
copy_nlm4_lock_to_nlm4_holder(src, exclusive, dest)
	const struct nlm4_lock *src;
	const bool_t exclusive;
	struct nlm4_holder *dest;
{

	dest->exclusive = exclusive;
	dest->oh.n_len = src->oh.n_len;
	dest->oh.n_bytes = src->oh.n_bytes;
	dest->svid = src->svid;
	dest->l_offset = src->l_offset;
	dest->l_len = src->l_len;
}


size_t
strnlen(const char *s, size_t len)
{
	size_t n;

	/* Check the bound before dereferencing so s[len] is never read */
	for (n = 0; n < len && s[n] != 0; n++)
		;
	return n;
}

/*
 * allocate_file_lock: Create a lock with the given parameters
 */

struct file_lock *
allocate_file_lock(const netobj *lockowner, const netobj *filehandle,
    const struct sockaddr *addr, const char *caller_name)
{
	struct file_lock *newfl;
	size_t n;

	/* Beware of rubbish input! */
	n = strnlen(caller_name, SM_MAXSTRLEN);
	if (n == SM_MAXSTRLEN) {
		return NULL;
	}

	newfl = malloc(sizeof(*newfl) - sizeof(newfl->client_name) + n + 1);
	if (newfl == NULL) {
		return NULL;
	}
	bzero(newfl, sizeof(*newfl) - sizeof(newfl->client_name));
	memcpy(newfl->client_name, caller_name, n);
	newfl->client_name[n] = 0;

	newfl->client.oh.n_bytes = malloc(lockowner->n_len);
	if (newfl->client.oh.n_bytes == NULL) {
		free(newfl);
		return NULL;
	}
	newfl->client.oh.n_len = lockowner->n_len;
	bcopy(lockowner->n_bytes, newfl->client.oh.n_bytes, lockowner->n_len);

	newfl->filehandle.n_bytes = malloc(filehandle->n_len);
	if (newfl->filehandle.n_bytes == NULL) {
		free(newfl->client.oh.n_bytes);
		free(newfl);
		return NULL;
	}
	newfl->filehandle.n_len = filehandle->n_len;
	bcopy(filehandle->n_bytes, newfl->filehandle.n_bytes, filehandle->n_len);

	newfl->addr = malloc(addr->sa_len);
	if (newfl->addr == NULL) {
		free(newfl->filehandle.n_bytes);	/* fix leak: filehandle was already allocated */
		free(newfl->client.oh.n_bytes);
		free(newfl);
		return NULL;
	}
	memcpy(newfl->addr, addr, addr->sa_len);

	return newfl;
}

/*
 * fill_file_lock: Force creation of a valid file lock
 */
void
fill_file_lock(struct file_lock *fl,
    const bool_t exclusive, const int32_t svid,
    const u_int64_t offset, const u_int64_t len,
    const int state, const int status, const int flags, const int blocking)
{
	fl->client.exclusive = exclusive;
	fl->client.svid = svid;
	fl->client.l_offset = offset;
	fl->client.l_len = len;

	fl->nsm_status = state;
	fl->status = status;
	fl->flags = flags;
	fl->blocking = blocking;
}

/*
 * deallocate_file_lock: Free all storage associated with a file lock
 */
void
deallocate_file_lock(struct file_lock *fl)
{
	free(fl->addr);
	free(fl->client.oh.n_bytes);
	free(fl->filehandle.n_bytes);
	free(fl);
}

/*
 * regions_overlap(): This function examines the two provided regions for
 * overlap.
 */
int
regions_overlap(start1, len1, start2, len2)
	const u_int64_t start1, len1, start2, len2;
{
	u_int64_t d1,d2,d3,d4;
	enum split_status result;

	debuglog("Entering region overlap with vals: %llu:%llu--%llu:%llu\n",
	    start1, len1, start2, len2);

	result = region_compare(start1, len1, start2, len2,
	    &d1, &d2, &d3, &d4);

	debuglog("Exiting region overlap with val: %d\n",result);

	if (result == SPL_DISJOINT) {
		return 0;
	} else {
		return 1;
	}
}

/*
 * region_compare(): Examine lock regions and split appropriately
 *
 * XXX: Fix 64 bit overflow problems
 * XXX: Check to make sure I got *ALL* the cases.
 * XXX: This DESPERATELY needs a regression test.
 *      (An illustrative sketch follows the function below.)
 */
enum split_status
region_compare(starte, lene, startu, lenu,
    start1, len1, start2, len2)
	const u_int64_t starte, lene, startu, lenu;
	u_int64_t *start1, *len1, *start2, *len2;
{
	/*
	 * Please pay attention to the sequential exclusions
	 * of the if statements!!!
	 */
	enum LFLAGS lflags;
	enum RFLAGS rflags;
	enum split_status retval;

	retval = SPL_DISJOINT;

	if (lene == 0 && lenu == 0) {
		/* Examine left edge of unlocker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
		} else {
			lflags = LEDGE_INSIDE;
		}

		rflags = REDGE_RBOUNDARY; /* Both are infinite */

		if (lflags == LEDGE_INSIDE) {
			*start1 = starte;
			*len1 = startu - starte;
		}

		if (lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) {
			retval = SPL_CONTAINED;
		} else {
			retval = SPL_LOCK1;
		}
	} else if (lene == 0 && lenu != 0) {
		/* Established lock is infinite */
		/* Examine left edge of unlocker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
		} else if (startu > starte) {
			lflags = LEDGE_INSIDE;
		}

		/* Examine right edge of unlocker */
		if (startu + lenu < starte) {
			/* Right edge of unlocker left of established lock */
			rflags = REDGE_LEFT;
			return SPL_DISJOINT;
		} else if (startu + lenu == starte) {
			/* Right edge of unlocker on start of established lock */
			rflags = REDGE_LBOUNDARY;
			return SPL_DISJOINT;
		} else { /* Infinity is right of finity */
			/* Right edge of unlocker inside established lock */
			rflags = REDGE_INSIDE;
		}

		if (lflags == LEDGE_INSIDE) {
			*start1 = starte;
			*len1 = startu - starte;
			retval |= SPL_LOCK1;
		}

		if (rflags == REDGE_INSIDE) {
			/* Create right lock */
			*start2 = startu+lenu;
			*len2 = 0;
			retval |= SPL_LOCK2;
		}
	} else if (lene != 0 && lenu == 0) {
		/* Unlocker is infinite */
		/* Examine left edge of unlocker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
			retval = SPL_CONTAINED;
			return retval;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
			retval = SPL_CONTAINED;
			return retval;
		} else if ((startu > starte) && (startu < starte + lene - 1)) {
			lflags = LEDGE_INSIDE;
		} else if (startu == starte + lene - 1) {
			lflags = LEDGE_RBOUNDARY;
		} else { /* startu > starte + lene -1 */
			lflags = LEDGE_RIGHT;
			return SPL_DISJOINT;
		}

		rflags = REDGE_RIGHT; /* Infinity is right of finity */

		if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
			*start1 = starte;
			*len1 = startu - starte;
			retval |= SPL_LOCK1;
			return retval;
		}

	} else {
		/* Both locks are finite */

		/* Examine left edge of unlocker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
		} else if ((startu > starte) && (startu < starte + lene - 1)) {
			lflags = LEDGE_INSIDE;
		} else if (startu == starte + lene - 1) {
			lflags = LEDGE_RBOUNDARY;
		} else { /* startu > starte + lene -1 */
			lflags = LEDGE_RIGHT;
			return SPL_DISJOINT;
		}

		/* Examine right edge of unlocker */
		if (startu + lenu < starte) {
			/* Right edge of unlocker left of established lock */
			rflags = REDGE_LEFT;
			return SPL_DISJOINT;
		} else if (startu + lenu == starte) {
			/* Right edge of unlocker on start of established lock */
			rflags = REDGE_LBOUNDARY;
			return SPL_DISJOINT;
		} else if (startu + lenu < starte + lene) {
			/* Right edge of unlocker inside established lock */
			rflags = REDGE_INSIDE;
		} else if (startu + lenu == starte + lene) {
			/* Right edge of unlocker on right edge of established lock */
			rflags = REDGE_RBOUNDARY;
		} else { /* startu + lenu > starte + lene */
			/* Right edge of unlocker is right of established lock */
			rflags = REDGE_RIGHT;
		}

		if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
			/* Create left lock */
			*start1 = starte;
			*len1 = (startu - starte);
			retval |= SPL_LOCK1;
		}

		if (rflags == REDGE_INSIDE) {
			/* Create right lock */
			*start2 = startu+lenu;
			*len2 = starte+lene-(startu+lenu);
			retval |= SPL_LOCK2;
		}

		if ((lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) &&
		    (rflags == REDGE_RBOUNDARY || rflags == REDGE_RIGHT)) {
			retval = SPL_CONTAINED;
		}
	}

	return retval;
}
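
#ifdef REGION_COMPARE_TEST
/*
 * A minimal sketch of the regression test the XXX comment above asks for.
 * This is illustrative only and not part of the original code; the
 * REGION_COMPARE_TEST guard and the test_region_compare() name are ours.
 * Expected values follow from the edge rules above, e.g. unlocking
 * [40,60) out of an established lock [0,100) should split it into
 * [0,40) and [60,100).
 */
static void
test_region_compare(void)
{
	u_int64_t s1, l1, s2, l2;
	enum split_status st;

	/* Interior unlock splits the established lock in two */
	st = region_compare(0, 100, 40, 20, &s1, &l1, &s2, &l2);
	if (st != (SPL_LOCK1 | SPL_LOCK2) ||
	    s1 != 0 || l1 != 40 || s2 != 60 || l2 != 40)
		debuglog("region_compare: interior split FAILED\n");

	/* Unlocker entirely left of the established lock: disjoint */
	st = region_compare(100, 50, 0, 50, &s1, &l1, &s2, &l2);
	if (st != SPL_DISJOINT)
		debuglog("region_compare: disjoint case FAILED\n");

	/* Infinite unlocker (lenu == 0) starting at the lock: contained */
	st = region_compare(100, 50, 100, 0, &s1, &l1, &s2, &l2);
	if (st != SPL_CONTAINED)
		debuglog("region_compare: contained case FAILED\n");
}
#endif /* REGION_COMPARE_TEST */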

/*
 * same_netobj: Compares the appropriate bits of a netobj for identity
 */
int
same_netobj(const netobj *n0, const netobj *n1)
{
	int retval;

	retval = 0;

	debuglog("Entering netobj identity check\n");

	if (n0->n_len == n1->n_len) {
		debuglog("Preliminary length check passed\n");
		retval = !bcmp(n0->n_bytes, n1->n_bytes, n0->n_len);
		debuglog("netobj %smatch\n", retval ? "" : "mis");
	}

	return (retval);
}

/*
 * same_filelock_identity: Compares the appropriate bits of a file_lock
 */
int
same_filelock_identity(fl0, fl1)
	const struct file_lock *fl0, *fl1;
{
	int retval;

	retval = 0;

	debuglog("Checking filelock identity\n");

	/*
	 * Check process ids and host information.
	 */
	retval = (fl0->client.svid == fl1->client.svid &&
	    same_netobj(&(fl0->client.oh), &(fl1->client.oh)));

	debuglog("Exiting checking filelock identity: retval: %d\n",retval);

	return (retval);
}

/*
 * Below here are routines associated with manipulating the NFS
 * lock list.
 */

/*
 * get_lock_matching_unlock: Return a lock which matches the given unlock lock
 *                           or NULL otherwise
 * XXX: It is a shame that this duplicates so much code from test_nfslock.
 */
struct file_lock *
get_lock_matching_unlock(const struct file_lock *fl)
{
	struct file_lock *ifl;	/* Iterator */

	debuglog("Entering lock_matching_unlock\n");
	debuglog("********Dump of fl*****************\n");
	dump_filelock(fl);

	LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
		debuglog("Pointer to file lock: %p\n",ifl);

		debuglog("****Dump of ifl****\n");
		dump_filelock(ifl);
		debuglog("*******************\n");

		/*
		 * XXX: It is conceivable that someone could use the NLM RPC
		 * system to directly access filehandles.  This may be a
		 * security hazard as the filehandle code may bypass normal
		 * file access controls
		 */
		if (fl->filehandle.n_len != ifl->filehandle.n_len)
			continue;
		if (bcmp(fl->filehandle.n_bytes, ifl->filehandle.n_bytes,
			fl->filehandle.n_len))
			continue;

		debuglog("matching_unlock: Filehandles match, "
		    "checking regions\n");

		/* Filehandles match, check for region overlap */
		if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
			ifl->client.l_offset, ifl->client.l_len))
			continue;

		debuglog("matching_unlock: Region overlap"
		    " found %llu : %llu -- %llu : %llu\n",
		    fl->client.l_offset,fl->client.l_len,
		    ifl->client.l_offset,ifl->client.l_len);

		/* Regions overlap, check the identity */
		if (!same_filelock_identity(fl,ifl))
			continue;

		debuglog("matching_unlock: Duplicate lock id.  Granting\n");
		return (ifl);
	}

	debuglog("Exiting lock_matching_unlock\n");

	return (NULL);
}

/*
 * test_nfslock: check for NFS lock in lock list
 *
 * This routine makes the following assumptions:
 *    1) Nothing will adjust the lock list during a lookup
 *
 * This routine has an interesting quirk which bit me hard.
 * The conflicting_fl is the pointer to the conflicting lock.
 * However, to modify the "*pointer* to the conflicting lock" rather
 * than the "conflicting lock itself" one must pass in a "pointer to
 * the pointer of the conflicting lock".  Gross.  (A usage sketch
 * follows the function below.)
 */

enum nfslock_status
test_nfslock(const struct file_lock *fl, struct file_lock **conflicting_fl)
{
	struct file_lock *ifl;	/* Iterator */
	enum nfslock_status retval;

	debuglog("Entering test_nfslock\n");

	retval = NFS_GRANTED;
	(*conflicting_fl) = NULL;

	debuglog("Entering lock search loop\n");

	debuglog("***********************************\n");
	debuglog("Dumping match filelock\n");
	debuglog("***********************************\n");
	dump_filelock(fl);
	debuglog("***********************************\n");

	LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
		if (retval == NFS_DENIED)
			break;

		debuglog("Top of lock loop\n");
		debuglog("Pointer to file lock: %p\n",ifl);

		debuglog("***********************************\n");
		debuglog("Dumping test filelock\n");
		debuglog("***********************************\n");
		dump_filelock(ifl);
		debuglog("***********************************\n");

		/*
		 * XXX: It is conceivable that someone could use the NLM RPC
		 * system to directly access filehandles.  This may be a
		 * security hazard as the filehandle code may bypass normal
		 * file access controls
		 */
		if (fl->filehandle.n_len != ifl->filehandle.n_len)
			continue;
		if (bcmp(fl->filehandle.n_bytes, ifl->filehandle.n_bytes,
			fl->filehandle.n_len))
			continue;

		debuglog("test_nfslock: filehandle match found\n");

		/* Filehandles match, check for region overlap */
		if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
			ifl->client.l_offset, ifl->client.l_len))
			continue;

		debuglog("test_nfslock: Region overlap found"
		    " %llu : %llu -- %llu : %llu\n",
		    fl->client.l_offset,fl->client.l_len,
		    ifl->client.l_offset,ifl->client.l_len);

		/* Regions overlap, check the exclusivity */
		if (!(fl->client.exclusive || ifl->client.exclusive))
			continue;

		debuglog("test_nfslock: Exclusivity failure: %d %d\n",
		    fl->client.exclusive,
		    ifl->client.exclusive);

		if (same_filelock_identity(fl,ifl)) {
			debuglog("test_nfslock: Duplicate id.  Granting\n");
			(*conflicting_fl) = ifl;
			retval = NFS_GRANTED_DUPLICATE;
		} else {
			/* locking attempt fails */
			debuglog("test_nfslock: Lock attempt failed\n");
			debuglog("Desired lock\n");
			dump_filelock(fl);
			debuglog("Conflicting lock\n");
			dump_filelock(ifl);
			(*conflicting_fl) = ifl;
			retval = NFS_DENIED;
		}
	}

	debuglog("Dumping file locks\n");
	debuglog("Exiting test_nfslock\n");

	return (retval);
}
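
/*
 * Usage sketch for the double-pointer quirk described above (illustrative
 * only, not called anywhere): the caller passes the address of its own
 * file_lock pointer so that test_nfslock can aim that pointer at the
 * conflicting lock.
 *
 *	struct file_lock *conflict = NULL;
 *
 *	if (test_nfslock(fl, &conflict) == NFS_DENIED)
 *		dump_filelock(conflict);  // conflict now points at the blocker
 */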

/*
 * lock_nfslock: attempt to create a lock in the NFS lock list
 *
 * This routine tests whether the lock will be granted and then adds
 * the entry to the lock list if so.
 *
 * Argument fl gets modified as its list housekeeping entries get modified
 * upon insertion into the NFS lock list
 *
 * This routine makes several assumptions:
 *    1) It is perfectly happy to grant a duplicate lock from the same pid.
 *       While this seems to be intuitively wrong, it is required for proper
 *       Posix semantics during unlock.  It is absolutely imperative to not
 *       unlock the main lock before the two child locks are established. Thus,
 *       one has to be able to create duplicate locks over an existing lock
 *    2) It currently accepts duplicate locks from the same id,pid
 */

enum nfslock_status
lock_nfslock(struct file_lock *fl)
{
	enum nfslock_status retval;
	struct file_lock *dummy_fl;

	dummy_fl = NULL;

	debuglog("Entering lock_nfslock...\n");

	retval = test_nfslock(fl,&dummy_fl);

	if (retval == NFS_GRANTED || retval == NFS_GRANTED_DUPLICATE) {
		debuglog("Inserting lock...\n");
		dump_filelock(fl);
		LIST_INSERT_HEAD(&nfslocklist_head, fl, nfslocklist);
	}

	debuglog("Exiting lock_nfslock...\n");

	return (retval);
}

/*
 * delete_nfslock: delete an NFS lock list entry
 *
 * This routine is used to delete a lock out of the NFS lock list
 * without regard to status, underlying locks, regions or anything else
 *
 * Note that this routine *does not deallocate memory* of the lock.
 * It just disconnects it from the list.  The lock can then be used
 * by other routines without fear of trashing the list.
 */

enum nfslock_status
delete_nfslock(struct file_lock *fl)
{

	LIST_REMOVE(fl, nfslocklist);

	return (NFS_GRANTED);
}

enum split_status
split_nfslock(exist_lock, unlock_lock, left_lock, right_lock)
	const struct file_lock *exist_lock, *unlock_lock;
	struct file_lock **left_lock, **right_lock;
{
	u_int64_t start1, len1, start2, len2;
	enum split_status spstatus;

	spstatus = region_compare(exist_lock->client.l_offset, exist_lock->client.l_len,
	    unlock_lock->client.l_offset, unlock_lock->client.l_len,
	    &start1, &len1, &start2, &len2);

	if ((spstatus & SPL_LOCK1) != 0) {
		*left_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->filehandle, exist_lock->addr, exist_lock->client_name);
		if (*left_lock == NULL) {
			debuglog("Unable to allocate resource for split 1\n");
			return SPL_RESERR;
		}

		fill_file_lock(*left_lock,
		    exist_lock->client.exclusive, exist_lock->client.svid,
		    start1, len1,
		    exist_lock->nsm_status,
		    exist_lock->status, exist_lock->flags, exist_lock->blocking);
	}

	if ((spstatus & SPL_LOCK2) != 0) {
		*right_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->filehandle, exist_lock->addr, exist_lock->client_name);
		if (*right_lock == NULL) {
			debuglog("Unable to allocate resource for split 2\n");
			if (*left_lock != NULL) {
				deallocate_file_lock(*left_lock);
			}
			return SPL_RESERR;
		}

		fill_file_lock(*right_lock,
		    exist_lock->client.exclusive, exist_lock->client.svid,
		    start2, len2,
		    exist_lock->nsm_status,
		    exist_lock->status, exist_lock->flags, exist_lock->blocking);
	}

	return spstatus;
}

enum nfslock_status
unlock_nfslock(fl, released_lock, left_lock, right_lock)
	const struct file_lock *fl;
	struct file_lock **released_lock;
	struct file_lock **left_lock;
	struct file_lock **right_lock;
{
	struct file_lock *mfl;	/* Matching file lock */
	enum nfslock_status retval;
	enum split_status spstatus;

	debuglog("Entering unlock_nfslock\n");

	*released_lock = NULL;
	*left_lock = NULL;
	*right_lock = NULL;

	retval = NFS_DENIED_NOLOCK;

	debuglog("Attempting to match lock...\n");
	mfl = get_lock_matching_unlock(fl);

	if (mfl != NULL) {
		debuglog("Unlock matched.  Querying for split\n");

		spstatus = split_nfslock(mfl, fl, left_lock, right_lock);

		debuglog("Split returned %d %p %p %p %p\n",spstatus,mfl,fl,*left_lock,*right_lock);
		debuglog("********Split dumps********");
		dump_filelock(mfl);
		dump_filelock(fl);
		dump_filelock(*left_lock);
		dump_filelock(*right_lock);
		debuglog("********End Split dumps********");

		if (spstatus == SPL_RESERR) {
			if (*left_lock != NULL) {
				deallocate_file_lock(*left_lock);
				*left_lock = NULL;
			}

			if (*right_lock != NULL) {
				deallocate_file_lock(*right_lock);
				*right_lock = NULL;
			}

			return NFS_RESERR;
		}

		/* Insert new locks from split if required */
		if (*left_lock != NULL) {
			debuglog("Split left activated\n");
			LIST_INSERT_HEAD(&nfslocklist_head, *left_lock, nfslocklist);
		}

		if (*right_lock != NULL) {
			debuglog("Split right activated\n");
			LIST_INSERT_HEAD(&nfslocklist_head, *right_lock, nfslocklist);
		}

		/* Unlock the lock since it matches identity */
		LIST_REMOVE(mfl, nfslocklist);
		*released_lock = mfl;
		retval = NFS_GRANTED;
	}

	debuglog("Exiting unlock_nfslock\n");

	return retval;
}
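
/*
 * Worked example (illustrative only): if the list holds an established
 * lock covering [0,100) and a matching unlock request arrives for
 * [40,60), split_nfslock() reports SPL_LOCK1|SPL_LOCK2 and allocates a
 * left lock [0,40) and a right lock [60,100).  Both are inserted into
 * the lock list above before the original lock is removed and handed
 * back through *released_lock, which the caller (unlock_partialfilelock)
 * then deallocates after dropping the corresponding hardware lock.
 */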

/*
 * Below here are the routines for manipulating the file lock directly
 * on the disk hardware itself
 */
enum hwlock_status
lock_hwlock(struct file_lock *fl)
{
	struct monfile *imf,*nmf;
	int lflags, flerror;
	fhandle_t fh;

	/* Scan to see if filehandle already present */
	LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
		if ((fl->filehandle.n_len == imf->filehandle.n_len) &&
		    (bcmp(fl->filehandle.n_bytes, imf->filehandle.n_bytes,
			fl->filehandle.n_len) == 0)) {
			/* imf is the correct filehandle */
			break;
		}
	}

	/*
	 * Filehandle already exists (we control the file)
	 * *AND* NFS has already cleared the lock for availability
	 * Grant it and bump the refcount.
	 */
	if (imf != NULL) {
		++(imf->refcount);
		return (HW_GRANTED);
	}

	/* No filehandle found, create and go */
	nmf = malloc(sizeof(struct monfile));
	if (nmf == NULL) {
		debuglog("hwlock resource allocation failure\n");
		return (HW_RESERR);
	}
	nmf->filehandle.n_bytes = malloc(fl->filehandle.n_len);
	if (nmf->filehandle.n_bytes == NULL) {
		debuglog("hwlock resource allocation failure\n");
		free(nmf);
		return (HW_RESERR);
	}

	if (fl->filehandle.n_len > NFS_MAX_FH_SIZE) {
		debuglog("hwlock: bad fh length %d (from %16s): %32s\n",
		    fl->filehandle.n_len, fl->client_name, strerror(errno));
		free(nmf->filehandle.n_bytes);
		free(nmf);
		return (HW_STALEFH);
	}
	fh.fh_len = fl->filehandle.n_len;
	bcopy(fl->filehandle.n_bytes, fh.fh_data, fh.fh_len);

	/* XXX: Is O_RDWR always the correct mode? */
	nmf->fd = fhopen(&fh, O_RDWR);
	if (nmf->fd < 0) {
		debuglog("fhopen failed (from %16s): %32s\n",
		    fl->client_name, strerror(errno));
		free(nmf->filehandle.n_bytes);
		free(nmf);
		switch (errno) {
		case ESTALE:
			return (HW_STALEFH);
		case EROFS:
			return (HW_READONLY);
		default:
			return (HW_RESERR);
		}
	}

	/* File opened correctly, fill the monitor struct */
	nmf->filehandle.n_len = fl->filehandle.n_len;
	bcopy(fl->filehandle.n_bytes, nmf->filehandle.n_bytes, fl->filehandle.n_len);
	nmf->refcount = 1;
	nmf->exclusive = fl->client.exclusive;

	lflags = (nmf->exclusive == 1) ?
	    (LOCK_EX | LOCK_NB) : (LOCK_SH | LOCK_NB);

	flerror = flock(nmf->fd, lflags);

	if (flerror != 0) {
		debuglog("flock failed (from %16s): %32s\n",
		    fl->client_name, strerror(errno));
		close(nmf->fd);
		free(nmf->filehandle.n_bytes);
		free(nmf);
		switch (errno) {
		case EAGAIN:
			return (HW_DENIED);
		case ESTALE:
			return (HW_STALEFH);
		case EROFS:
			return (HW_READONLY);
		default:
			return (HW_RESERR);
		}
	}

	/* File opened and locked */
	LIST_INSERT_HEAD(&monfilelist_head, nmf, monfilelist);

	debuglog("flock succeeded (from %16s)\n", fl->client_name);
	return (HW_GRANTED);
}

enum hwlock_status
unlock_hwlock(const struct file_lock *fl)
{
	struct monfile *imf;

	debuglog("Entering unlock_hwlock\n");
	debuglog("Entering loop iteration\n");

	/* Scan to see if filehandle already present */
	LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
		if ((fl->filehandle.n_len == imf->filehandle.n_len) &&
		    (bcmp(fl->filehandle.n_bytes, imf->filehandle.n_bytes,
			fl->filehandle.n_len) == 0)) {
			/* imf is the correct filehandle */
			break;
		}
	}

	debuglog("Completed iteration.  Proceeding\n");

	if (imf == NULL) {
		/* No lock found */
		debuglog("Exiting unlock_hwlock (HW_DENIED_NOLOCK)\n");
		return (HW_DENIED_NOLOCK);
	}

	/* Lock found */
	--imf->refcount;

	if (imf->refcount < 0) {
		debuglog("Negative hardware reference count\n");
	}

	if (imf->refcount <= 0) {
		close(imf->fd);
		LIST_REMOVE(imf, monfilelist);
		free(imf->filehandle.n_bytes);
		free(imf);
	}
	debuglog("Exiting unlock_hwlock (HW_GRANTED)\n");
	return (HW_GRANTED);
}

enum hwlock_status
test_hwlock(fl, conflicting_fl)
	const struct file_lock *fl __unused;
	struct file_lock **conflicting_fl __unused;
{

	/*
	 * XXX: lock tests on hardware are not required until
	 * true partial file testing is done on the underlying file
	 */
	return (HW_RESERR);
}


/*
 * Below here are routines for manipulating blocked lock requests
 * They should only be called from the XXX_partialfilelock routines
 * if at all possible
 */

void
add_blockingfilelock(struct file_lock *fl)
{
	struct file_lock *ifl, *nfl;

	debuglog("Entering add_blockingfilelock\n");

	/*
	 * Check for a duplicate lock request.
	 * If found, deallocate the older request.
	 */
	ifl = LIST_FIRST(&blockedlocklist_head);
	for (; ifl != NULL; ifl = nfl) {
		debuglog("Pointer to file lock: %p\n",ifl);
		debuglog("****Dump of ifl****\n");
		dump_filelock(ifl);
		debuglog("*******************\n");

		nfl = LIST_NEXT(ifl, nfslocklist);

		if (fl->filehandle.n_len != ifl->filehandle.n_len)
			continue;
		if (bcmp(fl->filehandle.n_bytes, ifl->filehandle.n_bytes,
			fl->filehandle.n_len))
			continue;

		/* Filehandles match, check region */
		if ((fl->client.l_offset != ifl->client.l_offset) ||
		    (fl->client.l_len != ifl->client.l_len))
			continue;

		/* Regions match, check the identity */
		if (!same_filelock_identity(fl,ifl))
			continue;

		debuglog("add_blockingfilelock: removing duplicate lock request.\n");
		remove_blockingfilelock(ifl);
		deallocate_file_lock(ifl);
		break;
	}

	/*
	 * Clear the blocking flag so that it can be reused without
	 * adding it to the blocking queue a second time
	 */

	fl->blocking = 0;
	LIST_INSERT_HEAD(&blockedlocklist_head, fl, nfslocklist);

	debuglog("Exiting add_blockingfilelock\n");
}

void
remove_blockingfilelock(struct file_lock *fl)
{

	debuglog("Entering remove_blockingfilelock\n");

	LIST_REMOVE(fl, nfslocklist);

	debuglog("Exiting remove_blockingfilelock\n");
}

void
clear_blockingfilelock(const char *hostname)
{
	struct file_lock *ifl,*nfl;

	/*
	 * Normally, LIST_FOREACH is called for, but since
	 * the current element *is* the iterator, deleting it
	 * would mess up the iteration.  Thus, a next element
	 * must be used explicitly
	 */

	ifl = LIST_FIRST(&blockedlocklist_head);

	while (ifl != NULL) {
		nfl = LIST_NEXT(ifl, nfslocklist);

		if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
			remove_blockingfilelock(ifl);
			deallocate_file_lock(ifl);
		}

		ifl = nfl;
	}
}

int need_retry_blocked_locks = 0; /* need to call retry_blockingfilelocklist() */

void
retry_blockingfilelocklist(netobj *fh)
{
	/*
	 * If fh is given, then retry just the locks with the
	 * same filehandle in the blocked list.
	 * Otherwise, simply retry all locks in the blocked list.
	 */
	struct file_lock *ifl, *nfl, *pfl;	/* Iterator */
	enum partialfilelock_status pflstatus;
	int rv;

	debuglog("Entering retry_blockingfilelocklist\n");

	need_retry_blocked_locks = 0;

	pfl = NULL;
	ifl = LIST_FIRST(&blockedlocklist_head);
	debuglog("Iterator choice %p\n",ifl);

	while (ifl != NULL) {
		/*
		 * SUBTLE BUG: The next element must be worked out before the
		 * current element has been moved
		 */
		nfl = LIST_NEXT(ifl, nfslocklist);
		debuglog("Iterator choice %p\n",ifl);
		debuglog("Prev iterator choice %p\n",pfl);
		debuglog("Next iterator choice %p\n",nfl);

		/* if given a filehandle, only retry locks for the same filehandle */
		if (fh && !same_netobj(fh, &ifl->filehandle)) {
			ifl = nfl;
			continue;
		}

		/*
		 * SUBTLE BUG: The file_lock must be removed from the
		 * old list so that its list pointers get disconnected
		 * before being allowed to participate in the new list
		 * which will automatically add it in if necessary.
		 */

		LIST_REMOVE(ifl, nfslocklist);
		pflstatus = lock_partialfilelock(ifl);

		if (pflstatus == PFL_GRANTED || pflstatus == PFL_GRANTED_DUPLICATE) {
			debuglog("Granted blocked lock\n");
			/* lock granted and is now being used */
			rv = send_granted(ifl, 0);
			if (rv) {
				/*
				 * Uh oh... the NLM_GRANTED message failed.
				 * About the only thing we can do is drop the lock.
				 * Note: this could be bad if the error was only
				 * transient.  Hopefully, if the client is still
				 * waiting for the lock, they will resend the request.
				 */
				do_unlock(ifl);
				/* ifl is NO LONGER VALID AT THIS POINT */
			}
		} else if (pflstatus == PFL_HWDENIED_STALEFH) {
			/*
			 * Uh oh...
			 * It would be nice if we could inform the client of
			 * this error.  Unfortunately, there's no way to do
			 * that in the NLM protocol (can't send "granted"
			 * message with an error and there's no "never going
			 * to be granted" message).
			 *
			 * Since there's no chance of this blocked request ever
			 * succeeding, we drop the lock request rather than
			 * needlessly keeping it around just to rot forever in
			 * the blocked lock list.
			 *
			 * Hopefully, if the client is still waiting for the lock,
			 * they will resend the request (and get an error then).
			 *
			 * XXX Note: PFL_HWDENIED_READONLY could potentially
			 * be handled this way as well, although that would
			 * only be an issue if a file system changed from
			 * read-write to read-only out from under a blocked
			 * lock request, and that's far less likely than a
			 * file disappearing out from under such a request.
			 */
			deallocate_file_lock(ifl);
			/* ifl is NO LONGER VALID AT THIS POINT */
		} else {
			/* Reinsert lock back into same place in blocked list */
			debuglog("Replacing blocked lock\n");
			if (pfl != NULL)
				LIST_INSERT_AFTER(pfl, ifl, nfslocklist);
			else
				/* ifl is the only elem. in the list */
				LIST_INSERT_HEAD(&blockedlocklist_head, ifl, nfslocklist);
		}

		if (pflstatus == PFL_GRANTED || pflstatus == PFL_GRANTED_DUPLICATE ||
		    pflstatus == PFL_HWDENIED_STALEFH) {
			/* If ifl was permanently removed from the list, (e.g it */
			/* was granted or dropped), pfl should remain where it's at. */
		} else {
			/* If ifl was left in the list, (e.g it was reinserted back */
			/* in place), pfl should simply be moved forward to be ifl */
			pfl = ifl;
		}
		/* Valid increment behavior regardless of state of ifl */
		ifl = nfl;
	}

	debuglog("Exiting retry_blockingfilelocklist\n");
}

/*
 * Below here are routines associated with manipulating all
 * aspects of the partial file locking system (list, hardware, etc.)
 */

/*
 * Please note that lock monitoring must be done at this level which
 * keeps track of *individual* lock requests on lock and unlock
 *
 * XXX: Split unlocking is going to make the unlock code miserable
 */

/*
 * lock_partialfilelock:
 *
 * Argument fl gets modified as its list housekeeping entries get modified
 * upon insertion into the NFS lock list
 *
 * This routine makes several assumptions:
 *    1) It (will) pass locks through to flock to lock the entire underlying file
 *       and then parcel out NFS locks if it gets control of the file.
 *       This matches the old rpc.lockd file semantics (except where it
 *       is now more correct).  It is the safe solution, but will cause
 *       overly restrictive blocking if someone is trying to use the
 *       underlying files without using NFS.  This appears to be an
 *       acceptable tradeoff since most people use standalone NFS servers.
 *       XXX: The right solution is probably kevent combined with fcntl
 *
 *    2) Nothing modifies the lock lists between testing and granting
 *       I have no idea whether this is a useful assumption or not
 */

enum partialfilelock_status
lock_partialfilelock(struct file_lock *fl)
{
	enum partialfilelock_status retval;
	enum nfslock_status lnlstatus;
	enum hwlock_status hwstatus;

	debuglog("Entering lock_partialfilelock\n");

	retval = PFL_DENIED;

	/*
	 * Execute the NFS lock first, if possible, as it is significantly
	 * easier and less expensive to undo than the filesystem lock
	 */

	lnlstatus = lock_nfslock(fl);

	switch (lnlstatus) {
	case NFS_GRANTED:
	case NFS_GRANTED_DUPLICATE:
		/*
		 * At this point, the NFS lock is allocated and active.
		 * Remember to clean it up if the hardware lock fails
		 */
		hwstatus = lock_hwlock(fl);

		switch (hwstatus) {
		case HW_GRANTED:
		case HW_GRANTED_DUPLICATE:
			debuglog("HW GRANTED\n");
			/*
			 * XXX: Fixme: Check hwstatus for duplicate when
			 * true partial file locking and accounting is
			 * done on the hardware
			 */
			if (lnlstatus == NFS_GRANTED_DUPLICATE) {
				retval = PFL_GRANTED_DUPLICATE;
			} else {
				retval = PFL_GRANTED;
			}
			if (fl->flags & LOCK_MON)
				monitor_lock_host_by_name(fl->client_name);
			break;
		case HW_RESERR:
			debuglog("HW RESERR\n");
			retval = PFL_HWRESERR;
			break;
		case HW_DENIED:
			debuglog("HW DENIED\n");
			retval = PFL_HWDENIED;
			break;
		case HW_DENIED_NOLOCK:
			debuglog("HW DENIED NOLOCK\n");
			retval = PFL_HWDENIED_NOLOCK;
			break;
		case HW_STALEFH:
			debuglog("HW STALE FH\n");
			retval = PFL_HWDENIED_STALEFH;
			break;
		case HW_READONLY:
			debuglog("HW READONLY\n");
			retval = PFL_HWDENIED_READONLY;
			break;
		default:
			debuglog("Unmatched hwstatus %d\n",hwstatus);
			break;
		}

		if (retval != PFL_GRANTED &&
		    retval != PFL_GRANTED_DUPLICATE) {
			/* Clean up the NFS lock */
			debuglog("Deleting trial NFS lock\n");
			delete_nfslock(fl);
		}
		break;
	case NFS_DENIED:
		retval = PFL_NFSDENIED;
		break;
	case NFS_RESERR:
		retval = PFL_NFSRESERR;
		break;	/* fix: don't fall through into the default case */
	default:
		debuglog("Unmatched lnlstatus %d\n", lnlstatus);
		retval = PFL_NFSDENIED_NOLOCK;
		break;
	}

	/*
	 * By the time fl reaches here, it is completely free again on
	 * failure.  The NFS lock done before attempting the
	 * hardware lock has been backed out
	 */

	if (retval == PFL_NFSDENIED || retval == PFL_HWDENIED) {
		/* One last chance to check the lock */
		if (fl->blocking == 1) {
			if (retval == PFL_NFSDENIED) {
				/* Queue the lock */
				debuglog("BLOCKING LOCK RECEIVED\n");
				retval = PFL_NFSBLOCKED;
				add_blockingfilelock(fl);
				dump_filelock(fl);
			} else {
				/* retval is okay as PFL_HWDENIED */
				debuglog("BLOCKING LOCK DENIED IN HARDWARE\n");
				dump_filelock(fl);
			}
		} else {
			/* Leave retval alone, it's already correct */
			debuglog("Lock denied.  Non-blocking failure\n");
			dump_filelock(fl);
		}
	}

	debuglog("Exiting lock_partialfilelock\n");

	return retval;
}

/*
 * unlock_partialfilelock:
 *
 * Given a file_lock, unlock all locks which match.
 *
 * Note that a given lock might have to unlock ITSELF!  See
 * clear_partialfilelock for example.
 */

enum partialfilelock_status
unlock_partialfilelock(const struct file_lock *fl)
{
	struct file_lock *lfl,*rfl,*releasedfl,*selffl;
	enum partialfilelock_status retval;
	enum nfslock_status unlstatus;
	enum hwlock_status unlhwstatus, lhwstatus;

	debuglog("Entering unlock_partialfilelock\n");

	selffl = NULL;
	lfl = NULL;
	rfl = NULL;
	releasedfl = NULL;
	retval = PFL_DENIED;

	/*
	 * There are significant overlap and atomicity issues
	 * with partially releasing a lock.  For example, releasing
	 * part of an NFS shared lock does *not* always release the
	 * corresponding part of the file since there is only one
	 * rpc.lockd UID but multiple users could be requesting it
	 * from NFS.  Also, an unlock request should never allow
	 * another process to gain a lock on the remaining parts.
	 * ie. Always apply the new locks before releasing the
	 * old one
	 */

	/*
	 * Loop is required since multiple little locks
	 * can be allocated and then deallocated with one
	 * big unlock.
	 *
	 * The loop is required to be here so that the nfs &
	 * hw subsystems do not need to communicate with
	 * one another
	 */

	do {
		debuglog("Value of releasedfl: %p\n",releasedfl);
		/* lfl&rfl are created *AND* placed into the NFS lock list if required */
		unlstatus = unlock_nfslock(fl, &releasedfl, &lfl, &rfl);
		debuglog("Value of releasedfl: %p\n",releasedfl);

		/* XXX: This is grungy.  It should be refactored to be cleaner */
		if (lfl != NULL) {
			lhwstatus = lock_hwlock(lfl);
			if (lhwstatus != HW_GRANTED &&
			    lhwstatus != HW_GRANTED_DUPLICATE) {
				debuglog("HW duplicate lock failure for left split\n");
			}
			if (lfl->flags & LOCK_MON)
				monitor_lock_host_by_name(lfl->client_name);
		}

		if (rfl != NULL) {
			lhwstatus = lock_hwlock(rfl);
			if (lhwstatus != HW_GRANTED &&
			    lhwstatus != HW_GRANTED_DUPLICATE) {
				debuglog("HW duplicate lock failure for right split\n");
			}
			if (rfl->flags & LOCK_MON)
				monitor_lock_host_by_name(rfl->client_name);
		}

		switch (unlstatus) {
		case NFS_GRANTED:
			/* Attempt to unlock on the hardware */
			debuglog("NFS unlock granted.  Attempting hardware unlock\n");

			/* This call *MUST NOT* unlock the two newly allocated locks */
			unlhwstatus = unlock_hwlock(fl);
			debuglog("HW unlock returned with code %d\n",unlhwstatus);

			switch (unlhwstatus) {
			case HW_GRANTED:
				debuglog("HW unlock granted\n");
				if (releasedfl->flags & LOCK_MON)
					unmonitor_lock_host(releasedfl->client_name);
				retval = PFL_GRANTED;
				break;
			case HW_DENIED_NOLOCK:
				/* Huh?!?!  This shouldn't happen */
				debuglog("HW unlock denied no lock\n");
				retval = PFL_HWRESERR;
				/* Break out of do-while */
				unlstatus = NFS_RESERR;
				break;
			default:
				debuglog("HW unlock failed\n");
				retval = PFL_HWRESERR;
				/* Break out of do-while */
				unlstatus = NFS_RESERR;
				break;
			}

			debuglog("Exiting with status retval: %d\n",retval);

			// XXX sending granted messages before unlock response
			// XXX causes unlock response to be corrupted?
			// XXX Workaround is to move this to nlm_prot_svc.c
			// XXX after the unlock response is sent.
			// retry_blockingfilelocklist();
			need_retry_blocked_locks = 1;
			break;
		case NFS_DENIED_NOLOCK:
			retval = PFL_GRANTED;
			debuglog("All locks cleaned out\n");
			break;
		default:
			retval = PFL_NFSRESERR;
			debuglog("NFS unlock failure\n");
			dump_filelock(fl);
			break;
		}

		if (releasedfl != NULL) {
			if (fl == releasedfl) {
				/*
				 * XXX: YECHHH!!!  Attempt to unlock self succeeded
				 * but we can't deallocate the space yet.  This is what
				 * happens when you don't write malloc and free together
				 */
				debuglog("Attempt to unlock self\n");
				selffl = releasedfl;
			} else {
				/*
				 * XXX: this deallocation *still* needs to migrate closer
				 * to the allocation code way up in get_lock or the allocation
				 * code needs to migrate down (violation of "When you write
				 * malloc you must write free")
				 */

				deallocate_file_lock(releasedfl);
			}
		}

	} while (unlstatus == NFS_GRANTED);

	if (selffl != NULL) {
		/*
		 * This statement wipes out the incoming file lock (fl)
		 * in spite of the fact that it is declared const
		 */
		debuglog("WARNING!  Destroying incoming lock pointer\n");
		deallocate_file_lock(selffl);
	}

	debuglog("Exiting unlock_partialfilelock\n");

	return retval;
}

/*
 * clear_partialfilelock
 *
 * Normally called in response to statd state number change.
 * Wipe out all locks held by a host.  As a bonus, the act of
 * doing so should automatically clear their statd entries and
 * unmonitor the host.
 */

void
clear_partialfilelock(const char *hostname)
{
	struct file_lock *ifl, *nfl;
	enum partialfilelock_status pfsret;

	/* Clear blocking file lock list */
	clear_blockingfilelock(hostname);

	/* do all required unlocks */
	/* Note that unlock can smash the current pointer to a lock */

	/*
	 * Normally, LIST_FOREACH is called for, but since
	 * the current element *is* the iterator, deleting it
	 * would mess up the iteration.  Thus, a next element
	 * must be used explicitly
	 */
restart:
	ifl = LIST_FIRST(&nfslocklist_head);

	while (ifl != NULL) {
		nfl = LIST_NEXT(ifl, nfslocklist);

		if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
			/* Unlock destroys ifl out from underneath */
			pfsret = unlock_partialfilelock(ifl);
			if (pfsret != PFL_GRANTED) {
				/* Uh oh... there was some sort of problem. */
				/* If we restart the loop, we may get */
				/* stuck here forever getting errors. */
				/* So, let's just abort the whole scan. */
				syslog(LOG_WARNING, "lock clearing for %s failed: %d",
				    hostname, pfsret);
				break;
			}
			/* ifl is NO LONGER VALID AT THIS POINT */
			/* Note: the unlock may deallocate several existing locks. */
			/* Therefore, we need to restart the scanning of the list, */
			/* because nfl could be pointing to a freed lock. */
			goto restart;
		}
		ifl = nfl;
	}
}

/*
 * test_partialfilelock:
 */
enum partialfilelock_status
test_partialfilelock(const struct file_lock *fl,
    struct file_lock **conflicting_fl)
{
	enum partialfilelock_status retval;
	enum nfslock_status teststatus;

	debuglog("Entering testpartialfilelock...\n");

	retval = PFL_DENIED;

	teststatus = test_nfslock(fl, conflicting_fl);
	debuglog("test_partialfilelock: teststatus %d\n",teststatus);

	if (teststatus == NFS_GRANTED || teststatus == NFS_GRANTED_DUPLICATE) {
		/* XXX: Add the underlying filesystem locking code */
		retval = (teststatus == NFS_GRANTED) ?
		    PFL_GRANTED : PFL_GRANTED_DUPLICATE;
		debuglog("Dumping locks...\n");
		dump_filelock(fl);
		dump_filelock(*conflicting_fl);
		debuglog("Done dumping locks...\n");
	} else {
		retval = PFL_NFSDENIED;
		debuglog("NFS test denied.\n");
		dump_filelock(fl);
		debuglog("Conflicting.\n");
		dump_filelock(*conflicting_fl);
	}

	debuglog("Exiting testpartialfilelock...\n");

	return retval;
}
1856
1857 /*
1858 * Below here are routines associated with translating the partial file locking
1859 * codes into useful codes to send back to the NFS RPC messaging system
1860 */
1861
1862 /*
1863 * These routines translate the (relatively) useful return codes back into
1864 * the few return codes which the NLM subsystem wishes to transmit.
1865 */
1866
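/*
 * Quick reference for the mapping implemented by the switch statements
 * below (condensed; not every PFL code is listed):
 *
 *	PFL_GRANTED, PFL_GRANTED_DUPLICATE -> nlm4_granted / nlm_granted
 *	PFL_NFSDENIED, PFL_HWDENIED        -> nlm4_denied / nlm_denied
 *	PFL_NFSBLOCKED, PFL_HWBLOCKED      -> nlm4_blocked / nlm_blocked
 *	PFL_NFSRESERR, PFL_HWRESERR        -> nlm4_denied_nolocks / nlm_denied_nolocks
 *	anything unrecognized              -> nlm4_failed / nlm_denied
 */
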
1867 enum nlm4_stats
1868 do_test(struct file_lock *fl, struct file_lock **conflicting_fl)
1869 {
1870 enum partialfilelock_status pfsret;
1871 enum nlm4_stats retval;
1872
1873 debuglog("Entering do_test...\n");
1874
1875 pfsret = test_partialfilelock(fl,conflicting_fl);
1876
1877 switch (pfsret) {
1878 case PFL_GRANTED:
1879 debuglog("PFL test lock granted\n");
1880 dump_filelock(fl);
1881 dump_filelock(*conflicting_fl);
1882 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1883 break;
1884 case PFL_GRANTED_DUPLICATE:
1885 debuglog("PFL test lock granted--duplicate id detected\n");
1886 dump_filelock(fl);
1887 dump_filelock(*conflicting_fl);
1888 debuglog("Clearing conflicting_fl for call semantics\n");
1889 *conflicting_fl = NULL;
1890 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1891 break;
1892 case PFL_NFSDENIED:
1893 case PFL_HWDENIED:
1894 debuglog("PFL test lock denied\n");
1895 dump_filelock(fl);
1896 dump_filelock(*conflicting_fl);
1897 retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
1898 break;
1899 case PFL_NFSRESERR:
1900 case PFL_HWRESERR:
1901 debuglog("PFL test lock resource fail\n");
1902 dump_filelock(fl);
1903 dump_filelock(*conflicting_fl);
1904 retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
1905 break;
1906 default:
1907 debuglog("PFL test lock *FAILED*\n");
1908 dump_filelock(fl);
1909 dump_filelock(*conflicting_fl);
1910 retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
1911 break;
1912 }
1913
1914 debuglog("Exiting do_test...\n");
1915
1916 return retval;
1917 }
1918
1919 /*
1920 * do_lock: Try to acquire a lock
1921 *
1922 * This routine makes a distinction between NLM versions. I am pretty
1923 * convinced that this should be abstracted out and bounced up a level
1924 */
1925
1926 enum nlm4_stats
1927 do_lock(struct file_lock *fl)
1928 {
1929 enum partialfilelock_status pfsret;
1930 enum nlm4_stats retval;
1931
1932 debuglog("Entering do_lock...\n");
1933
1934 pfsret = lock_partialfilelock(fl);
1935
1936 switch (pfsret) {
1937 case PFL_GRANTED:
1938 debuglog("PFL lock granted");
1939 dump_filelock(fl);
1940 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1941 break;
1942 case PFL_GRANTED_DUPLICATE:
1943 debuglog("PFL lock granted--duplicate id detected");
1944 dump_filelock(fl);
1945 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1946 break;
1947 case PFL_NFSDENIED:
1948 case PFL_HWDENIED:
1949 debuglog("PFL_NFS lock denied");
1950 dump_filelock(fl);
1951 retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
1952 break;
1953 case PFL_NFSBLOCKED:
1954 case PFL_HWBLOCKED:
1955 debuglog("PFL_NFS blocking lock denied. Queued.\n");
1956 dump_filelock(fl);
1957 retval = (fl->flags & LOCK_V4) ? nlm4_blocked : nlm_blocked;
1958 break;
1959 case PFL_NFSRESERR:
1960 case PFL_HWRESERR:
1961 case PFL_NFSDENIED_NOLOCK:
1962 case PFL_HWDENIED_NOLOCK:
1963 debuglog("PFL lock resource allocation fail\n");
1964 dump_filelock(fl);
1965 retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
1966 break;
1967 case PFL_HWDENIED_STALEFH:
1968 debuglog("PFL_NFS lock denied STALEFH");
1969 dump_filelock(fl);
1970 retval = (fl->flags & LOCK_V4) ? nlm4_stale_fh : nlm_denied;
1971 break;
1972 case PFL_HWDENIED_READONLY:
1973 debuglog("PFL_NFS lock denied READONLY");
1974 dump_filelock(fl);
1975 retval = (fl->flags & LOCK_V4) ? nlm4_rofs : nlm_denied;
1976 break;
1977 default:
1978 debuglog("PFL lock *FAILED*");
1979 dump_filelock(fl);
1980 retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
1981 break;
1982 }
1983
1984 debuglog("Exiting do_lock...\n");
1985
1986 return retval;
1987 }
1988
1989 enum nlm4_stats
1990 do_unlock(struct file_lock *fl)
1991 {
1992 enum partialfilelock_status pfsret;
1993 enum nlm4_stats retval;
1994
1995 debuglog("Entering do_unlock...\n");
1996 pfsret = unlock_partialfilelock(fl);
1997
1998 switch (pfsret) {
1999 case PFL_GRANTED:
2000 debuglog("PFL unlock granted");
2001 dump_filelock(fl);
2002 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
2003 break;
2004 case PFL_NFSDENIED:
2005 case PFL_HWDENIED:
2006 debuglog("PFL_NFS unlock denied");
2007 dump_filelock(fl);
2008 retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
2009 break;
2010 case PFL_NFSDENIED_NOLOCK:
2011 case PFL_HWDENIED_NOLOCK:
2012 debuglog("PFL_NFS no lock found\n");
2013 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
2014 break;
2015 case PFL_NFSRESERR:
2016 case PFL_HWRESERR:
2017 debuglog("PFL unlock resource failure");
2018 dump_filelock(fl);
2019 retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
2020 break;
2021 default:
2022 debuglog("PFL unlock *FAILED*");
2023 dump_filelock(fl);
2024 retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
2025 break;
2026 }
2027
2028 debuglog("Exiting do_unlock...\n");
2029
2030 return retval;
2031 }
2032
2033 /*
2034 * do_clear
2035 *
2036 * This routine is a trivial pass-through, since a clear has no return
2037 * code to translate. It is here for completeness in case someone *does*
2038 * need to do return codes later. A decent compiler should optimize this away.
2039 */
2040
2041 void
2042 do_clear(const char *hostname)
2043 {
2044
2045 clear_partialfilelock(hostname);
2046 }
2047
2048 /*
2049 * The following routines are all called from the code which the
2050 * RPC layer invokes
2051 */
2052
2053 /*
2054 * testlock(): inform the caller if the requested lock would be granted
2055 *
2056 * returns NULL if the lock would be granted
2057 * returns pointer to a conflicting nlm4_holder if not
2058 */
2059
2060 struct nlm4_holder *
2061 testlock(struct nlm4_lock *lock, bool_t exclusive, int flags __unused)
2062 {
2063 struct file_lock test_fl, *conflicting_fl;
2064
2065 if (lock->fh.n_len > NFS_MAX_FH_SIZE) {
2066 debuglog("received fhandle size %d, max size %d",
2067 lock->fh.n_len, NFS_MAX_FH_SIZE);
2068 return NULL;
2069 }
2070
2071 bzero(&test_fl, sizeof(test_fl));
2072
2073 test_fl.filehandle.n_len = lock->fh.n_len;
2074 test_fl.filehandle.n_bytes = lock->fh.n_bytes;
2075 copy_nlm4_lock_to_nlm4_holder(lock, exclusive, &test_fl.client);
2076
2077 siglock();
2078 do_test(&test_fl, &conflicting_fl);
2079
2080 if (conflicting_fl == NULL) {
2081 debuglog("No conflicting lock found\n");
2082 sigunlock();
2083 return NULL;
2084 } else {
2085 debuglog("Found conflicting lock\n");
2086 dump_filelock(conflicting_fl);
2087 sigunlock();
2088 return (&conflicting_fl->client);
2089 }
2090 }
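
/*
 * Usage sketch (hypothetical caller; the real NLM service stubs live in
 * lockd's RPC dispatch code, and the variable names here are illustrative):
 *
 *	struct nlm4_holder *holder;
 *
 *	holder = testlock(&args->alock, args->exclusive, LOCK_V4);
 *	if (holder == NULL)
 *		result->stat.stat = nlm4_granted;
 *	else
 *		result->stat.nlm4_testrply_u.holder = *holder;
 */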
2091
2092 /*
2093 * getlock: try to acquire the lock.
2094 * If the file is already locked and we can sleep, put the lock in the list with
2095 * status LKST_WAITING; it'll be processed later.
2096 * Otherwise try to lock. If we're allowed to block, fork a child which
2097 * will do the blocking lock.
2098 */
2099
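/*
 * Condensed sketch of the lock object's lifecycle in this routine
 * (error paths omitted; see the code below for the full sequence):
 *
 *	newfl = allocate_file_lock(&lckarg->alock.oh, &lckarg->alock.fh, ...);
 *	fill_file_lock(newfl, ...);
 *	siglock();
 *	retval = do_lock(newfl);	// may queue newfl on a lock list
 *	sigunlock();
 *	if (retval is neither granted nor blocked)
 *		deallocate_file_lock(newfl);
 */
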
2100 enum nlm4_stats
2101 getlock(nlm4_lockargs *lckarg, struct svc_req *rqstp, const int flags)
2102 {
2103 struct file_lock *newfl;
2104 enum nlm4_stats retval;
2105
2106 debuglog("Entering getlock...\n");
2107
2108 if (grace_expired == 0 && lckarg->reclaim == 0)
2109 return (flags & LOCK_V4) ?
2110 nlm4_denied_grace_period : nlm_denied_grace_period;
2111
2112 if (lckarg->alock.fh.n_len > NFS_MAX_FH_SIZE) {
2113 debuglog("received fhandle size %d, max size %d",
2114 lckarg->alock.fh.n_len, NFS_MAX_FH_SIZE);
2115 return (flags & LOCK_V4) ? nlm4_failed : nlm_denied;
2116 }
2117
2118 /* allocate new file_lock for this request */
2119 newfl = allocate_file_lock(&lckarg->alock.oh, &lckarg->alock.fh,
2120 (struct sockaddr *)svc_getcaller(rqstp->rq_xprt),
2121 lckarg->alock.caller_name);
2122 if (newfl == NULL) {
2123 syslog(LOG_NOTICE, "lock allocate failed: %s", strerror(errno));
2124 /* failed */
2125 return (flags & LOCK_V4) ?
2126 nlm4_denied_nolocks : nlm_denied_nolocks;
2127 }
2128
2129 fill_file_lock(newfl,
2130 lckarg->exclusive, lckarg->alock.svid, lckarg->alock.l_offset,
2131 lckarg->alock.l_len,
2132 lckarg->state, 0, flags, lckarg->block);
2133
2134 /*
2135 * newfl is now fully constructed and deallocate_file_lock
2136 * can now be used to delete it
2137 */
2138
2139 siglock();
2140 debuglog("Pointer to new lock is %p\n",newfl);
2141
2142 retval = do_lock(newfl);
2143
2144 debuglog("Pointer to new lock is %p\n",newfl);
2145 sigunlock();
2146
2147 switch (retval)
2148 {
2149 case nlm4_granted:
2150 /* case nlm_granted: is the same as nlm4_granted */
2151 /* do_mon(lckarg->alock.caller_name); */
2152 break;
2153 case nlm4_blocked:
2154 /* case nlm_blocked: is the same as nlm4_blocked */
2155 /* do_mon(lckarg->alock.caller_name); */
2156 break;
2157 default:
2158 deallocate_file_lock(newfl);
2159 break;
2160 }
2161
2162 debuglog("Exiting getlock...\n");
2163
2164 return retval;
2165 }
2166
2167
2168 /* unlock a filehandle */
2169 enum nlm4_stats
2170 unlock(nlm4_lock *lock, const int flags)
2171 {
2172 struct file_lock fl;
2173 enum nlm4_stats err;
2174
2175 debuglog("Entering unlock...\n");
2176
2177 if (lock->fh.n_len > NFS_MAX_FH_SIZE) {
2178 debuglog("received fhandle size %d, max size %d",
2179 lock->fh.n_len, NFS_MAX_FH_SIZE);
2180 return (flags & LOCK_V4) ? nlm4_failed : nlm_denied;
2181 }
2182
2183 siglock();
2184
2185 bzero(&fl,sizeof(struct file_lock));
2186 fl.filehandle.n_len = lock->fh.n_len;
2187 fl.filehandle.n_bytes = lock->fh.n_bytes;
2188
2189 copy_nlm4_lock_to_nlm4_holder(lock, 0, &fl.client);
2190
2191 err = do_unlock(&fl);
2192
2193 sigunlock();
2194
2195 debuglog("Exiting unlock...\n");
2196
2197 return err;
2198 }
2199
2200 /* cancel a blocked lock request */
2201 enum nlm4_stats
2202 cancellock(nlm4_cancargs *args, const int flags)
2203 {
2204 struct file_lock *ifl, *nfl;
2205 enum nlm4_stats err;
2206
2207 debuglog("Entering cancellock...\n");
2208
2209 if (args->alock.fh.n_len > NFS_MAX_FH_SIZE) {
2210 debuglog("received fhandle size %d, max size %d",
2211 args->alock.fh.n_len, NFS_MAX_FH_SIZE);
2212 return (flags & LOCK_V4) ? nlm4_failed : nlm_denied;
2213 }
2214
2215 siglock();
2216
2217 err = (flags & LOCK_V4) ? nlm4_denied : nlm_denied;
2218
2219 /*
2220 * scan blocked lock list for matching request and remove/destroy
2221 */
2222 ifl = LIST_FIRST(&blockedlocklist_head);
2223 for ( ; ifl != NULL; ifl = nfl) {
2224 nfl = LIST_NEXT(ifl, nfslocklist);
2225
2226 /* compare lock fh - filehandle */
2227 if (!same_netobj(&args->alock.fh, &ifl->filehandle))
2228 continue;
2229
2230 /* compare lock caller_name - client_name */
2231 if (strncmp(args->alock.caller_name, ifl->client_name, SM_MAXSTRLEN))
2232 continue;
2233
2234 /* Note: don't compare cookie - client_cookie */
2235 /* The cookie may be specific to the cancel request */
2236 /* and not be the same as the one in the original lock request. */
2237
2238 /* compare lock oh - client.oh */
2239 if (!same_netobj(&args->alock.oh, &ifl->client.oh))
2240 continue;
2241
2242 /* compare lock svid - client.svid */
2243 if (args->alock.svid != ifl->client.svid)
2244 continue;
2245
2246 /* compare lock l_offset - client.l_offset */
2247 if (args->alock.l_offset != ifl->client.l_offset)
2248 continue;
2249
2250 /* compare lock l_len - client.l_len */
2251 if (args->alock.l_len != ifl->client.l_len)
2252 continue;
2253
2254 /* compare exclusive - client.exclusive */
2255 if (args->exclusive != ifl->client.exclusive)
2256 continue;
2257
2258 /* got it */
2259 remove_blockingfilelock(ifl);
2260 deallocate_file_lock(ifl);
2261 err = (flags & LOCK_V4) ? nlm4_granted : nlm_granted;
2262 break;
2263 }
2264
2265 sigunlock();
2266
2267 debuglog("Exiting cancellock...\n");
2268
2269 return err;
2270 }
2271
2272
2273 /*
2274 * XXX: The following monitor/unmonitor routines
2275 * have not been extensively tested (ie. no regression
2276 * script exists like for the locking sections)
2277 */
2278
2279 /*
2280 * Find a lock host on a queue. If found:
2281 * bump the ref,
2282 * bump the access time,
2283 * dequeue it from the queue it was found on,
2284 * enqueue it at the front of the "in use" queue.
2285 */
2286 struct host *
2287 get_lock_host(struct hostlst_head *hd, const char *hostname, const struct sockaddr *saddr)
2288 {
2289 struct host *ihp;
2290
2291 if (!hostname && !saddr)
2292 return (NULL);
2293
2294 debuglog("get_lock_host %s\n", hostname ? hostname : "addr");
2295 TAILQ_FOREACH(ihp, hd, hostlst) {
2296 if (hostname && (strncmp(hostname, ihp->name, SM_MAXSTRLEN) != 0))
2297 continue;
2298 if (saddr && addrcmp(saddr, &ihp->addr))
2299 continue;
2300 TAILQ_REMOVE(hd, ihp, hostlst);
2301 /*
2302 * Host is already monitored, so just bump the
2303 * reference count. But don't bump the reference
2304 * count if we're adding additional client-side
2305 * references. Client-side monitors are done by
2306 * address, are never unmonitored, and should only
2307 * take one refcount. Otherwise, repeated calls
2308 * could cause the refcount to wrap.
2309 */
2310 if (!saddr || !ihp->addr.sa_len)
2311 ++ihp->refcnt;
2312 ihp->lastuse = currsec;
2313 /* Host should only be in the monitor list once */
2314 TAILQ_INSERT_HEAD(&hostlst_head, ihp, hostlst);
2315 break;
2316 }
2317 debuglog("get_lock_host %s %s\n",
2318 ihp == NULL ? "did not find" : "found", hostname ? hostname : "addr");
2319 return (ihp);
2320 }
2321
2322 /*
2323 * monitor_lock_host: monitor lock hosts locally with a ref count and
2324 * inform statd
2325 */
2326 void
2327 monitor_lock_host_by_name(const char *hostname)
2328 {
2329 struct host *ihp;
2330
2331 debuglog("monitor_lock_host: %s\n", hostname);
2332 ihp = get_lock_host(&hostlst_head, hostname, NULL);
2333 if (ihp == NULL)
2334 ihp = get_lock_host(&hostlst_unref, hostname, NULL);
2335 if (ihp != NULL) {
2336 debuglog("Monitor_lock_host: %s (cached)\n", hostname);
2337 return;
2338 }
2339
2340 monitor_lock_host(hostname, NULL);
2341 }
2342
2343 void
2344 monitor_lock_host_by_addr(const struct sockaddr *saddr)
2345 {
2346 struct host *ihp;
2347 struct hostent *hp;
2348 char hostaddr[SM_MAXSTRLEN];
2349 struct sockaddr_in *sin = (struct sockaddr_in *)saddr;
2350
2351 if (getnameinfo(saddr, saddr->sa_len, hostaddr, sizeof(hostaddr),
2352 NULL, 0, NI_NUMERICHOST)) {
2353 debuglog("monitor_lock_host: bad address\n");
2354 return;
2355 }
2356 debuglog("monitor_lock_host: %s\n", hostaddr);
2357 ihp = get_lock_host(&hostlst_head, NULL, saddr);
2358 if (ihp == NULL)
2359 ihp = get_lock_host(&hostlst_unref, NULL, saddr);
2360 if (ihp != NULL) {
2361 debuglog("Monitor_lock_host: %s (cached)\n", ihp->name);
2362 return;
2363 }
2364
2365 hp = gethostbyaddr((char*)&sin->sin_addr, sizeof(sin->sin_addr), AF_INET);
2366 if (hp) {
2367 monitor_lock_host(hp->h_name, saddr);
2368 } else {
2369 // herror(hostaddr);
2370 monitor_lock_host(hostaddr, saddr);
2371 }
2372 }
2373
2374 static void
2375 monitor_lock_host(const char *hostname, const struct sockaddr *saddr)
2376 {
2377 struct host *nhp;
2378 struct mon smon;
2379 struct sm_stat_res sres;
2380 int rpcret, statflag;
2381 size_t n;
2382
2383 rpcret = 0;
2384 statflag = 0;
2385
2386 /* Host is not yet monitored, add it */
2387 debuglog("Monitor_lock_host: %s (creating)\n", hostname);
2388 n = strnlen(hostname, SM_MAXSTRLEN);
2389 if (n == SM_MAXSTRLEN) {
2390 debuglog("monitor_lock_host: hostname too long\n");
2391 return;
2392 }
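/*
 * struct host ends in a variable-length name field, so allocate only
 * enough space to hold the actual hostname plus its terminating NUL.
 */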
2393 nhp = malloc(sizeof(*nhp) - sizeof(nhp->name) + n + 1);
2394 if (nhp == NULL) {
2395 debuglog("Unable to allocate entry for statd mon\n");
2396 return;
2397 }
2398
2399 /* Allocated new host entry, now fill the fields */
2400 memcpy(nhp->name, hostname, n);
2401 nhp->name[n] = 0;
2402 nhp->refcnt = 1;
2403 nhp->lastuse = currsec;
2404 if (saddr) {
2405 bcopy(saddr, &nhp->addr, saddr->sa_len);
2406 } else {
2407 nhp->addr.sa_len = 0;
2408 }
2409 debuglog("Locally Monitoring host '%s'\n", hostname);
2410
2411 debuglog("Attempting to tell statd\n");
2412
2413 bzero(&smon,sizeof(smon));
2414
2415 smon.mon_id.mon_name = nhp->name;
2416 smon.mon_id.my_id.my_name = "localhost";
2417
2418 smon.mon_id.my_id.my_prog = NLM_PROG;
2419 smon.mon_id.my_id.my_vers = NLM_SM;
2420 smon.mon_id.my_id.my_proc = NLM_SM_NOTIFY;
2421
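/*
 * Ask the local statd (SM_MON) to watch this host and, if its state
 * changes, call us back via the NLM_SM_NOTIFY procedure filled in above.
 */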
2422 rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_MON, xdr_mon,
2423 &smon, xdr_sm_stat_res, &sres);
2424
2425 if (rpcret == 0) {
2426 if (sres.res_stat == stat_fail) {
2427 debuglog("Statd call failed\n");
2428 statflag = 0;
2429 } else {
2430 statflag = 1;
2431 }
2432 } else {
2433 debuglog("Rpc call to statd failed with return value: %d\n",
2434 rpcret);
2435 statflag = 0;
2436 }
2437
2438 if (statflag == 1) {
2439 TAILQ_INSERT_HEAD(&hostlst_head, nhp, hostlst);
2440 } else {
2441 free(nhp);
2442 }
2443 }
2444
2445 /*
2446 * unmonitor_lock_host: clear monitor ref counts and inform statd when gone
2447 */
2448 void
2449 unmonitor_lock_host(const char *hostname)
2450 {
2451 struct host *ihp;
2452
2453 TAILQ_FOREACH(ihp, &hostlst_head, hostlst) {
2454 if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) {
2455 /* Host is unmonitored, drop refcount */
2456 --ihp->refcnt;
2457 /* Host should only be in the monitor list once */
2458 break;
2459 }
2460 }
2461
2462 if (ihp == NULL) {
2463 debuglog("Could not find host %16s in mon list\n", hostname);
2464 return;
2465 }
2466
2467 if (ihp->refcnt > 0)
2468 return;
2469
2470 if (ihp->refcnt < 0) {
2471 debuglog("Negative refcount!: %d\n", ihp->refcnt);
2472 }
2473
2474 TAILQ_REMOVE(&hostlst_head, ihp, hostlst);
2475 TAILQ_INSERT_HEAD(&hostlst_unref, ihp, hostlst);
2476 if (host_expire <= 0)
2477 destroy_lock_host(ihp);
2478 }
2479
2480 void
2481 destroy_lock_host(struct host *ihp)
2482 {
2483 struct mon_id smon_id;
2484 struct sm_stat smstat;
2485 int rpcret;
2486
2487 debuglog("Attempting to unmonitor host %16s\n", ihp->name);
2488
2489 bzero(&smon_id,sizeof(smon_id));
2490
2491 smon_id.mon_name = (char *)ihp->name;
2492 smon_id.my_id.my_name = "localhost";
2493 smon_id.my_id.my_prog = NLM_PROG;
2494 smon_id.my_id.my_vers = NLM_SM;
2495 smon_id.my_id.my_proc = NLM_SM_NOTIFY;
2496
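/* Tell the local statd (SM_UNMON) to stop monitoring this host. */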
2497 rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_UNMON, xdr_mon_id,
2498 &smon_id, xdr_sm_stat, &smstat);
2499
2500 if (rpcret != 0) {
2501 debuglog("Rpc call to unmonitor statd failed with "
2502 "return value %d: %s", rpcret, clnt_sperrno(rpcret));
2503 } else {
2504 debuglog("Succeeded unmonitoring %16s\n", ihp->name);
2505 }
2506
2507 TAILQ_REMOVE(&hostlst_unref, ihp, hostlst);
2508 free(ihp);
2509 }
2510
2511 /*
2512 * returns 1 if unreferenced hosts remain to be expired later, 0 if none.
2513 */
2514 int
2515 expire_lock_hosts(void)
2516 {
2517 struct host *ihp;
2518
2519 debuglog("expire_lock_hosts: called\n");
2520 for ( ;; ) {
2521 ihp = TAILQ_LAST(&hostlst_unref, hostlst_head);
2522 if (ihp == NULL)
2523 break;
2524 if (host_expire > 0 && ihp->lastuse >= currsec - host_expire)
2525 break;
2526 debuglog("expire_lock_hosts: expiring %s %d %d %d\n",
2527 ihp->name, (int)ihp->lastuse,
2528 (int)currsec, (int)currsec - host_expire);
2529 destroy_lock_host(ihp);
2530 }
2531 return (TAILQ_LAST(&hostlst_unref, hostlst_head) != NULL);
2532 }
2533
2534 /*
2535 * notify: Clear all locks from a host if statd complains
2536 *
2537 * XXX: This routine has not been thoroughly tested. However, neither
2538 * had the old one been. It used to compare the statd crash state counter
2539 * to the current lock state. The upshot of this was that it basically
2540 * cleared all locks from the specified host 99% of the time (with the
2541 * other 1% being a bug). Consequently, the assumption is that clearing
2542 * all locks from a host when notified by statd is acceptable.
2543 *
2544 * Please note that this routine skips the usual level of redirection
2545 * through a do_* type routine. This introduces another possible source
2546 * of error; it might be better written as a do_notify routine that
2547 * replaces this one.
2548 */
2549
2550 void
2551 notify(const char *hostname, const int state)
2552 {
2553 debuglog("notify from %s, new state %d", hostname, state);
2554
2555 siglock();
2556 do_clear(hostname);
2557 sigunlock();
2558
2559 debuglog("Leaving notify\n");
2560 }
2561
2562 int
2563 send_granted(struct file_lock *fl, int opcode __unused)
2566 {
2567 CLIENT *cli;
2568 static char dummy;
2569 struct timeval timeo;
2570 enum clnt_stat rv;
2571 static struct nlm_res retval;
2572 static struct nlm4_res retval4;
2573
2574 debuglog("About to send granted on blocked lock\n");
2575
2576 cli = get_client(fl->addr,
2577 (fl->flags & LOCK_V4) ? NLM_VERS4 : NLM_VERS);
2578 if (cli == NULL) {
2579 syslog(LOG_NOTICE, "failed to get CLIENT for %s",
2580 fl->client_name);
2581 /*
2582 * We failed to notify the remote host that the lock has been
2583 * granted. The client will time out and retry, and the lock
2584 * will be granted then.
2585 */
2586 return -1;
2587 }
2588 timeo.tv_sec = 0;
2589 timeo.tv_usec = (fl->flags & LOCK_ASYNC) ? 0 : 500000; /* 0.5s */
2590
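/*
 * Assign a unique, non-zero cookie to identify this GRANTED callback;
 * zero is skipped if the counter wraps (granted_failed matches on it).
 */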
2591 fl->granted_cookie = ++send_granted_cookie;
2592 if (!send_granted_cookie)
2593 send_granted_cookie++;
2594
2595 if (fl->flags & LOCK_V4) {
2596 static nlm4_testargs res;
2597 res.cookie.n_len = sizeof(fl->granted_cookie);
2598 res.cookie.n_bytes = (char*)&fl->granted_cookie;
2599 res.exclusive = fl->client.exclusive;
2600 res.alock.caller_name = fl->client_name;
2601 res.alock.fh.n_len = fl->filehandle.n_len;
2602 res.alock.fh.n_bytes = fl->filehandle.n_bytes;
2603 res.alock.oh = fl->client.oh;
2604 res.alock.svid = fl->client.svid;
2605 res.alock.l_offset = fl->client.l_offset;
2606 res.alock.l_len = fl->client.l_len;
2607 debuglog("sending v4 reply%s",
2608 (fl->flags & LOCK_ASYNC) ? " (async)":"");
2609 if (fl->flags & LOCK_ASYNC) {
2610 rv = clnt_call(cli, NLM4_GRANTED_MSG,
2611 xdr_nlm4_testargs, &res, xdr_void, &dummy, timeo);
2612 } else {
2613 rv = clnt_call(cli, NLM4_GRANTED,
2614 xdr_nlm4_testargs, &res, xdr_nlm4_res,
2615 &retval4, timeo);
2616 }
2617 } else {
2618 static nlm_testargs res;
2619
2620 res.cookie.n_len = sizeof(fl->granted_cookie);
2621 res.cookie.n_bytes = (char*)&fl->granted_cookie;
2622 res.exclusive = fl->client.exclusive;
2623 res.alock.caller_name = fl->client_name;
2624 res.alock.fh.n_len = fl->filehandle.n_len;
2625 res.alock.fh.n_bytes = fl->filehandle.n_bytes;
2626 res.alock.oh = fl->client.oh;
2627 res.alock.svid = fl->client.svid;
2628 res.alock.l_offset = fl->client.l_offset;
2629 res.alock.l_len = fl->client.l_len;
2630 debuglog("sending v1 reply%s",
2631 (fl->flags & LOCK_ASYNC) ? " (async)":"");
2632 if (fl->flags & LOCK_ASYNC) {
2633 rv = clnt_call(cli, NLM_GRANTED_MSG,
2634 xdr_nlm_testargs, &res, xdr_void, &dummy, timeo);
2635 } else {
2636 rv = clnt_call(cli, NLM_GRANTED,
2637 xdr_nlm_testargs, &res, xdr_nlm_res,
2638 &retval, timeo);
2639 }
2640 }
2641 if (debug_level > 2)
2642 debuglog("clnt_call returns %d(%s) for granted",
2643 rv, clnt_sperrno(rv));
2644
2645 if ((rv != RPC_SUCCESS) &&
2646 !((fl->flags & LOCK_ASYNC) && (rv == RPC_TIMEDOUT)))
2647 return -1;
2648 return 0;
2649 }
2650
2651 /*
2652 * granted_failed: remove a granted lock that wasn't successfully
2653 * accepted by the client
2654 */
2655 void
2656 granted_failed(nlm4_res *arg)
2657 {
2658 u_int64_t cookie;
2659 struct file_lock *ifl;
2660
2661 debuglog("Entering granted_failed, status %d\n", arg->stat.stat);
2662
2663 if (arg->cookie.n_len != sizeof(cookie)) {
2664 debuglog("Exiting granted_failed: bogus cookie size %d\n",
2665 arg->cookie.n_len);
2666 return;
2667 }
2668 bcopy(arg->cookie.n_bytes, &cookie, sizeof(cookie));
2669 debuglog("granted_failed, cookie 0x%llx\n", cookie);
2670
2671 LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
2672 debuglog("Pointer to file lock: %p\n",ifl);
2673
2674 debuglog("****Dump of ifl****\n");
2675 dump_filelock(ifl);
2676
2677 if (ifl->granted_cookie != cookie)
2678 continue;
2679
2680 debuglog("granted_failed: cookie found\n");
2681 break;
2682 }
2683
2684 if (ifl) {
2685 do_unlock(ifl);
2686 /* ifl is NO LONGER VALID AT THIS POINT */
2687 } else {
2688 debuglog("granted_failed: cookie NOT FOUND\n");
2689 }
2690
2691 debuglog("Exiting granted_failed\n");
2692 }
2693
2694 /*
2695 * getshare: try to acquire a share reservation
2696 */
2697 enum nlm4_stats
2698 getshare(nlm_shareargs *shrarg, struct svc_req *rqstp, const int flags)
2699 {
2700 struct sharefile *shrfile;
2701 struct file_share *sh;
2702 size_t n;
2703
2704 debuglog("Entering getshare...\n");
2705
2706 if (grace_expired == 0 && shrarg->reclaim == 0) {
2707 debuglog("getshare denied - grace period\n");
2708 return (flags & LOCK_V4) ?
2709 nlm4_denied_grace_period :
2710 nlm_denied_grace_period;
2711 }
2712
2713 if (shrarg->share.fh.n_len > NFS_MAX_FH_SIZE) {
2714 debuglog("received fhandle size %d, max size %d",
2715 shrarg->share.fh.n_len, NFS_MAX_FH_SIZE);
2716 return (flags & LOCK_V4) ? nlm4_failed : nlm_denied;
2717 }
2718
2719 /* find file in list of share files */
2720 LIST_FOREACH(shrfile, &nfssharefilelist_head, sharefilelist) {
2721 if ((shrarg->share.fh.n_len == shrfile->filehandle.n_len) &&
2722 (bcmp(shrarg->share.fh.n_bytes, shrfile->filehandle.n_bytes,
2723 shrarg->share.fh.n_len) == 0)) {
2724 /* shrfile is the correct file */
2725 break;
2726 }
2727 }
2728
2729 /* if share file not found, create a new share file */
2730 if (!shrfile) {
2731 fhandle_t fh;
2732 int fd;
2733 fh.fh_len = shrarg->share.fh.n_len;
2734 bcopy(shrarg->share.fh.n_bytes, fh.fh_data, fh.fh_len);
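/* Open the file directly by its NFS filehandle. */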
2735 fd = fhopen(&fh, O_RDONLY);
2736 if (fd < 0) {
2737 debuglog("fhopen failed (from %16s): %32s\n",
2738 shrarg->share.caller_name, strerror(errno));
2739 if ((flags & LOCK_V4) == 0)
2740 return nlm_denied;
2741 switch (errno) {
2742 case ESTALE:
2743 return nlm4_stale_fh;
2744 default:
2745 return nlm4_failed;
2746 }
2747 }
2748 shrfile = malloc(sizeof(struct sharefile));
2749 if (!shrfile) {
2750 debuglog("getshare failed: can't allocate sharefile\n");
2751 close(fd);
2752 return (flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
2753 }
2754 shrfile->filehandle.n_len = shrarg->share.fh.n_len;
2755 shrfile->filehandle.n_bytes = malloc(shrarg->share.fh.n_len);
2756 if (!shrfile->filehandle.n_bytes) {
2757 debuglog("getshare failed: can't allocate sharefile filehandle\n");
2758 free(shrfile);
2759 close(fd);
2760 return (flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
2761 }
2762 bcopy(shrarg->share.fh.n_bytes, shrfile->filehandle.n_bytes,
2763 shrarg->share.fh.n_len);
2764 shrfile->fd = fd;
2765 shrfile->refcount = 0;
2766 shrfile->sharelist_head.lh_first = NULL;
2767 LIST_INSERT_HEAD(&nfssharefilelist_head, shrfile, sharefilelist);
2768 }
2769
2770 /* compare request mode/access to current shares */
2771 LIST_FOREACH(sh, &shrfile->sharelist_head, nfssharelist) {
2772 /* if request host/owner matches a current share... */
2773 if ((strncmp(shrarg->share.caller_name, sh->client_name, SM_MAXSTRLEN) == 0) &&
2774 same_netobj(&shrarg->share.oh, &sh->oh)) {
2775 /* ...then just update share mode/access */
2776 sh->mode = shrarg->share.mode;
2777 sh->access = shrarg->share.access;
2778 debuglog("getshare: updated existing share\n");
2779 return (flags & LOCK_V4) ? nlm4_granted : nlm_granted;
2780 }
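/*
 * Otherwise, conflict when the request's deny mode overlaps the
 * existing share's access bits, or the requested access overlaps
 * the existing share's deny mode.
 */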
2781 if (((shrarg->share.mode & sh->access) != 0) ||
2782 ((shrarg->share.access & sh->mode) != 0)) {
2783 /* share request conflicts with existing share */
2784 debuglog("getshare: conflicts with existing share\n");
2785 return (flags & LOCK_V4) ? nlm4_denied : nlm_denied;
2786 }
2787 }
2788
2789 /* create/init new share */
2790 n = strnlen(shrarg->share.caller_name, SM_MAXSTRLEN);
2791 if (n < SM_MAXSTRLEN) {
2792 sh = malloc(sizeof(*sh) - sizeof(sh->client_name) + n + 1);
2793 } else {
2794 debuglog("getshare failed: hostname too long\n");
2795 sh = NULL;
2796 }
2797 if (!sh) {
2798 debuglog("getshare failed: can't allocate share\n");
2799 if (!shrfile->refcount) {
2800 LIST_REMOVE(shrfile, sharefilelist);
2801 close(shrfile->fd);
2802 free(shrfile->filehandle.n_bytes);
2803 free(shrfile);
2804 }
2805 return (flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
2806 }
2807 bzero(sh, sizeof(*sh) - sizeof(sh->client_name));
2808 sh->oh.n_len = shrarg->share.oh.n_len;
2809 sh->oh.n_bytes = malloc(sh->oh.n_len);
2810 if (!sh->oh.n_bytes) {
2811 debuglog("getshare failed: can't allocate share owner handle\n");
2812 free(sh);
2813 if (!shrfile->refcount) {
2814 LIST_REMOVE(shrfile, sharefilelist);
2815 close(shrfile->fd);
2816 free(shrfile->filehandle.n_bytes);
2817 free(shrfile);
2818 }
2819 return (flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
2820 }
2821 memcpy(sh->client_name, shrarg->share.caller_name, n);
2822 sh->client_name[n] = 0;
2823 sh->mode = shrarg->share.mode;
2824 sh->access = shrarg->share.access;
2825
2826 /* insert new share into file's share list */
2827 LIST_INSERT_HEAD(&shrfile->sharelist_head, sh, nfssharelist);
2828 shrfile->refcount++;
2829
2830 debuglog("Exiting getshare...\n");
2831
2832 return (flags & LOCK_V4) ? nlm4_granted : nlm_granted;
2833 }
2834
2835
2836 /* remove a share reservation */
2837 enum nlm4_stats
2838 unshare(nlm_shareargs *shrarg, struct svc_req *rqstp, const int flags)
2839 {
2840 struct sharefile *shrfile;
2841 struct file_share *sh;
2842
2843 debuglog("Entering unshare...\n");
2844
2845 if (shrarg->share.fh.n_len > NFS_MAX_FH_SIZE) {
2846 debuglog("received fhandle size %d, max size %d",
2847 shrarg->share.fh.n_len, NFS_MAX_FH_SIZE);
2848 return (flags & LOCK_V4) ? nlm4_failed : nlm_denied;
2849 }
2850
2851 /* find file in list of share files */
2852 LIST_FOREACH(shrfile, &nfssharefilelist_head, sharefilelist) {
2853 if ((shrarg->share.fh.n_len == shrfile->filehandle.n_len) &&
2854 (bcmp(shrarg->share.fh.n_bytes, shrfile->filehandle.n_bytes,
2855 shrarg->share.fh.n_len) == 0)) {
2856 /* shrfile is the correct file */
2857 break;
2858 }
2859 }
2860
2861 /* if share file not found, return success (per spec) */
2862 if (!shrfile) {
2863 debuglog("unshare: no such share file\n");
2864 return (flags & LOCK_V4) ? nlm4_granted : nlm_granted;
2865 }
2866
2867 /* find share */
2868 LIST_FOREACH(sh, &shrfile->sharelist_head, nfssharelist) {
2869 /* if request host/owner matches a current share... */
2870 if ((strncmp(shrarg->share.caller_name, sh->client_name, SM_MAXSTRLEN) == 0) &&
2871 same_netobj(&shrarg->share.oh, &sh->oh))
2872 break;
2873 }
2874
2875 /* if share not found, return success (per spec) */
2876 if (!sh) {
2877 debuglog("unshare: no such share\n");
2878 return (flags & LOCK_V4) ? nlm4_granted : nlm_granted;
2879 }
2880
2881 /* remove share from file and deallocate */
2882 shrfile->refcount--;
2883 LIST_REMOVE(sh, nfssharelist);
2884 free(sh->oh.n_bytes);
2885 free(sh);
2886
2887 /* if file has no more shares, deallocate share file */
2888 if (!shrfile->refcount) {
2889 debuglog("unshare: file has no more shares\n");
2890 LIST_REMOVE(shrfile, sharefilelist);
2891 close(shrfile->fd);
2892 free(shrfile->filehandle.n_bytes);
2893 free(shrfile);
2894 }
2895
2896 debuglog("Exiting unshare...\n");
2897
2898 return (flags & LOCK_V4) ? nlm4_granted : nlm_granted;
2899 }
2900
2901 /*
2902 * do_free_all
2903 *
2904 * Wipe out all non-monitored locks and shares held by a host.
2905 */
2906
2907 void
2908 do_free_all(const char *hostname)
2909 {
2910 struct file_lock *ifl, *nfl;
2911 struct sharefile *shrfile, *nshrfile;
2912 struct file_share *ifs, *nfs;
2913 enum partialfilelock_status pfsret;
2914
2915 /* clear non-monitored blocking file locks */
2916 ifl = LIST_FIRST(&blockedlocklist_head);
2917 while (ifl != NULL) {
2918 nfl = LIST_NEXT(ifl, nfslocklist);
2919
2920 if (((ifl->flags & LOCK_MON) == 0) &&
2921 (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0)) {
2922 remove_blockingfilelock(ifl);
2923 deallocate_file_lock(ifl);
2924 }
2925
2926 ifl = nfl;
2927 }
2928
2929 /* clear non-monitored file locks */
2930 restart:
2931 ifl = LIST_FIRST(&nfslocklist_head);
2932 while (ifl != NULL) {
2933 nfl = LIST_NEXT(ifl, nfslocklist);
2934
2935 if (((ifl->flags & LOCK_MON) == 0) &&
2936 (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0)) {
2937 /* Unlock destroys ifl out from underneath */
2938 pfsret = unlock_partialfilelock(ifl);
2939 if (pfsret != PFL_GRANTED) {
2940 /* Uh oh... there was some sort of problem. */
2941 /* If we restart the loop, we may get */
2942 /* stuck here forever getting errors. */
2943 /* So, let's just abort the whole scan. */
2944 syslog(LOG_WARNING, "unmonitored lock clearing for %s failed: %d",
2945 hostname, pfsret);
2946 break;
2947 }
2948 /* ifl is NO LONGER VALID AT THIS POINT */
2949 /* Note: the unlock may deallocate several existing locks. */
2950 /* Therefore, we need to restart the scanning of the list, */
2951 /* because nfl could be pointing to a freed lock. */
2952 goto restart;
2953 }
2954
2955 ifl = nfl;
2956 }
2957
2958 /* clear shares */
2959 shrfile = LIST_FIRST(&nfssharefilelist_head);
2960 while (shrfile != NULL) {
2961 nshrfile = LIST_NEXT(shrfile, sharefilelist);
2962
2963 ifs = LIST_FIRST(&shrfile->sharelist_head);
2964 while (ifs != NULL) {
2965 nfs = LIST_NEXT(ifs, nfssharelist);
2966
2967 if (strncmp(hostname, ifs->client_name, SM_MAXSTRLEN) == 0) {
2968 shrfile->refcount--;
2969 LIST_REMOVE(ifs, nfssharelist);
2970 free(ifs->oh.n_bytes);
2971 free(ifs);
2972 }
2973
2974 ifs = nfs;
2975 }
2976
2977 if (!shrfile->refcount) {
2978 LIST_REMOVE(shrfile, sharefilelist);
2979 close(shrfile->fd);
2980 free(shrfile->filehandle.n_bytes);
2981 free(shrfile);
2982 }
2983
2984 shrfile = nshrfile;
2985 }
2986
2987 }
2988
2989
2990
2991 /*
2992 * Routines below here have not been modified in the overhaul
2993 */
2994
2995 /*
2996 * Are these two routines still required since lockd is not spawning off
2997 * children to service locks anymore? Presumably they were originally
2998 * put in place to prevent one child from changing the lock list out
2999 * from under another one.
3000 */
3001
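/*
 * Callers bracket lock-list manipulation with this pair, e.g.
 * (pattern taken from getlock above):
 *
 *	siglock();
 *	retval = do_lock(newfl);
 *	sigunlock();
 */
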
3002 void
3003 siglock(void)
3004 {
3005 sigset_t block;
3006
3007 sigemptyset(&block);
3008 sigaddset(&block, SIGCHLD);
3009
3010 if (sigprocmask(SIG_BLOCK, &block, NULL) < 0) {
3011 syslog(LOG_WARNING, "siglock failed: %s", strerror(errno));
3012 }
3013 }
3014
3015 void
3016 sigunlock(void)
3017 {
3018 sigset_t block;
3019
3020 sigemptyset(&block);
3021 sigaddset(&block, SIGCHLD);
3022
3023 if (sigprocmask(SIG_UNBLOCK, &block, NULL) < 0) {
3024 syslog(LOG_WARNING, "sigunlock failed: %s", strerror(errno));
3025 }
3026 }
3027
3028