/*	$NetBSD: lockd_lock.c,v 1.5 2000/11/21 03:47:41 enami Exp $	*/
/*	$FreeBSD: src/usr.sbin/rpc.lockd/lockd_lock.c,v 1.10 2002/03/22 19:57:09 alfred Exp $ */

/*
 * Copyright (c) 2001 Andrew P. Lentvorski, Jr.
 * Copyright (c) 2000 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#define LOCKD_DEBUG

#include <stdio.h>
#ifdef LOCKD_DEBUG
#include <stdarg.h>
#endif
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <syslog.h>
#include <errno.h>
#include <string.h>
#include <signal.h>
#include <rpc/rpc.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/wait.h>
#include <rpcsvc/sm_inter.h>
#include <rpcsvc/nlm_prot.h>

#include "lockd.h"
#include "lockd_lock.h"

#define MAXOBJECTSIZE 64
#define MAXBUFFERSIZE 1024

/*
 * A set of utilities for managing file locking
 *
 * XXX: All locks are in a linked list, a better structure should be used
 * to improve search/access efficiency.
 */

/* struct describing a lock */
struct file_lock {
	LIST_ENTRY(file_lock) nfslocklist;
	netobj filehandle;	/* NFS filehandle */
	struct sockaddr *addr;
	struct nlm4_holder client;	/* lock holder */
	u_int64_t granted_cookie;
	int nsm_status;		/* status from the remote lock manager */
	int status;		/* lock status, see below */
	int flags;		/* lock flags, see lockd_lock.h */
	int blocking;		/* blocking lock or not */
	char client_name[SM_MAXSTRLEN];	/* client_name is really variable length and must be last! */
};

LIST_HEAD(nfslocklist_head, file_lock);
struct nfslocklist_head nfslocklist_head = LIST_HEAD_INITIALIZER(nfslocklist_head);

LIST_HEAD(blockedlocklist_head, file_lock);
struct blockedlocklist_head blockedlocklist_head = LIST_HEAD_INITIALIZER(blockedlocklist_head);

/* struct describing a share reservation */
struct file_share {
	LIST_ENTRY(file_share) nfssharelist;
	netobj oh;		/* share holder */
	short mode;
	short access;
	char client_name[SM_MAXSTRLEN];	/* name is really variable length and must be last! */
};
LIST_HEAD(nfssharelist_head, file_share);

/* Struct describing a file with share reservations */
struct sharefile {
	LIST_ENTRY(sharefile) sharefilelist;
	netobj filehandle;	/* Local access filehandle */
	int fd;			/* file descriptor: remains open until no more shares */
	int refcount;
	struct nfssharelist_head sharelist_head;
};
LIST_HEAD(nfssharefilelist_head, sharefile);
struct nfssharefilelist_head nfssharefilelist_head = LIST_HEAD_INITIALIZER(nfssharefilelist_head);

/* lock status */
#define LKST_LOCKED	1	/* lock is locked */
/* XXX: Is this flag file specific or lock specific? */
#define LKST_WAITING	2	/* file is already locked by another host */
#define LKST_PROCESSING	3	/* child is trying to acquire the lock */
#define LKST_DYING	4	/* must die when we get news from the child */

/* struct describing a monitored host */
struct host {
	TAILQ_ENTRY(host) hostlst;
	int refcnt;
	time_t lastuse;
	struct sockaddr addr;
	char name[SM_MAXSTRLEN];	/* name is really variable length and must be last! */
};
/* list of hosts we monitor */
TAILQ_HEAD(hostlst_head, host);
struct hostlst_head hostlst_head = TAILQ_HEAD_INITIALIZER(hostlst_head);
struct hostlst_head hostlst_unref = TAILQ_HEAD_INITIALIZER(hostlst_unref);

int host_expire = 60;	/* seconds */
time_t currsec;
u_int64_t send_granted_cookie = 0;

/*
 * File monitoring handlers
 * XXX: These might be removable when kevent support is placed into the
 * hardware lock/unlock routines (i.e., let the kernel do all the file
 * monitoring).
 */

/* Struct describing a monitored file */
struct monfile {
	LIST_ENTRY(monfile) monfilelist;
	netobj filehandle;	/* Local access filehandle */
	int fd;			/* file descriptor: remains open until unlock! */
	int refcount;
	int exclusive;
};

/* List of files we monitor */
LIST_HEAD(monfilelist_head, monfile);
struct monfilelist_head monfilelist_head = LIST_HEAD_INITIALIZER(monfilelist_head);

static int debugdelay = 0;

enum nfslock_status { NFS_GRANTED = 0, NFS_GRANTED_DUPLICATE,
    NFS_DENIED, NFS_DENIED_NOLOCK,
    NFS_RESERR };

enum hwlock_status { HW_GRANTED = 0, HW_GRANTED_DUPLICATE,
    HW_DENIED, HW_DENIED_NOLOCK,
    HW_STALEFH, HW_READONLY, HW_RESERR };

enum partialfilelock_status { PFL_GRANTED=0, PFL_GRANTED_DUPLICATE, PFL_DENIED,
    PFL_NFSDENIED, PFL_NFSBLOCKED, PFL_NFSDENIED_NOLOCK, PFL_NFSRESERR,
    PFL_HWDENIED, PFL_HWBLOCKED, PFL_HWDENIED_NOLOCK, PFL_HWRESERR};

enum LFLAGS {LEDGE_LEFT, LEDGE_LBOUNDARY, LEDGE_INSIDE, LEDGE_RBOUNDARY, LEDGE_RIGHT};
enum RFLAGS {REDGE_LEFT, REDGE_LBOUNDARY, REDGE_INSIDE, REDGE_RBOUNDARY, REDGE_RIGHT};
/* XXX: WARNING! I HAVE OVERLOADED THIS STATUS ENUM! SPLIT IT APART INTO TWO */
enum split_status {SPL_DISJOINT=0, SPL_LOCK1=1, SPL_LOCK2=2, SPL_CONTAINED=4, SPL_RESERR=8};

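/*
 * A note on the overload the XXX above complains about: SPL_LOCK1 and
 * SPL_LOCK2 are OR-able bit flags (an unlock that punches a hole in the
 * middle of an established region yields SPL_LOCK1 | SPL_LOCK2, one
 * remainder lock on each side), while SPL_DISJOINT, SPL_CONTAINED and
 * SPL_RESERR are used as plain scalar states.
 */
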
enum partialfilelock_status lock_partialfilelock(struct file_lock *fl);

int send_granted(struct file_lock *fl, int opcode);
void siglock(void);
void sigunlock(void);
void destroy_lock_host(struct host *ihp);
static void monitor_lock_host(const char *hostname, const struct sockaddr *addr);

void copy_nlm4_lock_to_nlm4_holder(const struct nlm4_lock *src,
    const bool_t exclusive, struct nlm4_holder *dest);
struct file_lock *allocate_file_lock(const netobj *lockowner,
    const netobj *filehandle, const struct sockaddr *addr,
    const char *caller_name);
void deallocate_file_lock(struct file_lock *fl);
void fill_file_lock(struct file_lock *fl,
    const bool_t exclusive, const int32_t svid,
    const u_int64_t offset, const u_int64_t len,
    const int state, const int status, const int flags, const int blocking);
int regions_overlap(const u_int64_t start1, const u_int64_t len1,
    const u_int64_t start2, const u_int64_t len2);
enum split_status region_compare(const u_int64_t starte, const u_int64_t lene,
    const u_int64_t startu, const u_int64_t lenu,
    u_int64_t *start1, u_int64_t *len1, u_int64_t *start2, u_int64_t *len2);
int same_netobj(const netobj *n0, const netobj *n1);
int same_filelock_identity(const struct file_lock *fl0,
    const struct file_lock *fl2);

static void debuglog(char const *fmt, ...);
void dump_static_object(const unsigned char *object, const int sizeof_object,
    unsigned char *hbuff, const int sizeof_hbuff,
    unsigned char *cbuff, const int sizeof_cbuff);
void dump_netobj(const struct netobj *nobj);
void dump_filelock(const struct file_lock *fl);
struct file_lock *get_lock_matching_unlock(const struct file_lock *fl);
enum nfslock_status test_nfslock(const struct file_lock *fl,
    struct file_lock **conflicting_fl);
enum nfslock_status lock_nfslock(struct file_lock *fl);
enum nfslock_status delete_nfslock(struct file_lock *fl);
enum nfslock_status unlock_nfslock(const struct file_lock *fl,
    struct file_lock **released_lock, struct file_lock **left_lock,
    struct file_lock **right_lock);
enum hwlock_status lock_hwlock(struct file_lock *fl);
enum split_status split_nfslock(const struct file_lock *exist_lock,
    const struct file_lock *unlock_lock, struct file_lock **left_lock,
    struct file_lock **right_lock);
void add_blockingfilelock(struct file_lock *fl);
enum hwlock_status unlock_hwlock(const struct file_lock *fl);
enum hwlock_status test_hwlock(const struct file_lock *fl,
    struct file_lock **conflicting_fl);
void remove_blockingfilelock(struct file_lock *fl);
void clear_blockingfilelock(const char *hostname);
void retry_blockingfilelocklist(netobj *fh);
enum partialfilelock_status unlock_partialfilelock(
    const struct file_lock *fl);
void clear_partialfilelock(const char *hostname);
enum partialfilelock_status test_partialfilelock(
    const struct file_lock *fl, struct file_lock **conflicting_fl);
enum nlm_stats do_test(struct file_lock *fl,
    struct file_lock **conflicting_fl);
enum nlm_stats do_unlock(struct file_lock *fl);
enum nlm_stats do_lock(struct file_lock *fl);
void do_clear(const char *hostname);


void
debuglog(char const *fmt, ...)
{
	va_list ap;

	if (debug_level < 1) {
		return;
	}

	sleep(debugdelay);

	va_start(ap, fmt);
	vsyslog(LOG_DEBUG, fmt, ap);
	va_end(ap);
}

void
dump_static_object(object, size_object, hbuff, size_hbuff, cbuff, size_cbuff)
	const unsigned char *object;
	const int size_object;
	unsigned char *hbuff;
	const int size_hbuff;
	unsigned char *cbuff;
	const int size_cbuff;
{
	int i, objectsize;

	if (debug_level < 2) {
		return;
	}

	objectsize = size_object;

	if (objectsize == 0) {
		debuglog("object is size 0\n");
	} else {
		if (objectsize > MAXOBJECTSIZE) {
279 debuglog("Object of size %d being clamped"
280 "to size %d\n", objectsize, MAXOBJECTSIZE);
			objectsize = MAXOBJECTSIZE;
		}

		if (hbuff != NULL) {
			if (size_hbuff < objectsize*2+1) {
				debuglog("Hbuff not large enough."
				    " Increase size\n");
			} else {
				for(i=0;i<objectsize;i++) {
					sprintf(hbuff+i*2,"%02x",*(object+i));
				}
				*(hbuff+i*2) = '\0';
			}
		}

		if (cbuff != NULL) {
			if (size_cbuff < objectsize+1) {
				debuglog("Cbuff not large enough."
				    " Increase Size\n");
			} else {
				for(i=0;i<objectsize;i++) {
					if (*(object+i) >= 32 && *(object+i) <= 127) {
						*(cbuff+i) = *(object+i);
					} else {
						*(cbuff+i) = '.';
					}
				}
				*(cbuff+i) = '\0';
			}
		}
	}
}

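/*
 * Illustrative example (added commentary, not output from a real run):
 * for the three input bytes { 0x01, 'A', 'B' }, hbuff receives the hex
 * rendering "014142" and cbuff the printable rendering ".AB", since
 * byte values outside 32..127 are shown as '.'.
 */
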
void
dump_netobj(const struct netobj *nobj)
{
	char hbuff[MAXBUFFERSIZE*2];
	char cbuff[MAXBUFFERSIZE];

	if (debug_level < 2) {
		return;
	}

	if (nobj == NULL) {
		debuglog("Null netobj pointer\n");
	}
	else if (nobj->n_len == 0) {
		debuglog("Size zero netobj\n");
	} else {
		dump_static_object(nobj->n_bytes, nobj->n_len,
		    hbuff, sizeof(hbuff), cbuff, sizeof(cbuff));
		debuglog("netobj: len: %d data: %s ::: %s\n",
		    nobj->n_len, hbuff, cbuff);
	}
}

/* #define DUMP_FILELOCK_VERBOSE */
void
dump_filelock(const struct file_lock *fl)
{
#ifdef DUMP_FILELOCK_VERBOSE
	char hbuff[MAXBUFFERSIZE*2];
	char cbuff[MAXBUFFERSIZE];
#endif

	if (debug_level < 2) {
		return;
	}

	if (fl != NULL) {
		debuglog("Dumping file lock structure @ %p\n", fl);

#ifdef DUMP_FILELOCK_VERBOSE
		dump_static_object((unsigned char *)fl->filehandle.n_bytes,
		    fl->filehandle.n_len, hbuff, sizeof(hbuff),
		    cbuff, sizeof(cbuff));
357 debuglog("Filehandle: %8s ::: %8s\n", hbuff, cbuff);
358 #endif
359
360 debuglog("Dumping nlm4_holder:\n"
361 "exc: %x svid: %x offset:len %llx:%llx\n",
362 fl->client.exclusive, fl->client.svid,
363 fl->client.l_offset, fl->client.l_len);
364
365 #ifdef DUMP_FILELOCK_VERBOSE
366 debuglog("Dumping client identity:\n");
367 dump_netobj(&fl->client.oh);
368
369 debuglog("nsm: %d status: %d flags: %d locker: %d"
370 " fd: %d\n", fl->nsm_status, fl->status,
371 fl->flags, fl->locker, fl->fd);
#endif
	} else {
		debuglog("NULL file lock structure\n");
	}
}

void
copy_nlm4_lock_to_nlm4_holder(src, exclusive, dest)
	const struct nlm4_lock *src;
	const bool_t exclusive;
	struct nlm4_holder *dest;
{

	dest->exclusive = exclusive;
	dest->oh.n_len = src->oh.n_len;
	dest->oh.n_bytes = src->oh.n_bytes;
	dest->svid = src->svid;
	dest->l_offset = src->l_offset;
	dest->l_len = src->l_len;
}


size_t
strnlen(const char *s, size_t len)
{
	size_t n;

	for (n = 0; n < len && s[n] != 0; n++)
		;
	return n;
}

/*
 * allocate_file_lock: Create a lock with the given parameters
 */

struct file_lock *
allocate_file_lock(const netobj *lockowner, const netobj *filehandle,
    const struct sockaddr *addr, const char *caller_name)
{
	struct file_lock *newfl;
	size_t n;

	/* Beware of rubbish input! */
	n = strnlen(caller_name, SM_MAXSTRLEN);
	if (n == SM_MAXSTRLEN) {
		return NULL;
	}

	newfl = malloc(sizeof(*newfl) - sizeof(newfl->client_name) + n + 1);
	if (newfl == NULL) {
		return NULL;
	}
	bzero(newfl, sizeof(*newfl) - sizeof(newfl->client_name));
	memcpy(newfl->client_name, caller_name, n);
	newfl->client_name[n] = 0;

	newfl->client.oh.n_bytes = malloc(lockowner->n_len);
	if (newfl->client.oh.n_bytes == NULL) {
		free(newfl);
		return NULL;
	}
	newfl->client.oh.n_len = lockowner->n_len;
	bcopy(lockowner->n_bytes, newfl->client.oh.n_bytes, lockowner->n_len);

	newfl->filehandle.n_bytes = malloc(filehandle->n_len);
	if (newfl->filehandle.n_bytes == NULL) {
		free(newfl->client.oh.n_bytes);
		free(newfl);
		return NULL;
	}
	newfl->filehandle.n_len = filehandle->n_len;
	bcopy(filehandle->n_bytes, newfl->filehandle.n_bytes, filehandle->n_len);

	newfl->addr = malloc(addr->sa_len);
	if (newfl->addr == NULL) {
		free(newfl->client.oh.n_bytes);
		free(newfl->filehandle.n_bytes);
		free(newfl);
		return NULL;
	}
	memcpy(newfl->addr, addr, addr->sa_len);

	return newfl;
}

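/*
 * A minimal sketch (deliberately not compiled in) of the intended
 * lifecycle of these helpers, assuming an incoming NLMv4 lock request
 * has already been decoded into "args". The function and variable
 * names here are hypothetical; the real entry points live further down
 * and in the RPC dispatch code.
 */
#if 0
static enum nlm_stats
example_lock_request(struct nlm4_lockargs *args, struct sockaddr *saddr)
{
	struct file_lock *fl;
	enum nlm_stats res;

	fl = allocate_file_lock(&args->alock.oh, &args->alock.fh,
	    saddr, args->alock.caller_name);
	if (fl == NULL)
		return (nlm4_denied_nolocks);
	fill_file_lock(fl, args->exclusive, args->alock.svid,
	    args->alock.l_offset, args->alock.l_len,
	    args->state, 0, LOCK_V4, args->block);
	res = do_lock(fl);
	if (res != nlm4_granted && res != nlm4_blocked)
		deallocate_file_lock(fl);	/* fl was not kept on any list */
	return (res);
}
#endif
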
/*
 * fill_file_lock: Force creation of a valid file lock
 */
void
fill_file_lock(struct file_lock *fl,
    const bool_t exclusive, const int32_t svid,
    const u_int64_t offset, const u_int64_t len,
    const int state, const int status, const int flags, const int blocking)
{
	fl->client.exclusive = exclusive;
	fl->client.svid = svid;
	fl->client.l_offset = offset;
	fl->client.l_len = len;

	fl->nsm_status = state;
	fl->status = status;
	fl->flags = flags;
	fl->blocking = blocking;
}

/*
 * deallocate_file_lock: Free all storage associated with a file lock
 */
void
deallocate_file_lock(struct file_lock *fl)
{
	free(fl->addr);
	free(fl->client.oh.n_bytes);
	free(fl->filehandle.n_bytes);
	free(fl);
}

/*
 * regions_overlap(): This function examines the two provided regions for
 * overlap.
 */
int
regions_overlap(start1, len1, start2, len2)
	const u_int64_t start1, len1, start2, len2;
{
	u_int64_t d1,d2,d3,d4;
	enum split_status result;

	debuglog("Entering region overlap with vals: %llu:%llu--%llu:%llu\n",
	    start1, len1, start2, len2);

	result = region_compare(start1, len1, start2, len2,
	    &d1, &d2, &d3, &d4);

	debuglog("Exiting region overlap with val: %d\n",result);

	if (result == SPL_DISJOINT) {
		return 0;
	} else {
		return 1;
	}
}

/*
 * region_compare(): Examine lock regions and split appropriately
 *
 * XXX: Fix 64 bit overflow problems
 * XXX: Check to make sure I got *ALL* the cases.
 * XXX: This DESPERATELY needs a regression test.
 */
enum split_status
region_compare(starte, lene, startu, lenu,
    start1, len1, start2, len2)
	const u_int64_t starte, lene, startu, lenu;
	u_int64_t *start1, *len1, *start2, *len2;
{
	/*
	 * Please pay attention to the sequential exclusions
	 * of the if statements!!!
	 */
	enum LFLAGS lflags;
	enum RFLAGS rflags;
	enum split_status retval;

	retval = SPL_DISJOINT;

	if (lene == 0 && lenu == 0) {
		/* Examine left edge of locker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
		} else {
			lflags = LEDGE_INSIDE;
		}

		rflags = REDGE_RBOUNDARY; /* Both are infinite */

		if (lflags == LEDGE_INSIDE) {
			*start1 = starte;
			*len1 = startu - starte;
		}

		if (lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) {
			retval = SPL_CONTAINED;
		} else {
			retval = SPL_LOCK1;
		}
	} else if (lene == 0 && lenu != 0) {
		/* Established lock is infinite */
		/* Examine left edge of unlocker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
		} else if (startu > starte) {
			lflags = LEDGE_INSIDE;
		}

		/* Examine right edge of unlocker */
		if (startu + lenu < starte) {
			/* Right edge of unlocker left of established lock */
			rflags = REDGE_LEFT;
			return SPL_DISJOINT;
		} else if (startu + lenu == starte) {
			/* Right edge of unlocker on start of established lock */
			rflags = REDGE_LBOUNDARY;
			return SPL_DISJOINT;
		} else { /* Infinity is right of finity */
			/* Right edge of unlocker inside established lock */
			rflags = REDGE_INSIDE;
		}

		if (lflags == LEDGE_INSIDE) {
			*start1 = starte;
			*len1 = startu - starte;
			retval |= SPL_LOCK1;
		}

		if (rflags == REDGE_INSIDE) {
			/* Create right lock */
			*start2 = startu+lenu;
			*len2 = 0;
			retval |= SPL_LOCK2;
		}
	} else if (lene != 0 && lenu == 0) {
		/* Unlocker is infinite */
		/* Examine left edge of unlocker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
			retval = SPL_CONTAINED;
			return retval;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
			retval = SPL_CONTAINED;
			return retval;
		} else if ((startu > starte) && (startu < starte + lene - 1)) {
			lflags = LEDGE_INSIDE;
		} else if (startu == starte + lene - 1) {
			lflags = LEDGE_RBOUNDARY;
		} else { /* startu > starte + lene -1 */
			lflags = LEDGE_RIGHT;
			return SPL_DISJOINT;
		}

		rflags = REDGE_RIGHT; /* Infinity is right of finity */

		if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
			*start1 = starte;
			*len1 = startu - starte;
			retval |= SPL_LOCK1;
			return retval;
		}

	} else {
		/* Both locks are finite */

		/* Examine left edge of unlocker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
		} else if ((startu > starte) && (startu < starte + lene - 1)) {
			lflags = LEDGE_INSIDE;
		} else if (startu == starte + lene - 1) {
			lflags = LEDGE_RBOUNDARY;
		} else { /* startu > starte + lene -1 */
			lflags = LEDGE_RIGHT;
			return SPL_DISJOINT;
		}

		/* Examine right edge of unlocker */
		if (startu + lenu < starte) {
			/* Right edge of unlocker left of established lock */
			rflags = REDGE_LEFT;
			return SPL_DISJOINT;
		} else if (startu + lenu == starte) {
			/* Right edge of unlocker on start of established lock */
			rflags = REDGE_LBOUNDARY;
			return SPL_DISJOINT;
		} else if (startu + lenu < starte + lene) {
			/* Right edge of unlocker inside established lock */
			rflags = REDGE_INSIDE;
		} else if (startu + lenu == starte + lene) {
			/* Right edge of unlocker on right edge of established lock */
			rflags = REDGE_RBOUNDARY;
		} else { /* startu + lenu > starte + lene */
			/* Right edge of unlocker is right of established lock */
			rflags = REDGE_RIGHT;
		}

		if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
			/* Create left lock */
			*start1 = starte;
			*len1 = (startu - starte);
			retval |= SPL_LOCK1;
		}

		if (rflags == REDGE_INSIDE) {
			/* Create right lock */
			*start2 = startu+lenu;
			*len2 = starte+lene-(startu+lenu);
			retval |= SPL_LOCK2;
		}

		if ((lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) &&
		    (rflags == REDGE_RBOUNDARY || rflags == REDGE_RIGHT)) {
			retval = SPL_CONTAINED;
		}
	}

	return retval;
}

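/*
 * A sketch toward the regression test the XXX comment above asks for
 * (not compiled in; it would also need <assert.h>). It encodes the
 * three interesting shapes: a hole punched in the middle, a fully
 * swallowed region, and a disjoint pair. Remember that a len of 0
 * means "to end of file".
 */
#if 0
static void
region_compare_selftest(void)
{
	u_int64_t s1, l1, s2, l2;

	/* Unlocking bytes 40..59 of 0..99 leaves a left and a right lock. */
	assert(region_compare(0, 100, 40, 20, &s1, &l1, &s2, &l2) ==
	    (SPL_LOCK1 | SPL_LOCK2));
	assert(s1 == 0 && l1 == 40);	/* left remainder: bytes 0..39 */
	assert(s2 == 60 && l2 == 40);	/* right remainder: bytes 60..99 */

	/* An unlock covering the whole established region swallows it. */
	assert(region_compare(40, 20, 0, 100, &s1, &l1, &s2, &l2) ==
	    SPL_CONTAINED);

	/* Non-overlapping regions leave the established lock alone. */
	assert(region_compare(0, 10, 50, 10, &s1, &l1, &s2, &l2) ==
	    SPL_DISJOINT);
}
#endif
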
/*
 * same_netobj: Compares the appropriate bits of a netobj for identity
 */
int
same_netobj(const netobj *n0, const netobj *n1)
{
	int retval;

	retval = 0;

	debuglog("Entering netobj identity check\n");

	if (n0->n_len == n1->n_len) {
		debuglog("Preliminary length check passed\n");
		retval = !bcmp(n0->n_bytes, n1->n_bytes, n0->n_len);
		debuglog("netobj %smatch\n", retval ? "" : "mis");
	}

	return (retval);
}

/*
 * same_filelock_identity: Compares the appropriate bits of a file_lock
 */
int
same_filelock_identity(fl0, fl1)
	const struct file_lock *fl0, *fl1;
{
	int retval;

	retval = 0;

	debuglog("Checking filelock identity\n");

	/*
	 * Check process ids and host information.
	 */
	retval = (fl0->client.svid == fl1->client.svid &&
	    same_netobj(&(fl0->client.oh), &(fl1->client.oh)));

	debuglog("Exiting checking filelock identity: retval: %d\n",retval);

	return (retval);
}

/*
 * Below here are routines associated with manipulating the NFS
 * lock list.
 */

/*
 * get_lock_matching_unlock: Return a lock which matches the given unlock lock
 * or NULL otherwise
 * XXX: It is a shame that this duplicates so much code from test_nfslock.
 */
struct file_lock *
get_lock_matching_unlock(const struct file_lock *fl)
{
	struct file_lock *ifl; /* Iterator */

	debuglog("Entering lock_matching_unlock\n");
	debuglog("********Dump of fl*****************\n");
	dump_filelock(fl);

	LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
		debuglog("Pointer to file lock: %p\n",ifl);

		debuglog("****Dump of ifl****\n");
		dump_filelock(ifl);
		debuglog("*******************\n");

		/*
		 * XXX: It is conceivable that someone could use the NLM RPC
		 * system to directly access filehandles. This may be a
		 * security hazard as the filehandle code may bypass normal
		 * file access controls
		 */
		if (fl->filehandle.n_len != ifl->filehandle.n_len)
			continue;
		if (bcmp(fl->filehandle.n_bytes, ifl->filehandle.n_bytes,
		    fl->filehandle.n_len))
			continue;

		debuglog("matching_unlock: Filehandles match, "
		    "checking regions\n");

		/* Filehandles match, check for region overlap */
		if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
		    ifl->client.l_offset, ifl->client.l_len))
			continue;

		debuglog("matching_unlock: Region overlap"
		    " found %llu : %llu -- %llu : %llu\n",
		    fl->client.l_offset,fl->client.l_len,
		    ifl->client.l_offset,ifl->client.l_len);

		/* Regions overlap, check the identity */
		if (!same_filelock_identity(fl,ifl))
			continue;

		debuglog("matching_unlock: Duplicate lock id. Granting\n");
		return (ifl);
	}

	debuglog("Exiting lock_matching_unlock\n");

	return (NULL);
}

/*
 * test_nfslock: check for NFS lock in lock list
 *
 * This routine makes the following assumptions:
 *    1) Nothing will adjust the lock list during a lookup
 *
 * This routine has an interesting quirk which bit me hard.
 * The conflicting_fl is the pointer to the conflicting lock.
 * However, to modify the "*pointer* to the conflicting lock" rather
 * than the "conflicting lock itself" one must pass in a "pointer to
 * the pointer of the conflicting lock". Gross.
 */

enum nfslock_status
test_nfslock(const struct file_lock *fl, struct file_lock **conflicting_fl)
{
	struct file_lock *ifl; /* Iterator */
	enum nfslock_status retval;

	debuglog("Entering test_nfslock\n");

	retval = NFS_GRANTED;
	(*conflicting_fl) = NULL;

	debuglog("Entering lock search loop\n");

	debuglog("***********************************\n");
	debuglog("Dumping match filelock\n");
	debuglog("***********************************\n");
	dump_filelock(fl);
	debuglog("***********************************\n");

	LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
		if (retval == NFS_DENIED)
			break;

		debuglog("Top of lock loop\n");
		debuglog("Pointer to file lock: %p\n",ifl);

		debuglog("***********************************\n");
		debuglog("Dumping test filelock\n");
		debuglog("***********************************\n");
		dump_filelock(ifl);
		debuglog("***********************************\n");

		/*
		 * XXX: It is conceivable that someone could use the NLM RPC
		 * system to directly access filehandles. This may be a
		 * security hazard as the filehandle code may bypass normal
		 * file access controls
		 */
		if (fl->filehandle.n_len != ifl->filehandle.n_len)
			continue;
		if (bcmp(fl->filehandle.n_bytes, ifl->filehandle.n_bytes,
		    fl->filehandle.n_len))
			continue;

		debuglog("test_nfslock: filehandle match found\n");

		/* Filehandles match, check for region overlap */
		if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
		    ifl->client.l_offset, ifl->client.l_len))
			continue;

		debuglog("test_nfslock: Region overlap found"
		    " %llu : %llu -- %llu : %llu\n",
		    fl->client.l_offset,fl->client.l_len,
		    ifl->client.l_offset,ifl->client.l_len);

		/* Regions overlap, check the exclusivity */
		if (!(fl->client.exclusive || ifl->client.exclusive))
			continue;

		debuglog("test_nfslock: Exclusivity failure: %d %d\n",
		    fl->client.exclusive,
		    ifl->client.exclusive);

		if (same_filelock_identity(fl,ifl)) {
			debuglog("test_nfslock: Duplicate id. Granting\n");
			(*conflicting_fl) = ifl;
			retval = NFS_GRANTED_DUPLICATE;
		} else {
			/* locking attempt fails */
			debuglog("test_nfslock: Lock attempt failed\n");
			debuglog("Desired lock\n");
			dump_filelock(fl);
			debuglog("Conflicting lock\n");
			dump_filelock(ifl);
			(*conflicting_fl) = ifl;
			retval = NFS_DENIED;
		}
	}

	debuglog("Dumping file locks\n");
	debuglog("Exiting test_nfslock\n");

	return (retval);
}

/*
 * lock_nfslock: attempt to create a lock in the NFS lock list
 *
 * This routine tests whether the lock will be granted and then adds
 * the entry to the lock list if so.
 *
 * Argument fl gets modified as its list housekeeping entries get modified
 * upon insertion into the NFS lock list
 *
 * This routine makes several assumptions:
 *    1) It is perfectly happy to grant a duplicate lock from the same pid.
 *       While this seems to be intuitively wrong, it is required for proper
 *       POSIX semantics during unlock. It is absolutely imperative to not
 *       unlock the main lock before the two child locks are established. Thus,
 *       one has to be able to create duplicate locks over an existing lock
 *    2) It currently accepts duplicate locks from the same id,pid
 */

enum nfslock_status
lock_nfslock(struct file_lock *fl)
{
	enum nfslock_status retval;
	struct file_lock *dummy_fl;

	dummy_fl = NULL;

	debuglog("Entering lock_nfslock...\n");

	retval = test_nfslock(fl,&dummy_fl);

	if (retval == NFS_GRANTED || retval == NFS_GRANTED_DUPLICATE) {
		debuglog("Inserting lock...\n");
		dump_filelock(fl);
		LIST_INSERT_HEAD(&nfslocklist_head, fl, nfslocklist);
	}

	debuglog("Exiting lock_nfslock...\n");

	return (retval);
}

/*
 * delete_nfslock: delete an NFS lock list entry
 *
 * This routine is used to delete a lock out of the NFS lock list
 * without regard to status, underlying locks, regions or anything else
 *
 * Note that this routine *does not deallocate memory* of the lock.
 * It just disconnects it from the list. The lock can then be used
 * by other routines without fear of trashing the list.
 */

enum nfslock_status
delete_nfslock(struct file_lock *fl)
{

	LIST_REMOVE(fl, nfslocklist);

	return (NFS_GRANTED);
}

enum split_status
split_nfslock(exist_lock, unlock_lock, left_lock, right_lock)
	const struct file_lock *exist_lock, *unlock_lock;
	struct file_lock **left_lock, **right_lock;
{
	u_int64_t start1, len1, start2, len2;
	enum split_status spstatus;

	spstatus = region_compare(exist_lock->client.l_offset, exist_lock->client.l_len,
	    unlock_lock->client.l_offset, unlock_lock->client.l_len,
	    &start1, &len1, &start2, &len2);

	if ((spstatus & SPL_LOCK1) != 0) {
		*left_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->filehandle, exist_lock->addr, exist_lock->client_name);
		if (*left_lock == NULL) {
			debuglog("Unable to allocate resource for split 1\n");
			return SPL_RESERR;
		}

		fill_file_lock(*left_lock,
		    exist_lock->client.exclusive, exist_lock->client.svid,
		    start1, len1,
		    exist_lock->nsm_status,
		    exist_lock->status, exist_lock->flags, exist_lock->blocking);
	}

	if ((spstatus & SPL_LOCK2) != 0) {
		*right_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->filehandle, exist_lock->addr, exist_lock->client_name);
		if (*right_lock == NULL) {
986 debuglog("Unable to allocate resource for split 1\n");
			if (*left_lock != NULL) {
				deallocate_file_lock(*left_lock);
			}
			return SPL_RESERR;
		}

		fill_file_lock(*right_lock,
		    exist_lock->client.exclusive, exist_lock->client.svid,
		    start2, len2,
		    exist_lock->nsm_status,
		    exist_lock->status, exist_lock->flags, exist_lock->blocking);
	}

	return spstatus;
}

enum nfslock_status
unlock_nfslock(fl, released_lock, left_lock, right_lock)
	const struct file_lock *fl;
	struct file_lock **released_lock;
	struct file_lock **left_lock;
	struct file_lock **right_lock;
{
	struct file_lock *mfl; /* Matching file lock */
	enum nfslock_status retval;
	enum split_status spstatus;

	debuglog("Entering unlock_nfslock\n");

	*released_lock = NULL;
	*left_lock = NULL;
	*right_lock = NULL;

	retval = NFS_DENIED_NOLOCK;

	debuglog("Attempting to match lock...\n");
	mfl = get_lock_matching_unlock(fl);

	if (mfl != NULL) {
		debuglog("Unlock matched. Querying for split\n");

		spstatus = split_nfslock(mfl, fl, left_lock, right_lock);

		debuglog("Split returned %d %p %p %p %p\n",spstatus,mfl,fl,*left_lock,*right_lock);
1031 debuglog("********Split dumps********");
1032 dump_filelock(mfl);
1033 dump_filelock(fl);
1034 dump_filelock(*left_lock);
1035 dump_filelock(*right_lock);
1036 debuglog("********End Split dumps********");

		if (spstatus == SPL_RESERR) {
			if (*left_lock != NULL) {
				deallocate_file_lock(*left_lock);
				*left_lock = NULL;
			}

			if (*right_lock != NULL) {
				deallocate_file_lock(*right_lock);
				*right_lock = NULL;
			}

			return NFS_RESERR;
		}

		/* Insert new locks from split if required */
		if (*left_lock != NULL) {
			debuglog("Split left activated\n");
			LIST_INSERT_HEAD(&nfslocklist_head, *left_lock, nfslocklist);
		}

		if (*right_lock != NULL) {
			debuglog("Split right activated\n");
			LIST_INSERT_HEAD(&nfslocklist_head, *right_lock, nfslocklist);
		}

		/* Unlock the lock since it matches identity */
		LIST_REMOVE(mfl, nfslocklist);
		*released_lock = mfl;
		retval = NFS_GRANTED;
	}

	debuglog("Exiting unlock_nfslock\n");

	return retval;
}

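/*
 * Worked example of the ordering above: if a host holds bytes 0..99 and
 * unlocks 40..59, split_nfslock() builds the remainders 0..39 and
 * 60..99, both are inserted into the lock list first, and only then is
 * the original 0..99 lock removed and handed back via released_lock.
 * At no instant is a byte the host still owns left uncovered, which is
 * the duplicate-lock tolerance that lock_nfslock()'s comment insists on.
 */
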
/*
 * Below here are the routines for manipulating the file lock directly
 * on the disk hardware itself
 */
enum hwlock_status
lock_hwlock(struct file_lock *fl)
{
	struct monfile *imf,*nmf;
	int lflags, flerror;

	/* Scan to see if filehandle already present */
	LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
		if ((fl->filehandle.n_len == imf->filehandle.n_len) &&
		    (bcmp(fl->filehandle.n_bytes, imf->filehandle.n_bytes,
		    fl->filehandle.n_len) == 0)) {
			/* imf is the correct filehandle */
			break;
		}
	}

	/*
	 * Filehandle already exists (we control the file)
	 * *AND* NFS has already cleared the lock for availability
	 * Grant it and bump the refcount.
	 */
	if (imf != NULL) {
		++(imf->refcount);
		return (HW_GRANTED);
	}

	/* No filehandle found, create and go */
	nmf = malloc(sizeof(struct monfile));
	if (nmf == NULL) {
		debuglog("hwlock resource allocation failure\n");
		return (HW_RESERR);
	}
	nmf->filehandle.n_bytes = malloc(fl->filehandle.n_len);
	if (nmf->filehandle.n_bytes == NULL) {
		debuglog("hwlock resource allocation failure\n");
		free(nmf);
		return (HW_RESERR);
	}

	/* XXX: Is O_RDWR always the correct mode? */
	nmf->fd = fhopen((fhandle_t *)fl->filehandle.n_bytes, O_RDWR);
	if (nmf->fd < 0) {
		debuglog("fhopen failed (from %16s): %32s\n",
		    fl->client_name, strerror(errno));
		free(nmf->filehandle.n_bytes);
		free(nmf);
		switch (errno) {
		case ESTALE:
			return (HW_STALEFH);
		case EROFS:
			return (HW_READONLY);
		default:
			return (HW_RESERR);
		}
	}

	/* File opened correctly, fill the monitor struct */
	nmf->filehandle.n_len = fl->filehandle.n_len;
	bcopy(fl->filehandle.n_bytes, nmf->filehandle.n_bytes, fl->filehandle.n_len);
	nmf->refcount = 1;
	nmf->exclusive = fl->client.exclusive;

	lflags = (nmf->exclusive == 1) ?
	    (LOCK_EX | LOCK_NB) : (LOCK_SH | LOCK_NB);

	flerror = flock(nmf->fd, lflags);

	if (flerror != 0) {
		debuglog("flock failed (from %16s): %32s\n",
		    fl->client_name, strerror(errno));
		close(nmf->fd);
		free(nmf->filehandle.n_bytes);
		free(nmf);
		switch (errno) {
		case EAGAIN:
			return (HW_DENIED);
		case ESTALE:
			return (HW_STALEFH);
		case EROFS:
			return (HW_READONLY);
		default:
			return (HW_RESERR);
		}
	}

	/* File opened and locked */
	LIST_INSERT_HEAD(&monfilelist_head, nmf, monfilelist);

	debuglog("flock succeeded (from %16s)\n", fl->client_name);
	return (HW_GRANTED);
}

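/*
 * Bookkeeping illustration: two shared NFS locks against the same
 * filehandle become two entries on nfslocklist_head but share a single
 * monfile carrying refcount == 2, one open fd, and one LOCK_SH flock on
 * the underlying file; unlock_hwlock() below closes the fd only once
 * the refcount reaches zero.
 */
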
enum hwlock_status
unlock_hwlock(const struct file_lock *fl)
{
	struct monfile *imf;

	debuglog("Entering unlock_hwlock\n");
1177 debuglog("Entering loop interation\n");

	/* Scan to see if filehandle already present */
	LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
		if ((fl->filehandle.n_len == imf->filehandle.n_len) &&
		    (bcmp(fl->filehandle.n_bytes, imf->filehandle.n_bytes,
		    fl->filehandle.n_len) == 0)) {
			/* imf is the correct filehandle */
			break;
		}
	}

	debuglog("Completed iteration. Proceeding\n");

	if (imf == NULL) {
		/* No lock found */
		debuglog("Exiting unlock_hwlock (HW_DENIED_NOLOCK)\n");
		return (HW_DENIED_NOLOCK);
	}

	/* Lock found */
	--imf->refcount;

	if (imf->refcount < 0) {
		debuglog("Negative hardware reference count\n");
	}

	if (imf->refcount <= 0) {
		close(imf->fd);
		LIST_REMOVE(imf, monfilelist);
		free(imf->filehandle.n_bytes);
		free(imf);
	}
	debuglog("Exiting unlock_hwlock (HW_GRANTED)\n");
	return (HW_GRANTED);
}

enum hwlock_status
test_hwlock(fl, conflicting_fl)
	const struct file_lock *fl __unused;
	struct file_lock **conflicting_fl __unused;
{

	/*
	 * XXX: lock tests on hardware are not required until
	 * true partial file testing is done on the underlying file
	 */
	return (HW_RESERR);
}



/*
 * Below here are routines for manipulating blocked lock requests
 * They should only be called from the XXX_partialfilelock routines
 * if at all possible
 */

void
add_blockingfilelock(struct file_lock *fl)
{
	struct file_lock *ifl, *nfl;

	debuglog("Entering add_blockingfilelock\n");

	/*
	 * Check for a duplicate lock request.
	 * If found, deallocate the older request.
	 */
	ifl = LIST_FIRST(&blockedlocklist_head);
	for (; ifl != NULL; ifl = nfl) {
		debuglog("Pointer to file lock: %p\n",ifl);
		debuglog("****Dump of ifl****\n");
		dump_filelock(ifl);
		debuglog("*******************\n");

		nfl = LIST_NEXT(ifl, nfslocklist);

		if (fl->filehandle.n_len != ifl->filehandle.n_len)
			continue;
		if (bcmp(fl->filehandle.n_bytes, ifl->filehandle.n_bytes,
		    fl->filehandle.n_len))
			continue;

		/* Filehandles match, check region */
		if ((fl->client.l_offset != ifl->client.l_offset) ||
		    (fl->client.l_len != ifl->client.l_len))
			continue;

		/* Regions match, check the identity */
		if (!same_filelock_identity(fl,ifl))
			continue;

		debuglog("add_blockingfilelock: removing duplicate lock request.\n");
		remove_blockingfilelock(ifl);
		deallocate_file_lock(ifl);
		break;
	}

	/*
	 * Clear the blocking flag so that it can be reused without
	 * adding it to the blocking queue a second time
	 */

	fl->blocking = 0;
	LIST_INSERT_HEAD(&blockedlocklist_head, fl, nfslocklist);

	debuglog("Exiting add_blockingfilelock\n");
}

void
remove_blockingfilelock(struct file_lock *fl)
{

	debuglog("Entering remove_blockingfilelock\n");

	LIST_REMOVE(fl, nfslocklist);

	debuglog("Exiting remove_blockingfilelock\n");
}

void
clear_blockingfilelock(const char *hostname)
{
	struct file_lock *ifl,*nfl;

	/*
	 * Normally, LIST_FOREACH is called for, but since
	 * the current element *is* the iterator, deleting it
	 * would mess up the iteration. Thus, a next element
	 * must be used explicitly
	 */

	ifl = LIST_FIRST(&blockedlocklist_head);

	while (ifl != NULL) {
		nfl = LIST_NEXT(ifl, nfslocklist);

		if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
			remove_blockingfilelock(ifl);
			deallocate_file_lock(ifl);
		}

		ifl = nfl;
	}
}

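/*
 * The first/next walk above is the pattern that <sys/queue.h>
 * encapsulates as LIST_FOREACH_SAFE on systems whose queue.h provides
 * it; an equivalent sketch, assuming the macro is available:
 *
 *	struct file_lock *ifl, *nfl;
 *
 *	LIST_FOREACH_SAFE(ifl, &blockedlocklist_head, nfslocklist, nfl) {
 *		if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
 *			remove_blockingfilelock(ifl);
 *			deallocate_file_lock(ifl);
 *		}
 *	}
 */
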
int need_retry_blocked_locks = 0; /* need to call retry_blockingfilelocklist() */

void
retry_blockingfilelocklist(netobj *fh)
{
	/*
	 * If fh is given, then retry just the locks with the
	 * same filehandle in the blocked list.
	 * Otherwise, simply retry all locks in the blocked list.
	 */
	struct file_lock *ifl, *nfl, *pfl; /* Iterator */
	enum partialfilelock_status pflstatus;
	int rv;

	debuglog("Entering retry_blockingfilelocklist\n");

	need_retry_blocked_locks = 0;

	pfl = NULL;
	ifl = LIST_FIRST(&blockedlocklist_head);
	debuglog("Iterator choice %p\n",ifl);

	while (ifl != NULL) {
		/*
		 * SUBTLE BUG: The next element must be worked out before the
		 * current element has been moved
		 */
		nfl = LIST_NEXT(ifl, nfslocklist);
		debuglog("Iterator choice %p\n",ifl);
		debuglog("Prev iterator choice %p\n",pfl);
		debuglog("Next iterator choice %p\n",nfl);

		/* if given a filehandle, only retry locks for the same filehandle */
		if (fh && !same_netobj(fh, &ifl->filehandle)) {
			ifl = nfl;
			continue;
		}

		/*
		 * SUBTLE BUG: The file_lock must be removed from the
		 * old list so that its list pointers get disconnected
		 * before being allowed to participate in the new list
		 * which will automatically add it in if necessary.
		 */

		LIST_REMOVE(ifl, nfslocklist);
		pflstatus = lock_partialfilelock(ifl);

		if (pflstatus == PFL_GRANTED || pflstatus == PFL_GRANTED_DUPLICATE) {
			debuglog("Granted blocked lock\n");
			/* lock granted and is now being used */
			rv = send_granted(ifl, 0);
			if (rv) {
				/*
				 * Uh oh... the NLM_GRANTED message failed.
				 * About the only thing we can do is drop the lock.
				 * Note: this could be bad if the error was only
				 * transient. Hopefully, if the client is still
				 * waiting for the lock, they will resend the request.
				 */
				do_unlock(ifl);
				/* ifl is NO LONGER VALID AT THIS POINT */
			}
		} else {
			/* Reinsert lock back into same place in blocked list */
			debuglog("Replacing blocked lock\n");
			if (pfl != NULL)
				LIST_INSERT_AFTER(pfl, ifl, nfslocklist);
			else
				/* ifl is the only elem. in the list */
				LIST_INSERT_HEAD(&blockedlocklist_head, ifl, nfslocklist);
		}

		if (pflstatus == PFL_GRANTED || pflstatus == PFL_GRANTED_DUPLICATE) {
			/* If ifl was permanently removed from the list, (e.g the */
			/* lock was granted), pfl should remain where it's at. */
		} else {
			/* If ifl was left in the list, (e.g it was reinserted back */
			/* in place), pfl should simply be moved forward to be ifl */
			pfl = ifl;
		}
		/* Valid increment behavior regardless of state of ifl */
		ifl = nfl;
	}

	debuglog("Exiting retry_blockingfilelocklist\n");
}

/*
 * Below here are routines associated with manipulating all
 * aspects of the partial file locking system (list, hardware, etc.)
 */

/*
 * Please note that lock monitoring must be done at this level which
 * keeps track of *individual* lock requests on lock and unlock
 *
 * XXX: Split unlocking is going to make the unlock code miserable
 */

/*
 * lock_partialfilelock:
 *
 * Argument fl gets modified as its list housekeeping entries get modified
 * upon insertion into the NFS lock list
 *
 * This routine makes several assumptions:
 * 1) It (will) pass locks through to flock to lock the entire underlying file
 *    and then parcel out NFS locks if it gets control of the file.
 *    This matches the old rpc.lockd file semantics (except where it
 *    is now more correct). It is the safe solution, but will cause
 *    overly restrictive blocking if someone is trying to use the
 *    underlying files without using NFS. This appears to be an
 *    acceptable tradeoff since most people use standalone NFS servers.
 *    XXX: The right solution is probably kevent combined with fcntl
 *
 * 2) Nothing modifies the lock lists between testing and granting
 *    I have no idea whether this is a useful assumption or not
 */

enum partialfilelock_status
lock_partialfilelock(struct file_lock *fl)
{
	enum partialfilelock_status retval;
	enum nfslock_status lnlstatus;
	enum hwlock_status hwstatus;

	debuglog("Entering lock_partialfilelock\n");

	retval = PFL_DENIED;

	/*
	 * Execute the NFS lock first, if possible, as it is significantly
	 * easier and less expensive to undo than the filesystem lock
	 */

	lnlstatus = lock_nfslock(fl);

	switch (lnlstatus) {
	case NFS_GRANTED:
	case NFS_GRANTED_DUPLICATE:
		/*
		 * At this point, the NFS lock is allocated and active.
		 * Remember to clean it up if the hardware lock fails
		 */
		hwstatus = lock_hwlock(fl);

		switch (hwstatus) {
		case HW_GRANTED:
		case HW_GRANTED_DUPLICATE:
			debuglog("HW GRANTED\n");
			/*
			 * XXX: Fixme: Check hwstatus for duplicate when
			 * true partial file locking and accounting is
			 * done on the hardware
			 */
			if (lnlstatus == NFS_GRANTED_DUPLICATE) {
				retval = PFL_GRANTED_DUPLICATE;
			} else {
				retval = PFL_GRANTED;
			}
			if (fl->flags & LOCK_MON)
				monitor_lock_host_by_name(fl->client_name);
			break;
		case HW_RESERR:
			debuglog("HW RESERR\n");
			retval = PFL_HWRESERR;
			break;
		case HW_DENIED:
			debuglog("HW DENIED\n");
			retval = PFL_HWDENIED;
			break;
		default:
			debuglog("Unmatched hwstatus %d\n",hwstatus);
			break;
		}

		if (retval != PFL_GRANTED &&
		    retval != PFL_GRANTED_DUPLICATE) {
			/* Clean up the NFS lock */
			debuglog("Deleting trial NFS lock\n");
			delete_nfslock(fl);
		}
		break;
	case NFS_DENIED:
		retval = PFL_NFSDENIED;
		break;
	case NFS_RESERR:
		retval = PFL_NFSRESERR;
		break;
	default:
		debuglog("Unmatched lnlstatus %d\n", lnlstatus);
		retval = PFL_NFSDENIED_NOLOCK;
		break;
	}

	/*
	 * By the time fl reaches here, it is completely free again on
	 * failure. The NFS lock done before attempting the
	 * hardware lock has been backed out
	 */

	if (retval == PFL_NFSDENIED || retval == PFL_HWDENIED) {
		/* One last chance to check the lock */
		if (fl->blocking == 1) {
			if (retval == PFL_NFSDENIED) {
				/* Queue the lock */
				debuglog("BLOCKING LOCK RECEIVED\n");
				retval = PFL_NFSBLOCKED;
				add_blockingfilelock(fl);
				dump_filelock(fl);
			} else {
				/* retval is okay as PFL_HWDENIED */
				debuglog("BLOCKING LOCK DENIED IN HARDWARE\n");
				dump_filelock(fl);
			}
		} else {
			/* Leave retval alone, it's already correct */
			debuglog("Lock denied. Non-blocking failure\n");
			dump_filelock(fl);
		}
	}

	debuglog("Exiting lock_partialfilelock\n");

	return retval;
}

/*
 * unlock_partialfilelock:
 *
 * Given a file_lock, unlock all locks which match.
 *
 * Note that a given lock might have to unlock ITSELF! See
 * clear_partialfilelock for example.
 */

enum partialfilelock_status
unlock_partialfilelock(const struct file_lock *fl)
{
	struct file_lock *lfl,*rfl,*releasedfl,*selffl;
	enum partialfilelock_status retval;
	enum nfslock_status unlstatus;
	enum hwlock_status unlhwstatus, lhwstatus;

	debuglog("Entering unlock_partialfilelock\n");

	selffl = NULL;
	lfl = NULL;
	rfl = NULL;
	releasedfl = NULL;
	retval = PFL_DENIED;

	/*
	 * There are significant overlap and atomicity issues
	 * with partially releasing a lock. For example, releasing
	 * part of an NFS shared lock does *not* always release the
	 * corresponding part of the file since there is only one
	 * rpc.lockd UID but multiple users could be requesting it
	 * from NFS. Also, an unlock request should never allow
	 * another process to gain a lock on the remaining parts.
	 * ie. Always apply the new locks before releasing the
	 * old one
	 */

	/*
	 * Loop is required since multiple little locks
	 * can be allocated and then deallocated with one
	 * big unlock.
	 *
	 * The loop is required to be here so that the nfs &
	 * hw subsystems do not need to communicate with
	 * one another
	 */

	do {
		debuglog("Value of releasedfl: %p\n",releasedfl);
		/* lfl&rfl are created *AND* placed into the NFS lock list if required */
		unlstatus = unlock_nfslock(fl, &releasedfl, &lfl, &rfl);
		debuglog("Value of releasedfl: %p\n",releasedfl);

		/* XXX: This is grungy. It should be refactored to be cleaner */
		if (lfl != NULL) {
			lhwstatus = lock_hwlock(lfl);
			if (lhwstatus != HW_GRANTED &&
			    lhwstatus != HW_GRANTED_DUPLICATE) {
				debuglog("HW duplicate lock failure for left split\n");
			}
			if (lfl->flags & LOCK_MON)
				monitor_lock_host_by_name(lfl->client_name);
		}

		if (rfl != NULL) {
			lhwstatus = lock_hwlock(rfl);
			if (lhwstatus != HW_GRANTED &&
			    lhwstatus != HW_GRANTED_DUPLICATE) {
				debuglog("HW duplicate lock failure for right split\n");
			}
			if (rfl->flags & LOCK_MON)
				monitor_lock_host_by_name(rfl->client_name);
		}

		switch (unlstatus) {
		case NFS_GRANTED:
			/* Attempt to unlock on the hardware */
			debuglog("NFS unlock granted. Attempting hardware unlock\n");

			/* This call *MUST NOT* unlock the two newly allocated locks */
			unlhwstatus = unlock_hwlock(fl);
			debuglog("HW unlock returned with code %d\n",unlhwstatus);

			switch (unlhwstatus) {
			case HW_GRANTED:
				debuglog("HW unlock granted\n");
				if (releasedfl->flags & LOCK_MON)
					unmonitor_lock_host(releasedfl->client_name);
				retval = PFL_GRANTED;
				break;
			case HW_DENIED_NOLOCK:
				/* Huh?!?! This shouldn't happen */
				debuglog("HW unlock denied no lock\n");
				retval = PFL_HWRESERR;
				/* Break out of do-while */
				unlstatus = NFS_RESERR;
				break;
			default:
				debuglog("HW unlock failed\n");
				retval = PFL_HWRESERR;
				/* Break out of do-while */
				unlstatus = NFS_RESERR;
				break;
			}

			debuglog("Exiting with status retval: %d\n",retval);

			// XXX sending granted messages before unlock response
			// XXX causes unlock response to be corrupted?
			// XXX Workaround is to move this to nlm_prot_svc.c
			// XXX after the unlock response is sent.
			// retry_blockingfilelocklist();
			need_retry_blocked_locks = 1;
			break;
		case NFS_DENIED_NOLOCK:
			retval = PFL_GRANTED;
			debuglog("All locks cleaned out\n");
			break;
		default:
			retval = PFL_NFSRESERR;
			debuglog("NFS unlock failure\n");
			dump_filelock(fl);
			break;
		}

		if (releasedfl != NULL) {
			if (fl == releasedfl) {
				/*
				 * XXX: YECHHH!!! Attempt to unlock self succeeded
				 * but we can't deallocate the space yet. This is what
				 * happens when you don't write malloc and free together
				 */
				debuglog("Attempt to unlock self\n");
				selffl = releasedfl;
			} else {
				/*
				 * XXX: this deallocation *still* needs to migrate closer
				 * to the allocation code way up in get_lock or the allocation
				 * code needs to migrate down (violation of "When you write
				 * malloc you must write free")
				 */

				deallocate_file_lock(releasedfl);
			}
		}

	} while (unlstatus == NFS_GRANTED);

	if (selffl != NULL) {
		/*
		 * This statement wipes out the incoming file lock (fl)
		 * in spite of the fact that it is declared const
		 */
		debuglog("WARNING! Destroying incoming lock pointer\n");
		deallocate_file_lock(selffl);
	}

	debuglog("Exiting unlock_partialfilelock\n");

	return retval;
}

/*
 * clear_partialfilelock
 *
 * Normally called in response to statd state number change.
 * Wipe out all locks held by a host. As a bonus, the act of
 * doing so should automatically clear their statd entries and
 * unmonitor the host.
 */

void
clear_partialfilelock(const char *hostname)
{
	struct file_lock *ifl, *nfl;

	/* Clear blocking file lock list */
	clear_blockingfilelock(hostname);

	/* do all required unlocks */
	/* Note that unlock can smash the current pointer to a lock */

	/*
	 * Normally, LIST_FOREACH is called for, but since
	 * the current element *is* the iterator, deleting it
	 * would mess up the iteration. Thus, a next element
	 * must be used explicitly
	 */

	ifl = LIST_FIRST(&nfslocklist_head);

	while (ifl != NULL) {
		nfl = LIST_NEXT(ifl, nfslocklist);

		if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
			/* Unlock destroys ifl out from underneath */
			unlock_partialfilelock(ifl);
			/* ifl is NO LONGER VALID AT THIS POINT */
		}
		ifl = nfl;
	}
}

/*
 * test_partialfilelock:
 */
enum partialfilelock_status
test_partialfilelock(const struct file_lock *fl,
    struct file_lock **conflicting_fl)
{
	enum partialfilelock_status retval;
	enum nfslock_status teststatus;

	debuglog("Entering testpartialfilelock...\n");

	retval = PFL_DENIED;

	teststatus = test_nfslock(fl, conflicting_fl);
	debuglog("test_partialfilelock: teststatus %d\n",teststatus);

	if (teststatus == NFS_GRANTED || teststatus == NFS_GRANTED_DUPLICATE) {
		/* XXX: Add the underlying filesystem locking code */
		retval = (teststatus == NFS_GRANTED) ?
		    PFL_GRANTED : PFL_GRANTED_DUPLICATE;
		debuglog("Dumping locks...\n");
		dump_filelock(fl);
		dump_filelock(*conflicting_fl);
		debuglog("Done dumping locks...\n");
	} else {
		retval = PFL_NFSDENIED;
		debuglog("NFS test denied.\n");
		dump_filelock(fl);
		debuglog("Conflicting.\n");
		dump_filelock(*conflicting_fl);
	}

	debuglog("Exiting testpartialfilelock...\n");

	return retval;
}

/*
 * Below here are routines associated with translating the partial file locking
 * codes into useful codes to send back to the NFS RPC messaging system
 */

/*
 * These routines translate the (relatively) useful return codes back onto
 * the few return codes which the nlm subsystem wishes to transmit
 */

enum nlm_stats
do_test(struct file_lock *fl, struct file_lock **conflicting_fl)
{
	enum partialfilelock_status pfsret;
	enum nlm_stats retval;

	debuglog("Entering do_test...\n");

	pfsret = test_partialfilelock(fl,conflicting_fl);

	switch (pfsret) {
	case PFL_GRANTED:
		debuglog("PFL test lock granted\n");
		dump_filelock(fl);
		dump_filelock(*conflicting_fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
		break;
	case PFL_GRANTED_DUPLICATE:
		debuglog("PFL test lock granted--duplicate id detected\n");
		dump_filelock(fl);
		dump_filelock(*conflicting_fl);
		debuglog("Clearing conflicting_fl for call semantics\n");
		*conflicting_fl = NULL;
		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
		break;
	case PFL_NFSDENIED:
	case PFL_HWDENIED:
		debuglog("PFL test lock denied\n");
		dump_filelock(fl);
		dump_filelock(*conflicting_fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
		break;
	case PFL_NFSRESERR:
	case PFL_HWRESERR:
		debuglog("PFL test lock resource fail\n");
		dump_filelock(fl);
		dump_filelock(*conflicting_fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
		break;
	default:
		debuglog("PFL test lock *FAILED*\n");
		dump_filelock(fl);
		dump_filelock(*conflicting_fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
		break;
	}

	debuglog("Exiting do_test...\n");

	return retval;
}

/*
 * do_lock: Try to acquire a lock
 *
 * This routine makes a distinction between NLM versions. I am pretty
 * convinced that this should be abstracted out and bounced up a level
 */

enum nlm_stats
do_lock(struct file_lock *fl)
{
	enum partialfilelock_status pfsret;
	enum nlm_stats retval;

	debuglog("Entering do_lock...\n");

	pfsret = lock_partialfilelock(fl);

	switch (pfsret) {
	case PFL_GRANTED:
		debuglog("PFL lock granted");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
		break;
	case PFL_GRANTED_DUPLICATE:
		debuglog("PFL lock granted--duplicate id detected");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
		break;
	case PFL_NFSDENIED:
	case PFL_HWDENIED:
		debuglog("PFL_NFS lock denied");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
		break;
	case PFL_NFSBLOCKED:
	case PFL_HWBLOCKED:
		debuglog("PFL_NFS blocking lock denied. Queued.\n");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_blocked : nlm_blocked;
		break;
	case PFL_NFSRESERR:
	case PFL_HWRESERR:
1897                 debuglog("PFL lock resource allocation failure\n");
1898 dump_filelock(fl);
1899 retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
1900 break;
1901 default:
1902 debuglog("PFL lock *FAILED*");
1903 dump_filelock(fl);
1904 retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
1905 break;
1906 }
1907
1908 debuglog("Exiting do_lock...\n");
1909
1910 return retval;
1911 }
1912
1913 enum nlm_stats
1914 do_unlock(struct file_lock *fl)
1915 {
1916 enum partialfilelock_status pfsret;
1917 enum nlm_stats retval;
1918
1919 debuglog("Entering do_unlock...\n");
1920 pfsret = unlock_partialfilelock(fl);
1921
1922 switch (pfsret) {
1923 case PFL_GRANTED:
1924 debuglog("PFL unlock granted");
1925 dump_filelock(fl);
1926 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1927 break;
1928 case PFL_NFSDENIED:
1929 case PFL_HWDENIED:
1930 debuglog("PFL_NFS unlock denied");
1931 dump_filelock(fl);
1932 retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
1933 break;
1934 case PFL_NFSDENIED_NOLOCK:
1935 case PFL_HWDENIED_NOLOCK:
1936 debuglog("PFL_NFS no lock found\n");
1937 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1938 break;
1939 case PFL_NFSRESERR:
1940 case PFL_HWRESERR:
1941 debuglog("PFL unlock resource failure");
1942 dump_filelock(fl);
1943 retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
1944 break;
1945 default:
1946 debuglog("PFL unlock *FAILED*");
1947 dump_filelock(fl);
1948 retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
1949 break;
1950 }
1951
1952 debuglog("Exiting do_unlock...\n");
1953
1954 return retval;
1955 }
1956
1957 /*
1958 * do_clear
1959 *
1960  * This routine is a trivial wrapper because there is no return code to
1961  * translate. It is here for completeness in case someone *does* need
1962  * return codes later. A decent compiler should optimize it away.
1963 */
1964
1965 void
1966 do_clear(const char *hostname)
1967 {
1968
1969 clear_partialfilelock(hostname);
1970 }
1971
1972 /*
1973 * The following routines are all called from the code which the
1974 * RPC layer invokes
1975 */
1976
1977 /*
1978 * testlock(): inform the caller if the requested lock would be granted
1979 *
1980  * returns NULL if the lock would be granted
1981 * returns pointer to a conflicting nlm4_holder if not
1982 */
1983
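/*
 * Hypothetical caller sketch, for illustration only. The real caller
 * lives in the RPC dispatch code outside this file; "args" and "res"
 * stand in for the nlm4_testargs and nlm4_testres the dispatcher holds.
 *
 *	struct nlm4_holder *holder;
 *
 *	holder = testlock(&args->alock, args->exclusive, flags);
 *	res->stat.stat = (holder == NULL) ? nlm4_granted : nlm4_denied;
 */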
1984 struct nlm4_holder *
1985 testlock(struct nlm4_lock *lock, bool_t exclusive, int flags __unused)
1986 {
1987 struct file_lock test_fl, *conflicting_fl;
1988
1989 if (lock->fh.n_len != sizeof(fhandle_t)) {
1990 debuglog("received fhandle size %d, local size %d",
1991 lock->fh.n_len, (int)sizeof(fhandle_t));
1992 return NULL;
1993 }
1994
1995 bzero(&test_fl, sizeof(test_fl));
1996
1997 test_fl.filehandle.n_len = lock->fh.n_len;
1998 test_fl.filehandle.n_bytes = lock->fh.n_bytes;
1999 copy_nlm4_lock_to_nlm4_holder(lock, exclusive, &test_fl.client);
2000
2001 siglock();
2002 do_test(&test_fl, &conflicting_fl);
2003
2004 if (conflicting_fl == NULL) {
2005 debuglog("No conflicting lock found\n");
2006 sigunlock();
2007 return NULL;
2008 } else {
2009 debuglog("Found conflicting lock\n");
2010 dump_filelock(conflicting_fl);
2011 sigunlock();
2012 return (&conflicting_fl->client);
2013 }
2014 }
2015
2016 /*
2017  * getlock: try to acquire the lock.
2018  * If the file is already locked and the request may block, the lock is
2019  * put on the blocked list with status LKST_WAITING and processed later,
2020  * when the conflicting lock is released. (lockd no longer forks children
2021  * to service blocking locks; see the comment above siglock().)
2022 */
2023
2024 enum nlm_stats
2025 getlock(nlm4_lockargs *lckarg, struct svc_req *rqstp, const int flags)
2026 {
2027 struct file_lock *newfl;
2028 enum nlm_stats retval;
2029
2030 debuglog("Entering getlock...\n");
2031
2032 if (grace_expired == 0 && lckarg->reclaim == 0)
2033 return (flags & LOCK_V4) ?
2034 nlm4_denied_grace_period : nlm_denied_grace_period;
2035
2036 if (lckarg->alock.fh.n_len != sizeof(fhandle_t)) {
2037 debuglog("received fhandle size %d, local size %d",
2038 lckarg->alock.fh.n_len, (int)sizeof(fhandle_t));
2039 return (flags & LOCK_V4) ? nlm4_failed : nlm_denied;
2040 }
2041
2042 /* allocate new file_lock for this request */
2043 newfl = allocate_file_lock(&lckarg->alock.oh, &lckarg->alock.fh,
2044 (struct sockaddr *)svc_getcaller(rqstp->rq_xprt),
2045 lckarg->alock.caller_name);
2046 if (newfl == NULL) {
2047 syslog(LOG_NOTICE, "lock allocate failed: %s", strerror(errno));
2048 /* failed */
2049 return (flags & LOCK_V4) ?
2050 nlm4_denied_nolocks : nlm_denied_nolocks;
2051 }
2052
2053 fill_file_lock(newfl,
2054 lckarg->exclusive, lckarg->alock.svid, lckarg->alock.l_offset,
2055 lckarg->alock.l_len,
2056 lckarg->state, 0, flags, lckarg->block);
2057
2058 /*
2059 * newfl is now fully constructed and deallocate_file_lock
2060 * can now be used to delete it
2061 */
2062
2063 siglock();
2064 debuglog("Pointer to new lock is %p\n",newfl);
2065
2066 retval = do_lock(newfl);
2067
2068 debuglog("Pointer to new lock is %p\n",newfl);
2069 sigunlock();
2070
2071 switch (retval)
2072 {
2073 case nlm4_granted:
2074 /* case nlm_granted: is the same as nlm4_granted */
2075 /* do_mon(lckarg->alock.caller_name); */
2076 break;
2077 case nlm4_blocked:
2078 /* case nlm_blocked: is the same as nlm4_blocked */
2079 /* do_mon(lckarg->alock.caller_name); */
2080 break;
2081 default:
2082 deallocate_file_lock(newfl);
2083 break;
2084 }
2085
2086 debuglog("Exiting getlock...\n");
2087
2088 return retval;
2089 }
2090
2091
2092 /* unlock a filehandle */
2093 enum nlm_stats
2094 unlock(nlm4_lock *lock, const int flags)
2095 {
2096 struct file_lock fl;
2097 enum nlm_stats err;
2098
2099 debuglog("Entering unlock...\n");
2100
2101 if (lock->fh.n_len != sizeof(fhandle_t)) {
2102 debuglog("received fhandle size %d, local size %d",
2103 lock->fh.n_len, (int)sizeof(fhandle_t));
2104 return (flags & LOCK_V4) ? nlm4_failed : nlm_denied;
2105 }
2106
2107 siglock();
2108
2109 bzero(&fl,sizeof(struct file_lock));
2110 fl.filehandle.n_len = lock->fh.n_len;
2111 fl.filehandle.n_bytes = lock->fh.n_bytes;
2112
2113 copy_nlm4_lock_to_nlm4_holder(lock, 0, &fl.client);
2114
2115 err = do_unlock(&fl);
2116
2117 sigunlock();
2118
2119 debuglog("Exiting unlock...\n");
2120
2121 return err;
2122 }
2123
2124 /* cancel a blocked lock request */
2125 enum nlm_stats
2126 cancellock(nlm4_cancargs *args, const int flags)
2127 {
2128 struct file_lock *ifl, *nfl;
2129 enum nlm_stats err;
2130
2131 debuglog("Entering cancellock...\n");
2132
2133 if (args->alock.fh.n_len != sizeof(fhandle_t)) {
2134 debuglog("received fhandle size %d, local size %d",
2135 args->alock.fh.n_len, (int)sizeof(fhandle_t));
2136 return (flags & LOCK_V4) ? nlm4_failed : nlm_denied;
2137 }
2138
2139 siglock();
2140
2141 err = nlm_denied;
2142
2143 /*
2144 * scan blocked lock list for matching request and remove/destroy
2145 */
2146 ifl = LIST_FIRST(&blockedlocklist_head);
2147 for ( ; ifl != NULL; ifl = nfl) {
2148 nfl = LIST_NEXT(ifl, nfslocklist);
2149
2150 /* compare lock fh - filehandle */
2151 if (!same_netobj(&args->alock.fh, &ifl->filehandle))
2152 continue;
2153
2154 /* compare lock caller_name - client_name */
2155 if (strncmp(args->alock.caller_name, ifl->client_name, SM_MAXSTRLEN))
2156 continue;
2157
2158                 /* Note: don't compare cookie - client_cookie */
2159 /* The cookie may be specific to the cancel request */
2160 /* and not be the same as the one in the original lock request. */
2161
2162 /* compare lock oh - client.oh */
2163 if (!same_netobj(&args->alock.oh, &ifl->client.oh))
2164 continue;
2165
2166 /* compare lock svid - client.svid */
2167 if (args->alock.svid != ifl->client.svid)
2168 continue;
2169
2170 /* compare lock l_offset - client.l_offset */
2171 if (args->alock.l_offset != ifl->client.l_offset)
2172 continue;
2173
2174 /* compare lock l_len - client.l_len */
2175 if (args->alock.l_len != ifl->client.l_len)
2176 continue;
2177
2178 /* compare exclusive - client.exclusive */
2179 if (args->exclusive != ifl->client.exclusive)
2180 continue;
2181
2182 /* got it */
2183 remove_blockingfilelock(ifl);
2184 deallocate_file_lock(ifl);
2185 err = nlm_granted;
2186 break;
2187 }
2188
2189 sigunlock();
2190
2191 debuglog("Exiting cancellock...\n");
2192
2193 return err;
2194 }
2195
2196
2197 /*
2198 * XXX: The following monitor/unmonitor routines
2199  * have not been extensively tested (i.e., no regression
2200  * script exists for them as it does for the locking sections)
2201 */
2202
2203 /*
2204 * Find a lock host on a queue. If found:
2205 * bump the ref,
2206 * bump the access time,
2207 * dequeue it from the queue it was found on,
2208 * enqueue it at the front of the "in use" queue.
2209 */
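/*
 * Design note: hosts are always (re)inserted at the head of their
 * queue, so each queue stays ordered by recency of use and the least
 * recently used host sits at the tail. expire_lock_hosts() below
 * relies on this when it scans hostlst_unref with TAILQ_LAST.
 */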
2210 struct host *
2211 get_lock_host(struct hostlst_head *hd, const char *hostname, const struct sockaddr *saddr)
2212 {
2213 struct host *ihp;
2214
2215 if (!hostname && !saddr)
2216 return (NULL);
2217
2218 debuglog("get_lock_host %s\n", hostname ? hostname : "addr");
2219 TAILQ_FOREACH(ihp, hd, hostlst) {
2220 if (hostname && (strncmp(hostname, ihp->name, SM_MAXSTRLEN) != 0))
2221 continue;
2222 if (saddr && addrcmp(saddr, &ihp->addr))
2223 continue;
2224 TAILQ_REMOVE(hd, ihp, hostlst);
2225 /*
2226 * Host is already monitored, so just bump the
2227 * reference count. But don't bump the reference
2228 * count if we're adding additional client-side
2229 * references. Client-side monitors are done by
2230 * address, are never unmonitored, and should only
2231 * take one refcount. Otherwise, repeated calls
2232 * could cause the refcount to wrap.
2233 */
2234 if (!saddr || !ihp->addr.sa_len)
2235 ++ihp->refcnt;
2236 ihp->lastuse = currsec;
2237 /* Host should only be in the monitor list once */
2238 TAILQ_INSERT_HEAD(&hostlst_head, ihp, hostlst);
2239 break;
2240 }
2241 debuglog("get_lock_host %s %s\n",
2242 ihp == NULL ? "did not find" : "found", hostname ? hostname : "addr");
2243 return (ihp);
2244 }
2245
2246 /*
2247 * monitor_lock_host: monitor lock hosts locally with a ref count and
2248 * inform statd
2249 */
2250 void
2251 monitor_lock_host_by_name(const char *hostname)
2252 {
2253 struct host *ihp;
2254
2255 debuglog("monitor_lock_host: %s\n", hostname);
2256 ihp = get_lock_host(&hostlst_head, hostname, NULL);
2257 if (ihp == NULL)
2258 ihp = get_lock_host(&hostlst_unref, hostname, NULL);
2259 if (ihp != NULL) {
2260 debuglog("Monitor_lock_host: %s (cached)\n", hostname);
2261 return;
2262 }
2263
2264 monitor_lock_host(hostname, NULL);
2265 }
2266
2267 void
2268 monitor_lock_host_by_addr(const struct sockaddr *saddr)
2269 {
2270 struct host *ihp;
2271 struct hostent *hp;
2272 char hostaddr[SM_MAXSTRLEN];
2273 struct sockaddr_in *sin = (struct sockaddr_in *)saddr;
2274
2275 if (getnameinfo(saddr, saddr->sa_len, hostaddr, sizeof(hostaddr),
2276 NULL, 0, NI_NUMERICHOST)) {
2277 debuglog("monitor_lock_host: bad address\n");
2278 return;
2279 }
2280 debuglog("monitor_lock_host: %s\n", hostaddr);
2281 ihp = get_lock_host(&hostlst_head, NULL, saddr);
2282 if (ihp == NULL)
2283 ihp = get_lock_host(&hostlst_unref, NULL, saddr);
2284 if (ihp != NULL) {
2285 debuglog("Monitor_lock_host: %s (cached)\n", ihp->name);
2286 return;
2287 }
2288
2289 hp = gethostbyaddr((char*)&sin->sin_addr, sizeof(sin->sin_addr), AF_INET);
2290 if (hp) {
2291 monitor_lock_host(hp->h_name, saddr);
2292 } else {
2293                 /* herror(hostaddr); */
2294 monitor_lock_host(hostaddr, saddr);
2295 }
2296 }
2297
2298 static void
2299 monitor_lock_host(const char *hostname, const struct sockaddr *saddr)
2300 {
2301 struct host *nhp;
2302 struct mon smon;
2303 struct sm_stat_res sres;
2304 int rpcret, statflag;
2305 size_t n;
2306
2307 rpcret = 0;
2308 statflag = 0;
2309
2310 /* Host is not yet monitored, add it */
2311 debuglog("Monitor_lock_host: %s (creating)\n", hostname);
2312 n = strnlen(hostname, SM_MAXSTRLEN);
2313 if (n == SM_MAXSTRLEN) {
2314 debuglog("monitor_lock_host: hostname too long\n");
2315 return;
2316 }
2317 nhp = malloc(sizeof(*nhp) - sizeof(nhp->name) + n + 1);
2318 if (nhp == NULL) {
2319 debuglog("Unable to allocate entry for statd mon\n");
2320 return;
2321 }
2322
2323 /* Allocated new host entry, now fill the fields */
2324 memcpy(nhp->name, hostname, n);
2325 nhp->name[n] = 0;
2326 nhp->refcnt = 1;
2327 nhp->lastuse = currsec;
2328 if (saddr) {
2329 bcopy(saddr, &nhp->addr, saddr->sa_len);
2330 } else {
2331 nhp->addr.sa_len = 0;
2332 }
2333 debuglog("Locally Monitoring host '%s'\n", hostname);
2334
2335 debuglog("Attempting to tell statd\n");
2336
2337 bzero(&smon,sizeof(smon));
2338
2339 smon.mon_id.mon_name = nhp->name;
2340         smon.mon_id.my_id.my_name = "localhost";
2341
2342 smon.mon_id.my_id.my_prog = NLM_PROG;
2343 smon.mon_id.my_id.my_vers = NLM_SM;
2344 smon.mon_id.my_id.my_proc = NLM_SM_NOTIFY;
2345
2346 rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_MON, xdr_mon,
2347 &smon, xdr_sm_stat_res, &sres);
2348
2349 if (rpcret == 0) {
2350 if (sres.res_stat == stat_fail) {
2351 debuglog("Statd call failed\n");
2352 statflag = 0;
2353 } else {
2354 statflag = 1;
2355 }
2356 } else {
2357 debuglog("Rpc call to statd failed with return value: %d\n",
2358 rpcret);
2359 statflag = 0;
2360 }
2361
2362 if (statflag == 1) {
2363 TAILQ_INSERT_HEAD(&hostlst_head, nhp, hostlst);
2364 } else {
2365 free(nhp);
2366 }
2367 }
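/*
 * Note: the my_id fields registered above tell statd how to call this
 * daemon back. When the monitored host reboots, statd invokes
 * procedure NLM_SM_NOTIFY of program NLM_PROG on localhost; that
 * callback is presumably dispatched (outside this file) to notify()
 * below, which clears the rebooted host's locks.
 */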
2368
2369 /*
2370 * unmonitor_lock_host: clear monitor ref counts and inform statd when gone
2371 */
2372 void
2373 unmonitor_lock_host(const char *hostname)
2374 {
2375 struct host *ihp;
2376
2377 TAILQ_FOREACH(ihp, &hostlst_head, hostlst) {
2378 if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) {
2379 /* Host is unmonitored, drop refcount */
2380 --ihp->refcnt;
2381 /* Host should only be in the monitor list once */
2382 break;
2383 }
2384 }
2385
2386 if (ihp == NULL) {
2387 debuglog("Could not find host %16s in mon list\n", hostname);
2388 return;
2389 }
2390
2391 if (ihp->refcnt > 0)
2392 return;
2393
2394 if (ihp->refcnt < 0) {
2395 debuglog("Negative refcount!: %d\n", ihp->refcnt);
2396 }
2397
2398 TAILQ_REMOVE(&hostlst_head, ihp, hostlst);
2399 TAILQ_INSERT_HEAD(&hostlst_unref, ihp, hostlst);
2400 if (host_expire <= 0)
2401 destroy_lock_host(ihp);
2402 }
2403
2404 void
2405 destroy_lock_host(struct host *ihp)
2406 {
2407 struct mon_id smon_id;
2408 struct sm_stat smstat;
2409 int rpcret;
2410
2411 debuglog("Attempting to unmonitor host %16s\n", ihp->name);
2412
2413 bzero(&smon_id,sizeof(smon_id));
2414
2415 smon_id.mon_name = (char *)ihp->name;
2416 smon_id.my_id.my_name = "localhost";
2417 smon_id.my_id.my_prog = NLM_PROG;
2418 smon_id.my_id.my_vers = NLM_SM;
2419 smon_id.my_id.my_proc = NLM_SM_NOTIFY;
2420
2421 rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_UNMON, xdr_mon_id,
2422 &smon_id, xdr_sm_stat, &smstat);
2423
2424 if (rpcret != 0) {
2425 debuglog("Rpc call to unmonitor statd failed with "
2426                 "return value: %d: %s\n", rpcret, clnt_sperrno(rpcret));
2427 } else {
2428 debuglog("Succeeded unmonitoring %16s\n", ihp->name);
2429 }
2430
2431 TAILQ_REMOVE(&hostlst_unref, ihp, hostlst);
2432 free(ihp);
2433 }
2434
2435 /*
2436  * Returns 1 if unreferenced hosts remain (candidates for future expiry), 0 if none.
2437 */
2438 int
2439 expire_lock_hosts(void)
2440 {
2441 struct host *ihp;
2442
2443 debuglog("expire_lock_hosts: called\n");
2444 for ( ;; ) {
2445 ihp = TAILQ_LAST(&hostlst_unref, hostlst_head);
2446 if (ihp == NULL)
2447 break;
2448 if (host_expire > 0 && ihp->lastuse >= currsec - host_expire)
2449 break;
2450 debuglog("expire_lock_hosts: expiring %s %d %d %d\n",
2451 ihp->name, (int)ihp->lastuse,
2452 (int)currsec, (int)currsec - host_expire);
2453 destroy_lock_host(ihp);
2454 }
2455 return (TAILQ_LAST(&hostlst_unref, hostlst_head) != NULL);
2456 }
2457
2458 /*
2459 * notify: Clear all locks from a host if statd complains
2460 *
2461 * XXX: This routine has not been thoroughly tested. However, neither
2462 * had the old one been. It used to compare the statd crash state counter
2463 * to the current lock state. The upshot of this was that it basically
2464 * cleared all locks from the specified host 99% of the time (with the
2465 * other 1% being a bug). Consequently, the assumption is that clearing
2466 * all locks from a host when notified by statd is acceptable.
2467 *
2468 * Please note that this routine skips the usual level of redirection
2469  * through a do_* type routine. This introduces a possible source of
2470  * error; it might be better written as a do_notify routine that
2471  * replaces this one.
2472 */
2473
2474 void
2475 notify(const char *hostname, const int state)
2476 {
2477 debuglog("notify from %s, new state %d", hostname, state);
2478
2479 siglock();
2480 do_clear(hostname);
2481 sigunlock();
2482
2483 debuglog("Leaving notify\n");
2484 }
2485
2486 int
2487 send_granted(struct file_lock *fl, int opcode __unused)
2490 {
2491 CLIENT *cli;
2492 static char dummy;
2493 struct timeval timeo;
2494 enum clnt_stat rv;
2495 static struct nlm_res retval;
2496 static struct nlm4_res retval4;
2497
2498 debuglog("About to send granted on blocked lock\n");
2499
2500 cli = get_client(fl->addr,
2501 (fl->flags & LOCK_V4) ? NLM_VERS4 : NLM_VERS);
2502 if (cli == NULL) {
2503 syslog(LOG_NOTICE, "failed to get CLIENT for %s",
2504 fl->client_name);
2505 /*
2506          * We failed to notify the remote host that the lock has been
2507          * granted. The client will time out and retry, and the lock
2508          * will be granted then.
2509 */
2510 return -1;
2511 }
2512 timeo.tv_sec = 0;
2513 timeo.tv_usec = (fl->flags & LOCK_ASYNC) ? 0 : 500000; /* 0.5s */
2514
2515 fl->granted_cookie = ++send_granted_cookie;
2516 if (!send_granted_cookie)
2517 send_granted_cookie++;
2518
2519 if (fl->flags & LOCK_V4) {
2520 static nlm4_testargs res;
2521 res.cookie.n_len = sizeof(fl->granted_cookie);
2522 res.cookie.n_bytes = (char*)&fl->granted_cookie;
2523 res.exclusive = fl->client.exclusive;
2524 res.alock.caller_name = fl->client_name;
2525 res.alock.fh.n_len = fl->filehandle.n_len;
2526 res.alock.fh.n_bytes = fl->filehandle.n_bytes;
2527 res.alock.oh = fl->client.oh;
2528 res.alock.svid = fl->client.svid;
2529 res.alock.l_offset = fl->client.l_offset;
2530 res.alock.l_len = fl->client.l_len;
2531 debuglog("sending v4 reply%s",
2532 (fl->flags & LOCK_ASYNC) ? " (async)":"");
2533 if (fl->flags & LOCK_ASYNC) {
2534 rv = clnt_call(cli, NLM4_GRANTED_MSG,
2535 xdr_nlm4_testargs, &res, xdr_void, &dummy, timeo);
2536 } else {
2537 rv = clnt_call(cli, NLM4_GRANTED,
2538 xdr_nlm4_testargs, &res, xdr_nlm4_res,
2539 &retval4, timeo);
2540 }
2541 } else {
2542 static nlm_testargs res;
2543
2544 res.cookie.n_len = sizeof(fl->granted_cookie);
2545 res.cookie.n_bytes = (char*)&fl->granted_cookie;
2546 res.exclusive = fl->client.exclusive;
2547 res.alock.caller_name = fl->client_name;
2548 res.alock.fh.n_len = fl->filehandle.n_len;
2549 res.alock.fh.n_bytes = fl->filehandle.n_bytes;
2550 res.alock.oh = fl->client.oh;
2551 res.alock.svid = fl->client.svid;
2552 res.alock.l_offset = fl->client.l_offset;
2553 res.alock.l_len = fl->client.l_len;
2554 debuglog("sending v1 reply%s",
2555 (fl->flags & LOCK_ASYNC) ? " (async)":"");
2556 if (fl->flags & LOCK_ASYNC) {
2557 rv = clnt_call(cli, NLM_GRANTED_MSG,
2558 xdr_nlm_testargs, &res, xdr_void, &dummy, timeo);
2559 } else {
2560 rv = clnt_call(cli, NLM_GRANTED,
2561 xdr_nlm_testargs, &res, xdr_nlm_res,
2562 &retval, timeo);
2563 }
2564 }
2565 if (debug_level > 2)
2566 debuglog("clnt_call returns %d(%s) for granted",
2567 rv, clnt_sperrno(rv));
2568
2569 if ((rv != RPC_SUCCESS) &&
2570 !((fl->flags & LOCK_ASYNC) && (rv == RPC_TIMEDOUT)))
2571 return -1;
2572 return 0;
2573 }
2574
2575 /*
2576 * granted_failed: remove a granted lock that wasn't successfully
2577 * accepted by the client
2578 */
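/*
 * The cookie round-trip, both ends of which are in this file:
 * send_granted() stamps fl->granted_cookie and ships it as the NLM
 * cookie of the GRANTED call. If the client rejects the grant, the
 * nlm4_res it returns carries the same cookie, which granted_failed()
 * matches against the lock list to find the file_lock to undo.
 */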
2579 void
2580 granted_failed(nlm4_res *arg)
2581 {
2582 u_int64_t cookie;
2583 struct file_lock *ifl;
2584
2585 debuglog("Entering granted_failed, status %d\n", arg->stat.stat);
2586
2587 if (arg->cookie.n_len != sizeof(cookie)) {
2588 debuglog("Exiting granted_failed: bogus cookie size %d\n",
2589 arg->cookie.n_len);
2590 return;
2591 }
2592 bcopy(arg->cookie.n_bytes, &cookie, sizeof(cookie));
2593 debuglog("granted_failed, cookie 0x%llx\n", cookie);
2594
2595 LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
2596 debuglog("Pointer to file lock: %p\n",ifl);
2597
2598 debuglog("****Dump of ifl****\n");
2599 dump_filelock(ifl);
2600
2601 if (ifl->granted_cookie != cookie)
2602 continue;
2603
2604 debuglog("granted_failed: cookie found\n");
2605 break;
2606 }
2607
2608 if (ifl) {
2609 do_unlock(ifl);
2610 /* ifl is NO LONGER VALID AT THIS POINT */
2611 } else {
2612 debuglog("granted_failed: cookie NOT FOUND\n");
2613 }
2614
2615 debuglog("Exiting granted_failed\n");
2616 }
2617
2618 /*
2619 * getshare: try to acquire a share reservation
2620 */
2621 enum nlm_stats
2622 getshare(nlm_shareargs *shrarg, struct svc_req *rqstp, const int flags)
2623 {
2624 struct sharefile *shrfile;
2625 struct file_share *sh;
2626 size_t n;
2627
2628 debuglog("Entering getshare...\n");
2629
2630 if (grace_expired == 0 && shrarg->reclaim == 0) {
2631 debuglog("getshare denied - grace period\n");
2632 return (flags & LOCK_V4) ?
2633 nlm4_denied_grace_period :
2634 nlm_denied_grace_period;
2635 }
2636
2637 if (shrarg->share.fh.n_len != sizeof(fhandle_t)) {
2638 debuglog("received fhandle size %d, local size %d",
2639 shrarg->share.fh.n_len, (int)sizeof(fhandle_t));
2640 return (flags & LOCK_V4) ? nlm4_failed : nlm_denied;
2641 }
2642
2643 /* find file in list of share files */
2644 LIST_FOREACH(shrfile, &nfssharefilelist_head, sharefilelist) {
2645 if ((shrarg->share.fh.n_len == shrfile->filehandle.n_len) &&
2646 (bcmp(shrarg->share.fh.n_bytes, shrfile->filehandle.n_bytes,
2647 shrarg->share.fh.n_len) == 0)) {
2648 /* shrfile is the correct file */
2649 break;
2650 }
2651 }
2652
2653 /* if share file not found, create a new share file */
2654 if (!shrfile) {
2655 int fd;
2656 fd = fhopen((fhandle_t *)shrarg->share.fh.n_bytes, O_RDONLY);
2657 if (fd < 0) {
2658 debuglog("fhopen failed (from %16s): %32s\n",
2659 shrarg->share.caller_name, strerror(errno));
2660 if ((flags & LOCK_V4) == 0)
2661 return nlm_denied;
2662 switch (errno) {
2663 case ESTALE:
2664 return nlm4_stale_fh;
2665 default:
2666 return nlm4_failed;
2667 }
2668 }
2669 shrfile = malloc(sizeof(struct sharefile));
2670 if (!shrfile) {
2671 debuglog("getshare failed: can't allocate sharefile\n");
2672 close(fd);
2673 return (flags & LOCK_V4) ? nlm4_failed : nlm_denied;
2674 }
2675 shrfile->filehandle.n_len = shrarg->share.fh.n_len;
2676 shrfile->filehandle.n_bytes = malloc(shrarg->share.fh.n_len);
2677 if (!shrfile->filehandle.n_bytes) {
2678 debuglog("getshare failed: can't allocate sharefile filehandle\n");
2679 free(shrfile);
2680 close(fd);
2681 return (flags & LOCK_V4) ? nlm4_failed : nlm_denied;
2682 }
2683 bcopy(shrarg->share.fh.n_bytes, shrfile->filehandle.n_bytes,
2684 shrarg->share.fh.n_len);
2685 shrfile->fd = fd;
2686 shrfile->refcount = 0;
2687                 LIST_INIT(&shrfile->sharelist_head);
2688 LIST_INSERT_HEAD(&nfssharefilelist_head, shrfile, sharefilelist);
2689 }
2690
2691 /* compare request mode/access to current shares */
2692 LIST_FOREACH(sh, &shrfile->sharelist_head, nfssharelist) {
2693 /* if request host/owner matches a current share... */
2694 if ((strncmp(shrarg->share.caller_name, sh->client_name, SM_MAXSTRLEN) == 0) &&
2695 same_netobj(&shrarg->share.oh, &sh->oh)) {
2696 /* ...then just update share mode/access */
2697 sh->mode = shrarg->share.mode;
2698 sh->access = shrarg->share.access;
2699 debuglog("getshare: updated existing share\n");
2700 return nlm_granted;
2701 }
2702 if (((shrarg->share.mode & sh->access) != 0) ||
2703 ((shrarg->share.access & sh->mode) != 0)) {
2704 /* share request conflicts with existing share */
2705 debuglog("getshare: conflicts with existing share\n");
2706 return nlm_denied;
2707 }
2708 }
2709
2710 /* create/init new share */
2711 n = strnlen(shrarg->share.caller_name, SM_MAXSTRLEN);
2712 if (n < SM_MAXSTRLEN) {
2713 sh = malloc(sizeof(*sh) - sizeof(sh->client_name) + n + 1);
2714 } else {
2715 debuglog("getshare failed: hostname too long\n");
2716 sh = NULL;
2717 }
2718 if (!sh) {
2719 debuglog("getshare failed: can't allocate share\n");
2720 if (!shrfile->refcount) {
2721 LIST_REMOVE(shrfile, sharefilelist);
2722 close(shrfile->fd);
2723 free(shrfile->filehandle.n_bytes);
2724 free(shrfile);
2725 }
2726 return (flags & LOCK_V4) ? nlm4_failed : nlm_denied;
2727 }
2728 bzero(sh, sizeof(*sh) - sizeof(sh->client_name));
2729 sh->oh.n_len = shrarg->share.oh.n_len;
2730 sh->oh.n_bytes = malloc(sh->oh.n_len);
2731 if (!sh->oh.n_bytes) {
2732 debuglog("getshare failed: can't allocate share owner handle\n");
2733 free(sh);
2734 if (!shrfile->refcount) {
2735 LIST_REMOVE(shrfile, sharefilelist);
2736 close(shrfile->fd);
2737 free(shrfile->filehandle.n_bytes);
2738 free(shrfile);
2739 }
2740 return (flags & LOCK_V4) ? nlm4_failed : nlm_denied;
2741 }
2742 memcpy(sh->client_name, shrarg->share.caller_name, n);
2743 sh->client_name[n] = 0;
2744 sh->mode = shrarg->share.mode;
2745 sh->access = shrarg->share.access;
2746
2747 /* insert new share into file's share list */
2748 LIST_INSERT_HEAD(&shrfile->sharelist_head, sh, nfssharelist);
2749 shrfile->refcount++;
2750
2751 debuglog("Exiting getshare...\n");
2752
2753 return nlm_granted;
2754 }
2755
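/*
 * Worked example of getshare's conflict test: suppose an existing
 * share was granted with access = read|write and mode (deny) = none.
 * A new request with mode = write makes (shrarg->share.mode &
 * sh->access) nonzero and is denied, while a request that asks for
 * read access and denies nothing passes both tests and coexists with
 * the existing share.
 */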
2756
2757 /* remove a share reservation */
2758 enum nlm_stats
2759 unshare(nlm_shareargs *shrarg, struct svc_req *rqstp, const int flags)
2760 {
2761 struct sharefile *shrfile;
2762 struct file_share *sh;
2763
2764 debuglog("Entering unshare...\n");
2765
2766 if (shrarg->share.fh.n_len != sizeof(fhandle_t)) {
2767 debuglog("received fhandle size %d, local size %d",
2768 shrarg->share.fh.n_len, (int)sizeof(fhandle_t));
2769 return (flags & LOCK_V4) ? nlm4_failed : nlm_denied;
2770 }
2771
2772 /* find file in list of share files */
2773 LIST_FOREACH(shrfile, &nfssharefilelist_head, sharefilelist) {
2774 if ((shrarg->share.fh.n_len == shrfile->filehandle.n_len) &&
2775 (bcmp(shrarg->share.fh.n_bytes, shrfile->filehandle.n_bytes,
2776 shrarg->share.fh.n_len) == 0)) {
2777 /* shrfile is the correct file */
2778 break;
2779 }
2780 }
2781
2782 /* if share file not found, return success (per spec) */
2783 if (!shrfile) {
2784 debuglog("unshare: no such share file\n");
2785 return nlm_granted;
2786 }
2787
2788 /* find share */
2789 LIST_FOREACH(sh, &shrfile->sharelist_head, nfssharelist) {
2790 /* if request host/owner matches a current share... */
2791 if ((strncmp(shrarg->share.caller_name, sh->client_name, SM_MAXSTRLEN) == 0) &&
2792 same_netobj(&shrarg->share.oh, &sh->oh))
2793 break;
2794 }
2795
2796 /* if share not found, return success (per spec) */
2797 if (!sh) {
2798 debuglog("unshare: no such share\n");
2799 return nlm_granted;
2800 }
2801
2802 /* remove share from file and deallocate */
2803 shrfile->refcount--;
2804 LIST_REMOVE(sh, nfssharelist);
2805 free(sh->oh.n_bytes);
2806 free(sh);
2807
2808 /* if file has no more shares, deallocate share file */
2809 if (!shrfile->refcount) {
2810 debuglog("unshare: file has no more shares\n");
2811 LIST_REMOVE(shrfile, sharefilelist);
2812 close(shrfile->fd);
2813 free(shrfile->filehandle.n_bytes);
2814 free(shrfile);
2815 }
2816
2817 debuglog("Exiting unshare...\n");
2818
2819 return nlm_granted;
2820 }
2821
2822 /*
2823 * do_free_all
2824 *
2825 * Wipe out all non-monitored locks and shares held by a host.
2826 */
2827
2828 void
2829 do_free_all(const char *hostname)
2830 {
2831 struct file_lock *ifl, *nfl;
2832 struct sharefile *shrfile, *nshrfile;
2833 struct file_share *ifs, *nfs;
2834
2835 /* clear non-monitored blocking file locks */
2836 ifl = LIST_FIRST(&blockedlocklist_head);
2837 while (ifl != NULL) {
2838 nfl = LIST_NEXT(ifl, nfslocklist);
2839
2840 if (((ifl->flags & LOCK_MON) == 0) &&
2841 (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0)) {
2842 remove_blockingfilelock(ifl);
2843 deallocate_file_lock(ifl);
2844 }
2845
2846 ifl = nfl;
2847 }
2848
2849 /* clear non-monitored file locks */
2850 ifl = LIST_FIRST(&nfslocklist_head);
2851 while (ifl != NULL) {
2852 nfl = LIST_NEXT(ifl, nfslocklist);
2853
2854 if (((ifl->flags & LOCK_MON) == 0) &&
2855 (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0)) {
2856 /* Unlock destroys ifl out from underneath */
2857 unlock_partialfilelock(ifl);
2858 /* ifl is NO LONGER VALID AT THIS POINT */
2859 }
2860
2861 ifl = nfl;
2862 }
2863
2864 /* clear shares */
2865 shrfile = LIST_FIRST(&nfssharefilelist_head);
2866 while (shrfile != NULL) {
2867 nshrfile = LIST_NEXT(shrfile, sharefilelist);
2868
2869 ifs = LIST_FIRST(&shrfile->sharelist_head);
2870 while (ifs != NULL) {
2871 nfs = LIST_NEXT(ifs, nfssharelist);
2872
2873 if (strncmp(hostname, ifs->client_name, SM_MAXSTRLEN) == 0) {
2874 shrfile->refcount--;
2875 LIST_REMOVE(ifs, nfssharelist);
2876 free(ifs->oh.n_bytes);
2877 free(ifs);
2878 }
2879
2880 ifs = nfs;
2881 }
2882
2883 if (!shrfile->refcount) {
2884 LIST_REMOVE(shrfile, sharefilelist);
2885 close(shrfile->fd);
2886 free(shrfile->filehandle.n_bytes);
2887 free(shrfile);
2888 }
2889
2890 shrfile = nshrfile;
2891 }
2892
2893 }
2894
2895
2896
2897 /*
2898 * Routines below here have not been modified in the overhaul
2899 */
2900
2901 /*
2902 * Are these two routines still required since lockd is not spawning off
2903 * children to service locks anymore? Presumably they were originally
2904  * put in place to prevent one child from changing the lock list out
2905 * from under another one.
2906 */
2907
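/*
 * Usage sketch, as seen in getlock/unlock/cancellock above: callers
 * bracket any mutation of the lock lists so that a SIGCHLD handler
 * cannot run in the middle of a list update:
 *
 *	siglock();
 *	... modify nfslocklist_head / blockedlocklist_head ...
 *	sigunlock();
 */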
2908 void
2909 siglock(void)
2910 {
2911 sigset_t block;
2912
2913 sigemptyset(&block);
2914 sigaddset(&block, SIGCHLD);
2915
2916 if (sigprocmask(SIG_BLOCK, &block, NULL) < 0) {
2917 syslog(LOG_WARNING, "siglock failed: %s", strerror(errno));
2918 }
2919 }
2920
2921 void
2922 sigunlock(void)
2923 {
2924 sigset_t block;
2925
2926 sigemptyset(&block);
2927 sigaddset(&block, SIGCHLD);
2928
2929 if (sigprocmask(SIG_UNBLOCK, &block, NULL) < 0) {
2930 syslog(LOG_WARNING, "sigunlock failed: %s", strerror(errno));
2931 }
2932 }
2933
2934