/* $NetBSD: lockd_lock.c,v 1.5 2000/11/21 03:47:41 enami Exp $ */
/* $FreeBSD: src/usr.sbin/rpc.lockd/lockd_lock.c,v 1.10 2002/03/22 19:57:09 alfred Exp $ */

/*
 * Copyright (c) 2001 Andrew P. Lentvorski, Jr.
 * Copyright (c) 2000 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#define LOCKD_DEBUG

#include <stdio.h>
#ifdef LOCKD_DEBUG
#include <stdarg.h>
#endif
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <syslog.h>
#include <errno.h>
#include <string.h>
#include <signal.h>
#include <rpc/rpc.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/wait.h>
#include <rpcsvc/sm_inter.h>
#include <rpcsvc/nlm_prot.h>

#include "lockd.h"
#include "lockd_lock.h"

#define MAXOBJECTSIZE 64
#define MAXBUFFERSIZE 1024

/*
 * SM_MAXSTRLEN is usually 1024. This means that lock requests and
 * host name monitoring entries are *MUCH* larger than they should be
 */

/*
 * A set of utilities for managing file locking
 *
 * XXX: All locks are in a linked list, a better structure should be used
 * to improve search/access efficiency.
 */

/* struct describing a lock */
struct file_lock {
	LIST_ENTRY(file_lock) nfslocklist;
	netobj filehandle;		/* NFS filehandle */
	struct sockaddr *addr;
	struct nlm4_holder client;	/* lock holder */
	/* XXX: client_cookie used *only* in send_granted */
	netobj client_cookie;		/* cookie sent by the client */
	char client_name[SM_MAXSTRLEN];
	int nsm_status;			/* status from the remote lock manager */
	int status;			/* lock status, see below */
	int flags;			/* lock flags, see lockd_lock.h */
	int blocking;			/* blocking lock or not */
	pid_t locker;			/* pid of the child process trying to get the lock */
	int fd;				/* file descriptor for this lock */
};

LIST_HEAD(nfslocklist_head, file_lock);
struct nfslocklist_head nfslocklist_head = LIST_HEAD_INITIALIZER(nfslocklist_head);

LIST_HEAD(blockedlocklist_head, file_lock);
struct blockedlocklist_head blockedlocklist_head = LIST_HEAD_INITIALIZER(blockedlocklist_head);

/* struct describing a share reservation */
struct file_share {
	LIST_ENTRY(file_share) nfssharelist;
	netobj oh;			/* share holder */
	char client_name[SM_MAXSTRLEN];
	short mode;
	short access;
};
LIST_HEAD(nfssharelist_head, file_share);

/* Struct describing a file with share reservations */
struct sharefile {
	LIST_ENTRY(sharefile) sharefilelist;
	netobj filehandle;		/* Local access filehandle */
	int fd;				/* file descriptor: remains open until no more shares */
	int refcount;
	struct nfssharelist_head sharelist_head;
};
LIST_HEAD(nfssharefilelist_head, sharefile);
struct nfssharefilelist_head nfssharefilelist_head = LIST_HEAD_INITIALIZER(nfssharefilelist_head);

/* lock status */
#define LKST_LOCKED	1		/* lock is locked */
/* XXX: Is this flag file specific or lock specific? */
#define LKST_WAITING	2		/* file is already locked by another host */
#define LKST_PROCESSING	3		/* child is trying to acquire the lock */
#define LKST_DYING	4		/* must die when we get news from the child */

/* struct describing a monitored host */
struct host {
	LIST_ENTRY(host) hostlst;
	char name[SM_MAXSTRLEN];
	int refcnt;
};
/* list of hosts we monitor */
LIST_HEAD(hostlst_head, host);
struct hostlst_head hostlst_head = LIST_HEAD_INITIALIZER(hostlst_head);

/*
 * File monitoring handlers
 * XXX: These might be able to be removed when kevent support
 * is placed into the hardware lock/unlock routines. (ie.
 * let the kernel do all the file monitoring)
 */

/* Struct describing a monitored file */
struct monfile {
	LIST_ENTRY(monfile) monfilelist;
	netobj filehandle;		/* Local access filehandle */
	int fd;				/* file descriptor: remains open until unlock! */
	int refcount;
	int exclusive;
};

/* List of files we monitor */
LIST_HEAD(monfilelist_head, monfile);
struct monfilelist_head monfilelist_head = LIST_HEAD_INITIALIZER(monfilelist_head);

static int debugdelay = 0;

enum nfslock_status { NFS_GRANTED = 0, NFS_GRANTED_DUPLICATE,
		      NFS_DENIED, NFS_DENIED_NOLOCK,
		      NFS_RESERR };

enum hwlock_status { HW_GRANTED = 0, HW_GRANTED_DUPLICATE,
		     HW_DENIED, HW_DENIED_NOLOCK,
		     HW_STALEFH, HW_READONLY, HW_RESERR };

enum partialfilelock_status { PFL_GRANTED=0, PFL_GRANTED_DUPLICATE, PFL_DENIED,
	PFL_NFSDENIED, PFL_NFSBLOCKED, PFL_NFSDENIED_NOLOCK, PFL_NFSRESERR,
	PFL_HWDENIED, PFL_HWBLOCKED, PFL_HWDENIED_NOLOCK, PFL_HWRESERR};

enum LFLAGS {LEDGE_LEFT, LEDGE_LBOUNDARY, LEDGE_INSIDE, LEDGE_RBOUNDARY, LEDGE_RIGHT};
enum RFLAGS {REDGE_LEFT, REDGE_LBOUNDARY, REDGE_INSIDE, REDGE_RBOUNDARY, REDGE_RIGHT};
/* XXX: WARNING! I HAVE OVERLOADED THIS STATUS ENUM!  SPLIT IT APART INTO TWO */
enum split_status {SPL_DISJOINT=0, SPL_LOCK1=1, SPL_LOCK2=2, SPL_CONTAINED=4, SPL_RESERR=8};

enum partialfilelock_status lock_partialfilelock(struct file_lock *fl);

void send_granted(struct file_lock *fl, int opcode);
void siglock(void);
void sigunlock(void);
void monitor_lock_host(const char *hostname);
void unmonitor_lock_host(const char *hostname);

void copy_nlm4_lock_to_nlm4_holder(const struct nlm4_lock *src,
	const bool_t exclusive, struct nlm4_holder *dest);
struct file_lock * allocate_file_lock(const netobj *lockowner,
	const netobj *matchcookie, const netobj *filehandle);
void deallocate_file_lock(struct file_lock *fl);
void fill_file_lock(struct file_lock *fl,
	struct sockaddr *addr, const bool_t exclusive, const int32_t svid,
	const u_int64_t offset, const u_int64_t len, const char *caller_name,
	const int state, const int status, const int flags, const int blocking);
int regions_overlap(const u_int64_t start1, const u_int64_t len1,
	const u_int64_t start2, const u_int64_t len2);
enum split_status region_compare(const u_int64_t starte, const u_int64_t lene,
	const u_int64_t startu, const u_int64_t lenu,
	u_int64_t *start1, u_int64_t *len1, u_int64_t *start2, u_int64_t *len2);
int same_netobj(const netobj *n0, const netobj *n1);
int same_filelock_identity(const struct file_lock *fl0,
	const struct file_lock *fl1);

static void debuglog(char const *fmt, ...);
void dump_static_object(const unsigned char* object, const int sizeof_object,
	unsigned char* hbuff, const int sizeof_hbuff,
	unsigned char* cbuff, const int sizeof_cbuff);
void dump_netobj(const struct netobj *nobj);
void dump_filelock(const struct file_lock *fl);
struct file_lock * get_lock_matching_unlock(const struct file_lock *fl);
enum nfslock_status test_nfslock(const struct file_lock *fl,
	struct file_lock **conflicting_fl);
enum nfslock_status lock_nfslock(struct file_lock *fl);
enum nfslock_status delete_nfslock(struct file_lock *fl);
enum nfslock_status unlock_nfslock(const struct file_lock *fl,
	struct file_lock **released_lock, struct file_lock **left_lock,
	struct file_lock **right_lock);
enum hwlock_status lock_hwlock(struct file_lock *fl);
enum split_status split_nfslock(const struct file_lock *exist_lock,
	const struct file_lock *unlock_lock, struct file_lock **left_lock,
	struct file_lock **right_lock);
void add_blockingfilelock(struct file_lock *fl);
enum hwlock_status unlock_hwlock(const struct file_lock *fl);
enum hwlock_status test_hwlock(const struct file_lock *fl,
	struct file_lock **conflicting_fl);
void remove_blockingfilelock(struct file_lock *fl);
void clear_blockingfilelock(const char *hostname);
void retry_blockingfilelocklist(void);
enum partialfilelock_status unlock_partialfilelock(
	const struct file_lock *fl);
void clear_partialfilelock(const char *hostname);
enum partialfilelock_status test_partialfilelock(
	const struct file_lock *fl, struct file_lock **conflicting_fl);
enum nlm_stats do_test(struct file_lock *fl,
	struct file_lock **conflicting_fl);
enum nlm_stats do_unlock(struct file_lock *fl);
enum nlm_stats do_lock(struct file_lock *fl);
void do_clear(const char *hostname);


void
debuglog(char const *fmt, ...)
{
	va_list ap;

	if (debug_level < 1) {
		return;
	}

	sleep(debugdelay);

	va_start(ap, fmt);
	vsyslog(LOG_DEBUG, fmt, ap);
	va_end(ap);
}

void
dump_static_object(object, size_object, hbuff, size_hbuff, cbuff, size_cbuff)
	const unsigned char *object;
	const int size_object;
	unsigned char *hbuff;
	const int size_hbuff;
	unsigned char *cbuff;
	const int size_cbuff;
{
	int i, objectsize;

	if (debug_level < 2) {
		return;
	}

	objectsize = size_object;

	if (objectsize == 0) {
		debuglog("object is size 0\n");
	} else {
		if (objectsize > MAXOBJECTSIZE) {
279 | debuglog("Object of size %d being clamped" | |
280 | "to size %d\n", objectsize, MAXOBJECTSIZE); | |
			objectsize = MAXOBJECTSIZE;
		}

		if (hbuff != NULL) {
			if (size_hbuff < objectsize*2+1) {
				debuglog("Hbuff not large enough."
				    " Increase size\n");
			} else {
				for(i=0;i<objectsize;i++) {
					sprintf(hbuff+i*2,"%02x",*(object+i));
				}
				*(hbuff+i*2) = '\0';
			}
		}

		if (cbuff != NULL) {
			if (size_cbuff < objectsize+1) {
				debuglog("Cbuff not large enough."
				    " Increase Size\n");
			} else {
				for(i=0;i<objectsize;i++) {
					if (*(object+i) >= 32 && *(object+i) <= 127) {
						*(cbuff+i) = *(object+i);
					} else {
						*(cbuff+i) = '.';
					}
				}
				*(cbuff+i) = '\0';
			}
		}
	}
}

void
dump_netobj(const struct netobj *nobj)
{
	char hbuff[MAXBUFFERSIZE*2];
	char cbuff[MAXBUFFERSIZE];

	if (debug_level < 2) {
		return;
	}

	if (nobj == NULL) {
		debuglog("Null netobj pointer\n");
	}
	else if (nobj->n_len == 0) {
		debuglog("Size zero netobj\n");
	} else {
		dump_static_object(nobj->n_bytes, nobj->n_len,
		    hbuff, sizeof(hbuff), cbuff, sizeof(cbuff));
		debuglog("netobj: len: %d data: %s ::: %s\n",
		    nobj->n_len, hbuff, cbuff);
	}
}

/* #define DUMP_FILELOCK_VERBOSE */
void
dump_filelock(const struct file_lock *fl)
{
#ifdef DUMP_FILELOCK_VERBOSE
	char hbuff[MAXBUFFERSIZE*2];
	char cbuff[MAXBUFFERSIZE];
#endif

	if (debug_level < 2) {
		return;
	}

	if (fl != NULL) {
		debuglog("Dumping file lock structure @ %p\n", fl);

#ifdef DUMP_FILELOCK_VERBOSE
		dump_static_object((unsigned char *)fl->filehandle.n_bytes,
		    fl->filehandle.n_len, hbuff, sizeof(hbuff),
		    cbuff, sizeof(cbuff));
357 | debuglog("Filehandle: %8s ::: %8s\n", hbuff, cbuff); | |
358 | #endif | |
359 | ||
360 | debuglog("Dumping nlm4_holder:\n" | |
361 | "exc: %x svid: %x offset:len %llx:%llx\n", | |
362 | fl->client.exclusive, fl->client.svid, | |
363 | fl->client.l_offset, fl->client.l_len); | |
364 | ||
365 | #ifdef DUMP_FILELOCK_VERBOSE | |
366 | debuglog("Dumping client identity:\n"); | |
367 | dump_netobj(&fl->client.oh); | |
368 | ||
369 | debuglog("Dumping client cookie:\n"); | |
370 | dump_netobj(&fl->client_cookie); | |
371 | ||
372 | debuglog("nsm: %d status: %d flags: %d locker: %d" | |
373 | " fd: %d\n", fl->nsm_status, fl->status, | |
374 | fl->flags, fl->locker, fl->fd); | |
375 | #endif | |
376 | } else { | |
377 | debuglog("NULL file lock structure\n"); | |
378 | } | |
379 | } | |
380 | ||
381 | void | |
382 | copy_nlm4_lock_to_nlm4_holder(src, exclusive, dest) | |
383 | const struct nlm4_lock *src; | |
384 | const bool_t exclusive; | |
385 | struct nlm4_holder *dest; | |
386 | { | |
387 | ||
388 | dest->exclusive = exclusive; | |
389 | dest->oh.n_len = src->oh.n_len; | |
390 | dest->oh.n_bytes = src->oh.n_bytes; | |
391 | dest->svid = src->svid; | |
392 | dest->l_offset = src->l_offset; | |
393 | dest->l_len = src->l_len; | |
394 | } | |
395 | ||
396 | ||
397 | /* | |
398 | * allocate_file_lock: Create a lock with the given parameters | |
399 | */ | |

struct file_lock *
allocate_file_lock(const netobj *lockowner, const netobj *matchcookie, const netobj *filehandle)
{
	struct file_lock *newfl;

	newfl = malloc(sizeof(struct file_lock));
	if (newfl == NULL) {
		return NULL;
	}
	bzero(newfl, sizeof(*newfl));

	newfl->client.oh.n_bytes = malloc(lockowner->n_len);
	if (newfl->client.oh.n_bytes == NULL) {
		free(newfl);
		return NULL;
	}
	newfl->client.oh.n_len = lockowner->n_len;
	bcopy(lockowner->n_bytes, newfl->client.oh.n_bytes, lockowner->n_len);

	newfl->client_cookie.n_bytes = malloc(matchcookie->n_len);
	if (newfl->client_cookie.n_bytes == NULL) {
		free(newfl->client.oh.n_bytes);
		free(newfl);
		return NULL;
	}
	newfl->client_cookie.n_len = matchcookie->n_len;
	bcopy(matchcookie->n_bytes, newfl->client_cookie.n_bytes, matchcookie->n_len);

	newfl->filehandle.n_bytes = malloc(filehandle->n_len);
	if (newfl->filehandle.n_bytes == NULL) {
		free(newfl->client_cookie.n_bytes);
		free(newfl->client.oh.n_bytes);
		free(newfl);
		return NULL;
	}
	newfl->filehandle.n_len = filehandle->n_len;
	bcopy(filehandle->n_bytes, newfl->filehandle.n_bytes, filehandle->n_len);

	return newfl;
}

/*
 * fill_file_lock: Force creation of a valid file lock
 */
void
fill_file_lock(struct file_lock *fl,
    struct sockaddr *addr, const bool_t exclusive, const int32_t svid,
    const u_int64_t offset, const u_int64_t len, const char *caller_name,
    const int state, const int status, const int flags, const int blocking)
{
	fl->addr = addr;

	fl->client.exclusive = exclusive;
	fl->client.svid = svid;
	fl->client.l_offset = offset;
	fl->client.l_len = len;

	strncpy(fl->client_name, caller_name, SM_MAXSTRLEN);
	fl->client_name[SM_MAXSTRLEN - 1] = '\0';	/* guarantee termination */

	fl->nsm_status = state;
	fl->status = status;
	fl->flags = flags;
	fl->blocking = blocking;
}

/*
 * deallocate_file_lock: Free all storage associated with a file lock
 */
void
deallocate_file_lock(struct file_lock *fl)
{
	free(fl->client.oh.n_bytes);
	free(fl->client_cookie.n_bytes);
	free(fl->filehandle.n_bytes);
	free(fl);
}
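
/*
 * Note (added commentary, not from the original source): a lock object
 * normally moves through allocate_file_lock() to create and copy its
 * netobj fields, fill_file_lock() to set the remaining members, and,
 * once it is off every list, deallocate_file_lock() to release it.
 * The split code in split_nfslock() below follows exactly this pattern.
 */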

/*
 * regions_overlap(): This function examines the two provided regions for
 * overlap.
 */
int
regions_overlap(start1, len1, start2, len2)
	const u_int64_t start1, len1, start2, len2;
{
	u_int64_t d1,d2,d3,d4;
	enum split_status result;

	debuglog("Entering region overlap with vals: %llu:%llu--%llu:%llu\n",
	    start1, len1, start2, len2);

	result = region_compare(start1, len1, start2, len2,
	    &d1, &d2, &d3, &d4);

	debuglog("Exiting region overlap with val: %d\n",result);

	if (result == SPL_DISJOINT) {
		return 0;
	} else {
		return 1;
	}
}

/*
 * region_compare(): Examine lock regions and split appropriately
 *
 * XXX: Fix 64 bit overflow problems
 * XXX: Check to make sure I got *ALL* the cases.
 * XXX: This DESPERATELY needs a regression test.
 */
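/*
 * Worked example (added for illustration, not in the original source):
 * with an established lock of offset 0, length 100 and an unlock of
 * offset 30, length 50, both edges of the unlocker fall inside the
 * lock, so the result is SPL_LOCK1|SPL_LOCK2 with a left remainder of
 * 0:30 and a right remainder of 80:20.  A length of 0 denotes an
 * infinite region, which is why the cases below fan out on lene/lenu
 * being zero.
 */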
enum split_status
region_compare(starte, lene, startu, lenu,
    start1, len1, start2, len2)
	const u_int64_t starte, lene, startu, lenu;
	u_int64_t *start1, *len1, *start2, *len2;
{
	/*
	 * Please pay attention to the sequential exclusions
	 * of the if statements!!!
	 */
	enum LFLAGS lflags;
	enum RFLAGS rflags;
	enum split_status retval;

	retval = SPL_DISJOINT;

	if (lene == 0 && lenu == 0) {
		/* Examine left edge of unlocker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
		} else {
			lflags = LEDGE_INSIDE;
		}

		rflags = REDGE_RBOUNDARY; /* Both are infinite */

		if (lflags == LEDGE_INSIDE) {
			*start1 = starte;
			*len1 = startu - starte;
		}

		if (lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) {
			retval = SPL_CONTAINED;
		} else {
			retval = SPL_LOCK1;
		}
	} else if (lene == 0 && lenu != 0) {
		/* Established lock is infinite */
		/* Examine left edge of unlocker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
		} else if (startu > starte) {
			lflags = LEDGE_INSIDE;
		}

		/* Examine right edge of unlocker */
		if (startu + lenu < starte) {
			/* Right edge of unlocker left of established lock */
			rflags = REDGE_LEFT;
			return SPL_DISJOINT;
		} else if (startu + lenu == starte) {
			/* Right edge of unlocker on start of established lock */
			rflags = REDGE_LBOUNDARY;
			return SPL_DISJOINT;
		} else { /* Infinity is right of finity */
			/* Right edge of unlocker inside established lock */
			rflags = REDGE_INSIDE;
		}

		if (lflags == LEDGE_INSIDE) {
			*start1 = starte;
			*len1 = startu - starte;
			retval |= SPL_LOCK1;
		}

		if (rflags == REDGE_INSIDE) {
			/* Create right lock */
			*start2 = startu+lenu;
			*len2 = 0;
			retval |= SPL_LOCK2;
		}
	} else if (lene != 0 && lenu == 0) {
		/* Unlocker is infinite */
		/* Examine left edge of unlocker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
			retval = SPL_CONTAINED;
			return retval;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
			retval = SPL_CONTAINED;
			return retval;
		} else if ((startu > starte) && (startu < starte + lene - 1)) {
			lflags = LEDGE_INSIDE;
		} else if (startu == starte + lene - 1) {
			lflags = LEDGE_RBOUNDARY;
		} else { /* startu > starte + lene -1 */
			lflags = LEDGE_RIGHT;
			return SPL_DISJOINT;
		}

		rflags = REDGE_RIGHT; /* Infinity is right of finity */

		if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
			*start1 = starte;
			*len1 = startu - starte;
			retval |= SPL_LOCK1;
			return retval;
		}

	} else {
		/* Both locks are finite */

		/* Examine left edge of unlocker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
		} else if ((startu > starte) && (startu < starte + lene - 1)) {
			lflags = LEDGE_INSIDE;
		} else if (startu == starte + lene - 1) {
			lflags = LEDGE_RBOUNDARY;
		} else { /* startu > starte + lene -1 */
			lflags = LEDGE_RIGHT;
			return SPL_DISJOINT;
		}

		/* Examine right edge of unlocker */
		if (startu + lenu < starte) {
			/* Right edge of unlocker left of established lock */
			rflags = REDGE_LEFT;
			return SPL_DISJOINT;
		} else if (startu + lenu == starte) {
			/* Right edge of unlocker on start of established lock */
			rflags = REDGE_LBOUNDARY;
			return SPL_DISJOINT;
		} else if (startu + lenu < starte + lene) {
			/* Right edge of unlocker inside established lock */
			rflags = REDGE_INSIDE;
		} else if (startu + lenu == starte + lene) {
			/* Right edge of unlocker on right edge of established lock */
			rflags = REDGE_RBOUNDARY;
		} else { /* startu + lenu > starte + lene */
			/* Right edge of unlocker is right of established lock */
			rflags = REDGE_RIGHT;
		}

		if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
			/* Create left lock */
			*start1 = starte;
			*len1 = (startu - starte);
			retval |= SPL_LOCK1;
		}

		if (rflags == REDGE_INSIDE) {
			/* Create right lock */
			*start2 = startu+lenu;
			*len2 = starte+lene-(startu+lenu);
			retval |= SPL_LOCK2;
		}

		if ((lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) &&
		    (rflags == REDGE_RBOUNDARY || rflags == REDGE_RIGHT)) {
			retval = SPL_CONTAINED;
		}
	}

	return retval;
}
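
#ifdef REGION_COMPARE_TEST
/*
 * Minimal regression sketch for region_compare(), added because the
 * XXX comment above asks for one.  This is illustrative only and not
 * part of the original source; the REGION_COMPARE_TEST guard and the
 * expected values are assumptions derived from the code above.
 * Compile this file with -DREGION_COMPARE_TEST for a standalone main().
 */
int
main(void)
{
	u_int64_t s1, l1, s2, l2;
	enum split_status st;

	/* Unlock strictly inside the lock: expect SPL_LOCK1|SPL_LOCK2,
	 * left remainder 0:30 and right remainder 80:20 */
	st = region_compare(0, 100, 30, 50, &s1, &l1, &s2, &l2);
	printf("inside: status %d left %llu:%llu right %llu:%llu\n",
	    st, s1, l1, s2, l2);

	/* Unlocker entirely right of the lock: expect SPL_DISJOINT */
	st = region_compare(0, 10, 20, 5, &s1, &l1, &s2, &l2);
	printf("disjoint: status %d\n", st);

	/* Unlocker covering the whole lock: expect SPL_CONTAINED */
	st = region_compare(10, 10, 0, 100, &s1, &l1, &s2, &l2);
	printf("contained: status %d\n", st);

	return (0);
}
#endif /* REGION_COMPARE_TEST */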

/*
 * same_netobj: Compares the appropriate bits of a netobj for identity
 */
int
same_netobj(const netobj *n0, const netobj *n1)
{
	int retval;

	retval = 0;

	debuglog("Entering netobj identity check\n");

	if (n0->n_len == n1->n_len) {
		debuglog("Preliminary length check passed\n");
		retval = !bcmp(n0->n_bytes, n1->n_bytes, n0->n_len);
		debuglog("netobj %smatch\n", retval ? "" : "mis");
	}

	return (retval);
}

/*
 * same_filelock_identity: Compares the appropriate bits of a file_lock
 */
int
same_filelock_identity(fl0, fl1)
	const struct file_lock *fl0, *fl1;
{
	int retval;

	retval = 0;

	debuglog("Checking filelock identity\n");

	/*
	 * Check process ids and host information.
	 */
	retval = (fl0->client.svid == fl1->client.svid &&
	    same_netobj(&(fl0->client.oh), &(fl1->client.oh)));

	debuglog("Exiting checking filelock identity: retval: %d\n",retval);

	return (retval);
}

/*
 * Below here are routines associated with manipulating the NFS
 * lock list.
 */

/*
 * get_lock_matching_unlock: Return a lock which matches the given unlock lock
 * or NULL otherwise
 * XXX: It is a shame that this duplicates so much code from test_nfslock.
 */
struct file_lock *
get_lock_matching_unlock(const struct file_lock *fl)
{
	struct file_lock *ifl; /* Iterator */

	debuglog("Entering lock_matching_unlock\n");
	debuglog("********Dump of fl*****************\n");
	dump_filelock(fl);

	LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
		debuglog("Pointer to file lock: %p\n",ifl);

		debuglog("****Dump of ifl****\n");
		dump_filelock(ifl);
		debuglog("*******************\n");

		/*
		 * XXX: It is conceivable that someone could use the NLM RPC
		 * system to directly access filehandles. This may be a
		 * security hazard as the filehandle code may bypass normal
		 * file access controls
		 */
		if (fl->filehandle.n_len != ifl->filehandle.n_len)
			continue;
		if (bcmp(fl->filehandle.n_bytes, ifl->filehandle.n_bytes,
		    fl->filehandle.n_len))
			continue;

		debuglog("matching_unlock: Filehandles match, "
		    "checking regions\n");

		/* Filehandles match, check for region overlap */
		if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
		    ifl->client.l_offset, ifl->client.l_len))
			continue;

		debuglog("matching_unlock: Region overlap"
		    " found %llu : %llu -- %llu : %llu\n",
		    fl->client.l_offset,fl->client.l_len,
		    ifl->client.l_offset,ifl->client.l_len);

		/* Regions overlap, check the identity */
		if (!same_filelock_identity(fl,ifl))
			continue;

		debuglog("matching_unlock: Duplicate lock id. Granting\n");
		return (ifl);
	}

	debuglog("Exiting lock_matching_unlock\n");

	return (NULL);
}

/*
 * test_nfslock: check for NFS lock in lock list
 *
 * This routine makes the following assumptions:
 *	1) Nothing will adjust the lock list during a lookup
 *
 * This routine has an interesting quirk which bit me hard.
 * The conflicting_fl is the pointer to the conflicting lock.
 * However, to modify the "*pointer* to the conflicting lock" rather
 * than the "conflicting lock itself" one must pass in a "pointer to
 * the pointer of the conflicting lock". Gross.
 */
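/*
 * Illustrative call pattern (added commentary, not from the original
 * source): the caller owns a pointer variable and passes its address,
 * e.g.
 *
 *	struct file_lock *conflict = NULL;
 *	if (test_nfslock(fl, &conflict) == NFS_DENIED)
 *		dump_filelock(conflict);
 */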

enum nfslock_status
test_nfslock(const struct file_lock *fl, struct file_lock **conflicting_fl)
{
	struct file_lock *ifl; /* Iterator */
	enum nfslock_status retval;

	debuglog("Entering test_nfslock\n");

	retval = NFS_GRANTED;
	(*conflicting_fl) = NULL;

	debuglog("Entering lock search loop\n");

	debuglog("***********************************\n");
	debuglog("Dumping match filelock\n");
	debuglog("***********************************\n");
	dump_filelock(fl);
	debuglog("***********************************\n");

	LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
		if (retval == NFS_DENIED)
			break;

		debuglog("Top of lock loop\n");
		debuglog("Pointer to file lock: %p\n",ifl);

		debuglog("***********************************\n");
		debuglog("Dumping test filelock\n");
		debuglog("***********************************\n");
		dump_filelock(ifl);
		debuglog("***********************************\n");

		/*
		 * XXX: It is conceivable that someone could use the NLM RPC
		 * system to directly access filehandles. This may be a
		 * security hazard as the filehandle code may bypass normal
		 * file access controls
		 */
		if (fl->filehandle.n_len != ifl->filehandle.n_len)
			continue;
		if (bcmp(fl->filehandle.n_bytes, ifl->filehandle.n_bytes,
		    fl->filehandle.n_len))
			continue;

		debuglog("test_nfslock: filehandle match found\n");

		/* Filehandles match, check for region overlap */
		if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
		    ifl->client.l_offset, ifl->client.l_len))
			continue;

		debuglog("test_nfslock: Region overlap found"
		    " %llu : %llu -- %llu : %llu\n",
		    fl->client.l_offset,fl->client.l_len,
		    ifl->client.l_offset,ifl->client.l_len);

		/* Regions overlap, check the exclusivity */
		if (!(fl->client.exclusive || ifl->client.exclusive))
			continue;

		debuglog("test_nfslock: Exclusivity failure: %d %d\n",
		    fl->client.exclusive,
		    ifl->client.exclusive);

		if (same_filelock_identity(fl,ifl)) {
			debuglog("test_nfslock: Duplicate id. Granting\n");
			(*conflicting_fl) = ifl;
			retval = NFS_GRANTED_DUPLICATE;
		} else {
			/* locking attempt fails */
			debuglog("test_nfslock: Lock attempt failed\n");
			debuglog("Desired lock\n");
			dump_filelock(fl);
			debuglog("Conflicting lock\n");
			dump_filelock(ifl);
			(*conflicting_fl) = ifl;
			retval = NFS_DENIED;
		}
	}

	debuglog("Dumping file locks\n");
	debuglog("Exiting test_nfslock\n");

	return (retval);
}

/*
 * lock_nfslock: attempt to create a lock in the NFS lock list
 *
 * This routine tests whether the lock will be granted and then adds
 * the entry to the lock list if so.
 *
 * Argument fl gets modified as its list housekeeping entries get modified
 * upon insertion into the NFS lock list
 *
 * This routine makes several assumptions:
 *    1) It is perfectly happy to grant a duplicate lock from the same pid.
 *       While this seems to be intuitively wrong, it is required for proper
 *       Posix semantics during unlock. It is absolutely imperative to not
 *       unlock the main lock before the two child locks are established. Thus,
 *       one has to be able to create duplicate locks over an existing lock
 *    2) It currently accepts duplicate locks from the same id,pid
 */

enum nfslock_status
lock_nfslock(struct file_lock *fl)
{
	enum nfslock_status retval;
	struct file_lock *dummy_fl;

	dummy_fl = NULL;

	debuglog("Entering lock_nfslock...\n");

	retval = test_nfslock(fl,&dummy_fl);

	if (retval == NFS_GRANTED || retval == NFS_GRANTED_DUPLICATE) {
		debuglog("Inserting lock...\n");
		dump_filelock(fl);
		LIST_INSERT_HEAD(&nfslocklist_head, fl, nfslocklist);
	}

	debuglog("Exiting lock_nfslock...\n");

	return (retval);
}

/*
 * delete_nfslock: delete an NFS lock list entry
 *
 * This routine is used to delete a lock out of the NFS lock list
 * without regard to status, underlying locks, regions or anything else
 *
 * Note that this routine *does not deallocate memory* of the lock.
 * It just disconnects it from the list. The lock can then be used
 * by other routines without fear of trashing the list.
 */

enum nfslock_status
delete_nfslock(struct file_lock *fl)
{

	LIST_REMOVE(fl, nfslocklist);

	return (NFS_GRANTED);
}

enum split_status
split_nfslock(exist_lock, unlock_lock, left_lock, right_lock)
	const struct file_lock *exist_lock, *unlock_lock;
	struct file_lock **left_lock, **right_lock;
{
	u_int64_t start1, len1, start2, len2;
	enum split_status spstatus;

	spstatus = region_compare(exist_lock->client.l_offset, exist_lock->client.l_len,
	    unlock_lock->client.l_offset, unlock_lock->client.l_len,
	    &start1, &len1, &start2, &len2);

	if ((spstatus & SPL_LOCK1) != 0) {
		*left_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->client_cookie,
		    &exist_lock->filehandle);
		if (*left_lock == NULL) {
			debuglog("Unable to allocate resource for split 1\n");
			return SPL_RESERR;
		}

		fill_file_lock(*left_lock,
		    exist_lock->addr,
		    exist_lock->client.exclusive, exist_lock->client.svid,
		    start1, len1,
		    exist_lock->client_name, exist_lock->nsm_status,
		    exist_lock->status, exist_lock->flags, exist_lock->blocking);
	}

	if ((spstatus & SPL_LOCK2) != 0) {
		*right_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->client_cookie,
		    &exist_lock->filehandle);
		if (*right_lock == NULL) {
978 | debuglog("Unable to allocate resource for split 1\n"); | |
			if (*left_lock != NULL) {
				deallocate_file_lock(*left_lock);
			}
			return SPL_RESERR;
		}

		fill_file_lock(*right_lock,
		    exist_lock->addr,
		    exist_lock->client.exclusive, exist_lock->client.svid,
		    start2, len2,
		    exist_lock->client_name, exist_lock->nsm_status,
		    exist_lock->status, exist_lock->flags, exist_lock->blocking);
	}

	return spstatus;
}

enum nfslock_status
unlock_nfslock(fl, released_lock, left_lock, right_lock)
	const struct file_lock *fl;
	struct file_lock **released_lock;
	struct file_lock **left_lock;
	struct file_lock **right_lock;
{
	struct file_lock *mfl; /* Matching file lock */
	enum nfslock_status retval;
	enum split_status spstatus;

	debuglog("Entering unlock_nfslock\n");

	*released_lock = NULL;
	*left_lock = NULL;
	*right_lock = NULL;

	retval = NFS_DENIED_NOLOCK;

	debuglog("Attempting to match lock...\n");
	mfl = get_lock_matching_unlock(fl);

	if (mfl != NULL) {
		debuglog("Unlock matched. Querying for split\n");

		spstatus = split_nfslock(mfl, fl, left_lock, right_lock);

		debuglog("Split returned %d %p %p %p %p\n",spstatus,mfl,fl,*left_lock,*right_lock);
		debuglog("********Split dumps********");
		dump_filelock(mfl);
		dump_filelock(fl);
		dump_filelock(*left_lock);
		dump_filelock(*right_lock);
		debuglog("********End Split dumps********");

		if (spstatus == SPL_RESERR) {
			if (*left_lock != NULL) {
				deallocate_file_lock(*left_lock);
				*left_lock = NULL;
			}

			if (*right_lock != NULL) {
				deallocate_file_lock(*right_lock);
				*right_lock = NULL;
			}

			return NFS_RESERR;
		}

		/* Insert new locks from split if required */
		if (*left_lock != NULL) {
			debuglog("Split left activated\n");
			LIST_INSERT_HEAD(&nfslocklist_head, *left_lock, nfslocklist);
		}

		if (*right_lock != NULL) {
			debuglog("Split right activated\n");
			LIST_INSERT_HEAD(&nfslocklist_head, *right_lock, nfslocklist);
		}

		/* Unlock the lock since it matches identity */
		LIST_REMOVE(mfl, nfslocklist);
		*released_lock = mfl;
		retval = NFS_GRANTED;
	}

	debuglog("Exiting unlock_nfslock\n");

	return retval;
}

/*
 * Below here are the routines for manipulating the file lock directly
 * on the disk hardware itself
 */
enum hwlock_status
lock_hwlock(struct file_lock *fl)
{
	struct monfile *imf,*nmf;
	int lflags, flerror;

	/* Scan to see if filehandle already present */
	LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
		if ((fl->filehandle.n_len == imf->filehandle.n_len) &&
		    (bcmp(fl->filehandle.n_bytes, imf->filehandle.n_bytes,
			fl->filehandle.n_len) == 0)) {
			/* imf is the correct filehandle */
			break;
		}
	}

	/*
	 * Filehandle already exists (we control the file)
	 * *AND* NFS has already cleared the lock for availability
	 * Grant it and bump the refcount.
	 */
	if (imf != NULL) {
		++(imf->refcount);
		return (HW_GRANTED);
	}

	/* No filehandle found, create and go */
	nmf = malloc(sizeof(struct monfile));
	if (nmf == NULL) {
		debuglog("hwlock resource allocation failure\n");
		return (HW_RESERR);
	}
	nmf->filehandle.n_bytes = malloc(fl->filehandle.n_len);
	if (nmf->filehandle.n_bytes == NULL) {
		debuglog("hwlock resource allocation failure\n");
		free(nmf);
		return (HW_RESERR);
	}

	/* XXX: Is O_RDWR always the correct mode? */
	nmf->fd = fhopen((fhandle_t *)fl->filehandle.n_bytes, O_RDWR);
	if (nmf->fd < 0) {
		debuglog("fhopen failed (from %16s): %32s\n",
		    fl->client_name, strerror(errno));
		free(nmf->filehandle.n_bytes);
		free(nmf);
		switch (errno) {
		case ESTALE:
			return (HW_STALEFH);
		case EROFS:
			return (HW_READONLY);
		default:
			return (HW_RESERR);
		}
	}

	/* File opened correctly, fill the monitor struct */
	nmf->filehandle.n_len = fl->filehandle.n_len;
	bcopy(fl->filehandle.n_bytes, nmf->filehandle.n_bytes, fl->filehandle.n_len);
	nmf->refcount = 1;
	nmf->exclusive = fl->client.exclusive;

	lflags = (nmf->exclusive == 1) ?
	    (LOCK_EX | LOCK_NB) : (LOCK_SH | LOCK_NB);

	flerror = flock(nmf->fd, lflags);

	if (flerror != 0) {
		debuglog("flock failed (from %16s): %32s\n",
		    fl->client_name, strerror(errno));
		close(nmf->fd);
		free(nmf->filehandle.n_bytes);
		free(nmf);
		switch (errno) {
		case EAGAIN:
			return (HW_DENIED);
		case ESTALE:
			return (HW_STALEFH);
		case EROFS:
			return (HW_READONLY);
		default:
			return (HW_RESERR);
		}
	}

	/* File opened and locked */
	LIST_INSERT_HEAD(&monfilelist_head, nmf, monfilelist);

	debuglog("flock succeeded (from %16s)\n", fl->client_name);
	return (HW_GRANTED);
}

enum hwlock_status
unlock_hwlock(const struct file_lock *fl)
{
	struct monfile *imf;

	debuglog("Entering unlock_hwlock\n");
1168 | debuglog("Entering loop interation\n"); | |

	/* Scan to see if filehandle already present */
	LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
		if ((fl->filehandle.n_len == imf->filehandle.n_len) &&
		    (bcmp(fl->filehandle.n_bytes, imf->filehandle.n_bytes,
			fl->filehandle.n_len) == 0)) {
			/* imf is the correct filehandle */
			break;
		}
	}

	debuglog("Completed iteration. Proceeding\n");

	if (imf == NULL) {
		/* No lock found */
		debuglog("Exiting unlock_hwlock (HW_DENIED_NOLOCK)\n");
		return (HW_DENIED_NOLOCK);
	}

	/* Lock found */
	--imf->refcount;

	if (imf->refcount < 0) {
		debuglog("Negative hardware reference count\n");
	}

	if (imf->refcount <= 0) {
		close(imf->fd);
		LIST_REMOVE(imf, monfilelist);
		free(imf);
	}
	debuglog("Exiting unlock_hwlock (HW_GRANTED)\n");
	return (HW_GRANTED);
}

enum hwlock_status
test_hwlock(fl, conflicting_fl)
	const struct file_lock *fl __unused;
	struct file_lock **conflicting_fl __unused;
{

	/*
	 * XXX: lock tests on hardware are not required until
	 * true partial file testing is done on the underlying file
	 */
	return (HW_RESERR);
}



/*
 * Below here are routines for manipulating blocked lock requests
 * They should only be called from the XXX_partialfilelock routines
 * if at all possible
 */

void
add_blockingfilelock(struct file_lock *fl)
{

	debuglog("Entering add_blockingfilelock\n");

	/*
	 * Clear the blocking flag so that it can be reused without
	 * adding it to the blocking queue a second time
	 */

	fl->blocking = 0;
	LIST_INSERT_HEAD(&blockedlocklist_head, fl, nfslocklist);

	debuglog("Exiting add_blockingfilelock\n");
}

void
remove_blockingfilelock(struct file_lock *fl)
{

	debuglog("Entering remove_blockingfilelock\n");

	LIST_REMOVE(fl, nfslocklist);

	debuglog("Exiting remove_blockingfilelock\n");
}

void
clear_blockingfilelock(const char *hostname)
{
	struct file_lock *ifl,*nfl;

	/*
	 * Normally, LIST_FOREACH is called for, but since
	 * the current element *is* the iterator, deleting it
	 * would mess up the iteration. Thus, a next element
	 * must be used explicitly
	 */

	ifl = LIST_FIRST(&blockedlocklist_head);

	while (ifl != NULL) {
		nfl = LIST_NEXT(ifl, nfslocklist);

		if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
			remove_blockingfilelock(ifl);
			deallocate_file_lock(ifl);
		}

		ifl = nfl;
	}
}

void
retry_blockingfilelocklist(void)
{
	/* Retry all locks in the blocked list */
	struct file_lock *ifl, *nfl, *pfl; /* Iterator */
	enum partialfilelock_status pflstatus;

	debuglog("Entering retry_blockingfilelocklist\n");

	pfl = NULL;
	ifl = LIST_FIRST(&blockedlocklist_head);
	debuglog("Iterator choice %p\n",ifl);

	while (ifl != NULL) {
		/*
		 * SUBTLE BUG: The next element must be worked out before the
		 * current element has been moved
		 */
		nfl = LIST_NEXT(ifl, nfslocklist);
		debuglog("Iterator choice %p\n",ifl);
		debuglog("Prev iterator choice %p\n",pfl);
		debuglog("Next iterator choice %p\n",nfl);

		/*
		 * SUBTLE BUG: The file_lock must be removed from the
		 * old list so that its list pointers get disconnected
		 * before being allowed to participate in the new list
		 * which will automatically add it in if necessary.
		 */

		LIST_REMOVE(ifl, nfslocklist);
		pflstatus = lock_partialfilelock(ifl);

		if (pflstatus == PFL_GRANTED || pflstatus == PFL_GRANTED_DUPLICATE) {
			debuglog("Granted blocked lock\n");
			/* lock granted and is now being used */
			send_granted(ifl,0);
			/* XXX should undo lock if send_granted fails */
		} else {
			/* Reinsert lock back into same place in blocked list */
			debuglog("Replacing blocked lock\n");
			if (pfl != NULL)
				LIST_INSERT_AFTER(pfl, ifl, nfslocklist);
			else
				/* ifl is the only elem. in the list */
				LIST_INSERT_HEAD(&blockedlocklist_head, ifl, nfslocklist);
		}

		/* Valid increment behavior regardless of state of ifl */
		ifl = nfl;
		/* if a lock was granted incrementing pfl would make it nfl */
		if (pfl != NULL && (LIST_NEXT(pfl, nfslocklist) != nfl))
			pfl = LIST_NEXT(pfl, nfslocklist);
		else
			pfl = LIST_FIRST(&blockedlocklist_head);
	}

	debuglog("Exiting retry_blockingfilelocklist\n");
}

/*
 * Below here are routines associated with manipulating all
 * aspects of the partial file locking system (list, hardware, etc.)
 */

/*
 * Please note that lock monitoring must be done at this level which
 * keeps track of *individual* lock requests on lock and unlock
 *
 * XXX: Split unlocking is going to make the unlock code miserable
 */

/*
 * lock_partialfilelock:
 *
 * Argument fl gets modified as its list housekeeping entries get modified
 * upon insertion into the NFS lock list
 *
 * This routine makes several assumptions:
 * 1) It (will) pass locks through to flock to lock the entire underlying file
 *    and then parcel out NFS locks if it gets control of the file.
 *    This matches the old rpc.lockd file semantics (except where it
 *    is now more correct). It is the safe solution, but will cause
 *    overly restrictive blocking if someone is trying to use the
 *    underlying files without using NFS. This appears to be an
 *    acceptable tradeoff since most people use standalone NFS servers.
 *    XXX: The right solution is probably kevent combined with fcntl
 *
 * 2) Nothing modifies the lock lists between testing and granting
 *    I have no idea whether this is a useful assumption or not
 */

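/*
 * Informal summary (added commentary, not from the original source):
 * lock_partialfilelock() first reserves the region on the NFS lock
 * list via lock_nfslock(); only if that succeeds does it call
 * lock_hwlock() to flock() the whole underlying file.  If the
 * hardware lock fails, the trial NFS lock is deleted again, so a
 * request is never left half-granted.
 */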
enum partialfilelock_status
lock_partialfilelock(struct file_lock *fl)
{
	enum partialfilelock_status retval;
	enum nfslock_status lnlstatus;
	enum hwlock_status hwstatus;

	debuglog("Entering lock_partialfilelock\n");

	retval = PFL_DENIED;

	/*
	 * Execute the NFS lock first, if possible, as it is significantly
	 * easier and less expensive to undo than the filesystem lock
	 */

	lnlstatus = lock_nfslock(fl);

	switch (lnlstatus) {
	case NFS_GRANTED:
	case NFS_GRANTED_DUPLICATE:
		/*
		 * At this point, the NFS lock is allocated and active.
		 * Remember to clean it up if the hardware lock fails
		 */
		hwstatus = lock_hwlock(fl);

		switch (hwstatus) {
		case HW_GRANTED:
		case HW_GRANTED_DUPLICATE:
			debuglog("HW GRANTED\n");
			/*
			 * XXX: Fixme: Check hwstatus for duplicate when
			 * true partial file locking and accounting is
			 * done on the hardware
			 */
			if (lnlstatus == NFS_GRANTED_DUPLICATE) {
				retval = PFL_GRANTED_DUPLICATE;
			} else {
				retval = PFL_GRANTED;
			}
			if (fl->flags & LOCK_MON)
				monitor_lock_host(fl->client_name);
			break;
		case HW_RESERR:
			debuglog("HW RESERR\n");
			retval = PFL_HWRESERR;
			break;
		case HW_DENIED:
			debuglog("HW DENIED\n");
			retval = PFL_HWDENIED;
			break;
		default:
			debuglog("Unmatched hwstatus %d\n",hwstatus);
			break;
		}

		if (retval != PFL_GRANTED &&
		    retval != PFL_GRANTED_DUPLICATE) {
			/* Clean up the NFS lock */
			debuglog("Deleting trial NFS lock\n");
			delete_nfslock(fl);
		}
		break;
	case NFS_DENIED:
		retval = PFL_NFSDENIED;
		break;
	case NFS_RESERR:
		retval = PFL_NFSRESERR;
		break;
	default:
		debuglog("Unmatched lnlstatus %d\n", lnlstatus);
		retval = PFL_NFSDENIED_NOLOCK;
		break;
	}

	/*
	 * By the time fl reaches here, it is completely free again on
	 * failure. The NFS lock done before attempting the
	 * hardware lock has been backed out
	 */

	if (retval == PFL_NFSDENIED || retval == PFL_HWDENIED) {
		/* One last chance to check the lock */
		if (fl->blocking == 1) {
			/* Queue the lock */
			debuglog("BLOCKING LOCK RECEIVED\n");
			retval = (retval == PFL_NFSDENIED ?
			    PFL_NFSBLOCKED : PFL_HWBLOCKED);
			add_blockingfilelock(fl);
			dump_filelock(fl);
		} else {
			/* Leave retval alone, it's already correct */
			debuglog("Lock denied. Non-blocking failure\n");
			dump_filelock(fl);
		}
	}

	debuglog("Exiting lock_partialfilelock\n");

	return retval;
}

/*
 * unlock_partialfilelock:
 *
 * Given a file_lock, unlock all locks which match.
 *
 * Note that a given lock might have to unlock ITSELF! See
 * clear_partialfilelock for example.
 */

enum partialfilelock_status
unlock_partialfilelock(const struct file_lock *fl)
{
	struct file_lock *lfl,*rfl,*releasedfl,*selffl;
	enum partialfilelock_status retval;
	enum nfslock_status unlstatus;
	enum hwlock_status unlhwstatus, lhwstatus;

	debuglog("Entering unlock_partialfilelock\n");

	selffl = NULL;
	lfl = NULL;
	rfl = NULL;
	releasedfl = NULL;
	retval = PFL_DENIED;

	/*
	 * There are significant overlap and atomicity issues
	 * with partially releasing a lock. For example, releasing
	 * part of an NFS shared lock does *not* always release the
	 * corresponding part of the file since there is only one
	 * rpc.lockd UID but multiple users could be requesting it
	 * from NFS. Also, an unlock request should never allow
	 * another process to gain a lock on the remaining parts.
	 * ie. Always apply the new locks before releasing the
	 * old one
	 */

	/*
	 * Loop is required since multiple little locks
	 * can be allocated and then deallocated with one
	 * big unlock.
	 *
	 * The loop is required to be here so that the nfs &
	 * hw subsystems do not need to communicate with
	 * one another
1518 | */ | |
1519 | ||
1520 | do { | |
1521 | debuglog("Value of releasedfl: %p\n",releasedfl); | |
1522 | /* lfl&rfl are created *AND* placed into the NFS lock list if required */ | |
1523 | unlstatus = unlock_nfslock(fl, &releasedfl, &lfl, &rfl); | |
1524 | debuglog("Value of releasedfl: %p\n",releasedfl); | |
1525 | ||
1526 | ||
1527 | /* XXX: This is grungy. It should be refactored to be cleaner */ | |
1528 | if (lfl != NULL) { | |
1529 | lhwstatus = lock_hwlock(lfl); | |
1530 | if (lhwstatus != HW_GRANTED && | |
1531 | lhwstatus != HW_GRANTED_DUPLICATE) { | |
1532 | debuglog("HW duplicate lock failure for left split\n"); | |
1533 | } | |
1534 | if (lfl->flags & LOCK_MON) | |
1535 | monitor_lock_host(lfl->client_name); | |
1536 | } | |
1537 | ||
1538 | if (rfl != NULL) { | |
1539 | lhwstatus = lock_hwlock(rfl); | |
1540 | if (lhwstatus != HW_GRANTED && | |
1541 | lhwstatus != HW_GRANTED_DUPLICATE) { | |
1542 | debuglog("HW duplicate lock failure for right split\n"); | |
1543 | } | |
1544 | if (rfl->flags & LOCK_MON) | |
1545 | monitor_lock_host(rfl->client_name); | |
1546 | } | |
1547 | ||
1548 | switch (unlstatus) { | |
1549 | case NFS_GRANTED: | |
1550 | /* Attempt to unlock on the hardware */ | |
1551 | debuglog("NFS unlock granted. Attempting hardware unlock\n"); | |
1552 | ||
1553 | /* This call *MUST NOT* unlock the two newly allocated locks */ | |
1554 | unlhwstatus = unlock_hwlock(fl); | |
1555 | debuglog("HW unlock returned with code %d\n",unlhwstatus); | |
1556 | ||
1557 | switch (unlhwstatus) { | |
1558 | case HW_GRANTED: | |
1559 | debuglog("HW unlock granted\n"); | |
1560 | if (releasedfl->flags & LOCK_MON) | |
1561 | unmonitor_lock_host(releasedfl->client_name); | |
1562 | retval = PFL_GRANTED; | |
1563 | break; | |
1564 | case HW_DENIED_NOLOCK: | |
1565 | /* Huh?!?! This shouldn't happen */ | |
1566 | debuglog("HW unlock denied no lock\n"); | |
1567 | retval = PFL_HWRESERR; | |
1568 | /* Break out of do-while */ | |
1569 | unlstatus = NFS_RESERR; | |
1570 | break; | |
1571 | default: | |
1572 | debuglog("HW unlock failed\n"); | |
1573 | retval = PFL_HWRESERR; | |
1574 | /* Break out of do-while */ | |
1575 | unlstatus = NFS_RESERR; | |
1576 | break; | |
1577 | } | |
1578 | ||
1579 | debuglog("Exiting with status retval: %d\n",retval); | |
1580 | ||
1581 | /* | |
1582 | * XXX: sending granted messages before the unlock response | |
1583 | * causes the unlock response to be corrupted? Workaround is | |
1584 | * to move this to nlm_prot_svc.c after the unlock response | |
1585 | * is sent. | |
1586 | * retry_blockingfilelocklist(); | |
1587 | */ | |
1586 | break; | |
1587 | case NFS_DENIED_NOLOCK: | |
1588 | retval = PFL_GRANTED; | |
1589 | debuglog("All locks cleaned out\n"); | |
1590 | break; | |
1591 | default: | |
1592 | retval = PFL_NFSRESERR; | |
1593 | debuglog("NFS unlock failure\n"); | |
1594 | dump_filelock(fl); | |
1595 | break; | |
1596 | } | |
1597 | ||
1598 | if (releasedfl != NULL) { | |
1599 | if (fl == releasedfl) { | |
1600 | /* | |
1601 | * XXX: YECHHH!!! Attempt to unlock self succeeded | |
1602 | * but we can't deallocate the space yet. This is what | |
1603 | * happens when you don't write malloc and free together | |
1604 | */ | |
1605 | debuglog("Attempt to unlock self\n"); | |
1606 | selffl = releasedfl; | |
1607 | } else { | |
1608 | /* | |
1609 | * XXX: this deallocation *still* needs to migrate closer | |
1610 | * to the allocation code way up in get_lock or the allocation | |
1611 | * code needs to migrate down (violation of "When you write | |
1612 | * malloc you must write free") | |
1613 | */ | |
1614 | ||
1615 | deallocate_file_lock(releasedfl); | |
1616 | } | |
1617 | } | |
1618 | ||
1619 | } while (unlstatus == NFS_GRANTED); | |
1620 | ||
1621 | if (selffl != NULL) { | |
1622 | /* | |
1623 | * This statement wipes out the incoming file lock (fl) | |
1624 | * in spite of the fact that it is declared const | |
1625 | */ | |
1626 | debuglog("WARNING! Destroying incoming lock pointer\n"); | |
1627 | deallocate_file_lock(selffl); | |
1628 | } | |
1629 | ||
1630 | debuglog("Exiting unlock_partialfilelock\n"); | |
1631 | ||
1632 | return retval; | |
1633 | } | |
1634 | ||
1635 | /* | |
1636 | * clear_partialfilelock | |
1637 | * | |
1638 | * Normally called in response to statd state number change. | |
1639 | * Wipe out all locks held by a host. As a bonus, the act of | |
1640 | * doing so should automatically clear their statd entries and | |
1641 | * unmonitor the host. | |
1642 | */ | |
1643 | ||
1644 | void | |
1645 | clear_partialfilelock(const char *hostname) | |
1646 | { | |
1647 | struct file_lock *ifl, *nfl; | |
1648 | ||
1649 | /* Clear blocking file lock list */ | |
1650 | clear_blockingfilelock(hostname); | |
1651 | ||
1652 | /* do all required unlocks */ | |
1653 | /* Note that unlock can smash the current pointer to a lock */ | |
1654 | ||
1655 | /* | |
1656 | * Normally, LIST_FOREACH is called for, but since | |
1657 | * the current element *is* the iterator, deleting it | |
1658 | * would mess up the iteration. Thus, a next element | |
1659 | * must be used explicitly | |
1660 | */ | |
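| /* | |
| * Editor's note: later <sys/queue.h> revisions provide a | |
| * LIST_FOREACH_SAFE() macro that captures the next pointer for this | |
| * delete-while-iterating pattern; a sketch, assuming that macro is | |
| * available: | |
| * | |
| * LIST_FOREACH_SAFE(ifl, &nfslocklist_head, nfslocklist, nfl) | |
| * if (host_matches(ifl)) | |
| * unlock_partialfilelock(ifl); | |
| * | |
| * host_matches() is a hypothetical predicate standing in for the | |
| * strncmp() test used below. | |
| */ | |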
1661 | ||
1662 | ifl = LIST_FIRST(&nfslocklist_head); | |
1663 | ||
1664 | while (ifl != NULL) { | |
1665 | nfl = LIST_NEXT(ifl, nfslocklist); | |
1666 | ||
1667 | if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) { | |
1668 | /* Unlock destroys ifl out from underneath */ | |
1669 | unlock_partialfilelock(ifl); | |
1670 | /* ifl is NO LONGER VALID AT THIS POINT */ | |
1671 | } | |
1672 | ifl = nfl; | |
1673 | } | |
1674 | } | |
1675 | ||
1676 | /* | |
1677 | * test_partialfilelock: | |
1678 | */ | |
1679 | enum partialfilelock_status | |
1680 | test_partialfilelock(const struct file_lock *fl, | |
1681 | struct file_lock **conflicting_fl) | |
1682 | { | |
1683 | enum partialfilelock_status retval; | |
1684 | enum nfslock_status teststatus; | |
1685 | ||
1686 | debuglog("Entering testpartialfilelock...\n"); | |
1687 | ||
1688 | retval = PFL_DENIED; | |
1689 | ||
1690 | teststatus = test_nfslock(fl, conflicting_fl); | |
1691 | debuglog("test_partialfilelock: teststatus %d\n",teststatus); | |
1692 | ||
1693 | if (teststatus == NFS_GRANTED || teststatus == NFS_GRANTED_DUPLICATE) { | |
1694 | /* XXX: Add the underlying filesystem locking code */ | |
1695 | retval = (teststatus == NFS_GRANTED) ? | |
1696 | PFL_GRANTED : PFL_GRANTED_DUPLICATE; | |
1697 | debuglog("Dumping locks...\n"); | |
1698 | dump_filelock(fl); | |
1699 | dump_filelock(*conflicting_fl); | |
1700 | debuglog("Done dumping locks...\n"); | |
1701 | } else { | |
1702 | retval = PFL_NFSDENIED; | |
1703 | debuglog("NFS test denied.\n"); | |
1704 | dump_filelock(fl); | |
1705 | debuglog("Conflicting.\n"); | |
1706 | dump_filelock(*conflicting_fl); | |
1707 | } | |
1708 | ||
1709 | debuglog("Exiting testpartialfilelock...\n"); | |
1710 | ||
1711 | return retval; | |
1712 | } | |
1713 | ||
1714 | /* | |
1715 | * Below here are routines associated with translating the partial file locking | |
1716 | * codes into useful codes to send back to the NFS RPC messaging system | |
1717 | */ | |
1718 | ||
1719 | /* | |
1720 | * These routines translate the (relatively) useful return codes back onto | |
1721 | * the few return codes which the nlm subsystem wishes to transmit | |
1722 | */ | |
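| /* | |
| * Translation summary, derived from the switch statements below | |
| * (the nlm4_* code is used when LOCK_V4 is set, nlm_* otherwise): | |
| * | |
| * PFL_GRANTED, PFL_GRANTED_DUPLICATE -> nlm(4)_granted | |
| * PFL_NFSDENIED, PFL_HWDENIED -> nlm(4)_denied | |
| * PFL_NFSBLOCKED, PFL_HWBLOCKED -> nlm(4)_blocked (do_lock only) | |
| * PFL_*DENIED_NOLOCK -> nlm(4)_granted (do_unlock only) | |
| * PFL_NFSRESERR, PFL_HWRESERR -> nlm(4)_denied_nolocks | |
| * anything else -> nlm4_failed / nlm_denied | |
| */ | |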
1723 | ||
1724 | enum nlm_stats | |
1725 | do_test(struct file_lock *fl, struct file_lock **conflicting_fl) | |
1726 | { | |
1727 | enum partialfilelock_status pfsret; | |
1728 | enum nlm_stats retval; | |
1729 | ||
1730 | debuglog("Entering do_test...\n"); | |
1731 | ||
1732 | pfsret = test_partialfilelock(fl,conflicting_fl); | |
1733 | ||
1734 | switch (pfsret) { | |
1735 | case PFL_GRANTED: | |
1736 | debuglog("PFL test lock granted\n"); | |
1737 | dump_filelock(fl); | |
1738 | dump_filelock(*conflicting_fl); | |
1739 | retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted; | |
1740 | break; | |
1741 | case PFL_GRANTED_DUPLICATE: | |
1742 | debuglog("PFL test lock granted--duplicate id detected\n"); | |
1743 | dump_filelock(fl); | |
1744 | dump_filelock(*conflicting_fl); | |
1745 | debuglog("Clearing conflicting_fl for call semantics\n"); | |
1746 | *conflicting_fl = NULL; | |
1747 | retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted; | |
1748 | break; | |
1749 | case PFL_NFSDENIED: | |
1750 | case PFL_HWDENIED: | |
1751 | debuglog("PFL test lock denied\n"); | |
1752 | dump_filelock(fl); | |
1753 | dump_filelock(*conflicting_fl); | |
1754 | retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied; | |
1755 | break; | |
1756 | case PFL_NFSRESERR: | |
1757 | case PFL_HWRESERR: | |
1758 | debuglog("PFL test lock resource fail\n"); | |
1759 | dump_filelock(fl); | |
1760 | dump_filelock(*conflicting_fl); | |
1761 | retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks; | |
1762 | break; | |
1763 | default: | |
1764 | debuglog("PFL test lock *FAILED*\n"); | |
1765 | dump_filelock(fl); | |
1766 | dump_filelock(*conflicting_fl); | |
1767 | retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied; | |
1768 | break; | |
1769 | } | |
1770 | ||
1771 | debuglog("Exiting do_test...\n"); | |
1772 | ||
1773 | return retval; | |
1774 | } | |
1775 | ||
1776 | /* | |
1777 | * do_lock: Try to acquire a lock | |
1778 | * | |
1779 | * This routine makes a distinction between NLM versions. I am pretty | |
1780 | * convinced that this should be abstracted out and bounced up a level | |
1781 | */ | |
1782 | ||
1783 | enum nlm_stats | |
1784 | do_lock(struct file_lock *fl) | |
1785 | { | |
1786 | enum partialfilelock_status pfsret; | |
1787 | enum nlm_stats retval; | |
1788 | ||
1789 | debuglog("Entering do_lock...\n"); | |
1790 | ||
1791 | pfsret = lock_partialfilelock(fl); | |
1792 | ||
1793 | switch (pfsret) { | |
1794 | case PFL_GRANTED: | |
1795 | debuglog("PFL lock granted"); | |
1796 | dump_filelock(fl); | |
1797 | retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted; | |
1798 | break; | |
1799 | case PFL_GRANTED_DUPLICATE: | |
1800 | debuglog("PFL lock granted--duplicate id detected"); | |
1801 | dump_filelock(fl); | |
1802 | retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted; | |
1803 | break; | |
1804 | case PFL_NFSDENIED: | |
1805 | case PFL_HWDENIED: | |
1806 | debuglog("PFL_NFS lock denied"); | |
1807 | dump_filelock(fl); | |
1808 | retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied; | |
1809 | break; | |
1810 | case PFL_NFSBLOCKED: | |
1811 | case PFL_HWBLOCKED: | |
1812 | debuglog("PFL_NFS blocking lock denied. Queued.\n"); | |
1813 | dump_filelock(fl); | |
1814 | retval = (fl->flags & LOCK_V4) ? nlm4_blocked : nlm_blocked; | |
1815 | break; | |
1816 | case PFL_NFSRESERR: | |
1817 | case PFL_HWRESERR: | |
1818 | debuglog("PFL lock resource alocation fail\n"); | |
1819 | dump_filelock(fl); | |
1820 | retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks; | |
1821 | break; | |
1822 | default: | |
1823 | debuglog("PFL lock *FAILED*"); | |
1824 | dump_filelock(fl); | |
1825 | retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied; | |
1826 | break; | |
1827 | } | |
1828 | ||
1829 | debuglog("Exiting do_lock...\n"); | |
1830 | ||
1831 | return retval; | |
1832 | } | |
1833 | ||
1834 | enum nlm_stats | |
1835 | do_unlock(struct file_lock *fl) | |
1836 | { | |
1837 | enum partialfilelock_status pfsret; | |
1838 | enum nlm_stats retval; | |
1839 | ||
1840 | debuglog("Entering do_unlock...\n"); | |
1841 | pfsret = unlock_partialfilelock(fl); | |
1842 | ||
1843 | switch (pfsret) { | |
1844 | case PFL_GRANTED: | |
1845 | debuglog("PFL unlock granted"); | |
1846 | dump_filelock(fl); | |
1847 | retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted; | |
1848 | break; | |
1849 | case PFL_NFSDENIED: | |
1850 | case PFL_HWDENIED: | |
1851 | debuglog("PFL_NFS unlock denied"); | |
1852 | dump_filelock(fl); | |
1853 | retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied; | |
1854 | break; | |
1855 | case PFL_NFSDENIED_NOLOCK: | |
1856 | case PFL_HWDENIED_NOLOCK: | |
1857 | debuglog("PFL_NFS no lock found\n"); | |
1858 | retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted; | |
1859 | break; | |
1860 | case PFL_NFSRESERR: | |
1861 | case PFL_HWRESERR: | |
1862 | debuglog("PFL unlock resource failure"); | |
1863 | dump_filelock(fl); | |
1864 | retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks; | |
1865 | break; | |
1866 | default: | |
1867 | debuglog("PFL unlock *FAILED*"); | |
1868 | dump_filelock(fl); | |
1869 | retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied; | |
1870 | break; | |
1871 | } | |
1872 | ||
1873 | debuglog("Exiting do_unlock...\n"); | |
1874 | ||
1875 | return retval; | |
1876 | } | |
1877 | ||
1878 | /* | |
1879 | * do_clear | |
1880 | * | |
1881 | * This routine is a bare wrapper because it has no return code of its | |
1882 | * own. It is here for completeness in case someone *does* need to do | |
1883 | * return codes later. A decent compiler should optimize the extra | |
1884 | * call away. | |
1884 | */ | |
1885 | ||
1886 | void | |
1887 | do_clear(const char *hostname) | |
1888 | { | |
1889 | ||
1890 | clear_partialfilelock(hostname); | |
1891 | } | |
1892 | ||
1893 | /* | |
1894 | * The following routines are all called from the code which the | |
1895 | * RPC layer invokes | |
1896 | */ | |
1897 | ||
1898 | /* | |
1899 | * testlock(): inform the caller if the requested lock would be granted | |
1900 | * | |
1901 | * returns NULL if the lock would be granted | |
1902 | * returns pointer to a conflicting nlm4_holder if not | |
1903 | */ | |
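| /* | |
| * Usage sketch (editor's illustration; the real callers are the RPC | |
| * service routines, and the variable names here are invented): | |
| * | |
| * struct nlm4_holder *holder; | |
| * holder = testlock(&args->alock, args->exclusive, LOCK_V4); | |
| * if (holder == NULL) | |
| * reply nlm4_granted; | |
| * else | |
| * reply nlm4_denied, copying *holder into the response. | |
| */ | |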
1904 | ||
1905 | struct nlm4_holder * | |
1906 | testlock(struct nlm4_lock *lock, bool_t exclusive, int flags __unused) | |
1907 | { | |
1908 | struct file_lock test_fl, *conflicting_fl; | |
1909 | ||
1910 | bzero(&test_fl, sizeof(test_fl)); | |
1911 | ||
1912 | test_fl.filehandle.n_len = lock->fh.n_len; | |
1913 | test_fl.filehandle.n_bytes = lock->fh.n_bytes; | |
1914 | copy_nlm4_lock_to_nlm4_holder(lock, exclusive, &test_fl.client); | |
1915 | ||
1916 | siglock(); | |
1917 | do_test(&test_fl, &conflicting_fl); | |
1918 | ||
1919 | if (conflicting_fl == NULL) { | |
1920 | debuglog("No conflicting lock found\n"); | |
1921 | sigunlock(); | |
1922 | return NULL; | |
1923 | } else { | |
1924 | debuglog("Found conflicting lock\n"); | |
1925 | dump_filelock(conflicting_fl); | |
1926 | sigunlock(); | |
1927 | return (&conflicting_fl->client); | |
1928 | } | |
1929 | } | |
1930 | ||
1931 | /* | |
1932 | * getlock: try to acquire the lock. | |
1933 | * If file is already locked and we can sleep, put the lock in the list with | |
1934 | * status LKST_WAITING; it'll be processed later. | |
1935 | * Otherwise try to lock. If we're allowed to block, fork a child which | |
1936 | * will do the blocking lock. | |
1937 | */ | |
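| /* | |
| * Editor's note: the fork described above is historical; in the code | |
| * below a blocking request that cannot be granted immediately comes | |
| * back from do_lock() as nlm(4)_blocked and sits on the blocked-lock | |
| * list to be retried later (see retry_blockingfilelocklist), rather | |
| * than being serviced by a child process. | |
| */ | |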
1938 | ||
1939 | enum nlm_stats | |
1940 | getlock(nlm4_lockargs *lckarg, struct svc_req *rqstp, const int flags) | |
1941 | { | |
1942 | struct file_lock *newfl; | |
1943 | enum nlm_stats retval; | |
1944 | ||
1945 | debuglog("Entering getlock...\n"); | |
1946 | ||
1947 | if (grace_expired == 0 && lckarg->reclaim == 0) | |
1948 | return (flags & LOCK_V4) ? | |
1949 | nlm4_denied_grace_period : nlm_denied_grace_period; | |
1950 | ||
1951 | /* allocate new file_lock for this request */ | |
1952 | newfl = allocate_file_lock(&lckarg->alock.oh, &lckarg->cookie, &lckarg->alock.fh); | |
1953 | if (newfl == NULL) { | |
1954 | syslog(LOG_NOTICE, "lock allocate failed: %s", strerror(errno)); | |
1955 | /* failed */ | |
1956 | return (flags & LOCK_V4) ? | |
1957 | nlm4_denied_nolocks : nlm_denied_nolocks; | |
1958 | } | |
1959 | ||
1960 | if (lckarg->alock.fh.n_len != sizeof(fhandle_t)) { | |
1961 | debuglog("recieved fhandle size %d, local size %d", | |
1962 | lckarg->alock.fh.n_len, (int)sizeof(fhandle_t)); | |
1963 | } | |
1964 | ||
1965 | fill_file_lock(newfl, | |
1966 | (struct sockaddr *)svc_getcaller(rqstp->rq_xprt), | |
1967 | lckarg->exclusive, lckarg->alock.svid, lckarg->alock.l_offset, | |
1968 | lckarg->alock.l_len, | |
1969 | lckarg->alock.caller_name, lckarg->state, 0, flags, lckarg->block); | |
1970 | ||
1971 | /* | |
1972 | * newfl is now fully constructed and deallocate_file_lock | |
1973 | * can now be used to delete it | |
1974 | */ | |
1975 | ||
1976 | siglock(); | |
1977 | debuglog("Pointer to new lock is %p\n",newfl); | |
1978 | ||
1979 | retval = do_lock(newfl); | |
1980 | ||
1981 | debuglog("Pointer to new lock is %p\n",newfl); | |
1982 | sigunlock(); | |
1983 | ||
1984 | switch (retval) | |
1985 | { | |
1986 | case nlm4_granted: | |
1987 | /* case nlm_granted: is the same as nlm4_granted */ | |
1988 | /* do_mon(lckarg->alock.caller_name); */ | |
1989 | break; | |
1990 | case nlm4_blocked: | |
1991 | /* case nlm_blocked: is the same as nlm4_blocked */ | |
1992 | /* do_mon(lckarg->alock.caller_name); */ | |
1993 | break; | |
1994 | default: | |
1995 | deallocate_file_lock(newfl); | |
1996 | break; | |
1997 | } | |
1998 | ||
1999 | debuglog("Exiting getlock...\n"); | |
2000 | ||
2001 | return retval; | |
2002 | } | |
2003 | ||
2004 | ||
2005 | /* unlock a filehandle */ | |
2006 | enum nlm_stats | |
2007 | unlock(nlm4_lock *lock, const int flags __unused) | |
2008 | { | |
2009 | struct file_lock fl; | |
2010 | enum nlm_stats err; | |
2011 | ||
2012 | siglock(); | |
2013 | ||
2014 | debuglog("Entering unlock...\n"); | |
2015 | ||
2016 | bzero(&fl,sizeof(struct file_lock)); | |
2017 | fl.filehandle.n_len = lock->fh.n_len; | |
2018 | fl.filehandle.n_bytes = lock->fh.n_bytes; | |
2019 | ||
2020 | copy_nlm4_lock_to_nlm4_holder(lock, 0, &fl.client); | |
2021 | ||
2022 | err = do_unlock(&fl); | |
2023 | ||
2024 | sigunlock(); | |
2025 | ||
2026 | debuglog("Exiting unlock...\n"); | |
2027 | ||
2028 | return err; | |
2029 | } | |
2030 | ||
2031 | /* | |
2032 | * XXX: The following monitor/unmonitor routines | |
2033 | * have not been extensively tested (i.e., no regression | |
2034 | * script exists for them like the one for the locking sections) | |
2035 | */ | |
2036 | ||
2037 | /* | |
2038 | * monitor_lock_host: monitor lock hosts locally with a ref count and | |
2039 | * inform statd | |
2040 | */ | |
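| /* | |
| * Summary of the exchange coded below: the SM_MON call asks the | |
| * local statd to watch hostname and registers | |
| * NLM_PROG/NLM_SM/NLM_SM_NOTIFY as the procedure to call back on | |
| * "localhost" when that host's state changes (i.e. it reboots); | |
| * that callback is what eventually drives notify() and the | |
| * clearing of the host's locks. | |
| */ | |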
2041 | void | |
2042 | monitor_lock_host(const char *hostname) | |
2043 | { | |
2044 | struct host *ihp, *nhp; | |
2045 | struct mon smon; | |
2046 | struct sm_stat_res sres; | |
2047 | int rpcret, statflag; | |
2048 | ||
2049 | rpcret = 0; | |
2050 | statflag = 0; | |
2051 | ||
2052 | LIST_FOREACH(ihp, &hostlst_head, hostlst) { | |
2053 | if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) { | |
2054 | /* Host is already monitored, bump refcount */ | |
2055 | ++ihp->refcnt; | |
2056 | /* Host should only be in the monitor list once */ | |
2057 | return; | |
2058 | } | |
2059 | } | |
2060 | ||
2061 | /* Host is not yet monitored, add it */ | |
2062 | nhp = malloc(sizeof(struct host)); | |
2063 | ||
2064 | if (nhp == NULL) { | |
2065 | debuglog("Unable to allocate entry for statd mon\n"); | |
2066 | return; | |
2067 | } | |
2068 | ||
2069 | /* Allocated new host entry, now fill the fields */ | |
2070 | strncpy(nhp->name, hostname, SM_MAXSTRLEN); | |
2071 | nhp->refcnt = 1; | |
2072 | debuglog("Locally Monitoring host %16s\n",hostname); | |
2073 | ||
2074 | debuglog("Attempting to tell statd\n"); | |
2075 | ||
2076 | bzero(&smon,sizeof(smon)); | |
2077 | ||
2078 | smon.mon_id.mon_name = nhp->name; | |
2079 | smon.mon_id.my_id.my_name = "localhost"; | |
2080 | ||
2081 | smon.mon_id.my_id.my_prog = NLM_PROG; | |
2082 | smon.mon_id.my_id.my_vers = NLM_SM; | |
2083 | smon.mon_id.my_id.my_proc = NLM_SM_NOTIFY; | |
2084 | ||
2085 | rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_MON, xdr_mon, | |
2086 | &smon, xdr_sm_stat_res, &sres); | |
2087 | ||
2088 | if (rpcret == 0) { | |
2089 | if (sres.res_stat == stat_fail) { | |
2090 | debuglog("Statd call failed\n"); | |
2091 | statflag = 0; | |
2092 | } else { | |
2093 | statflag = 1; | |
2094 | } | |
2095 | } else { | |
2096 | debuglog("Rpc call to statd failed with return value: %d\n", | |
2097 | rpcret); | |
2098 | statflag = 0; | |
2099 | } | |
2100 | ||
2101 | if (statflag == 1) { | |
2102 | LIST_INSERT_HEAD(&hostlst_head, nhp, hostlst); | |
2103 | } else { | |
2104 | free(nhp); | |
2105 | } | |
2106 | ||
2107 | } | |
2108 | ||
2109 | /* | |
2110 | * unmonitor_lock_host: clear monitor ref counts and inform statd when gone | |
2111 | */ | |
2112 | void | |
2113 | unmonitor_lock_host(const char *hostname) | |
2114 | { | |
2115 | struct host *ihp; | |
2116 | struct mon_id smon_id; | |
2117 | struct sm_stat smstat; | |
2118 | int rpcret; | |
2119 | ||
2120 | rpcret = 0; | |
2121 | ||
2122 | LIST_FOREACH(ihp, &hostlst_head, hostlst) { | |
2124 | if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) { | |
2125 | /* Host is monitored, decrement refcount */ | |
2126 | --ihp->refcnt; | |
2127 | /* Host should only be in the monitor list once */ | |
2128 | break; | |
2129 | } | |
2130 | } | |
2131 | ||
2132 | if (ihp == NULL) { | |
2133 | debuglog("Could not find host %16s in mon list\n", hostname); | |
2134 | return; | |
2135 | } | |
2136 | ||
2137 | if (ihp->refcnt > 0) | |
2138 | return; | |
2139 | ||
2140 | if (ihp->refcnt < 0) { | |
2141 | debuglog("Negative refcount!: %d\n", | |
2142 | ihp->refcnt); | |
2143 | } | |
2144 | ||
2145 | debuglog("Attempting to unmonitor host %16s\n", hostname); | |
2146 | ||
2147 | bzero(&smon_id,sizeof(smon_id)); | |
2148 | ||
2149 | smon_id.mon_name = (char *)hostname; | |
2150 | smon_id.my_id.my_name = "localhost"; | |
2151 | smon_id.my_id.my_prog = NLM_PROG; | |
2152 | smon_id.my_id.my_vers = NLM_SM; | |
2153 | smon_id.my_id.my_proc = NLM_SM_NOTIFY; | |
2154 | ||
2155 | rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_UNMON, xdr_mon, | |
2156 | &smon_id, xdr_sm_stat_res, &smstat); | |
2157 | ||
2158 | if (rpcret != 0) { | |
2159 | debuglog("Rpc call to unmonitor statd failed with " | |
2160 | " return value: %d\n", rpcret); | |
2161 | } | |
2162 | ||
2163 | LIST_REMOVE(ihp, hostlst); | |
2164 | free(ihp); | |
2165 | } | |
2166 | ||
2167 | /* | |
2168 | * notify: Clear all locks from a host if statd complains | |
2169 | * | |
2170 | * XXX: This routine has not been thoroughly tested. However, neither | |
2171 | * had the old one been. It used to compare the statd crash state counter | |
2172 | * to the current lock state. The upshot of this was that it basically | |
2173 | * cleared all locks from the specified host 99% of the time (with the | |
2174 | * other 1% being a bug). Consequently, the assumption is that clearing | |
2175 | * all locks from a host when notified by statd is acceptable. | |
2176 | * | |
2177 | * Please note that this routine skips the usual level of redirection | |
2178 | * through a do_* type routine. This introduces a possible level of | |
2179 | * error and might better be written as do_notify and take this one out. | |
2180 | ||
2181 | */ | |
2182 | ||
2183 | void | |
2184 | notify(const char *hostname, const int state) | |
2185 | { | |
2186 | debuglog("notify from %s, new state %d", hostname, state); | |
2187 | ||
2188 | siglock(); | |
2189 | do_clear(hostname); | |
2190 | sigunlock(); | |
2191 | ||
2192 | debuglog("Leaving notify\n"); | |
2193 | } | |
2194 | ||
2195 | void | |
2196 | send_granted(struct file_lock *fl, int opcode __unused) | |
2199 | { | |
2200 | CLIENT *cli; | |
2201 | static char dummy; | |
2202 | struct timeval timeo; | |
2203 | int success; | |
2204 | static struct nlm_res retval; | |
2205 | static struct nlm4_res retval4; | |
2206 | ||
2207 | debuglog("About to send granted on blocked lock\n"); | |
2210 | ||
2211 | cli = get_client(fl->addr, | |
2212 | (fl->flags & LOCK_V4) ? NLM_VERS4 : NLM_VERS); | |
2213 | if (cli == NULL) { | |
2214 | syslog(LOG_NOTICE, "failed to get CLIENT for %s", | |
2215 | fl->client_name); | |
2216 | /* | |
2217 | * We fail to notify remote that the lock has been granted. | |
2218 | * The client will timeout and retry, the lock will be | |
2219 | * granted at this time. | |
2220 | */ | |
2221 | return; | |
2222 | } | |
2223 | timeo.tv_sec = 0; | |
2224 | timeo.tv_usec = (fl->flags & LOCK_ASYNC) ? 0 : 500000; /* 0.5s */ | |
2225 | ||
2226 | if (fl->flags & LOCK_V4) { | |
2227 | static nlm4_testargs res; | |
2228 | res.cookie = fl->client_cookie; | |
2229 | res.exclusive = fl->client.exclusive; | |
2230 | res.alock.caller_name = fl->client_name; | |
2231 | res.alock.fh.n_len = fl->filehandle.n_len; | |
2232 | res.alock.fh.n_bytes = fl->filehandle.n_bytes; | |
2233 | res.alock.oh = fl->client.oh; | |
2234 | res.alock.svid = fl->client.svid; | |
2235 | res.alock.l_offset = fl->client.l_offset; | |
2236 | res.alock.l_len = fl->client.l_len; | |
2237 | debuglog("sending v4 reply%s", | |
2238 | (fl->flags & LOCK_ASYNC) ? " (async)":""); | |
2239 | if (fl->flags & LOCK_ASYNC) { | |
2240 | success = clnt_call(cli, NLM4_GRANTED_MSG, | |
2241 | xdr_nlm4_testargs, &res, xdr_void, &dummy, timeo); | |
2242 | } else { | |
2243 | success = clnt_call(cli, NLM4_GRANTED, | |
2244 | xdr_nlm4_testargs, &res, xdr_nlm4_res, | |
2245 | &retval4, timeo); | |
2246 | } | |
2247 | } else { | |
2248 | static nlm_testargs res; | |
2249 | ||
2250 | res.cookie = fl->client_cookie; | |
2251 | res.exclusive = fl->client.exclusive; | |
2252 | res.alock.caller_name = fl->client_name; | |
2253 | res.alock.fh.n_len = fl->filehandle.n_len; | |
2254 | res.alock.fh.n_bytes = fl->filehandle.n_bytes; | |
2255 | res.alock.oh = fl->client.oh; | |
2256 | res.alock.svid = fl->client.svid; | |
2257 | res.alock.l_offset = fl->client.l_offset; | |
2258 | res.alock.l_len = fl->client.l_len; | |
2259 | debuglog("sending v1 reply%s", | |
2260 | (fl->flags & LOCK_ASYNC) ? " (async)":""); | |
2261 | if (fl->flags & LOCK_ASYNC) { | |
2262 | success = clnt_call(cli, NLM_GRANTED_MSG, | |
2263 | xdr_nlm_testargs, &res, xdr_void, &dummy, timeo); | |
2264 | } else { | |
2265 | success = clnt_call(cli, NLM_GRANTED, | |
2266 | xdr_nlm_testargs, &res, xdr_nlm_res, | |
2267 | &retval, timeo); | |
2268 | } | |
2269 | } | |
2270 | if (debug_level > 2) | |
2271 | debuglog("clnt_call returns %d(%s) for granted", | |
2272 | success, clnt_sperrno(success)); | |
2273 | ||
2274 | } | |
2275 | ||
2276 | /* | |
2277 | * getshare: try to acquire a share reservation | |
2278 | */ | |
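| /* | |
| * Conflict rule used below: a share request collides with an | |
| * existing share when the requested deny mode overlaps the holder's | |
| * access bits, or the requested access overlaps the holder's deny | |
| * mode: | |
| * | |
| * (shrarg->share.mode & sh->access) || (shrarg->share.access & sh->mode) | |
| * | |
| * Worked example (editor's sketch, assuming read=1 and write=2 bit | |
| * encodings): a holder with access = read|write = 3 blocks a | |
| * request with mode = deny-write = 2, since 2 & 3 != 0. | |
| */ | |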
2279 | enum nlm_stats | |
2280 | getshare(nlm_shareargs *shrarg, struct svc_req *rqstp, const int flags) | |
2281 | { | |
2282 | struct sharefile *shrfile; | |
2283 | struct file_share *sh; | |
2284 | ||
2285 | debuglog("Entering getshare...\n"); | |
2286 | ||
2287 | if (grace_expired == 0 && shrarg->reclaim == 0) { | |
2288 | debuglog("getshare denied - grace period\n"); | |
2289 | return (flags & LOCK_V4) ? | |
2290 | nlm4_denied_grace_period : | |
2291 | nlm_denied_grace_period; | |
2292 | } | |
2293 | ||
2294 | /* find file in list of share files */ | |
2295 | LIST_FOREACH(shrfile, &nfssharefilelist_head, sharefilelist) { | |
2296 | if ((shrarg->share.fh.n_len == shrfile->filehandle.n_len) && | |
2297 | (bcmp(shrarg->share.fh.n_bytes, shrfile->filehandle.n_bytes, | |
2298 | shrarg->share.fh.n_len) == 0)) { | |
2299 | /* shrfile is the correct file */ | |
2300 | break; | |
2301 | } | |
2302 | } | |
2303 | ||
2304 | /* if share file not found, create a new share file */ | |
2305 | if (!shrfile) { | |
2306 | int fd; | |
2307 | fd = fhopen((fhandle_t *)shrarg->share.fh.n_bytes, O_RDONLY); | |
2308 | if (fd < 0) { | |
2309 | debuglog("fhopen failed (from %16s): %32s\n", | |
2310 | shrarg->share.caller_name, strerror(errno)); | |
2311 | if ((flags & LOCK_V4) == 0) | |
2312 | return nlm_denied; | |
2313 | switch (errno) { | |
2314 | case ESTALE: | |
2315 | return nlm4_stale_fh; | |
2316 | default: | |
2317 | return nlm4_failed; | |
2318 | } | |
2319 | } | |
2320 | shrfile = malloc(sizeof(struct sharefile)); | |
2321 | if (!shrfile) { | |
2322 | debuglog("getshare failed: can't allocate sharefile\n"); | |
2323 | close(fd); | |
2324 | return (flags & LOCK_V4) ? nlm4_failed : nlm_denied; | |
2325 | } | |
2326 | shrfile->filehandle.n_len = shrarg->share.fh.n_len; | |
2327 | shrfile->filehandle.n_bytes = malloc(shrarg->share.fh.n_len); | |
2328 | if (!shrfile->filehandle.n_bytes) { | |
2329 | debuglog("getshare failed: can't allocate sharefile filehandle\n"); | |
2330 | free(shrfile); | |
2331 | close(fd); | |
2332 | return (flags & LOCK_V4) ? nlm4_failed : nlm_denied; | |
2333 | } | |
2334 | bcopy(shrarg->share.fh.n_bytes, shrfile->filehandle.n_bytes, | |
2335 | shrarg->share.fh.n_len); | |
2336 | shrfile->fd = fd; | |
2337 | shrfile->refcount = 0; | |
2338 | shrfile->sharelist_head.lh_first = NULL; | |
2339 | LIST_INSERT_HEAD(&nfssharefilelist_head, shrfile, sharefilelist); | |
2340 | } | |
2341 | ||
2342 | /* compare request mode/access to current shares */ | |
2343 | LIST_FOREACH(sh, &shrfile->sharelist_head, nfssharelist) { | |
2344 | /* if request host/owner matches a current share... */ | |
2345 | if ((strncmp(shrarg->share.caller_name, sh->client_name, SM_MAXSTRLEN) == 0) && | |
2346 | same_netobj(&shrarg->share.oh, &sh->oh)) { | |
2347 | /* ...then just update share mode/access */ | |
2348 | sh->mode = shrarg->share.mode; | |
2349 | sh->access = shrarg->share.access; | |
2350 | debuglog("getshare: updated existing share\n"); | |
2351 | return nlm_granted; | |
2352 | } | |
2353 | if (((shrarg->share.mode & sh->access) != 0) || | |
2354 | ((shrarg->share.access & sh->mode) != 0)) { | |
2355 | /* share request conflicts with existing share */ | |
2356 | debuglog("getshare: conflicts with existing share\n"); | |
2357 | return nlm_denied; | |
2358 | } | |
2359 | } | |
2360 | ||
2361 | /* create/init new share */ | |
2362 | sh = malloc(sizeof(struct file_share)); | |
2363 | if (!sh) { | |
2364 | debuglog("getshare failed: can't allocate share\n"); | |
2365 | if (!shrfile->refcount) { | |
2366 | LIST_REMOVE(shrfile, sharefilelist); | |
2367 | close(shrfile->fd); | |
2368 | free(shrfile->filehandle.n_bytes); | |
2369 | free(shrfile); | |
2370 | } | |
2371 | return (flags & LOCK_V4) ? nlm4_failed : nlm_denied; | |
2372 | } | |
2373 | sh->oh.n_len = shrarg->share.oh.n_len; | |
2374 | sh->oh.n_bytes = malloc(sh->oh.n_len); | |
2375 | if (!sh->oh.n_bytes) { | |
2376 | debuglog("getshare failed: can't allocate share owner handle\n"); | |
2377 | free(sh); | |
2378 | if (!shrfile->refcount) { | |
2379 | LIST_REMOVE(shrfile, sharefilelist); | |
2380 | close(shrfile->fd); | |
2381 | free(shrfile->filehandle.n_bytes); | |
2382 | free(shrfile); | |
2383 | } | |
2384 | return (flags & LOCK_V4) ? nlm4_failed : nlm_denied; | |
2385 | } | |
2386 | strncpy(sh->client_name, shrarg->share.caller_name, SM_MAXSTRLEN); | |
2387 | sh->mode = shrarg->share.mode; | |
2388 | sh->access = shrarg->share.access; | |
2389 | ||
2390 | /* insert new share into file's share list */ | |
2391 | LIST_INSERT_HEAD(&shrfile->sharelist_head, sh, nfssharelist); | |
2392 | shrfile->refcount++; | |
2393 | ||
2394 | debuglog("Exiting getshare...\n"); | |
2395 | ||
2396 | return nlm_granted; | |
2397 | } | |
2398 | ||
2399 | ||
2400 | /* remove a share reservation */ | |
2401 | enum nlm_stats | |
2402 | unshare(nlm_shareargs *shrarg, struct svc_req *rqstp __unused) | |
2403 | { | |
2404 | struct sharefile *shrfile; | |
2405 | struct file_share *sh; | |
2406 | ||
2407 | debuglog("Entering unshare...\n"); | |
2408 | ||
2409 | /* find file in list of share files */ | |
2410 | LIST_FOREACH(shrfile, &nfssharefilelist_head, sharefilelist) { | |
2411 | if ((shrarg->share.fh.n_len == shrfile->filehandle.n_len) && | |
2412 | (bcmp(shrarg->share.fh.n_bytes, shrfile->filehandle.n_bytes, | |
2413 | shrarg->share.fh.n_len) == 0)) { | |
2414 | /* shrfile is the correct file */ | |
2415 | break; | |
2416 | } | |
2417 | } | |
2418 | ||
2419 | /* if share file not found, return success (per spec) */ | |
2420 | if (!shrfile) { | |
2421 | debuglog("unshare: no such share file\n"); | |
2422 | return nlm_granted; | |
2423 | } | |
2424 | ||
2425 | /* find share */ | |
2426 | LIST_FOREACH(sh, &shrfile->sharelist_head, nfssharelist) { | |
2427 | /* if request host/owner matches a current share... */ | |
2428 | if ((strncmp(shrarg->share.caller_name, sh->client_name, SM_MAXSTRLEN) == 0) && | |
2429 | same_netobj(&shrarg->share.oh, &sh->oh)) | |
2430 | break; | |
2431 | } | |
2432 | ||
2433 | /* if share not found, return success (per spec) */ | |
2434 | if (!sh) { | |
2435 | debuglog("unshare: no such share\n"); | |
2436 | return nlm_granted; | |
2437 | } | |
2438 | ||
2439 | /* remove share from file and deallocate */ | |
2440 | shrfile->refcount--; | |
2441 | LIST_REMOVE(sh, nfssharelist); | |
2442 | free(sh->oh.n_bytes); | |
2443 | free(sh); | |
2444 | ||
2445 | /* if file has no more shares, deallocate share file */ | |
2446 | if (!shrfile->refcount) { | |
2447 | debuglog("unshare: file has no more shares\n"); | |
2448 | LIST_REMOVE(shrfile, sharefilelist); | |
2449 | close(shrfile->fd); | |
2450 | free(shrfile->filehandle.n_bytes); | |
2451 | free(shrfile); | |
2452 | } | |
2453 | ||
2454 | debuglog("Exiting unshare...\n"); | |
2455 | ||
2456 | return nlm_granted; | |
2457 | } | |
2458 | ||
2459 | /* | |
2460 | * do_free_all | |
2461 | * | |
2462 | * Wipe out all non-monitored locks and shares held by a host. | |
2463 | */ | |
2464 | ||
2465 | void | |
2466 | do_free_all(const char *hostname) | |
2467 | { | |
2468 | struct file_lock *ifl, *nfl; | |
2469 | struct sharefile *shrfile, *nshrfile; | |
2470 | struct file_share *ifs, *nfs; | |
2471 | ||
2472 | /* clear non-monitored blocking file locks */ | |
2473 | ifl = LIST_FIRST(&blockedlocklist_head); | |
2474 | while (ifl != NULL) { | |
2475 | nfl = LIST_NEXT(ifl, nfslocklist); | |
2476 | ||
2477 | if (((ifl->flags & LOCK_MON) == 0) && | |
2478 | (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0)) { | |
2479 | remove_blockingfilelock(ifl); | |
2480 | deallocate_file_lock(ifl); | |
2481 | } | |
2482 | ||
2483 | ifl = nfl; | |
2484 | } | |
2485 | ||
2486 | /* clear non-monitored file locks */ | |
2487 | ifl = LIST_FIRST(&nfslocklist_head); | |
2488 | while (ifl != NULL) { | |
2489 | nfl = LIST_NEXT(ifl, nfslocklist); | |
2490 | ||
2491 | if (((ifl->flags & LOCK_MON) == 0) && | |
2492 | (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0)) { | |
2493 | /* Unlock destroys ifl out from underneath */ | |
2494 | unlock_partialfilelock(ifl); | |
2495 | /* ifl is NO LONGER VALID AT THIS POINT */ | |
2496 | } | |
2497 | ||
2498 | ifl = nfl; | |
2499 | } | |
2500 | ||
2501 | /* clear shares */ | |
2502 | shrfile = LIST_FIRST(&nfssharefilelist_head); | |
2503 | while (shrfile != NULL) { | |
2504 | nshrfile = LIST_NEXT(shrfile, sharefilelist); | |
2505 | ||
2506 | ifs = LIST_FIRST(&shrfile->sharelist_head); | |
2507 | while (ifs != NULL) { | |
2508 | nfs = LIST_NEXT(ifs, nfssharelist); | |
2509 | ||
2510 | if (strncmp(hostname, ifs->client_name, SM_MAXSTRLEN) == 0) { | |
2511 | shrfile->refcount--; | |
2512 | LIST_REMOVE(ifs, nfssharelist); | |
2513 | free(ifs->oh.n_bytes); | |
2514 | free(ifs); | |
2515 | } | |
2516 | ||
2517 | ifs = nfs; | |
2518 | } | |
2519 | ||
2520 | if (!shrfile->refcount) { | |
2521 | LIST_REMOVE(shrfile, sharefilelist); | |
2522 | close(shrfile->fd); | |
2523 | free(shrfile->filehandle.n_bytes); | |
2524 | free(shrfile); | |
2525 | } | |
2526 | ||
2527 | shrfile = nshrfile; | |
2528 | } | |
2529 | ||
2530 | } | |
2531 | ||
2532 | ||
2533 | ||
2534 | /* | |
2535 | * Routines below here have not been modified in the overhaul | |
2536 | */ | |
2537 | ||
2538 | /* | |
2539 | * Are these two routines still required since lockd is not spawning off | |
2540 | * children to service locks anymore? Presumably they were originally | |
2541 | * put in place to prevent one child from changing the lock list out | |
2542 | * from under another one. | |
2543 | */ | |
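| /* | |
| * Editor's sketch: if these were ever nested, a save/restore | |
| * variant would be safer than the unconditional block/unblock pair | |
| * below: | |
| * | |
| * sigset_t block, saved; | |
| * sigemptyset(&block); | |
| * sigaddset(&block, SIGCHLD); | |
| * sigprocmask(SIG_BLOCK, &block, &saved); | |
| * ... critical section ... | |
| * sigprocmask(SIG_SETMASK, &saved, NULL); | |
| */ | |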
2544 | ||
2545 | void | |
2546 | siglock(void) | |
2547 | { | |
2548 | sigset_t block; | |
2549 | ||
2550 | sigemptyset(&block); | |
2551 | sigaddset(&block, SIGCHLD); | |
2552 | ||
2553 | if (sigprocmask(SIG_BLOCK, &block, NULL) < 0) { | |
2554 | syslog(LOG_WARNING, "siglock failed: %s", strerror(errno)); | |
2555 | } | |
2556 | } | |
2557 | ||
2558 | void | |
2559 | sigunlock(void) | |
2560 | { | |
2561 | sigset_t block; | |
2562 | ||
2563 | sigemptyset(&block); | |
2564 | sigaddset(&block, SIGCHLD); | |
2565 | ||
2566 | if (sigprocmask(SIG_UNBLOCK, &block, NULL) < 0) { | |
2567 | syslog(LOG_WARNING, "sigunlock failed: %s", strerror(errno)); | |
2568 | } | |
2569 | } | |
2570 | ||
2571 |