/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/vnode_if.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

#if DEAD_CODE
/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
static int maxlockdepth = MAXDEPTH;
#endif /* DEAD_CODE */

#ifdef LOCKF_DEBUG
#include <sys/sysctl.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>


static int	lockf_debug = 2;
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
#endif

MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2
#define OFF_MAX	0x7fffffffffffffffULL	/* max off_t */
static int	 lf_clearlock(struct lockf *);
static int	 lf_findoverlap(struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **);
static struct lockf *
	 lf_getblock(struct lockf *);
static int	 lf_getlock(struct lockf *, struct flock *);
static int	 lf_setlock(struct lockf *);
static void	 lf_split(struct lockf *, struct lockf *);
static void	 lf_wakelock(struct lockf *);

/*
 * Advisory record locking support
 */
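/*
 * Illustrative sketch (editorial, not part of this file): userland
 * normally reaches this code through fcntl(2), which the VFS turns
 * into a VNOP_ADVLOCK call on the vnode.  For example,
 *
 *	struct flock fl = { .l_start = 0, .l_len = 100,
 *	    .l_whence = SEEK_SET, .l_type = F_WRLCK };
 *	fcntl(fd, F_SETLK, &fl);	/+ fd: a hypothetical open file +/
 *
 * lands here with a_op == F_SETLK and a_fl pointing at fl.
 */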
int
lf_advlock(ap)
	struct vnop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t  a_id;
		int  a_op;
		struct flock *a_fl;
		int  a_flags;
		vfs_context_t  a_context;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct flock *fl = ap->a_fl;
	vfs_context_t context = ap->a_context;
	struct lockf *lock;
	off_t start, end, oadd;
	u_quad_t size;
	int error;
	struct lockf **head = &vp->v_lockf;

	/* XXX HFS may need a !vnode_isreg(vp) EISDIR error here */

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
#ifdef LOCKF_DEBUG
			printf("lf_advlock: unlock without lock\n");
#endif /* LOCKF_DEBUG */
			return (0);
		}
	}

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:

		if ((error = vnode_size(vp, &size, context)))
		{
#ifdef LOCKF_DEBUG
			printf("lf_advlock: vnode_size failed: %d\n", error);
#endif /* LOCKF_DEBUG */
			return (error);
		}

		if (size > OFF_MAX ||
		    (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
			return (EOVERFLOW);
		start = size + fl->l_start;
		break;

	default:
#ifdef LOCKF_DEBUG
		printf("lf_advlock: unknown whence %d\n", fl->l_whence);
#endif /* LOCKF_DEBUG */
		return (EINVAL);
	}
	if (start < 0)
	{
#ifdef LOCKF_DEBUG
		printf("lf_advlock: start < 0 (%qd)\n", start);
#endif /* LOCKF_DEBUG */
		return (EINVAL);
	}
	if (fl->l_len < 0) {
		if (start == 0)
		{
#ifdef LOCKF_DEBUG
			printf("lf_advlock: len < 0 & start == 0\n");
#endif /* LOCKF_DEBUG */
			return (EINVAL);
		}
		end = start - 1;
		start += fl->l_len;
		if (start < 0)
		{
#ifdef LOCKF_DEBUG
			printf("lf_advlock: start < 0 (%qd)\n", start);
#endif /* LOCKF_DEBUG */
			return (EINVAL);
		}
	} else if (fl->l_len == 0)
		end = -1;
	else {
		oadd = fl->l_len - 1;
		if (oadd > (off_t)(OFF_MAX - start))
		{
#ifdef LOCKF_DEBUG
			printf("lf_advlock: overflow\n");
#endif /* LOCKF_DEBUG */
			return (EOVERFLOW);
		}
		end = start + oadd;
	}
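	/*
	 * Worked example of the conversion above (editorial note): with
	 * l_whence == SEEK_SET, l_start == 100, and l_len == 50 the locked
	 * range becomes [100, 149]; l_len == 0 means "to end of file" and
	 * is represented here as end == -1; a negative l_len locks the
	 * region before l_start, so l_start == 100 with l_len == -50
	 * yields [50, 99].
	 */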
	/*
	 * Create the lockf structure
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = ap->a_id;
	lock->lf_vnode = vp;
	lock->lf_type = fl->l_type;
	lock->lf_head = head;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;

	lck_mtx_lock(&vp->v_lock);	/* protect the lockf list */
	/*
	 * Do the requested operation.
	 */
	switch(ap->a_op) {
	case F_SETLK:
		error = lf_setlock(lock);
		break;

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		break;

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		break;

	default:
		FREE(lock, M_LOCKF);
		error = EINVAL;
		break;
	}
	lck_mtx_unlock(&vp->v_lock);	/* done manipulating the list */

#ifdef LOCKF_DEBUG
	printf("lf_advlock: normal exit: %d\n", error);
#endif /* LOCKF_DEBUG */
	return (error);
}

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(lock)
	struct lockf *lock;
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;
	struct vnode *vp = lock->lf_vnode;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock))) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
#if DEAD_CODE
		/*
		 * XXX This is dead code on MacOS X; it shouldn't be.
		 */
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct proc *wproc;
			struct thread *td;
			struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			/* XXXKSE this is not complete under threads */
			wproc = (struct proc *)block->lf_id;
			mtx_lock_spin(&sched_lock);
			FOREACH_THREAD_IN_PROC(wproc, td) {
				while (td->td_wchan &&
				    (td->td_wmesg == lockstr) &&
				    (i++ < maxlockdepth)) {
					waitblock = (struct lockf *)td->td_wchan;
					/* Get the owner of the blocking lock */
					waitblock = waitblock->lf_next;
					if ((waitblock->lf_flags & F_POSIX) == 0)
						break;
					wproc = (struct proc *)waitblock->lf_id;
					if (wproc == (struct proc *)lock->lf_id) {
						mtx_unlock_spin(&sched_lock);
						FREE(lock, M_LOCKF);
						return (EDEADLK);
					}
				}
			}
			mtx_unlock_spin(&sched_lock);
		}
#endif /* DEAD_CODE */
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = msleep(lock, &vp->v_lock, priority, lockstr, 0);
		if (error) {	/* XXX */
			/*
			 * We may have been awakened by a signal and/or by a
			 * debugger continuing us (in which cases we must remove
			 * ourselves from the blocked list) and/or by another
			 * process releasing a lock (in which case we have
			 * already been removed from the blocked list and our
			 * lf_next field set to NOLOCKF).
			 */
			if (lock->lf_next) {
				TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
				lock->lf_next = NOLOCKF;
			}
			FREE(lock, M_LOCKF);
			return (error);
		}	/* XXX */
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				FREE(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while (!TAILQ_EMPTY(&overlap->lf_blkhd)) {
					ltmp = TAILQ_FIRST(&overlap->lf_blkhd);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
					ltmp->lf_next = lock;
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}
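
/*
 * Example (editorial sketch): if a process holding a shared lock on
 * [0, 99] calls lf_setlock() with an exclusive lock on the same range,
 * no other process's lock blocks it, lf_findoverlap() reports case 1,
 * and the existing list entry is simply upgraded in place to F_WRLCK.
 */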

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
static int
lf_clearlock(unlock)
	struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap))) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(lock, fl)
	struct lockf *lock;
	struct flock *fl;
{
	struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock))) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = proc_pid((struct proc *)(block->lf_id));
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}
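
/*
 * Example of the F_GETLK semantics above (editorial sketch): if another
 * process holds an exclusive lock on bytes [0, 49] and the caller probes
 * [10, 19], the flock comes back with l_type == F_WRLCK,
 * l_whence == SEEK_SET, l_start == 0, l_len == 50, and l_pid set to the
 * owner's pid (or -1 when the owner is not a POSIX-style locker).
 */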

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(lock)
	struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
	int ovcase;

	prev = lock->lf_head;
	while ((ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap))) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
static int
lf_findoverlap(lf, lock, type, prev, overlap)
	struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
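		/*
		 * Pictorially (editorial aid), with "lock" the probe range
		 * and "lf" the list entry being checked:
		 *
		 *	case 1:  lf:    |-------|
		 *	         lock:  |-------|
		 *	case 2:  lf:    |-----------|
		 *	         lock:     |-----|
		 *	case 3:  lf:       |-----|
		 *	         lock:  |-----------|
		 *	case 4:  lf:    |-------|
		 *	         lock:      |-------|
		 *	case 5:  lf:        |-------|
		 *	         lock:  |-------|
		 */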
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		    (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
static void
lf_split(lock1, lock2)
	struct lockf *lock1;
	struct lockf *lock2;
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	bcopy(lock1, splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}
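
/*
 * Example (editorial sketch): splitting lock1 covering [0, 99] around a
 * strictly contained region lock2 covering [40, 59] shrinks lock1 to
 * [0, 39], allocates splitlock for [60, 99], and links the pieces in
 * list order lock1 -> lock2 -> splitlock.
 */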

/*
 * Wakeup a blocklist
 */
static void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	struct lockf *wakelock;

	while (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
		wakelock = TAILQ_FIRST(&listhead->lf_blkhd);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
		wakeup(wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(tag, lock)
	char *tag;
	struct lockf *lock;
{

	printf("%s: lock %p for ", tag, (void *)lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %ld", (long)((struct proc *)lock->lf_id)->p_pid);
	else
		printf("id %p", (void *)lock->lf_id);
	if (lock->lf_vnode != 0)
		printf(" in vno 0x%08x, %s, start %jd, end %jd",
		    lock->lf_vnode,
		    lock->lf_type == F_RDLCK ? "shared" :
		    lock->lf_type == F_WRLCK ? "exclusive" :
		    lock->lf_type == F_UNLCK ? "unlock" : "unknown",
		    (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
	else
		printf(" %s, start %jd, end %jd",
		    lock->lf_type == F_RDLCK ? "shared" :
		    lock->lf_type == F_WRLCK ? "exclusive" :
		    lock->lf_type == F_UNLCK ? "unlock" : "unknown",
		    (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
	if (!TAILQ_EMPTY(&lock->lf_blkhd))
		printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd));
	else
		printf("\n");
}

void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	struct lockf *lf, *blk;

	if (lock->lf_vnode == 0)
		return;

	printf("%s: Lock list for vno 0x%08x:\n",
	    tag, lock->lf_vnode);
	for (lf = lock->lf_vnode->v_lockf; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", (void *)lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %ld",
			    (long)((struct proc *)lf->lf_id)->p_pid);
		else
			printf("id %p", (void *)lf->lf_id);
		printf(", %s, start %jd, end %jd",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			printf("\n\t\tlock request %p for ", (void *)blk);
			if (blk->lf_flags & F_POSIX)
				printf("proc %ld",
				    (long)((struct proc *)blk->lf_id)->p_pid);
			else
				printf("id %p", (void *)blk->lf_id);
			printf(", %s, start %jd, end %jd",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", (intmax_t)blk->lf_start,
			    (intmax_t)blk->lf_end);
			if (!TAILQ_EMPTY(&blk->lf_blkhd))
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */