/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/unistd.h>
#include <sys/user.h>
#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/vnode_if.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
static int maxlockdepth = MAXDEPTH;
#ifdef LOCKF_DEBUGGING
#include <sys/sysctl.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
void lf_print(const char *tag, struct lockf *lock);
void lf_printlist(const char *tag, struct lockf *lock);
static int	lockf_debug = 2;
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");

/*
 * If there is no mask bit selector, or there is one, and the selector is
 * set, then output the debugging diagnostic.
 */
#define LOCKF_DEBUG(mask, ...)					\
	do {							\
		if ( !(mask) || ((mask) & lockf_debug)) {	\
			printf(__VA_ARGS__);			\
		}						\
	} while(0)
#else	/* !LOCKF_DEBUGGING */
#define LOCKF_DEBUG(mask, ...)		/* mask */
#endif	/* !LOCKF_DEBUGGING */
MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2
#define OFF_MAX 0x7fffffffffffffffULL	/* max off_t */
/*
 * Overlapping lock states
 */
typedef enum {
	OVERLAP_NONE = 0,
	OVERLAP_EQUALS_LOCK,
	OVERLAP_CONTAINS_LOCK,
	OVERLAP_CONTAINED_BY_LOCK,
	OVERLAP_STARTS_BEFORE_LOCK,
	OVERLAP_ENDS_AFTER_LOCK
} overlap_t;
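
/*
 * Worked example of the overlap taxonomy above (illustrative only, not
 * part of the original source).  Given an existing lock on bytes [10,19],
 * a new request is classified by lf_findoverlap() as follows:
 *
 *	request [10,19]	-> OVERLAP_EQUALS_LOCK		(identical range)
 *	request [12,15]	-> OVERLAP_CONTAINS_LOCK	(existing contains it)
 *	request [5,25]	-> OVERLAP_CONTAINED_BY_LOCK	(it contains existing)
 *	request [15,25]	-> OVERLAP_STARTS_BEFORE_LOCK	(existing starts first)
 *	request [5,15]	-> OVERLAP_ENDS_AFTER_LOCK	(existing ends later)
 *	request [30,40]	-> OVERLAP_NONE			(disjoint)
 */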

static int	 lf_clearlock(struct lockf *);
static overlap_t lf_findoverlap(struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **);
static struct lockf *lf_getblock(struct lockf *);
static int	 lf_getlock(struct lockf *, struct flock *);
static int	 lf_setlock(struct lockf *);
static int	 lf_split(struct lockf *, struct lockf *);
static void	 lf_wakelock(struct lockf *, boolean_t);

/*
 * In order to mitigate risk, don't switch to the new wake-one method
 * unless we have at least this many waiters to wake up.
 */
#define SAFE_WAITER_LIMIT 20

/*
 * lf_advlock
 *
 * Description:	Advisory record locking support
 *
 * Parameters:	ap			Argument pointer to a vnop_advlock_args
 *					argument descriptor structure for the
 *					lock operation to be attempted.
 *
 * Returns:	0			Success
 *		EOVERFLOW
 *		EINVAL
 *		ENOLCK			Number of locked regions exceeds limit
 *	lf_setlock:EAGAIN
 *	lf_setlock:EDEADLK
 *	lf_setlock:ENOLCK
 *	lf_clearlock:ENOLCK
 *	vnode_size:???
 *
 * Notes:	We return ENOLCK when we run out of memory to support locks; as
 *		such, there is no specific expectation limit other than the
 *		amount of available resources.
 */
int
lf_advlock(struct vnop_advlock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct flock *fl = ap->a_fl;
	vfs_context_t context = ap->a_context;
	struct lockf *lock;
	off_t start, end, oadd;
	u_quad_t size;
	int error;
	struct lockf **head = &vp->v_lockf;

	/* XXX HFS may need a !vnode_isreg(vp) EISDIR error here */

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			LOCKF_DEBUG(0, "lf_advlock: '%s' unlock without lock\n", vfs_context_proc(context)->p_comm);
			return (0);
		}
	}
	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:

		/*
		 * It's OK to cast the u_quad_t to an off_t here, since they
		 * are the same storage size, and the value of the returned
		 * contents will never overflow into the sign bit.  We need to
		 * do this because we will use size to force range checks.
		 */
		if ((error = vnode_size(vp, (off_t *)&size, context))) {
			LOCKF_DEBUG(0, "lf_advlock: vnode_getattr failed: %d\n", error);
			return (error);
		}

		if (size > OFF_MAX ||
		    (fl->l_start > 0 &&
		     size > (u_quad_t)(OFF_MAX - fl->l_start)))
			return (EOVERFLOW);
		start = size + fl->l_start;
		break;

	default:
		LOCKF_DEBUG(0, "lf_advlock: unknown whence %d\n", fl->l_whence);
		return (EINVAL);
	}
	if (start < 0) {
		LOCKF_DEBUG(0, "lf_advlock: start < 0 (%qd)\n", start);
		return (EINVAL);
	}
	if (fl->l_len < 0) {
		if (start == 0) {
			LOCKF_DEBUG(0, "lf_advlock: len < 0 & start == 0\n");
			return (EINVAL);
		}
		end = start - 1;
		start += fl->l_len;
		if (start < 0) {
			LOCKF_DEBUG(0, "lf_advlock: start < 0 (%qd)\n", start);
			return (EINVAL);
		}
	} else if (fl->l_len == 0)
		end = -1;
	else {
		oadd = fl->l_len - 1;
		if (oadd > (off_t)(OFF_MAX - start)) {
			LOCKF_DEBUG(0, "lf_advlock: overflow\n");
			return (EOVERFLOW);
		}
		end = start + oadd;
	}
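
	/*
	 * Worked example of the conversion above (illustrative only): a
	 * request with l_whence == SEEK_SET, l_start == 100 and l_len == 50
	 * locks bytes [100,149]; the same request with l_len == 0 locks to
	 * end-of-file, represented as end == -1.  A negative l_len counts
	 * backward: l_start == 100, l_len == -50 yields
	 * start = 100 + (-50) = 50 and end = 100 - 1 = 99, i.e. bytes [50,99].
	 */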
	/*
	 * Create the lockf structure
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	if (lock == NULL)
		return (ENOLCK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = ap->a_id;
	lock->lf_vnode = vp;
	lock->lf_type = fl->l_type;
	lock->lf_head = head;
	lock->lf_next = (struct lockf *)0;
	lock->lf_waiters = 0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;

	if (ap->a_flags & F_FLOCK)
		lock->lf_flags |= F_WAKE1_SAFE;

	lck_mtx_lock(&vp->v_lock);	/* protect the lockf list */
	/*
	 * Do the requested operation.
	 */
	switch(ap->a_op) {
	case F_SETLK:
		error = lf_setlock(lock);
		break;

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		break;

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		break;

	default:
		FREE(lock, M_LOCKF);
		error = EINVAL;
		break;
	}
	lck_mtx_unlock(&vp->v_lock);	/* done manipulating the list */

	LOCKF_DEBUG(0, "lf_advlock: normal exit: %d\n\n", error);
	return (error);
}

/*
 * lf_coelesce_adjacent
 *
 * Description:	Helper function: when setting a lock, coalesce adjacent
 *		locks.  Needed because adjacent locks are not overlapping,
 *		but POSIX requires that they be coalesced.
 *
 * Parameters:	lock			The new lock which may be adjacent
 *					to already locked regions, and which
 *					should therefore be coalesced with them
 *
 * Returns:	<void>
 */
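
/*
 * Illustrative example (not from the original source): if a process holds
 * an exclusive lock on bytes [0,99] and then sets an exclusive lock on
 * bytes [100,199], the two ranges do not overlap but are adjacent, so
 * POSIX requires that they be merged into a single lock covering [0,199].
 * A later unlock of [50,149] must then split the merged lock, which is
 * why adjacent locks with identical owner and type are coalesced eagerly
 * here.
 */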
static void
lf_coelesce_adjacent(struct lockf *lock)
{
	struct lockf **lf = lock->lf_head;

	while (*lf != NOLOCKF) {
		/* reject locks that obviously could not be coalesced */
		if ((*lf == lock) ||
		    ((*lf)->lf_id != lock->lf_id) ||
		    ((*lf)->lf_type != lock->lf_type)) {
			lf = &(*lf)->lf_next;
			continue;
		}

		/* If the lock ends adjacent to us, we can coalesce it */
		if ((*lf)->lf_end != -1 &&
		    ((*lf)->lf_end + 1) == lock->lf_start) {
			struct lockf *adjacent = *lf;

			LOCKF_DEBUG(0, "lf_coelesce_adjacent: coelesce adjacent previous\n");
			lock->lf_start = (*lf)->lf_start;
			*lf = lock;
			lf = &(*lf)->lf_next;
			FREE(adjacent, M_LOCKF);
			continue;
		}
		/* If the lock starts adjacent to us, we can coalesce it */
		if (lock->lf_end != -1 &&
		    (lock->lf_end + 1) == (*lf)->lf_start) {
			struct lockf *adjacent = *lf;

			LOCKF_DEBUG(0, "lf_coelesce_adjacent: coelesce adjacent following\n");
			lock->lf_end = (*lf)->lf_end;
			lock->lf_next = (*lf)->lf_next;
			lf = &lock->lf_next;
			FREE(adjacent, M_LOCKF);
			continue;
		}

		/* no matching conditions; go on to next lock */
		lf = &(*lf)->lf_next;
	}
}

/*
 * lf_setlock
 *
 * Description:	Set a byte-range lock.
 *
 * Parameters:	lock			The lock structure describing the lock
 *					to be set; allocated by the caller, it
 *					will be linked into the lock list if
 *					the set is successful, and freed if the
 *					set is unsuccessful.
 *
 * Returns:	0			Success
 *		EAGAIN
 *		EDEADLK
 *	lf_split:ENOLCK
 *	lf_clearlock:ENOLCK
 *	msleep:EINTR
 *
 * Notes:	We add the lock to the provisional lock list.  We do not
 *		coalesce at this time; this has implications for other lock
 *		requestors in the blocker search mechanism.
 */
static int
lf_setlock(struct lockf *lock)
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int priority, needtolink, error;
	struct vnode *vp = lock->lf_vnode;
	overlap_t ovcase;

#ifdef LOCKF_DEBUGGING
	if (lockf_debug & 1) {
		lf_print("lf_setlock", lock);
		lf_printlist("lf_setlock(in)", lock);
	}
#endif /* LOCKF_DEBUGGING */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock))) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us.  MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct proc *wproc, *bproc;
			struct uthread *ut;
			struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			TAILQ_FOREACH(ut, &wproc->p_uthlist, uu_list) {
				/*
				 * While the thread is asleep (uu_wchan != 0)
				 * in this code (uu_wmesg == lockstr)
				 * and we have not exceeded the maximum cycle
				 * depth (i < maxlockdepth), then check for a
				 * cycle to see if the lock is blocked behind
				 * someone blocked behind us.
				 */
				while (((waitblock = (struct lockf *)ut->uu_wchan) != NULL) &&
				    ut->uu_wmesg == lockstr &&
				    (i++ < maxlockdepth)) {
					waitblock = (struct lockf *)ut->uu_wchan;
					/*
					 * Get the lock blocking the lock
					 * which would block us, and make
					 * certain it hasn't come unblocked
					 * (been granted, e.g. between the time
					 * we called lf_getblock, and the time
					 * we successfully acquired the
					 * wait channel).
					 */
					waitblock = waitblock->lf_next;
					if (waitblock == NULL)
						break;

					/*
					 * Make sure it's an advisory range
					 * lock and not an overall file lock;
					 * if we mix lock types, it's our own
					 * fault.
					 */
					if ((waitblock->lf_flags & F_POSIX) == 0)
						break;

					/*
					 * If the owner of the lock that's
					 * blocking a lock that's blocking us
					 * getting the requested lock, then we
					 * would deadlock, so error out.
					 */
					bproc = (struct proc *)waitblock->lf_id;
					if (bproc == (struct proc *)lock->lf_id) {
						FREE(lock, M_LOCKF);
						return (EDEADLK);
					}
				}
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			if ((error = lf_clearlock(lock)) != 0) {
				FREE(lock, M_LOCKF);
				return (error);
			}
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
		block->lf_waiters++;

		if ( !(lock->lf_flags & F_FLOCK))
			block->lf_flags &= ~F_WAKE1_SAFE;

#ifdef LOCKF_DEBUGGING
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock(block)", block);
		}
#endif /* LOCKF_DEBUGGING */
		error = msleep(lock, &vp->v_lock, priority, lockstr, 0);

		if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
			struct lockf *tlock;

			if ((block = lf_getblock(lock))) {
				TAILQ_FOREACH(tlock, &lock->lf_blkhd, lf_block) {
					tlock->lf_next = block;
				}
				TAILQ_CONCAT(&block->lf_blkhd, &lock->lf_blkhd, lf_block);

				block->lf_waiters += lock->lf_waiters;
				lock->lf_waiters = 0;
			}
		}
		if (error) {	/* XXX */
			/*
			 * We may have been awakened by a signal and/or by a
			 * debugger continuing us (in which cases we must remove
			 * ourselves from the blocked list) and/or by another
			 * process releasing a lock (in which case we have
			 * already been removed from the blocked list and our
			 * lf_next field set to NOLOCKF).
			 */
			if (lock->lf_next) {
				TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
				lock->lf_next->lf_waiters--;
				lock->lf_next = NOLOCKF;
			}
			if (!TAILQ_EMPTY(&lock->lf_blkhd))
				lf_wakelock(lock, TRUE);

			FREE(lock, M_LOCKF);
			return (error);
		}	/* XXX */
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case OVERLAP_NONE:
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case OVERLAP_EQUALS_LOCK:
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap, TRUE);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for lf_coelesce_adjacent() */
			break;

		case OVERLAP_CONTAINS_LOCK:
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				FREE(lock, M_LOCKF);
				lock = overlap; /* for lf_coelesce_adjacent() */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else {
				/*
				 * If we can't split the lock, we can't
				 * grant it.  Claim a system limit for the
				 * resource shortage.
				 */
				if (lf_split(overlap, lock)) {
					FREE(lock, M_LOCKF);
					return (ENOLCK);
				}
			}
			lf_wakelock(overlap, TRUE);
			break;

		case OVERLAP_CONTAINED_BY_LOCK:
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap, TRUE);
			} else {
				while (!TAILQ_EMPTY(&overlap->lf_blkhd)) {
					ltmp = TAILQ_FIRST(&overlap->lf_blkhd);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					overlap->lf_waiters--;

					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
					lock->lf_waiters++;

					ltmp->lf_next = lock;
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			continue;

		case OVERLAP_STARTS_BEFORE_LOCK:
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap, TRUE);
			needtolink = 0;
			continue;

		case OVERLAP_ENDS_AFTER_LOCK:
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap, TRUE);
			break;
		}
		break;
	}
	/* Coalesce adjacent locks with identical attributes */
	lf_coelesce_adjacent(lock);
#ifdef LOCKF_DEBUGGING
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock(out)", lock);
	}
#endif /* LOCKF_DEBUGGING */
	return (0);
}

/*
 * lf_clearlock
 *
 * Description:	Remove a byte-range lock on a vnode.  Generally, find the
 *		lock (or an overlap to that lock) and remove it (or shrink
 *		it), then wakeup anyone we can.
 *
 * Parameters:	unlock			The lock to clear
 *
 * Returns:	0			Success
 *	lf_split:ENOLCK
 *
 * Notes:	A caller may unlock all the locks owned by the caller by
 *		specifying the entire file range; locks owned by other
 *		callers are not affected by this operation.
 */
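
/*
 * Illustrative example (not from the original source): if the caller
 * holds a lock on [0,199] and clears [50,149], the OVERLAP_CONTAINS_LOCK
 * case below calls lf_split(), leaving two locks, [0,49] and [150,199].
 * Clearing the entire file range (start 0, end -1) removes every lock
 * owned by the caller in one pass.
 */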
static int
lf_clearlock(struct lockf *unlock)
{
	struct lockf **head = unlock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	overlap_t ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUGGING
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUGGING */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap)) != OVERLAP_NONE) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap, FALSE);

		switch (ovcase) {
		case OVERLAP_NONE:	/* satisfy compiler enum/switch */
			break;

		case OVERLAP_EQUALS_LOCK:
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case OVERLAP_CONTAINS_LOCK: /* split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			/*
			 * If we can't split the lock, we can't grant it.
			 * Claim a system limit for the resource shortage.
			 */
			if (lf_split(overlap, unlock))
				return (ENOLCK);
			overlap->lf_next = unlock->lf_next;
			break;

		case OVERLAP_CONTAINED_BY_LOCK:
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			continue;

		case OVERLAP_STARTS_BEFORE_LOCK:
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case OVERLAP_ENDS_AFTER_LOCK:
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUGGING
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUGGING */
	return (0);
}

/*
 * lf_getlock
 *
 * Description:	Check whether there is a blocking lock, and if so return
 *		its process identifier into the lock being requested.
 *
 * Parameters:	lock			Pointer to lock to test for blocks
 *		fl			Pointer to flock structure to receive
 *					the blocking lock information, if a
 *					blocking lock is found.
 *
 * Returns:	0			Success
 *
 * Implicit Returns:
 *		*fl			Contents modified to reflect the
 *					blocking lock, if one is found; not
 *					otherwise modified
 *
 * Notes:	fl->l_pid will be (-1) for file locks and will only be set to
 *		the blocking process ID for advisory record locks.
 */
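
/*
 * Illustrative userland usage of the F_GETLK path served here (not part
 * of the original source): the caller describes the range it wants and
 * reads back the conflicting lock, if any.
 *
 *	struct flock fl = {
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,		(0 means to end-of-file)
 *		.l_type   = F_WRLCK,
 *	};
 *	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
 *		printf("blocked by pid %d\n", (int)fl.l_pid);
 */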
static int
lf_getlock(struct lockf *lock, struct flock *fl)
{
	struct lockf *block;

#ifdef LOCKF_DEBUGGING
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUGGING */

	if ((block = lf_getblock(lock))) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = proc_pid((struct proc *)(block->lf_id));
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * lf_getblock
 *
 * Description:	Walk the list of locks for an inode and return the first
 *		blocking lock.  A lock is considered blocking if we are not
 *		the lock owner; otherwise, we are permitted to upgrade or
 *		downgrade it, and it's not considered blocking.
 *
 * Parameters:	lock			The lock for which we are interested
 *					in obtaining the blocking lock, if any
 *
 * Returns:	NOLOCKF			No blocking lock exists
 *		!NOLOCKF		The address of the blocking lock's
 *					struct lockf
 */
static struct lockf *
lf_getblock(struct lockf *lock)
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
	int ovcase;

	prev = lock->lf_head;
	while ((ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap)) != OVERLAP_NONE) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

/*
 * lf_findoverlap
 *
 * Description:	Walk the list of locks to find an overlapping lock (if any).
 *
 * Parameters:	lf			First lock on lock list
 *		lock			The lock we are checking for an overlap
 *		type			Check type: SELF or OTHERS
 *		prev			pointer to pointer pointer to contain
 *					address of pointer to previous lock
 *					pointer to overlapping lock, if overlap
 *		overlap			pointer to pointer to contain address
 *					of overlapping lock
 *
 * Returns:	OVERLAP_NONE
 *		OVERLAP_EQUALS_LOCK
 *		OVERLAP_CONTAINS_LOCK
 *		OVERLAP_CONTAINED_BY_LOCK
 *		OVERLAP_STARTS_BEFORE_LOCK
 *		OVERLAP_ENDS_AFTER_LOCK
 *
 * Implicit Returns:
 *		*prev			The address of the next pointer in the
 *					lock previous to the overlapping lock;
 *					this is generally used to relink the
 *					lock list, avoiding a second iteration.
 *		*overlap		The pointer to the overlapping lock
 *					itself; this is used to return data in
 *					the check == OTHERS case, and for the
 *					caller to modify the overlapping lock,
 *					in the check == SELF case
 *
 * Note:	This returns only the FIRST overlapping lock.  There may be
 *		more than one.  lf_getlock will return the first blocking lock,
 *		while lf_setlock will iterate over all overlapping locks to
 *		effect the lock assertion.
 *
 *		The check parameter can be SELF, meaning we are looking for
 *		overlapping locks owned by us, or it can be OTHERS, meaning
 *		we are looking for overlapping locks owned by someone else so
 *		we can report a blocking lock on an F_GETLK request.
 *
 *		The value of *overlap and *prev are modified, even if there is
 *		no overlapping lock found; always check the return code.
 */
static overlap_t
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
	       struct lockf ***prev, struct lockf **overlap)
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (OVERLAP_NONE);
#ifdef LOCKF_DEBUGGING
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUGGING */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUGGING
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUGGING */
		/*
		 * OK, check for overlap
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
			LOCKF_DEBUG(2, "no overlap\n");
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (OVERLAP_NONE);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			LOCKF_DEBUG(2, "overlap == lock\n");
			return (OVERLAP_EQUALS_LOCK);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			LOCKF_DEBUG(2, "overlap contains lock\n");
			return (OVERLAP_CONTAINS_LOCK);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		     (lf->lf_end != -1 && end >= lf->lf_end))) {
			LOCKF_DEBUG(2, "lock contains overlap\n");
			return (OVERLAP_CONTAINED_BY_LOCK);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			LOCKF_DEBUG(2, "overlap starts before lock\n");
			return (OVERLAP_STARTS_BEFORE_LOCK);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			LOCKF_DEBUG(2, "overlap ends after lock\n");
			return (OVERLAP_ENDS_AFTER_LOCK);
		}
		panic("lf_findoverlap: default");
	}
	return (OVERLAP_NONE);
}

/*
 * lf_split
 *
 * Description:	Split a lock and a contained region into two or three locks
 *		as necessary.
 *
 * Parameters:	lock1			Lock to split
 *		lock2			Overlapping lock region requiring the
 *					split (upgrade/downgrade/unlock)
 *
 * Returns:	0			Success
 *		ENOLCK			No memory for new lock
 *
 * Implicit Returns:
 *		*lock1			Modified original lock
 *		*lock2			Overlapping lock (inserted into list)
 *		(new lock)		Potential new lock inserted into list
 *					if split results in 3 locks
 *
 * Notes:	This operation can only fail if the split would result in three
 *		locks, and there is insufficient memory to allocate the third
 *		lock; in that case, neither of the locks will be modified.
 */
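
/*
 * Illustrative example (not from the original source): splitting lock1
 * [0,199] around a contained region lock2 [50,149] yields three locks:
 * lock1 becomes [0,49], lock2 covers [50,149], and a newly allocated
 * lock takes [150,199].  If instead lock2 were [0,99], only two pieces
 * result (lock1 shrinks to [100,199]) and no allocation is needed.
 */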
static int
lf_split(struct lockf *lock1, struct lockf *lock2)
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUGGING
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUGGING */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return (0);
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return (0);
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	if (splitlock == NULL)
		return (ENOLCK);
	bcopy(lock1, splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;

	return (0);
}

/*
 * lf_wakelock
 *
 * Wakeup a blocklist in the case of a downgrade or unlock, since others
 * waiting on the lock may now be able to acquire it.
 *
 * Parameters:	listhead		Lock list head on which waiters may
 *					have pending locks
 *
 * Returns:	<void>
 *
 * Notes:	This function iterates a list of locks and wakes all waiters,
 *		rather than only waiters for the contended regions.  Because
 *		of this, for heavily contended files, this can result in a
 *		"thundering herd" situation.  Refactoring the code could make
 *		this operation more efficient, if heavy contention ever results
 *		in a real-world performance problem.
 */
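
/*
 * Illustrative behavior (not from the original source): with wake_all,
 * unlocking a range on which 30 processes sleep wakes all 30, and 29 of
 * them typically re-block immediately.  With the wake-one path taken
 * below (F_WAKE1_SAFE set and more than SAFE_WAITER_LIMIT waiters), only
 * the first waiter is woken and the remaining waiters are handed off to
 * it, avoiding the thundering herd for whole-file flock() style locks.
 */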
static void
lf_wakelock(struct lockf *listhead, boolean_t force_all)
{
	struct lockf *wakelock;
	boolean_t wake_all = TRUE;

	if (force_all == FALSE && (listhead->lf_flags & F_WAKE1_SAFE) && listhead->lf_waiters > SAFE_WAITER_LIMIT)
		wake_all = FALSE;

	while (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
		wakelock = TAILQ_FIRST(&listhead->lf_blkhd);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		listhead->lf_waiters--;

		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUGGING
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUGGING */
		if (wake_all == FALSE) {
			/*
			 * Hand the remaining waiters off to the waiter we
			 * are waking, and fix up their lf_next pointers to
			 * block on it.
			 */
			TAILQ_CONCAT(&wakelock->lf_blkhd, &listhead->lf_blkhd, lf_block);
			wakelock->lf_waiters = listhead->lf_waiters;
			listhead->lf_waiters = 0;

			if (!TAILQ_EMPTY(&wakelock->lf_blkhd)) {
				struct lockf *tlock;

				TAILQ_FOREACH(tlock, &wakelock->lf_blkhd, lf_block) {
					tlock->lf_next = wakelock;
				}
			}
		}
		wakeup(wakelock);

		if (wake_all == FALSE)
			break;
	}
}

#ifdef LOCKF_DEBUGGING
/*
 * lf_print DEBUG
 *
 * Print out a lock; lock information is prefixed by the string in 'tag'
 *
 * Parameters:	tag			A string tag for debugging
 *		lock			The lock whose information should be
 *					displayed
 *
 * Returns:	<void>
 */
void
lf_print(const char *tag, struct lockf *lock)
{
	printf("%s: lock %p for ", tag, (void *)lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %ld", (long)((struct proc *)lock->lf_id)->p_pid);
	else
		printf("id %p", (void *)lock->lf_id);
	if (lock->lf_vnode != 0)
		printf(" in vno %p, %s, start 0x%016llx, end 0x%016llx",
		    lock->lf_vnode,
		    lock->lf_type == F_RDLCK ? "shared" :
		    lock->lf_type == F_WRLCK ? "exclusive" :
		    lock->lf_type == F_UNLCK ? "unlock" : "unknown",
		    (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
	else
		printf(" %s, start 0x%016llx, end 0x%016llx",
		    lock->lf_type == F_RDLCK ? "shared" :
		    lock->lf_type == F_WRLCK ? "exclusive" :
		    lock->lf_type == F_UNLCK ? "unlock" : "unknown",
		    (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
	if (!TAILQ_EMPTY(&lock->lf_blkhd))
		printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd));
	else
		printf("\n");
}

/*
 * lf_printlist DEBUG
 *
 * Print out a lock list for the vnode associated with 'lock'; lock information
 * is prefixed by the string in 'tag'
 *
 * Parameters:	tag			A string tag for debugging
 *		lock			The lock whose vnode's lock list should
 *					be displayed
 *
 * Returns:	<void>
 */
void
lf_printlist(const char *tag, struct lockf *lock)
{
	struct lockf *lf, *blk;

	if (lock->lf_vnode == 0)
		return;

	printf("%s: Lock list for vno %p:\n",
	    tag, lock->lf_vnode);
	for (lf = lock->lf_vnode->v_lockf; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", (void *)lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %ld",
			    (long)((struct proc *)lf->lf_id)->p_pid);
		else
			printf("id %p", (void *)lf->lf_id);
		printf(", %s, start 0x%016llx, end 0x%016llx",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			printf("\n\t\tlock request %p for ", (void *)blk);
			if (blk->lf_flags & F_POSIX)
				printf("proc %ld",
				    (long)((struct proc *)blk->lf_id)->p_pid);
			else
				printf("id %p", (void *)blk->lf_id);
			printf(", %s, start 0x%016llx, end 0x%016llx",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", (intmax_t)blk->lf_start,
			    (intmax_t)blk->lf_end);
			if (!TAILQ_EMPTY(&blk->lf_blkhd))
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUGGING */