/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)spec_lockf.c	8.4 (Berkeley) 10/26/94
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/quota.h>

#include <miscfs/specfs/lockf.h>
#include <miscfs/specfs/specdev.h>
/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int spec_maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
#include <sys/sysctl.h>
int lockf_debug = 0;
struct ctldebug debug4 = { "lockf_debug", &lockf_debug };
#endif

#define NOLOCKF	(struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2
/*
 * Set a byte-range lock.
 */
int
spec_lf_setlock(lock)
	register struct lockf *lock;
{
	register struct lockf *block;
	struct specinfo *sip = lock->lf_specinfo;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		spec_lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */
	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while (block = spec_lf_getblock(lock)) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			_FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			register struct proc *wproc;
			register struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			    (wproc->p_wmesg == lockstr) &&
			    (i++ < spec_maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					_FREE(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) spec_lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			spec_lf_print("lf_setlock: blocking on", block);
			spec_lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		if (error = tsleep((caddr_t)lock, priority, lockstr, 0)) {
			/*
			 * We may have been awakened by a signal (in
			 * which case we must remove ourselves from the
			 * blocked list) and/or by another process
			 * releasing a lock (in which case we have already
			 * been removed from the blocked list and our
			 * lf_next field set to NOLOCKF).
			 */
			if (lock->lf_next)
				TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock,
				    lf_block);
			_FREE(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = &sip->si_lockf;
	block = sip->si_lockf;
	needtolink = 1;
	for (;;) {
		if (ovcase = spec_lf_findoverlap(block, lock, SELF, &prev, &overlap))
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {

		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				spec_lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				_FREE(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				spec_lf_split(overlap, lock);
			spec_lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				spec_lf_wakelock(overlap);
			} else {
				while (ltmp = overlap->lf_blkhd.tqh_first) {
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			_FREE(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			spec_lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			spec_lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		spec_lf_print("lf_setlock: got the lock", lock);
		spec_lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}
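
/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): the deadlock walk above exists for exactly the situation
 * below, where two processes each hold a one-byte write lock and then
 * wait on the other's byte via F_SETLKW.  One of the two requests is
 * failed with EDEADLK.  This is ordinary user-space fcntl(2) code; the
 * path name and the sleep-based ordering are assumptions made for the
 * demo, and the timing is deliberately simplistic.
 */
#if 0	/* example only -- build as a stand-alone user program */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>

/* Take a blocking one-byte write lock at offset off. */
static int
lock_byte(int fd, off_t off)
{
	struct flock fl;

	fl.l_type = F_WRLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = off;
	fl.l_len = 1;
	return (fcntl(fd, F_SETLKW, &fl));
}

int
main(void)
{
	int fd = open("/tmp/lockdemo", O_RDWR | O_CREAT, 0644);

	if (fd < 0 || ftruncate(fd, 2) < 0)
		exit(1);
	if (fork() == 0) {
		lock_byte(fd, 1);	/* child: byte 1, then byte 0 */
		sleep(1);
		if (lock_byte(fd, 0) < 0 && errno == EDEADLK)
			printf("child: EDEADLK\n");
		_exit(0);
	}
	lock_byte(fd, 0);		/* parent: byte 0, then byte 1 */
	sleep(1);
	if (lock_byte(fd, 1) < 0 && errno == EDEADLK)
		printf("parent: EDEADLK\n");
	wait(NULL);
	return (0);
}
#endif	/* example only */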
/*
 * Remove a byte-range lock on a specinfo.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
int
spec_lf_clearlock(unlock)
	register struct lockf *unlock;
{
	struct specinfo *sip = unlock->lf_specinfo;
	register struct lockf *lf = sip->si_lockf;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		spec_lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = &sip->si_lockf;
	while (ovcase = spec_lf_findoverlap(lf, unlock, SELF, &prev, &overlap)) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		spec_lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			spec_lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			_FREE(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		spec_lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}
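
/*
 * Illustrative sketch (editor's addition): unlocking the middle of a
 * held range takes case 2 above, so spec_lf_split() turns one lock
 * into two.  Plain fcntl(2) user code; the path name is an assumption
 * made for the demo.
 */
#if 0	/* example only -- build as a stand-alone user program */
#include <fcntl.h>
#include <unistd.h>

static int
setlk(int fd, short type, off_t start, off_t len)
{
	struct flock fl;

	fl.l_type = type;
	fl.l_whence = SEEK_SET;
	fl.l_start = start;
	fl.l_len = len;
	return (fcntl(fd, F_SETLK, &fl));
}

int
main(void)
{
	int fd = open("/tmp/lockdemo", O_RDWR | O_CREAT, 0644);

	setlk(fd, F_WRLCK, 0, 100);	/* one lock over bytes 0..99 */
	setlk(fd, F_UNLCK, 40, 20);	/* now two locks: 0..39 and 60..99 */
	return (0);
}
#endif	/* example only */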
/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
int
spec_lf_getlock(lock, fl)
	register struct lockf *lock;
	register struct flock *fl;
{
	register struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		spec_lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if (block = spec_lf_getblock(lock)) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}
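
/*
 * Illustrative sketch (editor's addition): spec_lf_getlock() is what
 * services an fcntl(2) F_GETLK request, which is why a to-end-of-file
 * lock (lf_end == -1) is reported back as l_len == 0.  The path name
 * is an assumption made for the demo.
 */
#if 0	/* example only -- build as a stand-alone user program */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct flock fl;
	int fd = open("/tmp/lockdemo", O_RDWR);

	fl.l_type = F_WRLCK;	/* probe: could we write-lock it all? */
	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 0;		/* 0 == to end of file */
	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
		printf("blocked by pid %d: start %lld, len %lld\n",
		    (int)fl.l_pid, (long long)fl.l_start,
		    (long long)fl.l_len);
	return (0);
}
#endif	/* example only */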
/*
 * Walk the list of locks for a specinfo and
 * return the first blocking lock.
 */
struct lockf *
spec_lf_getblock(lock)
	register struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = lock->lf_specinfo->si_lockf;
	int ovcase;

	prev = &lock->lf_specinfo->si_lockf;
	while (ovcase = spec_lf_findoverlap(lf, lock, OTHERS, &prev, &overlap)) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}
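
/*
 * Editor's note, restated as a stand-alone predicate: getblock reports
 * a conflict only when the overlap belongs to another owner (the
 * OTHERS scan already excludes our own locks) and at least one of the
 * two locks is exclusive.  A minimal sketch with hypothetical names:
 */
#if 0	/* example only */
#include <stdbool.h>

static bool
locks_conflict(bool same_owner, bool ranges_overlap,
    bool a_is_write, bool b_is_write)
{
	return (!same_owner && ranges_overlap && (a_is_write || b_is_write));
}
#endif	/* example only */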
/*
 * Walk the list of locks for a specinfo to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
int
spec_lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		spec_lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			spec_lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		    (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}
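
/*
 * Editor's sketch of the case analysis above as a pure function, using
 * the same numbering.  The kernel code also handles lf_end == -1
 * ("to end of file"); finite ends are assumed here to keep the sketch
 * short, and the function name is hypothetical.
 */
#if 0	/* example only */
#include <stdio.h>

/* Classify how the list lock [s1,e1] overlaps the request [s2,e2]. */
static int
classify(long s1, long e1, long s2, long e2)
{
	if (e1 < s2 || s1 > e2)
		return (0);	/* no overlap */
	if (s1 == s2 && e1 == e2)
		return (1);	/* overlap == lock */
	if (s1 <= s2 && e1 >= e2)
		return (2);	/* overlap contains lock */
	if (s2 <= s1 && e2 >= e1)
		return (3);	/* lock contains overlap */
	if (s1 < s2)
		return (4);	/* overlap starts before lock */
	return (5);		/* overlap ends after lock */
}

int
main(void)
{
	printf("%d\n", classify(0, 99, 40, 59));	/* 2 */
	printf("%d\n", classify(40, 59, 0, 99));	/* 3 */
	printf("%d\n", classify(0, 49, 25, 99));	/* 4 */
	printf("%d\n", classify(25, 99, 0, 49));	/* 5 */
	return (0);
}
#endif	/* example only */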
/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
void
spec_lf_split(lock1, lock2)
	register struct lockf *lock1;
	register struct lockf *lock2;
{
	register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		spec_lf_print("lf_split", lock1);
		spec_lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}
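
/*
 * Editor's sketch of the three-piece case above: carving [s2,e2] out
 * of a strictly larger [s1,e1] leaves lock1 as the front piece, lock2
 * as the carved region, and the new splitlock as the tail.
 */
#if 0	/* example only */
#include <stdio.h>

int
main(void)
{
	long s1 = 0, e1 = 99;	/* encompassing lock (lock1) */
	long s2 = 40, e2 = 59;	/* contained region (lock2)  */

	printf("front: [%ld, %ld]\n", s1, s2 - 1);	/* lock1 keeps */
	printf("carve: [%ld, %ld]\n", s2, e2);		/* lock2       */
	printf("tail:  [%ld, %ld]\n", e2 + 1, e1);	/* splitlock   */
	return (0);
}
#endif	/* example only */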
/*
 * Wakeup a blocklist
 */
void
spec_lf_wakelock(listhead)
	struct lockf *listhead;
{
	register struct lockf *wakelock;

	while (wakelock = listhead->lf_blkhd.tqh_first) {
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			spec_lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
		wakeup((caddr_t)wakelock);
	}
}
#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
spec_lf_print(tag, lock)
	char *tag;
	register struct lockf *lock;
{

	printf("%s: lock 0x%lx for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
	else
		printf("id 0x%x", lock->lf_id);
	printf(" on sip %d rdev <%d, %d>, %s, start %d, end %d",
		lock->lf_specinfo,
		major(lock->lf_specinfo->si_rdev),
		minor(lock->lf_specinfo->si_rdev),
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	if (lock->lf_blkhd.tqh_first)
		printf(" block 0x%x\n", lock->lf_blkhd.tqh_first);
	else
		printf("\n");
}
/*
 * Print out a lock list.
 */
void
spec_lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	register struct lockf *lf, *blk;

	printf("%s: Lock list for sip %d on dev <%d, %d>:\n",
		tag, lock->lf_specinfo,
		major(lock->lf_specinfo->si_dev),
		minor(lock->lf_specinfo->si_dev));
	for (lf = lock->lf_specinfo->si_lockf; lf; lf = lf->lf_next) {
		printf("\tlock 0x%lx for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
		else
			printf("id 0x%x", lf->lf_id);
		printf(", %s, start %d, end %d",
			lf->lf_type == F_RDLCK ? "shared" :
			lf->lf_type == F_WRLCK ? "exclusive" :
			lf->lf_type == F_UNLCK ? "unlock" :
			"unknown", lf->lf_start, lf->lf_end);
		for (blk = lf->lf_blkhd.tqh_first; blk;
		    blk = blk->lf_block.tqe_next) {
			printf("\n\t\tlock request 0x%lx for ", blk);
			if (blk->lf_flags & F_POSIX)
				printf("proc %d",
				    ((struct proc *)(blk->lf_id))->p_pid);
			else
				printf("id 0x%x", blk->lf_id);
			printf(", %s, start %d, end %d",
				blk->lf_type == F_RDLCK ? "shared" :
				blk->lf_type == F_WRLCK ? "exclusive" :
				blk->lf_type == F_UNLCK ? "unlock" :
				"unknown", blk->lf_start, blk->lf_end);
			if (blk->lf_blkhd.tqh_first)
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */