/*
 * bsd/miscfs/specfs/spec_lockf.c — from the apple/xnu source tree
 * (release xnu-517.12.7).
 */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
23 /*
24 * Copyright (c) 1982, 1986, 1989, 1993
25 * The Regents of the University of California. All rights reserved.
26 *
27 * This code is derived from software contributed to Berkeley by
28 * Scooter Morris at Genentech Inc.
29 *
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions
32 * are met:
33 * 1. Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * 2. Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in the
37 * documentation and/or other materials provided with the distribution.
38 * 3. All advertising materials mentioning features or use of this software
39 * must display the following acknowledgement:
40 * This product includes software developed by the University of
41 * California, Berkeley and its contributors.
42 * 4. Neither the name of the University nor the names of its contributors
43 * may be used to endorse or promote products derived from this software
44 * without specific prior written permission.
45 *
46 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
47 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
50 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
51 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
52 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56 * SUCH DAMAGE.
57 *
58 * @(#)spec_lockf.c 8.4 (Berkeley) 10/26/94
59 */
60
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/kernel.h>
64 #include <sys/file.h>
65 #include <sys/proc.h>
66 #include <sys/vnode.h>
67 #include <sys/malloc.h>
68 #include <sys/fcntl.h>
69 #include <sys/quota.h>
70
71 #include <miscfs/specfs/lockf.h>
72 #include <miscfs/specfs/specdev.h>
73
74 /*
75 * This variable controls the maximum number of processes that will
76 * be checked in doing deadlock detection.
77 */
78 int spec_maxlockdepth = MAXDEPTH;
79
80 #ifdef LOCKF_DEBUG
81 #include <vm/vm.h>
82 #include <sys/sysctl.h>
83 int lockf_debug = 0;
84 struct ctldebug debug4 = { "lockf_debug", &lockf_debug };
85 #endif
86
87 #define NOLOCKF (struct lockf *)0
88 #define SELF 0x1
89 #define OTHERS 0x2
90
91 /*
92 * Set a byte-range lock.
93 */
/*
 * spec_lf_setlock(lock):
 *
 *	Install the byte-range lock described by "lock" on the lock list
 *	of its specinfo (lock->lf_specinfo), sleeping if necessary until
 *	all conflicting locks held by other owners are released.
 *
 *	Returns 0 on success, with the lock (or the overlapping lock it
 *	was merged into) linked into sip->si_lockf.  On failure the lock
 *	structure is freed and one of the following is returned:
 *	EAGAIN (a conflicting lock exists and F_WAIT is clear), EDEADLK
 *	(granting would create a deadlock cycle among F_POSIX owners), or
 *	the tsleep() error (e.g. EINTR) if the sleep was interrupted.
 *	Ownership of "lock" passes to this routine in every case; the
 *	caller must not touch it after the call.
 */
int
spec_lf_setlock(lock)
	register struct lockf *lock;
{
	register struct lockf *block;
	struct specinfo *sip = lock->lf_specinfo;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		spec_lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the sleep priority: writers sleep at PLOCK+4, readers at
	 * PLOCK.  PCATCH makes the sleep interruptible by signals so a
	 * blocked lock request can be aborted.
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while (block = spec_lf_getblock(lock)) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			register struct proc *wproc;
			register struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			/* For F_POSIX locks, lf_id is the owning proc. */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			       (wproc->p_wmesg == lockstr) &&
			       (i++ < spec_maxlockdepth)) {
				/*
				 * A process sleeping in this module sleeps
				 * on the address of its own struct lockf
				 * (see the tsleep() below), so p_wchan is
				 * that blocked lock.
				 */
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				/*
				 * A blocked lock's lf_next points at the
				 * lock it is waiting on (set just before
				 * the tsleep() below).
				 */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				/* Cycle back to ourselves: deadlock. */
				if (wproc == (struct proc *)lock->lf_id) {
					_FREE(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) spec_lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			spec_lf_print("lf_setlock: blocking on", block);
			spec_lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		/*
		 * Sleep on our own lock's address; spec_lf_wakelock()
		 * wakes us when the blocking lock is released.
		 */
		if (error = tsleep((caddr_t)lock, priority, lockstr, 0)) {
			/*
			 * We may have been awakened by a signal (in
			 * which case we must remove ourselves from the
			 * blocked list) and/or by another process
			 * releasing a lock (in which case we have already
			 * been removed from the blocked list and our
			 * lf_next field set to NOLOCKF).
			 */
			if (lock->lf_next)
				TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock,
					lf_block);
			_FREE(lock, M_LOCKF);
			return (error);
		}
		/* Rescan: another conflicting lock may still exist. */
	}
	/*
	 * No blocks!! Add the lock. Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = &sip->si_lockf;
	block = sip->si_lockf;
	needtolink = 1;
	for (;;) {
		if (ovcase = spec_lf_findoverlap(block, lock, SELF, &prev, &overlap))
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				spec_lf_wakelock(overlap);
			/* Reuse the existing entry; just retype it. */
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				/* Same type: the existing lock already covers us. */
				_FREE(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				/* Shrink overlap from the front; insert lock before it. */
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				/* Carve lock out of the middle (or tail) of overlap. */
				spec_lf_split(overlap, lock);
			spec_lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				spec_lf_wakelock(overlap);
			} else {
				/* Move overlap's waiters onto our blocked list. */
				while (ltmp = overlap->lf_blkhd.tqh_first) {
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			_FREE(overlap, M_LOCKF);
			/* There may be further contained overlaps; keep scanning. */
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			spec_lf_wakelock(overlap);
			needtolink = 0;
			/* Lock's tail may still overlap later entries. */
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			spec_lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		spec_lf_print("lf_setlock: got the lock", lock);
		spec_lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}
324
325 /*
326 * Remove a byte-range lock on an specinfo.
327 *
328 * Generally, find the lock (or an overlap to that lock)
329 * and remove it (or shrink it), then wakeup anyone we can.
330 */
/*
 * spec_lf_clearlock(unlock):
 *
 *	Remove the byte range described by "unlock" (lf_type must be
 *	F_UNLCK) from all locks on the specinfo owned by unlock->lf_id.
 *	Overlapping locks are deleted, shrunk, or split as needed, and
 *	any waiters blocked on an affected lock are woken so they can
 *	retry.  "unlock" itself is never linked into the list and is
 *	not freed here; the caller retains ownership.
 *
 *	Always returns 0.
 */
int
spec_lf_clearlock(unlock)
	register struct lockf *unlock;
{
	struct specinfo *sip = unlock->lf_specinfo;
	register struct lockf *lf = sip->si_lockf;
	struct lockf *overlap, **prev;
	int ovcase;

	/* Empty list: nothing to clear. */
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		spec_lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = &sip->si_lockf;
	while (ovcase = spec_lf_findoverlap(lf, unlock, SELF, &prev, &overlap)) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		spec_lf_wakelock(overlap);

		/* Case numbering matches spec_lf_findoverlap(). */
		switch (ovcase) {

		case 1: /* overlap == lock */
			/* Exact match: unlink and free the whole entry. */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				/* Common start: just trim the front. */
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			spec_lf_split(overlap, unlock);
			/*
			 * spec_lf_split() spliced "unlock" in between the
			 * two halves; unsplice it, leaving overlap pointing
			 * at the new tail half.
			 */
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			/* Delete the entry and continue with its successor. */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			_FREE(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/* Trim overlap's tail; later entries may still overlap. */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			/* Trim overlap's front; nothing later can overlap. */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		spec_lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}
395
396 /*
397 * Check whether there is a blocking lock,
398 * and if so return its process identifier.
399 */
400 int
401 spec_lf_getlock(lock, fl)
402 register struct lockf *lock;
403 register struct flock *fl;
404 {
405 register struct lockf *block;
406
407 #ifdef LOCKF_DEBUG
408 if (lockf_debug & 1)
409 spec_lf_print("lf_getlock", lock);
410 #endif /* LOCKF_DEBUG */
411
412 if (block = spec_lf_getblock(lock)) {
413 fl->l_type = block->lf_type;
414 fl->l_whence = SEEK_SET;
415 fl->l_start = block->lf_start;
416 if (block->lf_end == -1)
417 fl->l_len = 0;
418 else
419 fl->l_len = block->lf_end - block->lf_start + 1;
420 if (block->lf_flags & F_POSIX)
421 fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
422 else
423 fl->l_pid = -1;
424 } else {
425 fl->l_type = F_UNLCK;
426 }
427 return (0);
428 }
429
430 /*
431 * Walk the list of locks for an specinfo and
432 * return the first blocking lock.
433 */
434 struct lockf *
435 spec_lf_getblock(lock)
436 register struct lockf *lock;
437 {
438 struct lockf **prev, *overlap, *lf = lock->lf_specinfo->si_lockf;
439 int ovcase;
440
441 prev = &lock->lf_specinfo->si_lockf;
442 while (ovcase = spec_lf_findoverlap(lf, lock, OTHERS, &prev, &overlap)) {
443 /*
444 * We've found an overlap, see if it blocks us
445 */
446 if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
447 return (overlap);
448 /*
449 * Nope, point to the next one on the list and
450 * see if it blocks us
451 */
452 lf = overlap->lf_next;
453 }
454 return (NOLOCKF);
455 }
456
457 /*
458 * Walk the list of locks for an specinfo to
459 * find an overlapping lock (if any).
460 *
461 * NOTE: this returns only the FIRST overlapping lock. There
462 * may be more than one.
463 */
/*
 * spec_lf_findoverlap(lf, lock, type, prev, overlap):
 *
 *	Starting at "lf", scan the lock list for the first lock that
 *	overlaps the range of "lock".  "type" selects which owners are
 *	considered: SELF (same lf_id as lock) or OTHERS (different
 *	lf_id).  An lf_end of -1 is the "to end of file" sentinel and
 *	is treated as unbounded throughout.
 *
 *	Returns the overlap case number (0 = none, 1-5 as enumerated
 *	below).  On return *overlap points at the overlapping lock (or
 *	the last list position examined) and *prev points at the
 *	link-pointer addressing it, so callers can splice the list.
 */
int
spec_lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		spec_lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		/* Skip entries whose owner doesn't match the requested type. */
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			spec_lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			/*
			 * A process's own locks are kept sorted, so once an
			 * entry starts past our end nothing later can
			 * overlap and the SELF scan can stop.
			 */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		/* Note: the case tests below rely on this exact ordering. */
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		     (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		/* The five tests above are exhaustive for overlapping ranges. */
		panic("lf_findoverlap: default");
	}
	return (0);
}
569
570 /*
571 * Split a lock and a contained region into
572 * two or three locks as necessary.
573 */
/*
 * spec_lf_split(lock1, lock2):
 *
 *	Split "lock1" around the contained region "lock2" (caller
 *	guarantees containment).  If the regions share a start or an
 *	end, lock1 is simply shrunk and lock2 spliced next to it (two
 *	pieces).  Otherwise a new lockf is allocated for the tail of
 *	lock1 and lock2 is spliced between the two halves (three
 *	pieces).  In every case lock2 ends up linked into the list
 *	immediately after lock1.
 */
void
spec_lf_split(lock1, lock2)
	register struct lockf *lock1;
	register struct lockf *lock2;
{
	register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		spec_lf_print("lf_split", lock1);
		spec_lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		/* Common start: lock1 keeps the tail, lock2 goes first. */
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		/* Common end: lock1 keeps the head, lock2 follows it. */
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock.  The copy inherits lock1's owner,
	 * flags and type; it must get its own (empty) blocked list.
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in: lock1 (head), lock2, splitlock (tail).
	 * The splice order matters; lock1->lf_next is read above.
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}
617
618 /*
619 * Wakeup a blocklist
620 */
621 void
622 spec_lf_wakelock(listhead)
623 struct lockf *listhead;
624 {
625 register struct lockf *wakelock;
626
627 while (wakelock = listhead->lf_blkhd.tqh_first) {
628 TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
629 wakelock->lf_next = NOLOCKF;
630 #ifdef LOCKF_DEBUG
631 if (lockf_debug & 2)
632 spec_lf_print("lf_wakelock: awakening", wakelock);
633 #endif /* LOCKF_DEBUG */
634 wakeup((caddr_t)wakelock);
635 }
636 }
637
638 #ifdef LOCKF_DEBUG
639 /*
640 * Print out a lock.
641 */
642 spec_lf_print(tag, lock)
643 char *tag;
644 register struct lockf *lock;
645 {
646
647 printf("%s: lock 0x%lx for ", tag, lock);
648 if (lock->lf_flags & F_POSIX)
649 printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
650 else
651 printf("id 0x%x", lock->lf_id);
652 printf(" on sip %d rdev <%d, %d>, %s, start %d, end %d",
653 lock->lf_specinfo,
654 major(lock->lf_specinfo->si_rdev),
655 minor(lock->lf_specinfo->si_rdev),
656 lock->lf_type == F_RDLCK ? "shared" :
657 lock->lf_type == F_WRLCK ? "exclusive" :
658 lock->lf_type == F_UNLCK ? "unlock" :
659 "unknown", lock->lf_start, lock->lf_end);
660 if (lock->lf_blkhd.tqh_first)
661 printf(" block 0x%x\n", lock->lf_blkhd.tqh_first);
662 else
663 printf("\n");
664 }
665
666 spec_lf_printlist(tag, lock)
667 char *tag;
668 struct lockf *lock;
669 {
670 register struct lockf *lf, *blk;
671
672 printf("%s: Lock list for sip %d on dev <%d, %d>:\n",
673 tag, lock->lf_specinfo,
674 major(lock->lf_specinfo->si_dev),
675 minor(lock->lf_specinfo->si_dev));
676 for (lf = lock->lf_specinfo->si_lockf; lf; lf = lf->lf_next) {
677 printf("\tlock 0x%lx for ", lf);
678 if (lf->lf_flags & F_POSIX)
679 printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
680 else
681 printf("id 0x%x", lf->lf_id);
682 printf(", %s, start %d, end %d",
683 lf->lf_type == F_RDLCK ? "shared" :
684 lf->lf_type == F_WRLCK ? "exclusive" :
685 lf->lf_type == F_UNLCK ? "unlock" :
686 "unknown", lf->lf_start, lf->lf_end);
687 for (blk = lf->lf_blkhd.tqh_first; blk;
688 blk = blk->lf_block.tqe_next) {
689 printf("\n\t\tlock request 0x%lx for ", blk);
690 if (blk->lf_flags & F_POSIX)
691 printf("proc %d",
692 ((struct proc *)(blk->lf_id))->p_pid);
693 else
694 printf("id 0x%x", blk->lf_id);
695 printf(", %s, start %d, end %d",
696 blk->lf_type == F_RDLCK ? "shared" :
697 blk->lf_type == F_WRLCK ? "exclusive" :
698 blk->lf_type == F_UNLCK ? "unlock" :
699 "unknown", blk->lf_start, blk->lf_end);
700 if (blk->lf_blkhd.tqh_first)
701 panic("lf_printlist: bad list");
702 }
703 printf("\n");
704 }
705 }
706 #endif /* LOCKF_DEBUG */