git.saurik.com Git - apple/xnu.git/blame - bsd/kern/kern_lockf.c (xnu-2782.40.9)
2d21ac55
A
1/*
2 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
1c79356b
A
28/*
29 * Copyright (c) 1982, 1986, 1989, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * This code is derived from software contributed to Berkeley by
33 * Scooter Morris at Genentech Inc.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
1c79356b
A
43 * 4. Neither the name of the University nor the names of its contributors
44 * may be used to endorse or promote products derived from this software
45 * without specific prior written permission.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
48 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57 * SUCH DAMAGE.
58 *
91447636 59 * @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94
1c79356b
A
60 */
61
91447636 62#include <sys/cdefs.h>
1c79356b
A
63#include <sys/param.h>
64#include <sys/systm.h>
65#include <sys/kernel.h>
91447636
A
66#include <sys/lock.h>
67#include <sys/mount.h>
1c79356b 68#include <sys/proc.h>
2d21ac55 69#include <sys/signalvar.h>
91447636 70#include <sys/unistd.h>
2d21ac55 71#include <sys/user.h>
1c79356b 72#include <sys/vnode.h>
91447636
A
73#include <sys/vnode_internal.h>
74#include <sys/vnode_if.h>
1c79356b
A
75#include <sys/malloc.h>
76#include <sys/fcntl.h>
91447636 77#include <sys/lockf.h>
39236c6e
A
78#include <sys/sdt.h>
79#include <kern/task.h>
1c79356b
A
80
81/*
82 * This variable controls the maximum number of processes that will
83 * be checked in doing deadlock detection.
84 */
91447636 85static int maxlockdepth = MAXDEPTH;
1c79356b 86
2d21ac55 87#ifdef LOCKF_DEBUGGING
1c79356b 88#include <sys/sysctl.h>
91447636
A
89#include <ufs/ufs/quota.h>
90#include <ufs/ufs/inode.h>
2d21ac55
A
91void lf_print(const char *tag, struct lockf *lock);
92void lf_printlist(const char *tag, struct lockf *lock);
91447636 93static int lockf_debug = 2;
6d2010ae 94SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW | CTLFLAG_LOCKED, &lockf_debug, 0, "");
2d21ac55
A
95
96/*
 97 * If there is no mask bit selector, or there is one and the selector is
98 * set, then output the debugging diagnostic.
99 */
100#define LOCKF_DEBUG(mask, ...) \
101 do { \
102 if( !(mask) || ((mask) & lockf_debug)) { \
103 printf(__VA_ARGS__); \
104 } \
105 } while(0)
106#else /* !LOCKF_DEBUGGING */
107#define LOCKF_DEBUG(mask, ...) /* mask */
108#endif /* !LOCKF_DEBUGGING */
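/*
 * Editor's note: illustrative usage of the macro above (a sketch, not
 * part of the original file).  A mask of 0 prints unconditionally;
 * otherwise the message is printed only when the corresponding bit is
 * set in lockf_debug (tunable via the debug.lockf_debug sysctl when
 * LOCKF_DEBUGGING is defined):
 *
 *	LOCKF_DEBUG(0, "always printed\n");
 *	LOCKF_DEBUG(2, "printed only when (lockf_debug & 2) is set\n");
 */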
1c79356b 109
91447636
A
110MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");
111
1c79356b
A
112#define NOLOCKF (struct lockf *)0
113#define SELF 0x1
114#define OTHERS 0x2
91447636 115#define OFF_MAX 0x7fffffffffffffffULL /* max off_t */
2d21ac55
A
116
117/*
118 * Overlapping lock states
119 */
120typedef enum {
121 OVERLAP_NONE = 0,
122 OVERLAP_EQUALS_LOCK,
123 OVERLAP_CONTAINS_LOCK,
124 OVERLAP_CONTAINED_BY_LOCK,
125 OVERLAP_STARTS_BEFORE_LOCK,
126 OVERLAP_ENDS_AFTER_LOCK
127} overlap_t;
128
91447636 129static int lf_clearlock(struct lockf *);
2d21ac55 130static overlap_t lf_findoverlap(struct lockf *,
91447636 131 struct lockf *, int, struct lockf ***, struct lockf **);
316670eb
A
132static struct lockf *lf_getblock(struct lockf *, pid_t);
133static int lf_getlock(struct lockf *, struct flock *, pid_t);
39236c6e 134static int lf_setlock(struct lockf *, struct timespec *);
2d21ac55 135static int lf_split(struct lockf *, struct lockf *);
c910b4d9 136static void lf_wakelock(struct lockf *, boolean_t);
39236c6e
A
137#if IMPORTANCE_INHERITANCE
138static void lf_hold_assertion(task_t, struct lockf *);
139static void lf_jump_to_queue_head(struct lockf *, struct lockf *);
140static void lf_drop_assertion(struct lockf *);
141#endif /* IMPORTANCE_INHERITANCE */
c910b4d9 142
1c79356b 143/*
2d21ac55
A
144 * lf_advlock
145 *
146 * Description: Advisory record locking support
147 *
148 * Parameters: ap Argument pointer to a vnop_advlock_args
149 * argument descriptor structure for the
150 * lock operation to be attempted.
151 *
152 * Returns: 0 Success
153 * EOVERFLOW
154 * EINVAL
155 * ENOLCK Number of locked regions exceeds limit
156 * lf_setlock:EAGAIN
157 * lf_setlock:EDEADLK
158 * lf_setlock:EINTR
159 * lf_setlock:ENOLCK
39236c6e 160 * lf_setlock:ETIMEDOUT
2d21ac55
A
161 * lf_clearlock:ENOLCK
162 * vnode_size:???
163 *
164 * Notes: We return ENOLCK when we run out of memory to support locks; as
165 * such, there is no specific expectation limit other than the
166 * amount of available resources.
1c79356b
A
167 */
168int
2d21ac55 169lf_advlock(struct vnop_advlock_args *ap)
91447636
A
170{
171 struct vnode *vp = ap->a_vp;
172 struct flock *fl = ap->a_fl;
173 vfs_context_t context = ap->a_context;
174 struct lockf *lock;
175 off_t start, end, oadd;
176 u_quad_t size;
177 int error;
178 struct lockf **head = &vp->v_lockf;
179
180 /* XXX HFS may need a !vnode_isreg(vp) EISDIR error here */
181
182 /*
183 * Avoid the common case of unlocking when inode has no locks.
184 */
185 if (*head == (struct lockf *)0) {
186 if (ap->a_op != F_SETLK) {
187 fl->l_type = F_UNLCK;
2d21ac55 188 LOCKF_DEBUG(0, "lf_advlock: '%s' unlock without lock\n", vfs_context_proc(context)->p_comm);
91447636
A
189 return (0);
190 }
191 }
192
193 /*
194 * Convert the flock structure into a start and end.
195 */
196 switch (fl->l_whence) {
197
198 case SEEK_SET:
199 case SEEK_CUR:
200 /*
201 * Caller is responsible for adding any necessary offset
202 * when SEEK_CUR is used.
203 */
204 start = fl->l_start;
205 break;
206
207 case SEEK_END:
208
2d21ac55
A
209 /*
 210 * It's OK to cast the u_quad_t to an off_t here, since they
211 * are the same storage size, and the value of the returned
212 * contents will never overflow into the sign bit. We need to
213 * do this because we will use size to force range checks.
214 */
215 if ((error = vnode_size(vp, (off_t *)&size, context))) {
216 LOCKF_DEBUG(0, "lf_advlock: vnode_getattr failed: %d\n", error);
91447636 217 return (error);
2d21ac55 218 }
91447636
A
219
220 if (size > OFF_MAX ||
2d21ac55
A
221 (fl->l_start > 0 &&
222 size > (u_quad_t)(OFF_MAX - fl->l_start)))
91447636
A
223 return (EOVERFLOW);
224 start = size + fl->l_start;
225 break;
226
227 default:
2d21ac55 228 LOCKF_DEBUG(0, "lf_advlock: unknown whence %d\n", fl->l_whence);
91447636
A
229 return (EINVAL);
230 }
2d21ac55
A
231 if (start < 0) {
232 LOCKF_DEBUG(0, "lf_advlock: start < 0 (%qd)\n", start);
91447636 233 return (EINVAL);
2d21ac55 234 }
91447636 235 if (fl->l_len < 0) {
2d21ac55
A
236 if (start == 0) {
237 LOCKF_DEBUG(0, "lf_advlock: len < 0 & start == 0\n");
91447636 238 return (EINVAL);
2d21ac55 239 }
91447636
A
240 end = start - 1;
241 start += fl->l_len;
2d21ac55
A
242 if (start < 0) {
243 LOCKF_DEBUG(0, "lf_advlock: start < 0 (%qd)\n", start);
91447636 244 return (EINVAL);
2d21ac55 245 }
91447636
A
246 } else if (fl->l_len == 0)
247 end = -1;
248 else {
249 oadd = fl->l_len - 1;
2d21ac55
A
250 if (oadd > (off_t)(OFF_MAX - start)) {
251 LOCKF_DEBUG(0, "lf_advlock: overflow\n");
91447636 252 return (EOVERFLOW);
2d21ac55 253 }
91447636
A
254 end = start + oadd;
255 }
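	/*
	 * Editor's note: illustrative summary of the conversion above (a
	 * sketch, not part of the original file).  All values are byte
	 * offsets; end == -1 means "through end of file":
	 *
	 *	l_len >  0:  [start, start + l_len - 1]
	 *	l_len == 0:  [start, -1]
	 *	l_len <  0:  [start + l_len, start - 1]
	 *
	 * e.g. SEEK_SET with l_start = 100, l_len = -10 covers bytes [90, 99].
	 */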
256 /*
257 * Create the lockf structure
258 */
259 MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
2d21ac55
A
260 if (lock == NULL)
261 return (ENOLCK);
91447636
A
262 lock->lf_start = start;
263 lock->lf_end = end;
264 lock->lf_id = ap->a_id;
265 lock->lf_vnode = vp;
266 lock->lf_type = fl->l_type;
267 lock->lf_head = head;
268 lock->lf_next = (struct lockf *)0;
269 TAILQ_INIT(&lock->lf_blkhd);
270 lock->lf_flags = ap->a_flags;
39236c6e
A
271#if IMPORTANCE_INHERITANCE
272 lock->lf_boosted = LF_NOT_BOOSTED;
273#endif /* IMPORTANCE_INHERITANCE */
91447636 274
c910b4d9
A
275 if (ap->a_flags & F_FLOCK)
276 lock->lf_flags |= F_WAKE1_SAFE;
277
91447636
A
278 lck_mtx_lock(&vp->v_lock); /* protect the lockf list */
279 /*
280 * Do the requested operation.
281 */
282 switch(ap->a_op) {
283 case F_SETLK:
39236c6e 284 error = lf_setlock(lock, ap->a_timeout);
91447636
A
285 break;
286
287 case F_UNLCK:
288 error = lf_clearlock(lock);
289 FREE(lock, M_LOCKF);
290 break;
291
292 case F_GETLK:
316670eb 293 error = lf_getlock(lock, fl, -1);
91447636
A
294 FREE(lock, M_LOCKF);
295 break;
296
316670eb 297
91447636
A
298 default:
299 FREE(lock, M_LOCKF);
300 error = EINVAL;
301 break;
302 }
6d2010ae 303 lck_mtx_unlock(&vp->v_lock); /* done manipulating the list */
91447636 304
2d21ac55 305 LOCKF_DEBUG(0, "lf_advlock: normal exit: %d\n\n", error);
91447636
A
306 return (error);
307}
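#if 0	/* Editor's illustrative sketch (userland); not part of this file. */
/*
 * Roughly how a request reaches lf_advlock(): fcntl(2) or flock(2) on a
 * vnode-backed descriptor builds the lock description and the VFS layer
 * issues VNOP_ADVLOCK(), which most filesystems forward here.  With
 * l_whence = SEEK_END and a negative l_len, the conversion above yields
 * the last |l_len| bytes of the file.  The helper name below is
 * hypothetical.
 */
#include <fcntl.h>

static int
lock_last_100_bytes(int fd)
{
	struct flock fl = {
		.l_whence = SEEK_END,
		.l_start  = 0,			/* measured from EOF */
		.l_len    = -100,		/* the 100 bytes just before EOF */
		.l_type   = F_WRLCK,
	};

	return fcntl(fd, F_SETLKW, &fl);	/* blocking request -> lf_setlock() */
}
#endif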
308
316670eb
A
309/*
310 * Empty the queue of msleeping requests for a lock on the given vnode.
311 * Called with the vnode already locked. Used for forced unmount, where
312 * a flock(2) invoker sleeping on a blocked lock holds an iocount reference
313 * that prevents the vnode from ever being drained. Force unmounting wins.
314 */
315void
316lf_abort_advlocks(vnode_t vp)
317{
318 struct lockf *lock;
319
320 if ((lock = vp->v_lockf) == NULL)
321 return;
322
323 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
324
325 if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
326 struct lockf *tlock;
327
328 TAILQ_FOREACH(tlock, &lock->lf_blkhd, lf_block) {
329 /*
330 * Setting this flag should cause all
 331 * currently blocked F_SETLK requests to
332 * return to userland with an errno.
333 */
334 tlock->lf_flags |= F_ABORT;
335 }
336 lf_wakelock(lock, TRUE);
337 }
338}
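/*
 * Editor's note (cross-reference, not part of the original file): the
 * F_ABORT flag set above is consumed in lf_setlock() after msleep()
 * returns, where it converts an otherwise-successful wakeup into EBADF
 * so the blocked fcntl(2)/flock(2) caller returns to userland and the
 * iocount it holds can finally be dropped.
 */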
2d21ac55 339
91447636 340/*
6d2010ae
A
341 * Take any lock attempts which are currently blocked by a given lock ("from")
342 * and mark them as blocked by a different lock ("to"). Used in the case
343 * where a byte range currently occupied by "from" is to be occupied by "to."
344 */
345static void
346lf_move_blocked(struct lockf *to, struct lockf *from)
347{
348 struct lockf *tlock;
349
350 TAILQ_FOREACH(tlock, &from->lf_blkhd, lf_block) {
351 tlock->lf_next = to;
352 }
353
354 TAILQ_CONCAT(&to->lf_blkhd, &from->lf_blkhd, lf_block);
355}
356
357/*
358 * lf_coalesce_adjacent
2d21ac55 359 *
6d2010ae 360 * Description: Helper function: when setting a lock, coalesce adjacent
2d21ac55 361 * locks. Needed because adjacent locks are not overlapping,
6d2010ae 362 * but POSIX requires that they be coalesced.
2d21ac55
A
363 *
364 * Parameters: lock The new lock which may be adjacent
6d2010ae
A
365 * to already locked regions, and which
366 * should therefore be coalesced with them
2d21ac55
A
367 *
368 * Returns: <void>
369 */
370static void
6d2010ae 371lf_coalesce_adjacent(struct lockf *lock)
2d21ac55
A
372{
373 struct lockf **lf = lock->lf_head;
374
375 while (*lf != NOLOCKF) {
6d2010ae 376 /* reject locks that obviously could not be coalesced */
2d21ac55
A
377 if ((*lf == lock) ||
378 ((*lf)->lf_id != lock->lf_id) ||
379 ((*lf)->lf_type != lock->lf_type)) {
380 lf = &(*lf)->lf_next;
381 continue;
382 }
383
6d2010ae
A
384 /*
385 * NOTE: Assumes that if two locks are adjacent on the number line
386 * and belong to the same owner, then they are adjacent on the list.
387 */
2d21ac55
A
388 if ((*lf)->lf_end != -1 &&
389 ((*lf)->lf_end + 1) == lock->lf_start) {
390 struct lockf *adjacent = *lf;
391
6d2010ae 392 LOCKF_DEBUG(0, "lf_coalesce_adjacent: coalesce adjacent previous\n");
2d21ac55
A
393 lock->lf_start = (*lf)->lf_start;
394 *lf = lock;
395 lf = &(*lf)->lf_next;
6d2010ae
A
396
397 lf_move_blocked(lock, adjacent);
398
2d21ac55
A
399 FREE(adjacent, M_LOCKF);
400 continue;
401 }
6d2010ae 402 /* If the lock starts adjacent to us, we can coalesce it */
2d21ac55
A
403 if (lock->lf_end != -1 &&
404 (lock->lf_end + 1) == (*lf)->lf_start) {
405 struct lockf *adjacent = *lf;
406
6d2010ae 407 LOCKF_DEBUG(0, "lf_coalesce_adjacent: coalesce adjacent following\n");
2d21ac55
A
408 lock->lf_end = (*lf)->lf_end;
409 lock->lf_next = (*lf)->lf_next;
410 lf = &lock->lf_next;
6d2010ae
A
411
412 lf_move_blocked(lock, adjacent);
413
2d21ac55
A
414 FREE(adjacent, M_LOCKF);
415 continue;
416 }
417
418 /* no matching conditions; go on to next lock */
419 lf = &(*lf)->lf_next;
420 }
421}
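#if 0	/* Editor's illustrative sketch (userland); not part of this file. */
/*
 * Example of the behaviour lf_coalesce_adjacent() provides: two abutting
 * write locks taken by the same process collapse into a single record
 * covering [0, 199], so a later unlock of the whole range removes one
 * record rather than two.  The helper name is hypothetical.
 */
#include <fcntl.h>

static void
coalesce_example(int fd)
{
	struct flock a = { .l_whence = SEEK_SET, .l_start = 0,   .l_len = 100, .l_type = F_WRLCK };
	struct flock b = { .l_whence = SEEK_SET, .l_start = 100, .l_len = 100, .l_type = F_WRLCK };

	(void)fcntl(fd, F_SETLK, &a);	/* lock [0, 99] */
	(void)fcntl(fd, F_SETLK, &b);	/* lock [100, 199]; coalesced with the first */
}
#endif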
422
423
424/*
425 * lf_setlock
426 *
427 * Description: Set a byte-range lock.
428 *
429 * Parameters: lock The lock structure describing the lock
430 * to be set; allocated by the caller, it
431 * will be linked into the lock list if
432 * the set is successful, and freed if the
433 * set is unsuccessful.
434 *
39236c6e
A
435 * timeout Timeout specified in the case of
436 * SETLKWTIMEOUT.
437 *
2d21ac55
A
438 * Returns: 0 Success
439 * EAGAIN
440 * EDEADLK
441 * lf_split:ENOLCK
442 * lf_clearlock:ENOLCK
443 * msleep:EINTR
39236c6e 444 * msleep:ETIMEDOUT
2d21ac55
A
445 *
446 * Notes: We add the lock to the provisional lock list. We do not
6d2010ae 447 * coalesce at this time; this has implications for other lock
2d21ac55 448 * requestors in the blocker search mechanism.
91447636
A
449 */
450static int
39236c6e 451lf_setlock(struct lockf *lock, struct timespec *timeout)
1c79356b 452{
91447636
A
453 struct lockf *block;
454 struct lockf **head = lock->lf_head;
1c79356b
A
455 struct lockf **prev, *overlap, *ltmp;
456 static char lockstr[] = "lockf";
2d21ac55 457 int priority, needtolink, error;
91447636 458 struct vnode *vp = lock->lf_vnode;
2d21ac55 459 overlap_t ovcase;
39236c6e
A
460#if IMPORTANCE_INHERITANCE
461 task_t boosting_task, block_task;
462#endif /* IMPORTANCE_INHERITANCE */
1c79356b 463
2d21ac55
A
464#ifdef LOCKF_DEBUGGING
465 if (lockf_debug & 1) {
1c79356b 466 lf_print("lf_setlock", lock);
2d21ac55
A
467 lf_printlist("lf_setlock(in)", lock);
468 }
469#endif /* LOCKF_DEBUGGING */
1c79356b
A
470
471 /*
472 * Set the priority
473 */
474 priority = PLOCK;
475 if (lock->lf_type == F_WRLCK)
476 priority += 4;
477 priority |= PCATCH;
478 /*
479 * Scan lock list for this file looking for locks that would block us.
480 */
316670eb 481 while ((block = lf_getblock(lock, -1))) {
1c79356b
A
482 /*
483 * Free the structure and return if nonblocking.
484 */
485 if ((lock->lf_flags & F_WAIT) == 0) {
39236c6e 486 DTRACE_FSINFO(advlock__nowait, vnode_t, vp);
1c79356b
A
487 FREE(lock, M_LOCKF);
488 return (EAGAIN);
489 }
2d21ac55 490
1c79356b
A
491 /*
492 * We are blocked. Since flock style locks cover
493 * the whole file, there is no chance for deadlock.
494 * For byte-range locks we must check for deadlock.
495 *
496 * Deadlock detection is done by looking through the
497 * wait channels to see if there are any cycles that
498 * involve us. MAXDEPTH is set just to make sure we
499 * do not go off into neverland.
500 */
501 if ((lock->lf_flags & F_POSIX) &&
502 (block->lf_flags & F_POSIX)) {
2d21ac55
A
503 struct proc *wproc, *bproc;
504 struct uthread *ut;
91447636 505 struct lockf *waitblock;
1c79356b
A
506 int i = 0;
507
508 /* The block is waiting on something */
509 wproc = (struct proc *)block->lf_id;
2d21ac55
A
510 proc_lock(wproc);
511 TAILQ_FOREACH(ut, &wproc->p_uthlist, uu_list) {
512 /*
513 * While the thread is asleep (uu_wchan != 0)
514 * in this code (uu_wmesg == lockstr)
515 * and we have not exceeded the maximum cycle
516 * depth (i < maxlockdepth), then check for a
517 * cycle to see if the lock is blocked behind
518 * someone blocked behind us.
519 */
520 while (((waitblock = (struct lockf *)ut->uu_wchan) != NULL) &&
521 ut->uu_wmesg == lockstr &&
91447636 522 (i++ < maxlockdepth)) {
2d21ac55
A
523 waitblock = (struct lockf *)ut->uu_wchan;
524 /*
525 * Get the lock blocking the lock
526 * which would block us, and make
527 * certain it hasn't come unblocked
528 * (been granted, e.g. between the time
529 * we called lf_getblock, and the time
530 * we successfully acquired the
531 * proc_lock).
532 */
91447636 533 waitblock = waitblock->lf_next;
2d21ac55
A
534 if (waitblock == NULL)
535 break;
536
537 /*
538 * Make sure it's an advisory range
539 * lock and not an overall file lock;
540 * if we mix lock types, it's our own
541 * fault.
542 */
91447636
A
543 if ((waitblock->lf_flags & F_POSIX) == 0)
544 break;
2d21ac55
A
545
546 /*
 547 * If the owner of the lock that's
 548 * blocking a lock that's blocking us
 549 * is the process requesting this lock,
 550 * then we would deadlock, so error out.
551 */
552 bproc = (struct proc *)waitblock->lf_id;
553 if (bproc == (struct proc *)lock->lf_id) {
554 proc_unlock(wproc);
91447636
A
555 FREE(lock, M_LOCKF);
556 return (EDEADLK);
557 }
1c79356b
A
558 }
559 }
2d21ac55 560 proc_unlock(wproc);
1c79356b 561 }
2d21ac55 562
1c79356b
A
563 /*
564 * For flock type locks, we must first remove
565 * any shared locks that we hold before we sleep
566 * waiting for an exclusive lock.
567 */
568 if ((lock->lf_flags & F_FLOCK) &&
569 lock->lf_type == F_WRLCK) {
570 lock->lf_type = F_UNLCK;
2d21ac55
A
571 if ((error = lf_clearlock(lock)) != 0) {
572 FREE(lock, M_LOCKF);
573 return (error);
574 }
1c79356b
A
575 lock->lf_type = F_WRLCK;
576 }
577 /*
578 * Add our lock to the blocked list and sleep until we're free.
579 * Remember who blocked us (for deadlock detection).
580 */
581 lock->lf_next = block;
582 TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
c910b4d9
A
583
584 if ( !(lock->lf_flags & F_FLOCK))
585 block->lf_flags &= ~F_WAKE1_SAFE;
586
2d21ac55 587#ifdef LOCKF_DEBUGGING
1c79356b
A
588 if (lockf_debug & 1) {
589 lf_print("lf_setlock: blocking on", block);
2d21ac55 590 lf_printlist("lf_setlock(block)", block);
1c79356b 591 }
2d21ac55 592#endif /* LOCKF_DEBUGGING */
39236c6e
A
593 DTRACE_FSINFO(advlock__wait, vnode_t, vp);
594#if IMPORTANCE_INHERITANCE
595 /*
 596 * POSIX-style locks are not inherited by child processes, and
 597 * there is a one-to-one mapping between a lock and its owner,
 598 * while flock-style locks are inherited across fork() and do not
 599 * maintain any one-to-one mapping between a lock and its owner.
 600 * Thus importance donation is done only for POSIX-style
 601 * locks.
602 */
603 if ((lock->lf_flags & F_POSIX) && (block->lf_flags & F_POSIX)) {
604 block_task = proc_task((proc_t) block->lf_id);
605 boosting_task = proc_task((proc_t) lock->lf_id);
606
607 /* Check if current task can donate importance. The
608 * check of imp_donor bit is done without holding
fe8ab488 609 * any lock. The value may change after you read it,
39236c6e
A
610 * but it is ok to boost a task while someone else is
611 * unboosting you.
fe8ab488
A
612 *
613 * TODO: Support live inheritance on file locks.
39236c6e
A
614 */
615 if (task_is_importance_donor(boosting_task)) {
616 if (block->lf_boosted != LF_BOOSTED &&
fe8ab488 617 task_is_importance_receiver_type(block_task)) {
39236c6e
A
618 lf_hold_assertion(block_task, block);
619 }
620 lf_jump_to_queue_head(block, lock);
621 }
622 }
623#endif /* IMPORTANCE_INHERITANCE */
624 error = msleep(lock, &vp->v_lock, priority, lockstr, timeout);
c910b4d9 625
316670eb
A
626 if (error == 0 && (lock->lf_flags & F_ABORT) != 0)
627 error = EBADF;
628
99c3a104 629 if (lock->lf_next) {
1c79356b 630 /*
99c3a104
A
631 * lf_wakelock() always sets wakelock->lf_next to
632 * NULL before a wakeup; so we've been woken early
633 * - perhaps by a debugger, signal or other event.
634 *
635 * Remove 'lock' from the block list (avoids double-add
636 * in the spurious case, which would create a cycle)
1c79356b 637 */
99c3a104
A
638 TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
639 lock->lf_next = NULL;
640
641 if (error == 0) {
642 /*
643 * If this was a spurious wakeup, retry
644 */
645 printf("%s: spurious wakeup, retrying lock\n",
646 __func__);
647 continue;
91447636 648 }
99c3a104
A
649 }
650
651 if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
652 if ((block = lf_getblock(lock, -1)) != NULL)
653 lf_move_blocked(block, lock);
654 }
655
656 if (error) {
c910b4d9
A
657 if (!TAILQ_EMPTY(&lock->lf_blkhd))
658 lf_wakelock(lock, TRUE);
91447636 659 FREE(lock, M_LOCKF);
39236c6e
A
 660 /* Return ETIMEDOUT if a timeout occurred. */
661 if (error == EWOULDBLOCK) {
662 error = ETIMEDOUT;
663 }
1c79356b 664 return (error);
99c3a104 665 }
1c79356b 666 }
99c3a104 667
1c79356b
A
668 /*
669 * No blocks!! Add the lock. Note that we will
670 * downgrade or upgrade any overlapping locks this
671 * process already owns.
672 *
673 * Skip over locks owned by other processes.
674 * Handle any locks that overlap and are owned by ourselves.
675 */
91447636
A
676 prev = head;
677 block = *head;
1c79356b
A
678 needtolink = 1;
679 for (;;) {
91447636
A
680 ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
681 if (ovcase)
1c79356b
A
682 block = overlap->lf_next;
683 /*
684 * Six cases:
685 * 0) no overlap
686 * 1) overlap == lock
687 * 2) overlap contains lock
688 * 3) lock contains overlap
689 * 4) overlap starts before lock
690 * 5) overlap ends after lock
691 */
692 switch (ovcase) {
2d21ac55 693 case OVERLAP_NONE:
1c79356b
A
694 if (needtolink) {
695 *prev = lock;
696 lock->lf_next = overlap;
697 }
698 break;
699
2d21ac55 700 case OVERLAP_EQUALS_LOCK:
1c79356b
A
701 /*
702 * If downgrading lock, others may be
703 * able to acquire it.
704 */
705 if (lock->lf_type == F_RDLCK &&
706 overlap->lf_type == F_WRLCK)
c910b4d9 707 lf_wakelock(overlap, TRUE);
1c79356b
A
708 overlap->lf_type = lock->lf_type;
709 FREE(lock, M_LOCKF);
6d2010ae 710 lock = overlap; /* for lf_coalesce_adjacent() */
1c79356b
A
711 break;
712
2d21ac55 713 case OVERLAP_CONTAINS_LOCK:
1c79356b
A
714 /*
715 * Check for common starting point and different types.
716 */
717 if (overlap->lf_type == lock->lf_type) {
91447636 718 FREE(lock, M_LOCKF);
6d2010ae 719 lock = overlap; /* for lf_coalesce_adjacent() */
1c79356b
A
720 break;
721 }
722 if (overlap->lf_start == lock->lf_start) {
723 *prev = lock;
724 lock->lf_next = overlap;
725 overlap->lf_start = lock->lf_end + 1;
2d21ac55
A
726 } else {
727 /*
728 * If we can't split the lock, we can't
729 * grant it. Claim a system limit for the
730 * resource shortage.
731 */
732 if (lf_split(overlap, lock)) {
733 FREE(lock, M_LOCKF);
734 return (ENOLCK);
735 }
736 }
c910b4d9 737 lf_wakelock(overlap, TRUE);
1c79356b
A
738 break;
739
2d21ac55 740 case OVERLAP_CONTAINED_BY_LOCK:
1c79356b
A
741 /*
742 * If downgrading lock, others may be able to
743 * acquire it, otherwise take the list.
744 */
745 if (lock->lf_type == F_RDLCK &&
746 overlap->lf_type == F_WRLCK) {
c910b4d9 747 lf_wakelock(overlap, TRUE);
1c79356b 748 } else {
91447636
A
749 while (!TAILQ_EMPTY(&overlap->lf_blkhd)) {
750 ltmp = TAILQ_FIRST(&overlap->lf_blkhd);
1c79356b
A
751 TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
752 lf_block);
753 TAILQ_INSERT_TAIL(&lock->lf_blkhd,
754 ltmp, lf_block);
91447636 755 ltmp->lf_next = lock;
1c79356b
A
756 }
757 }
758 /*
759 * Add the new lock if necessary and delete the overlap.
760 */
761 if (needtolink) {
762 *prev = lock;
763 lock->lf_next = overlap->lf_next;
764 prev = &lock->lf_next;
765 needtolink = 0;
766 } else
767 *prev = overlap->lf_next;
91447636 768 FREE(overlap, M_LOCKF);
1c79356b
A
769 continue;
770
2d21ac55 771 case OVERLAP_STARTS_BEFORE_LOCK:
1c79356b
A
772 /*
773 * Add lock after overlap on the list.
774 */
775 lock->lf_next = overlap->lf_next;
776 overlap->lf_next = lock;
777 overlap->lf_end = lock->lf_start - 1;
778 prev = &lock->lf_next;
c910b4d9 779 lf_wakelock(overlap, TRUE);
1c79356b
A
780 needtolink = 0;
781 continue;
782
2d21ac55 783 case OVERLAP_ENDS_AFTER_LOCK:
1c79356b
A
784 /*
785 * Add the new lock before overlap.
786 */
787 if (needtolink) {
788 *prev = lock;
789 lock->lf_next = overlap;
790 }
791 overlap->lf_start = lock->lf_end + 1;
c910b4d9 792 lf_wakelock(overlap, TRUE);
1c79356b
A
793 break;
794 }
795 break;
796 }
6d2010ae
A
797 /* Coalesce adjacent locks with identical attributes */
798 lf_coalesce_adjacent(lock);
2d21ac55 799#ifdef LOCKF_DEBUGGING
1c79356b
A
800 if (lockf_debug & 1) {
801 lf_print("lf_setlock: got the lock", lock);
2d21ac55 802 lf_printlist("lf_setlock(out)", lock);
1c79356b 803 }
2d21ac55 804#endif /* LOCKF_DEBUGGING */
1c79356b
A
805 return (0);
806}
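#if 0	/* Editor's illustrative sketch (userland); not part of this file. */
/*
 * The POSIX deadlock check above turns a classic ABBA pattern into
 * EDEADLK rather than letting both processes sleep forever.  Sketch,
 * assuming another process already holds [0, 9] and is itself blocked
 * waiting for [10, 19], which the calling process holds.  The helper
 * name is hypothetical.
 */
#include <errno.h>
#include <fcntl.h>

static int
would_deadlock(int fd)
{
	struct flock fl = { .l_whence = SEEK_SET, .l_start = 0, .l_len = 10, .l_type = F_WRLCK };

	if (fcntl(fd, F_SETLKW, &fl) == -1 && errno == EDEADLK)
		return 1;	/* cycle detected by lf_setlock() */
	return 0;
}
#endif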
807
2d21ac55 808
1c79356b 809/*
2d21ac55
A
810 * lf_clearlock
811 *
 812 * Description: Remove a byte-range lock on a vnode. Generally, find the
813 * lock (or an overlap to that lock) and remove it (or shrink
814 * it), then wakeup anyone we can.
815 *
816 * Parameters: unlock The lock to clear
1c79356b 817 *
2d21ac55
A
818 * Returns: 0 Success
819 * lf_split:ENOLCK
820 *
821 * Notes: A caller may unlock all the locks owned by the caller by
822 * specifying the entire file range; locks owned by other
 823 * callers are not affected by this operation.
1c79356b 824 */
91447636 825static int
2d21ac55 826lf_clearlock(struct lockf *unlock)
1c79356b 827{
91447636
A
828 struct lockf **head = unlock->lf_head;
829 struct lockf *lf = *head;
1c79356b 830 struct lockf *overlap, **prev;
2d21ac55 831 overlap_t ovcase;
1c79356b
A
832
833 if (lf == NOLOCKF)
834 return (0);
2d21ac55 835#ifdef LOCKF_DEBUGGING
1c79356b
A
836 if (unlock->lf_type != F_UNLCK)
837 panic("lf_clearlock: bad type");
838 if (lockf_debug & 1)
839 lf_print("lf_clearlock", unlock);
2d21ac55 840#endif /* LOCKF_DEBUGGING */
91447636 841 prev = head;
2d21ac55 842 while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap)) != OVERLAP_NONE) {
1c79356b
A
843 /*
844 * Wakeup the list of locks to be retried.
845 */
c910b4d9 846 lf_wakelock(overlap, FALSE);
39236c6e
A
847#if IMPORTANCE_INHERITANCE
848 if (overlap->lf_boosted == LF_BOOSTED) {
849 lf_drop_assertion(overlap);
850 }
851#endif /* IMPORTANCE_INHERITANCE */
1c79356b
A
852
853 switch (ovcase) {
2d21ac55
A
854 case OVERLAP_NONE: /* satisfy compiler enum/switch */
855 break;
1c79356b 856
2d21ac55 857 case OVERLAP_EQUALS_LOCK:
1c79356b
A
858 *prev = overlap->lf_next;
859 FREE(overlap, M_LOCKF);
860 break;
861
2d21ac55 862 case OVERLAP_CONTAINS_LOCK: /* split it */
1c79356b
A
863 if (overlap->lf_start == unlock->lf_start) {
864 overlap->lf_start = unlock->lf_end + 1;
865 break;
866 }
2d21ac55
A
867 /*
868 * If we can't split the lock, we can't grant it.
869 * Claim a system limit for the resource shortage.
870 */
871 if (lf_split(overlap, unlock))
872 return (ENOLCK);
1c79356b
A
873 overlap->lf_next = unlock->lf_next;
874 break;
875
2d21ac55 876 case OVERLAP_CONTAINED_BY_LOCK:
1c79356b
A
877 *prev = overlap->lf_next;
878 lf = overlap->lf_next;
91447636 879 FREE(overlap, M_LOCKF);
1c79356b
A
880 continue;
881
2d21ac55 882 case OVERLAP_STARTS_BEFORE_LOCK:
1c79356b
A
883 overlap->lf_end = unlock->lf_start - 1;
884 prev = &overlap->lf_next;
885 lf = overlap->lf_next;
886 continue;
887
2d21ac55 888 case OVERLAP_ENDS_AFTER_LOCK:
1c79356b
A
889 overlap->lf_start = unlock->lf_end + 1;
890 break;
891 }
892 break;
893 }
2d21ac55 894#ifdef LOCKF_DEBUGGING
1c79356b
A
895 if (lockf_debug & 1)
896 lf_printlist("lf_clearlock", unlock);
2d21ac55 897#endif /* LOCKF_DEBUGGING */
1c79356b
A
898 return (0);
899}
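#if 0	/* Editor's illustrative sketch (userland); not part of this file. */
/*
 * Unlocking the middle of a held range exercises the
 * OVERLAP_CONTAINS_LOCK case above: the original record is split by
 * lf_split() into two records, [0, 99] and [200, 299].  The helper name
 * is hypothetical.
 */
#include <fcntl.h>

static void
punch_hole_in_lock(int fd)
{
	struct flock whole = { .l_whence = SEEK_SET, .l_start = 0,   .l_len = 300, .l_type = F_WRLCK };
	struct flock hole  = { .l_whence = SEEK_SET, .l_start = 100, .l_len = 100, .l_type = F_UNLCK };

	(void)fcntl(fd, F_SETLK, &whole);	/* lock [0, 299] */
	(void)fcntl(fd, F_SETLK, &hole);	/* unlock [100, 199]; two records remain */
}
#endif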
900
2d21ac55 901
1c79356b 902/*
2d21ac55
A
903 * lf_getlock
904 *
905 * Description: Check whether there is a blocking lock, and if so return
906 * its process identifier into the lock being requested.
907 *
908 * Parameters: lock Pointer to lock to test for blocks
909 * fl Pointer to flock structure to receive
910 * the blocking lock information, if a
911 * blocking lock is found.
316670eb 912 * matchpid -1, or pid value to match in lookup.
2d21ac55
A
913 *
914 * Returns: 0 Success
915 *
916 * Implicit Returns:
917 * *fl Contents modified to reflect the
918 * blocking lock, if one is found; not
919 * modified otherwise
920 *
921 * Notes: fl->l_pid will be (-1) for file locks and will only be set to
922 * the blocking process ID for advisory record locks.
1c79356b 923 */
91447636 924static int
316670eb 925lf_getlock(struct lockf *lock, struct flock *fl, pid_t matchpid)
1c79356b 926{
91447636 927 struct lockf *block;
1c79356b 928
2d21ac55 929#ifdef LOCKF_DEBUGGING
1c79356b
A
930 if (lockf_debug & 1)
931 lf_print("lf_getlock", lock);
2d21ac55 932#endif /* LOCKF_DEBUGGING */
1c79356b 933
316670eb 934 if ((block = lf_getblock(lock, matchpid))) {
1c79356b
A
935 fl->l_type = block->lf_type;
936 fl->l_whence = SEEK_SET;
937 fl->l_start = block->lf_start;
938 if (block->lf_end == -1)
939 fl->l_len = 0;
940 else
941 fl->l_len = block->lf_end - block->lf_start + 1;
942 if (block->lf_flags & F_POSIX)
91447636 943 fl->l_pid = proc_pid((struct proc *)(block->lf_id));
1c79356b
A
944 else
945 fl->l_pid = -1;
946 } else {
947 fl->l_type = F_UNLCK;
948 }
949 return (0);
950}
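#if 0	/* Editor's illustrative sketch (userland); not part of this file. */
/*
 * F_GETLK reaches lf_getlock() above: on return l_type is either F_UNLCK
 * (the probe would succeed) or describes the first blocking lock, with
 * l_pid set for POSIX record locks and -1 for flock-style locks.  The
 * helper name is hypothetical.
 */
#include <fcntl.h>
#include <stdio.h>

static void
who_blocks(int fd)
{
	struct flock fl = { .l_whence = SEEK_SET, .l_start = 0, .l_len = 0, .l_type = F_WRLCK };

	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
		printf("blocked by pid %d starting at offset %lld\n",
		    (int)fl.l_pid, (long long)fl.l_start);
}
#endif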
951
952/*
2d21ac55
A
953 * lf_getblock
954 *
955 * Description: Walk the list of locks for an inode and return the first
956 * blocking lock. A lock is considered blocking if we are not
957 * the lock owner; otherwise, we are permitted to upgrade or
958 * downgrade it, and it's not considered blocking.
959 *
960 * Parameters: lock The lock for which we are interested
961 * in obtaining the blocking lock, if any
316670eb 962 * matchpid -1, or pid value to match in lookup.
2d21ac55
A
963 *
964 * Returns: NOLOCKF No blocking lock exists
965 * !NOLOCKF The address of the blocking lock's
966 * struct lockf.
1c79356b 967 */
91447636 968static struct lockf *
316670eb 969lf_getblock(struct lockf *lock, pid_t matchpid)
1c79356b 970{
91447636 971 struct lockf **prev, *overlap, *lf = *(lock->lf_head);
1c79356b 972
316670eb
A
973 for (prev = lock->lf_head;
974 lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != OVERLAP_NONE;
975 lf = overlap->lf_next) {
1c79356b 976 /*
316670eb
A
977 * Found an overlap.
978 *
979 * If we're matching pids, and it's a record lock,
980 * but the pid doesn't match, then keep on looking ..
1c79356b 981 */
316670eb
A
982 if (matchpid != -1 &&
983 (overlap->lf_flags & F_POSIX) != 0 &&
984 proc_pid((struct proc *)(overlap->lf_id)) != matchpid)
985 continue;
1c79356b 986 /*
316670eb 987 * does it block us?
1c79356b 988 */
316670eb
A
989 if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
990 return (overlap);
1c79356b
A
991 }
992 return (NOLOCKF);
993}
994
2d21ac55 995
1c79356b 996/*
2d21ac55
A
997 * lf_findoverlap
998 *
999 * Description: Walk the list of locks to find an overlapping lock (if any).
1000 *
1001 * Parameters: lf First lock on lock list
1002 * lock The lock we are checking for an overlap
1003 * check Check type
 1004 * prev pointer to pointer to contain the
 1005 * address of the pointer to the lock
 1006 * previous to the overlapping lock, if any
1007 * overlap pointer to pointer to contain address
1008 * of overlapping lock
1009 *
1010 * Returns: OVERLAP_NONE
1011 * OVERLAP_EQUALS_LOCK
1012 * OVERLAP_CONTAINS_LOCK
1013 * OVERLAP_CONTAINED_BY_LOCK
1014 * OVERLAP_STARTS_BEFORE_LOCK
1015 * OVERLAP_ENDS_AFTER_LOCK
1c79356b 1016 *
2d21ac55
A
1017 * Implicit Returns:
1018 * *prev The address of the next pointer in the
1019 * lock previous to the overlapping lock;
1020 * this is generally used to relink the
1021 * lock list, avoiding a second iteration.
1022 * *overlap The pointer to the overlapping lock
316670eb 1023 * itself; this is used to return data in
2d21ac55
A
1024 * the check == OTHERS case, and for the
1025 * caller to modify the overlapping lock,
1026 * in the check == SELF case
1027 *
1028 * Note: This returns only the FIRST overlapping lock. There may be
1029 * more than one. lf_getlock will return the first blocking lock,
 1030 * while lf_setlock will iterate over all overlapping locks to
 1031 * resolve each overlap in turn.
1032 * The check parameter can be SELF, meaning we are looking for
6d2010ae 1033 * overlapping locks owned by us, or it can be OTHERS, meaning
2d21ac55
A
1034 * we are looking for overlapping locks owned by someone else so
1035 * we can report a blocking lock on an F_GETLK request.
1036 *
 1037 * The values of *overlap and *prev are modified, even if there is
1038 * no overlapping lock found; always check the return code.
1c79356b 1039 */
2d21ac55
A
1040static overlap_t
1041lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
1042 struct lockf ***prev, struct lockf **overlap)
1c79356b
A
1043{
1044 off_t start, end;
6d2010ae 1045 int found_self = 0;
1c79356b
A
1046
1047 *overlap = lf;
1048 if (lf == NOLOCKF)
1049 return (0);
2d21ac55 1050#ifdef LOCKF_DEBUGGING
1c79356b
A
1051 if (lockf_debug & 2)
1052 lf_print("lf_findoverlap: looking for overlap in", lock);
2d21ac55 1053#endif /* LOCKF_DEBUGGING */
1c79356b
A
1054 start = lock->lf_start;
1055 end = lock->lf_end;
1056 while (lf != NOLOCKF) {
1057 if (((type & SELF) && lf->lf_id != lock->lf_id) ||
1058 ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
6d2010ae
A
1059 /*
1060 * Locks belonging to one process are adjacent on the
1061 * list, so if we've found any locks belonging to us,
1062 * and we're now seeing something else, then we've
1063 * examined all "self" locks. Note that bailing out
1064 * here is quite important; for coalescing, we assume
1065 * numerically adjacent locks from the same owner to
1066 * be adjacent on the list.
1067 */
1068 if ((type & SELF) && found_self) {
1069 return OVERLAP_NONE;
1070 }
1071
1c79356b
A
1072 *prev = &lf->lf_next;
1073 *overlap = lf = lf->lf_next;
1074 continue;
1075 }
6d2010ae
A
1076
1077 if ((type & SELF)) {
1078 found_self = 1;
1079 }
1080
2d21ac55 1081#ifdef LOCKF_DEBUGGING
1c79356b
A
1082 if (lockf_debug & 2)
1083 lf_print("\tchecking", lf);
2d21ac55 1084#endif /* LOCKF_DEBUGGING */
1c79356b
A
1085 /*
1086 * OK, check for overlap
1c79356b
A
1087 */
1088 if ((lf->lf_end != -1 && start > lf->lf_end) ||
1089 (end != -1 && lf->lf_start > end)) {
1090 /* Case 0 */
2d21ac55 1091 LOCKF_DEBUG(2, "no overlap\n");
6d2010ae
A
1092
1093 /*
1094 * NOTE: assumes that locks for the same process are
1095 * nonintersecting and ordered.
1096 */
1c79356b 1097 if ((type & SELF) && end != -1 && lf->lf_start > end)
2d21ac55 1098 return (OVERLAP_NONE);
1c79356b
A
1099 *prev = &lf->lf_next;
1100 *overlap = lf = lf->lf_next;
1101 continue;
1102 }
1103 if ((lf->lf_start == start) && (lf->lf_end == end)) {
2d21ac55
A
1104 LOCKF_DEBUG(2, "overlap == lock\n");
1105 return (OVERLAP_EQUALS_LOCK);
1c79356b
A
1106 }
1107 if ((lf->lf_start <= start) &&
1108 (end != -1) &&
1109 ((lf->lf_end >= end) || (lf->lf_end == -1))) {
2d21ac55
A
1110 LOCKF_DEBUG(2, "overlap contains lock\n");
1111 return (OVERLAP_CONTAINS_LOCK);
1c79356b
A
1112 }
1113 if (start <= lf->lf_start &&
1114 (end == -1 ||
1115 (lf->lf_end != -1 && end >= lf->lf_end))) {
2d21ac55
A
1116 LOCKF_DEBUG(2, "lock contains overlap\n");
1117 return (OVERLAP_CONTAINED_BY_LOCK);
1c79356b
A
1118 }
1119 if ((lf->lf_start < start) &&
1120 ((lf->lf_end >= start) || (lf->lf_end == -1))) {
2d21ac55
A
1121 LOCKF_DEBUG(2, "overlap starts before lock\n");
1122 return (OVERLAP_STARTS_BEFORE_LOCK);
1c79356b
A
1123 }
1124 if ((lf->lf_start > start) &&
1125 (end != -1) &&
1126 ((lf->lf_end > end) || (lf->lf_end == -1))) {
2d21ac55
A
1127 LOCKF_DEBUG(2, "overlap ends after lock\n");
1128 return (OVERLAP_ENDS_AFTER_LOCK);
1c79356b
A
1129 }
1130 panic("lf_findoverlap: default");
1131 }
2d21ac55 1132 return (OVERLAP_NONE);
1c79356b
A
1133}
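/*
 * Editor's note: illustrative picture of the non-trivial cases above,
 * drawn relative to the requested lock (a sketch, not part of the
 * original file):
 *
 *	lock (request):              |---------|
 *	OVERLAP_EQUALS_LOCK:         |---------|
 *	OVERLAP_CONTAINS_LOCK:     |-------------|
 *	OVERLAP_CONTAINED_BY_LOCK:     |-----|
 *	OVERLAP_STARTS_BEFORE_LOCK:|------|
 *	OVERLAP_ENDS_AFTER_LOCK:           |--------|
 */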
1134
2d21ac55 1135
1c79356b 1136/*
2d21ac55
A
1137 * lf_split
1138 *
1139 * Description: Split a lock and a contained region into two or three locks
1140 * as necessary.
1141 *
1142 * Parameters: lock1 Lock to split
1143 * lock2 Overlapping lock region requiring the
1144 * split (upgrade/downgrade/unlock)
1145 *
1146 * Returns: 0 Success
1147 * ENOLCK No memory for new lock
1148 *
1149 * Implicit Returns:
1150 * *lock1 Modified original lock
1151 * *lock2 Overlapping lock (inserted into list)
1152 * (new lock) Potential new lock inserted into list
1153 * if split results in 3 locks
1154 *
1155 * Notes: This operation can only fail if the split would result in three
1156 * locks, and there is insufficient memory to allocate the third
1157 * lock; in that case, neither of the locks will be modified.
1c79356b 1158 */
2d21ac55
A
1159static int
1160lf_split(struct lockf *lock1, struct lockf *lock2)
1c79356b 1161{
91447636 1162 struct lockf *splitlock;
1c79356b 1163
2d21ac55 1164#ifdef LOCKF_DEBUGGING
1c79356b
A
1165 if (lockf_debug & 2) {
1166 lf_print("lf_split", lock1);
1167 lf_print("splitting from", lock2);
1168 }
2d21ac55 1169#endif /* LOCKF_DEBUGGING */
1c79356b
A
1170 /*
 1171 * Check to see if splitting into only two pieces.
1172 */
1173 if (lock1->lf_start == lock2->lf_start) {
1174 lock1->lf_start = lock2->lf_end + 1;
1175 lock2->lf_next = lock1;
2d21ac55 1176 return (0);
1c79356b
A
1177 }
1178 if (lock1->lf_end == lock2->lf_end) {
1179 lock1->lf_end = lock2->lf_start - 1;
1180 lock2->lf_next = lock1->lf_next;
1181 lock1->lf_next = lock2;
2d21ac55 1182 return (0);
1c79356b
A
1183 }
1184 /*
1185 * Make a new lock consisting of the last part of
1186 * the encompassing lock
1187 */
1188 MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
2d21ac55
A
1189 if (splitlock == NULL)
1190 return (ENOLCK);
91447636 1191 bcopy(lock1, splitlock, sizeof *splitlock);
1c79356b
A
1192 splitlock->lf_start = lock2->lf_end + 1;
1193 TAILQ_INIT(&splitlock->lf_blkhd);
1194 lock1->lf_end = lock2->lf_start - 1;
1195 /*
1196 * OK, now link it in
1197 */
1198 splitlock->lf_next = lock1->lf_next;
1199 lock2->lf_next = splitlock;
1200 lock1->lf_next = lock2;
2d21ac55
A
1201
1202 return (0);
1c79356b
A
1203}
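/*
 * Editor's note: illustrative example of the three-piece case above (a
 * sketch, not part of the original file).  Splitting lock1 = [0, 299]
 * around lock2 = [100, 199] leaves the list as
 *
 *	lock1 [0, 99] -> lock2 [100, 199] -> splitlock [200, 299]
 *
 * which is why an allocation failure for splitlock must leave both
 * input locks unmodified.
 */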
1204
2d21ac55 1205
1c79356b 1206/*
2d21ac55
A
1207 * lf_wakelock
1208 *
1209 * Wakeup a blocklist in the case of a downgrade or unlock, since others
1210 * waiting on the lock may now be able to acquire it.
1211 *
1212 * Parameters: listhead Lock list head on which waiters may
1213 * have pending locks
1214 *
1215 * Returns: <void>
1216 *
1217 * Notes: This function iterates a list of locks and wakes all waiters,
1218 * rather than only waiters for the contended regions. Because
1219 * of this, for heavily contended files, this can result in a
1220 * "thundering herd" situation. Refactoring the code could make
1221 * this operation more efficient, if heavy contention ever results
1222 * in a real-world performance problem.
1c79356b 1223 */
91447636 1224static void
c910b4d9 1225lf_wakelock(struct lockf *listhead, boolean_t force_all)
1c79356b 1226{
91447636 1227 struct lockf *wakelock;
c910b4d9
A
1228 boolean_t wake_all = TRUE;
1229
b0d623f7 1230 if (force_all == FALSE && (listhead->lf_flags & F_WAKE1_SAFE))
c910b4d9 1231 wake_all = FALSE;
1c79356b 1232
91447636
A
1233 while (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
1234 wakelock = TAILQ_FIRST(&listhead->lf_blkhd);
1c79356b 1235 TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
c910b4d9 1236
1c79356b 1237 wakelock->lf_next = NOLOCKF;
2d21ac55 1238#ifdef LOCKF_DEBUGGING
1c79356b
A
1239 if (lockf_debug & 2)
1240 lf_print("lf_wakelock: awakening", wakelock);
2d21ac55 1241#endif /* LOCKF_DEBUGGING */
c910b4d9 1242 if (wake_all == FALSE) {
b0d623f7
A
1243 /*
1244 * If there are items on the list head block list,
1245 * move them to the wakelock list instead, and then
1246 * correct their lf_next pointers.
1247 */
1248 if (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
1249 TAILQ_CONCAT(&wakelock->lf_blkhd, &listhead->lf_blkhd, lf_block);
c910b4d9 1250
c910b4d9
A
1251 struct lockf *tlock;
1252
1253 TAILQ_FOREACH(tlock, &wakelock->lf_blkhd, lf_block) {
99c3a104
A
1254 if (TAILQ_NEXT(tlock, lf_block) == tlock) {
1255 /* See rdar://10887303 */
1256 panic("cycle in wakelock list");
1257 }
c910b4d9
A
1258 tlock->lf_next = wakelock;
1259 }
1260 }
1261 }
91447636 1262 wakeup(wakelock);
c910b4d9
A
1263
1264 if (wake_all == FALSE)
1265 break;
1c79356b
A
1266 }
1267}
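/*
 * Editor's note (illustrative, not part of the original file): with
 * F_WAKE1_SAFE intact -- i.e. the lock and all of its waiters are
 * flock(2)-style -- releasing a whole-file lock with three waiters wakes
 * only the first; the remaining waiters are re-parented onto that
 * waiter's block list, since whichever waiter wins will block the rest
 * anyway.  If any waiter was a POSIX record lock, lf_setlock() cleared
 * F_WAKE1_SAFE and every waiter is woken (the "thundering herd" case
 * described above).
 */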
1268
2d21ac55
A
1269
1270#ifdef LOCKF_DEBUGGING
1c79356b 1271/*
2d21ac55
A
1272 * lf_print DEBUG
1273 *
1274 * Print out a lock; lock information is prefixed by the string in 'tag'
1275 *
1276 * Parameters: tag A string tag for debugging
1277 * lock The lock whose information should be
1278 * displayed
1279 *
1280 * Returns: <void>
1c79356b 1281 */
91447636 1282void
2d21ac55 1283lf_print(const char *tag, struct lockf *lock)
1c79356b 1284{
91447636 1285 printf("%s: lock %p for ", tag, (void *)lock);
1c79356b 1286 if (lock->lf_flags & F_POSIX)
91447636
A
1287 printf("proc %ld", (long)((struct proc *)lock->lf_id)->p_pid);
1288 else
1289 printf("id %p", (void *)lock->lf_id);
1290 if (lock->lf_vnode != 0)
2d21ac55 1291 printf(" in vno %p, %s, start 0x%016llx, end 0x%016llx",
91447636
A
1292 lock->lf_vnode,
1293 lock->lf_type == F_RDLCK ? "shared" :
1294 lock->lf_type == F_WRLCK ? "exclusive" :
1295 lock->lf_type == F_UNLCK ? "unlock" : "unknown",
1296 (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
1c79356b 1297 else
2d21ac55 1298 printf(" %s, start 0x%016llx, end 0x%016llx",
91447636
A
1299 lock->lf_type == F_RDLCK ? "shared" :
1300 lock->lf_type == F_WRLCK ? "exclusive" :
1301 lock->lf_type == F_UNLCK ? "unlock" : "unknown",
1302 (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
1303 if (!TAILQ_EMPTY(&lock->lf_blkhd))
1304 printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd));
1c79356b
A
1305 else
1306 printf("\n");
1307}
1308
2d21ac55
A
1309
1310/*
1311 * lf_printlist DEBUG
1312 *
1313 * Print out a lock list for the vnode associated with 'lock'; lock information
1314 * is prefixed by the string in 'tag'
1315 *
1316 * Parameters: tag A string tag for debugging
1317 * lock The lock whose vnode's lock list should
1318 * be displayed
1319 *
1320 * Returns: <void>
1321 */
91447636 1322void
2d21ac55 1323lf_printlist(const char *tag, struct lockf *lock)
1c79356b 1324{
91447636
A
1325 struct lockf *lf, *blk;
1326
1327 if (lock->lf_vnode == 0)
1328 return;
1329
2d21ac55 1330 printf("%s: Lock list for vno %p:\n",
91447636
A
1331 tag, lock->lf_vnode);
1332 for (lf = lock->lf_vnode->v_lockf; lf; lf = lf->lf_next) {
1333 printf("\tlock %p for ",(void *)lf);
1c79356b 1334 if (lf->lf_flags & F_POSIX)
91447636
A
1335 printf("proc %ld",
1336 (long)((struct proc *)lf->lf_id)->p_pid);
1c79356b 1337 else
91447636 1338 printf("id %p", (void *)lf->lf_id);
2d21ac55 1339 printf(", %s, start 0x%016llx, end 0x%016llx",
91447636
A
1340 lf->lf_type == F_RDLCK ? "shared" :
1341 lf->lf_type == F_WRLCK ? "exclusive" :
1342 lf->lf_type == F_UNLCK ? "unlock" :
1343 "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
1344 TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
1345 printf("\n\t\tlock request %p for ", (void *)blk);
1c79356b 1346 if (blk->lf_flags & F_POSIX)
91447636
A
1347 printf("proc %ld",
1348 (long)((struct proc *)blk->lf_id)->p_pid);
1c79356b 1349 else
91447636 1350 printf("id %p", (void *)blk->lf_id);
2d21ac55 1351 printf(", %s, start 0x%016llx, end 0x%016llx",
91447636
A
1352 blk->lf_type == F_RDLCK ? "shared" :
1353 blk->lf_type == F_WRLCK ? "exclusive" :
1354 blk->lf_type == F_UNLCK ? "unlock" :
1355 "unknown", (intmax_t)blk->lf_start,
1356 (intmax_t)blk->lf_end);
1357 if (!TAILQ_EMPTY(&blk->lf_blkhd))
1c79356b
A
1358 panic("lf_printlist: bad list");
1359 }
1360 printf("\n");
1361 }
1362}
2d21ac55 1363#endif /* LOCKF_DEBUGGING */
39236c6e
A
1364
1365#if IMPORTANCE_INHERITANCE
1366
1367/*
1368 * lf_hold_assertion
1369 *
1370 * Call task importance hold assertion on the owner of the lock.
1371 *
1372 * Parameters: block_task Owner of the lock blocking
1373 * current thread.
1374 *
1375 * block lock on which the current thread
 1376 * is blocked.
1377 *
1378 * Returns: <void>
1379 *
 1380 * Notes: A task reference on block_task does not need to be held, since
 1381 * the current thread holds the vnode lock and block_task holds a
 1382 * file lock; removing the file lock at exit thus requires
 1383 * block_task to grab the vnode lock first.
1384 */
1385static void
1386lf_hold_assertion(task_t block_task, struct lockf *block)
1387{
fe8ab488
A
1388 if (task_importance_hold_file_lock_assertion(block_task, 1)) {
1389 block->lf_boosted = LF_BOOSTED;
1390 }
39236c6e
A
1391}
1392
1393
1394/*
1395 * lf_jump_to_queue_head
1396 *
1397 * Jump the lock from the tail of the block queue to the head of
1398 * the queue.
1399 *
1400 * Parameters: block lockf struct containing the
1401 * block queue.
1402 * lock lockf struct to be jumped to the
1403 * front.
1404 *
1405 * Returns: <void>
1406 */
1407static void
1408lf_jump_to_queue_head(struct lockf *block, struct lockf *lock)
1409{
1410 /* Move the lock to the head of the block queue. */
1411 TAILQ_REMOVE(&block->lf_blkhd, lock, lf_block);
1412 TAILQ_INSERT_HEAD(&block->lf_blkhd, lock, lf_block);
1413}
1414
1415
1416/*
1417 * lf_drop_assertion
1418 *
1419 * Drops the task hold assertion.
1420 *
1421 * Parameters: block lockf struct holding the assertion.
1422 *
1423 * Returns: <void>
1424 */
1425static void
1426lf_drop_assertion(struct lockf *block)
1427{
1428 task_t current_task;
1429
1430 current_task = proc_task((proc_t) block->lf_id);
fe8ab488 1431 task_importance_drop_file_lock_assertion(current_task, 1);
39236c6e
A
1432 block->lf_boosted = LF_NOT_BOOSTED;
1433}
1434
1435#endif /* IMPORTANCE_INHERITANCE */