/*
 * Copyright (c) 2015 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/unistd.h>
#include <sys/user.h>
#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/vnode_if.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/sdt.h>
#include <kern/policy_internal.h>

#include <sys/file_internal.h>

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
static int maxlockdepth = MAXDEPTH;

#if (DEVELOPMENT || DEBUG)
#define LOCKF_DEBUGGING 1
#endif

#ifdef LOCKF_DEBUGGING
#include <sys/sysctl.h>
void lf_print(const char *tag, struct lockf *lock);
void lf_printlist(const char *tag, struct lockf *lock);

#define LF_DBG_LOCKOP   (1 << 0)        /* setlk, getlk, clearlk */
#define LF_DBG_LIST     (1 << 1)        /* split, coalesce */
#define LF_DBG_IMPINH   (1 << 2)        /* importance inheritance */
#define LF_DBG_TRACE    (1 << 3)        /* errors, exit */

static int lockf_debug = 0;     /* was 2, could be 3 ;-) */
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW | CTLFLAG_LOCKED, &lockf_debug, 0, "");

/*
 * If there is no mask bit selector, or there is one and the selector is
 * set in lockf_debug, then output the debugging diagnostic.
 */
#define LOCKF_DEBUG(mask, ...)                                  \
        do {                                                    \
                if (!(mask) || ((mask) & lockf_debug)) {        \
                        printf(__VA_ARGS__);                    \
                }                                               \
        } while (0)
#else   /* !LOCKF_DEBUGGING */
#define LOCKF_DEBUG(mask, ...) /* mask */
#endif  /* !LOCKF_DEBUGGING */

MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

#define NOLOCKF (struct lockf *)0
#define SELF    0x1
#define OTHERS  0x2
#define OFF_MAX 0x7fffffffffffffffULL   /* max off_t */

/*
 * Overlapping lock states
 */
typedef enum {
        OVERLAP_NONE = 0,
        OVERLAP_EQUALS_LOCK,
        OVERLAP_CONTAINS_LOCK,
        OVERLAP_CONTAINED_BY_LOCK,
        OVERLAP_STARTS_BEFORE_LOCK,
        OVERLAP_ENDS_AFTER_LOCK
} overlap_t;

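/*
 * Illustrative sketch (added commentary, not part of the original source):
 * the five non-trivial ways an existing lock ("overlap") can intersect a
 * new request ("lock"), as classified by lf_findoverlap() below:
 *
 *      request (lock):                |------|
 *      OVERLAP_EQUALS_LOCK:           |------|
 *      OVERLAP_CONTAINS_LOCK:       |----------|
 *      OVERLAP_CONTAINED_BY_LOCK:       |--|
 *      OVERLAP_STARTS_BEFORE_LOCK:  |-----|
 *      OVERLAP_ENDS_AFTER_LOCK:          |------|
 */
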
static int       lf_clearlock(struct lockf *);
static overlap_t lf_findoverlap(struct lockf *,
            struct lockf *, int, struct lockf ***, struct lockf **);
static struct lockf *lf_getblock(struct lockf *, pid_t);
static int       lf_getlock(struct lockf *, struct flock *, pid_t);
static int       lf_setlock(struct lockf *, struct timespec *);
static int       lf_split(struct lockf *, struct lockf *);
static void      lf_wakelock(struct lockf *, boolean_t);
#if IMPORTANCE_INHERITANCE
static void      lf_hold_assertion(task_t, struct lockf *);
static void      lf_jump_to_queue_head(struct lockf *, struct lockf *);
static void      lf_drop_assertion(struct lockf *);
static void      lf_boost_blocking_proc(struct lockf *, struct lockf *);
static void      lf_adjust_assertion(struct lockf *block);
#endif /* IMPORTANCE_INHERITANCE */

/*
 * lf_advlock
 *
 * Description: Advisory record locking support
 *
 * Parameters:  ap                      Argument pointer to a vnop_advlock_args
 *                                      argument descriptor structure for the
 *                                      lock operation to be attempted.
 *
 * Returns:     0                       Success
 *              EOVERFLOW
 *              EINVAL
 *              ENOLCK                  Number of locked regions exceeds limit
 *      lf_setlock:EAGAIN
 *      lf_setlock:EDEADLK
 *      lf_setlock:EINTR
 *      lf_setlock:ENOLCK
 *      lf_setlock:ETIMEDOUT
 *      lf_clearlock:ENOLCK
 *      vnode_size:???
 *
 * Notes:       We return ENOLCK when we run out of memory to support locks; as
 *              such, there is no specific expectation limit other than the
 *              amount of available resources.
 */
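/*
 * Illustrative userland sketch (added commentary, not part of the original
 * source): a typical request that reaches this function through fcntl(2)
 * and VNOP_ADVLOCK; "fd" is a hypothetical open descriptor.  On this path
 * F_SETLKW is expected to arrive as a_op == F_SETLK with F_WAIT set.
 *
 *      struct flock fl = {
 *              .l_start  = 0,
 *              .l_len    = 0,          // 0 means "to end of file"
 *              .l_whence = SEEK_SET,
 *              .l_type   = F_WRLCK,
 *      };
 *      if (fcntl(fd, F_SETLKW, &fl) == -1)
 *              err(1, "fcntl");
 */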
int
lf_advlock(struct vnop_advlock_args *ap)
{
        struct vnode *vp = ap->a_vp;
        struct flock *fl = ap->a_fl;
        vfs_context_t context = ap->a_context;
        struct lockf *lock;
        off_t start, end, oadd;
        u_quad_t size;
        int error;
        struct lockf **head = &vp->v_lockf;

        /* XXX HFS may need a !vnode_isreg(vp) EISDIR error here */

        /*
         * Avoid the common case of unlocking when inode has no locks.
         */
        if (*head == (struct lockf *)0) {
                if (ap->a_op != F_SETLK) {
                        fl->l_type = F_UNLCK;
                        LOCKF_DEBUG(LF_DBG_TRACE,
                            "lf_advlock: '%s' unlock without lock\n",
                            vfs_context_proc(context)->p_comm);
                        return (0);
                }
        }

        /*
         * Convert the flock structure into a start and end.
         */
        switch (fl->l_whence) {

        case SEEK_SET:
        case SEEK_CUR:
                /*
                 * Caller is responsible for adding any necessary offset
                 * when SEEK_CUR is used.
                 */
                start = fl->l_start;
                break;

        case SEEK_END:

                /*
                 * It's OK to cast the u_quad_t to an off_t here, since they
                 * are the same storage size, and the value of the returned
                 * contents will never overflow into the sign bit.  We need to
                 * do this because we will use size to force range checks.
                 */
                if ((error = vnode_size(vp, (off_t *)&size, context))) {
                        LOCKF_DEBUG(LF_DBG_TRACE,
                            "lf_advlock: vnode_getattr failed: %d\n", error);
                        return (error);
                }

                if (size > OFF_MAX ||
                    (fl->l_start > 0 &&
                     size > (u_quad_t)(OFF_MAX - fl->l_start)))
                        return (EOVERFLOW);
                start = size + fl->l_start;
                break;

        default:
                LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: unknown whence %d\n",
                    fl->l_whence);
                return (EINVAL);
        }
        if (start < 0) {
                LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: start < 0 (%qd)\n",
                    start);
                return (EINVAL);
        }
        if (fl->l_len < 0) {
                if (start == 0) {
                        LOCKF_DEBUG(LF_DBG_TRACE,
                            "lf_advlock: len < 0 & start == 0\n");
                        return (EINVAL);
                }
                end = start - 1;
                start += fl->l_len;
                if (start < 0) {
                        LOCKF_DEBUG(LF_DBG_TRACE,
                            "lf_advlock: start < 0 (%qd)\n", start);
                        return (EINVAL);
                }
        } else if (fl->l_len == 0)
                end = -1;
        else {
                oadd = fl->l_len - 1;
                if (oadd > (off_t)(OFF_MAX - start)) {
                        LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: overflow\n");
                        return (EOVERFLOW);
                }
                end = start + oadd;
        }
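        /*
         * Illustrative examples (added commentary, not part of the original
         * source) of the conversion above:
         *
         *      l_whence=SEEK_SET, l_start=100, l_len=50  -> [100, 149]
         *      l_whence=SEEK_SET, l_start=100, l_len=-50 -> [50, 99]
         *      l_whence=SEEK_SET, l_start=0,   l_len=0   -> [0, -1]
         *                                      (end == -1 means "to EOF")
         */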
        /*
         * Create the lockf structure
         */
        MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
        if (lock == NULL)
                return (ENOLCK);
        lock->lf_start = start;
        lock->lf_end = end;
        lock->lf_id = ap->a_id;
        lock->lf_vnode = vp;
        lock->lf_type = fl->l_type;
        lock->lf_head = head;
        lock->lf_next = (struct lockf *)0;
        TAILQ_INIT(&lock->lf_blkhd);
        lock->lf_flags = ap->a_flags;
#if IMPORTANCE_INHERITANCE
        lock->lf_boosted = LF_NOT_BOOSTED;
#endif
        if (ap->a_flags & F_POSIX)
                lock->lf_owner = (struct proc *)lock->lf_id;
        else
                lock->lf_owner = NULL;

        if (ap->a_flags & F_FLOCK)
                lock->lf_flags |= F_WAKE1_SAFE;

        lck_mtx_lock(&vp->v_lock);      /* protect the lockf list */
        /*
         * Do the requested operation.
         */
        switch (ap->a_op) {
        case F_SETLK:
                /*
                 * For F_OFD_* locks, lf_id is the fileglob.
                 * Record an "lf_owner" iff this is a confined fd
                 * i.e. it cannot escape this process and will be
                 * F_UNLCKed before the owner exits.  (This is
                 * the implicit guarantee needed to ensure lf_owner
                 * remains a valid reference here.)
                 */
                if (ap->a_flags & F_OFD_LOCK) {
                        struct fileglob *fg = (void *)lock->lf_id;
                        if (fg->fg_lflags & FG_CONFINED)
                                lock->lf_owner = current_proc();
                }
                error = lf_setlock(lock, ap->a_timeout);
                break;

        case F_UNLCK:
                error = lf_clearlock(lock);
                FREE(lock, M_LOCKF);
                break;

        case F_GETLK:
                error = lf_getlock(lock, fl, -1);
                FREE(lock, M_LOCKF);
                break;

#if CONFIG_EMBEDDED
        case F_GETLKPID:
                error = lf_getlock(lock, fl, fl->l_pid);
                FREE(lock, M_LOCKF);
                break;
#endif

        default:
                FREE(lock, M_LOCKF);
                error = EINVAL;
                break;
        }
        lck_mtx_unlock(&vp->v_lock);    /* done manipulating the list */

        LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: normal exit: %d\n", error);
        return (error);
}

/*
 * Empty the queue of msleeping requests for a lock on the given vnode.
 * Called with the vnode already locked.  Used for forced unmount, where
 * a flock(2) invoker sleeping on a blocked lock holds an iocount reference
 * that prevents the vnode from ever being drained.  Force unmounting wins.
 */
void
lf_abort_advlocks(vnode_t vp)
{
        struct lockf *lock;

        if ((lock = vp->v_lockf) == NULL)
                return;

        lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);

        if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
                struct lockf *tlock;

                TAILQ_FOREACH(tlock, &lock->lf_blkhd, lf_block) {
                        /*
                         * Setting this flag should cause all
                         * currently blocked F_SETLK requests to
                         * return to userland with an errno.
                         */
                        tlock->lf_flags |= F_ABORT;
                }
                lf_wakelock(lock, TRUE);
        }
}

/*
 * Take any lock attempts which are currently blocked by a given lock ("from")
 * and mark them as blocked by a different lock ("to").  Used in the case
 * where a byte range currently occupied by "from" is to be occupied by "to."
 */
static void
lf_move_blocked(struct lockf *to, struct lockf *from)
{
        struct lockf *tlock;

        TAILQ_FOREACH(tlock, &from->lf_blkhd, lf_block) {
                tlock->lf_next = to;
        }

        TAILQ_CONCAT(&to->lf_blkhd, &from->lf_blkhd, lf_block);
}

/*
 * lf_coalesce_adjacent
 *
 * Description: Helper function: when setting a lock, coalesce adjacent
 *              locks.  Needed because adjacent locks are not overlapping,
 *              but POSIX requires that they be coalesced.
 *
 * Parameters:  lock                    The new lock which may be adjacent
 *                                      to already locked regions, and which
 *                                      should therefore be coalesced with them
 *
 * Returns:     <void>
 */
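/*
 * Illustrative example (added commentary, not part of the original source):
 * if a process holds a shared lock on [0, 99] and then sets a shared lock
 * on [100, 199], the two ranges do not overlap but are numerically
 * adjacent, so they must become a single lock on [0, 199].
 */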
static void
lf_coalesce_adjacent(struct lockf *lock)
{
        struct lockf **lf = lock->lf_head;

        while (*lf != NOLOCKF) {
                /* reject locks that obviously could not be coalesced */
                if ((*lf == lock) ||
                    ((*lf)->lf_id != lock->lf_id) ||
                    ((*lf)->lf_type != lock->lf_type)) {
                        lf = &(*lf)->lf_next;
                        continue;
                }

                /*
                 * NOTE: Assumes that if two locks are adjacent on the number line
                 * and belong to the same owner, then they are adjacent on the list.
                 */
                if ((*lf)->lf_end != -1 &&
                    ((*lf)->lf_end + 1) == lock->lf_start) {
                        struct lockf *adjacent = *lf;

                        LOCKF_DEBUG(LF_DBG_LIST, "lf_coalesce_adjacent: coalesce adjacent previous\n");
                        lock->lf_start = (*lf)->lf_start;
                        *lf = lock;
                        lf = &(*lf)->lf_next;

                        lf_move_blocked(lock, adjacent);

                        FREE(adjacent, M_LOCKF);
                        continue;
                }
                /* If the lock starts adjacent to us, we can coalesce it */
                if (lock->lf_end != -1 &&
                    (lock->lf_end + 1) == (*lf)->lf_start) {
                        struct lockf *adjacent = *lf;

                        LOCKF_DEBUG(LF_DBG_LIST, "lf_coalesce_adjacent: coalesce adjacent following\n");
                        lock->lf_end = (*lf)->lf_end;
                        lock->lf_next = (*lf)->lf_next;
                        lf = &lock->lf_next;

                        lf_move_blocked(lock, adjacent);

                        FREE(adjacent, M_LOCKF);
                        continue;
                }

                /* no matching conditions; go on to next lock */
                lf = &(*lf)->lf_next;
        }
}

/*
 * lf_setlock
 *
 * Description: Set a byte-range lock.
 *
 * Parameters:  lock                    The lock structure describing the lock
 *                                      to be set; allocated by the caller, it
 *                                      will be linked into the lock list if
 *                                      the set is successful, and freed if the
 *                                      set is unsuccessful.
 *
 *              timeout                 Timeout specified in the case of
 *                                      SETLKWTIMEOUT.
 *
 * Returns:     0                       Success
 *              EAGAIN
 *              EDEADLK
 *      lf_split:ENOLCK
 *      lf_clearlock:ENOLCK
 *      msleep:EINTR
 *      msleep:ETIMEDOUT
 *
 * Notes:       We add the lock to the provisional lock list.  We do not
 *              coalesce at this time; this has implications for other lock
 *              requestors in the blocker search mechanism.
 */
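/*
 * Illustrative deadlock scenario (added commentary, not part of the
 * original source) that the detection loop below reports as EDEADLK:
 *
 *      process A: holds a POSIX lock on [0, 9],   requests [10, 19] -> sleeps
 *      process B: holds a POSIX lock on [10, 19], requests [0, 9]
 *
 * B's request blocks on A's lock while A is already asleep waiting on B's
 * lock; neither can ever be granted, so B's request fails with EDEADLK.
 */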
static int
lf_setlock(struct lockf *lock, struct timespec *timeout)
{
        struct lockf *block;
        struct lockf **head = lock->lf_head;
        struct lockf **prev, *overlap, *ltmp;
        static char lockstr[] = "lockf";
        int priority, needtolink, error;
        struct vnode *vp = lock->lf_vnode;
        overlap_t ovcase;

#ifdef LOCKF_DEBUGGING
        if (lockf_debug & LF_DBG_LOCKOP) {
                lf_print("lf_setlock", lock);
                lf_printlist("lf_setlock(in)", lock);
        }
#endif /* LOCKF_DEBUGGING */

        /*
         * Set the priority
         */
        priority = PLOCK;
        if (lock->lf_type == F_WRLCK)
                priority += 4;
        priority |= PCATCH;
        /*
         * Scan lock list for this file looking for locks that would block us.
         */
        while ((block = lf_getblock(lock, -1))) {
                /*
                 * Free the structure and return if nonblocking.
                 */
                if ((lock->lf_flags & F_WAIT) == 0) {
                        DTRACE_FSINFO(advlock__nowait, vnode_t, vp);
                        FREE(lock, M_LOCKF);
                        return (EAGAIN);
                }

                /*
                 * We are blocked. Since flock style locks cover
                 * the whole file, there is no chance for deadlock.
                 *
                 * OFD byte-range locks currently do NOT support
                 * deadlock detection.
                 *
                 * For POSIX byte-range locks we must check for deadlock.
                 *
                 * Deadlock detection is done by looking through the
                 * wait channels to see if there are any cycles that
                 * involve us.  MAXDEPTH is set just to make sure we
                 * do not go off into neverland.
                 */
                if ((lock->lf_flags & F_POSIX) &&
                    (block->lf_flags & F_POSIX)) {
                        struct proc *wproc, *bproc;
                        struct uthread *ut;
                        struct lockf *waitblock;
                        int i = 0;

                        /* The block is waiting on something */
                        wproc = block->lf_owner;
                        proc_lock(wproc);
                        TAILQ_FOREACH(ut, &wproc->p_uthlist, uu_list) {
                                /*
                                 * While the thread is asleep (uu_wchan != 0)
                                 * in this code (uu_wmesg == lockstr)
                                 * and we have not exceeded the maximum cycle
                                 * depth (i < maxlockdepth), then check for a
                                 * cycle to see if the lock is blocked behind
                                 * someone blocked behind us.
                                 */
                                while (((waitblock = (struct lockf *)ut->uu_wchan) != NULL) &&
                                    ut->uu_wmesg == lockstr &&
                                    (i++ < maxlockdepth)) {
                                        waitblock = (struct lockf *)ut->uu_wchan;
                                        /*
                                         * Get the lock blocking the lock
                                         * which would block us, and make
                                         * certain it hasn't come unblocked
                                         * (been granted, e.g. between the time
                                         * we called lf_getblock, and the time
                                         * we successfully acquired the
                                         * proc_lock).
                                         */
                                        waitblock = waitblock->lf_next;
                                        if (waitblock == NULL)
                                                break;

                                        /*
                                         * Make sure it's an advisory range
                                         * lock and not any other kind of lock;
                                         * if we mix lock types, it's our own
                                         * fault.
                                         */
                                        if ((waitblock->lf_flags & F_POSIX) == 0)
                                                break;

                                        /*
                                         * If the owner of the lock that's
                                         * blocking a lock that's blocking us
                                         * getting the requested lock, then we
                                         * would deadlock, so error out.
                                         */
                                        bproc = waitblock->lf_owner;
                                        if (bproc == lock->lf_owner) {
                                                proc_unlock(wproc);
                                                FREE(lock, M_LOCKF);
                                                return (EDEADLK);
                                        }
                                }
                        }
                        proc_unlock(wproc);
                }

                /*
                 * For flock type locks, we must first remove
                 * any shared locks that we hold before we sleep
                 * waiting for an exclusive lock.
                 */
                if ((lock->lf_flags & F_FLOCK) &&
                    lock->lf_type == F_WRLCK) {
                        lock->lf_type = F_UNLCK;
                        if ((error = lf_clearlock(lock)) != 0) {
                                FREE(lock, M_LOCKF);
                                return (error);
                        }
                        lock->lf_type = F_WRLCK;
                }
                /*
                 * Add our lock to the blocked list and sleep until we're free.
                 * Remember who blocked us (for deadlock detection).
                 */
                lock->lf_next = block;
                TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);

                if (!(lock->lf_flags & F_FLOCK))
                        block->lf_flags &= ~F_WAKE1_SAFE;

#if IMPORTANCE_INHERITANCE
                /*
                 * Importance donation is done only for cases where the
                 * owning task can be unambiguously determined.
                 *
                 * POSIX type locks are not inherited by child processes;
                 * we maintain a 1:1 mapping between a lock and its owning
                 * process.
                 *
                 * Flock type locks are inherited across fork() and there is
                 * no 1:1 mapping in the general case.  However, the fileglobs
                 * used by OFD locks *may* be confined to the process that
                 * created them, and thus have an "owner", in which case
                 * we also attempt importance donation.
                 */
                if ((lock->lf_flags & block->lf_flags & F_POSIX) != 0)
                        lf_boost_blocking_proc(lock, block);
                else if ((lock->lf_flags & block->lf_flags & F_OFD_LOCK) &&
                    lock->lf_owner != block->lf_owner &&
                    NULL != lock->lf_owner && NULL != block->lf_owner)
                        lf_boost_blocking_proc(lock, block);
#endif /* IMPORTANCE_INHERITANCE */

#ifdef LOCKF_DEBUGGING
                if (lockf_debug & LF_DBG_LOCKOP) {
                        lf_print("lf_setlock: blocking on", block);
                        lf_printlist("lf_setlock(block)", block);
                }
#endif /* LOCKF_DEBUGGING */
                DTRACE_FSINFO(advlock__wait, vnode_t, vp);

                error = msleep(lock, &vp->v_lock, priority, lockstr, timeout);

                if (error == 0 && (lock->lf_flags & F_ABORT) != 0)
                        error = EBADF;

                if (lock->lf_next) {
                        /*
                         * lf_wakelock() always sets wakelock->lf_next to
                         * NULL before a wakeup; so we've been woken early
                         * - perhaps by a debugger, signal or other event.
                         *
                         * Remove 'lock' from the block list (avoids double-add
                         * in the spurious case, which would create a cycle)
                         */
                        TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
#if IMPORTANCE_INHERITANCE
                        /*
                         * Adjust the boost on lf_next.
                         */
                        lf_adjust_assertion(lock->lf_next);
#endif /* IMPORTANCE_INHERITANCE */
                        lock->lf_next = NULL;

                        if (error == 0) {
                                /*
                                 * If this was a spurious wakeup, retry
                                 */
                                printf("%s: spurious wakeup, retrying lock\n",
                                    __func__);
                                continue;
                        }
                }

                if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
                        if ((block = lf_getblock(lock, -1)) != NULL)
                                lf_move_blocked(block, lock);
                }

                if (error) {
                        if (!TAILQ_EMPTY(&lock->lf_blkhd))
                                lf_wakelock(lock, TRUE);
                        FREE(lock, M_LOCKF);
                        /* Return ETIMEDOUT if timeout occurred. */
                        if (error == EWOULDBLOCK) {
                                error = ETIMEDOUT;
                        }
                        return (error);
                }
        }

        /*
         * No blocks!!  Add the lock.  Note that we will
         * downgrade or upgrade any overlapping locks this
         * process already owns.
         *
         * Skip over locks owned by other processes.
         * Handle any locks that overlap and are owned by ourselves.
         */
        prev = head;
        block = *head;
        needtolink = 1;
        for (;;) {
                ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
                if (ovcase)
                        block = overlap->lf_next;
                /*
                 * Six cases:
                 *      0) no overlap
                 *      1) overlap == lock
                 *      2) overlap contains lock
                 *      3) lock contains overlap
                 *      4) overlap starts before lock
                 *      5) overlap ends after lock
                 */
                switch (ovcase) {
                case OVERLAP_NONE:
                        if (needtolink) {
                                *prev = lock;
                                lock->lf_next = overlap;
                        }
                        break;

                case OVERLAP_EQUALS_LOCK:
                        /*
                         * If downgrading lock, others may be
                         * able to acquire it.
                         */
                        if (lock->lf_type == F_RDLCK &&
                            overlap->lf_type == F_WRLCK)
                                lf_wakelock(overlap, TRUE);
                        overlap->lf_type = lock->lf_type;
                        FREE(lock, M_LOCKF);
                        lock = overlap; /* for lf_coalesce_adjacent() */
                        break;

                case OVERLAP_CONTAINS_LOCK:
                        /*
                         * Check for common starting point and different types.
                         */
                        if (overlap->lf_type == lock->lf_type) {
                                FREE(lock, M_LOCKF);
                                lock = overlap; /* for lf_coalesce_adjacent() */
                                break;
                        }
                        if (overlap->lf_start == lock->lf_start) {
                                *prev = lock;
                                lock->lf_next = overlap;
                                overlap->lf_start = lock->lf_end + 1;
                        } else {
                                /*
                                 * If we can't split the lock, we can't
                                 * grant it.  Claim a system limit for the
                                 * resource shortage.
                                 */
                                if (lf_split(overlap, lock)) {
                                        FREE(lock, M_LOCKF);
                                        return (ENOLCK);
                                }
                        }
                        lf_wakelock(overlap, TRUE);
                        break;

                case OVERLAP_CONTAINED_BY_LOCK:
                        /*
                         * If downgrading lock, others may be able to
                         * acquire it, otherwise take the list.
                         */
                        if (lock->lf_type == F_RDLCK &&
                            overlap->lf_type == F_WRLCK) {
                                lf_wakelock(overlap, TRUE);
                        } else {
                                while (!TAILQ_EMPTY(&overlap->lf_blkhd)) {
                                        ltmp = TAILQ_FIRST(&overlap->lf_blkhd);
                                        TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
                                            lf_block);
                                        TAILQ_INSERT_TAIL(&lock->lf_blkhd,
                                            ltmp, lf_block);
                                        ltmp->lf_next = lock;
                                }
                        }
                        /*
                         * Add the new lock if necessary and delete the overlap.
                         */
                        if (needtolink) {
                                *prev = lock;
                                lock->lf_next = overlap->lf_next;
                                prev = &lock->lf_next;
                                needtolink = 0;
                        } else
                                *prev = overlap->lf_next;
                        FREE(overlap, M_LOCKF);
                        continue;

                case OVERLAP_STARTS_BEFORE_LOCK:
                        /*
                         * Add lock after overlap on the list.
                         */
                        lock->lf_next = overlap->lf_next;
                        overlap->lf_next = lock;
                        overlap->lf_end = lock->lf_start - 1;
                        prev = &lock->lf_next;
                        lf_wakelock(overlap, TRUE);
                        needtolink = 0;
                        continue;

                case OVERLAP_ENDS_AFTER_LOCK:
                        /*
                         * Add the new lock before overlap.
                         */
                        if (needtolink) {
                                *prev = lock;
                                lock->lf_next = overlap;
                        }
                        overlap->lf_start = lock->lf_end + 1;
                        lf_wakelock(overlap, TRUE);
                        break;
                }
                break;
        }
        /* Coalesce adjacent locks with identical attributes */
        lf_coalesce_adjacent(lock);
#ifdef LOCKF_DEBUGGING
        if (lockf_debug & LF_DBG_LOCKOP) {
                lf_print("lf_setlock: got the lock", lock);
                lf_printlist("lf_setlock(out)", lock);
        }
#endif /* LOCKF_DEBUGGING */
        return (0);
}

/*
 * lf_clearlock
 *
 * Description: Remove a byte-range lock on a vnode.  Generally, find the
 *              lock (or an overlap to that lock) and remove it (or shrink
 *              it), then wakeup anyone we can.
 *
 * Parameters:  unlock                  The lock to clear
 *
 * Returns:     0                       Success
 *      lf_split:ENOLCK
 *
 * Notes:       A caller may unlock all the locks owned by the caller by
 *              specifying the entire file range; locks owned by other
 *              callers are not affected by this operation.
 */
static int
lf_clearlock(struct lockf *unlock)
{
        struct lockf **head = unlock->lf_head;
        struct lockf *lf = *head;
        struct lockf *overlap, **prev;
        overlap_t ovcase;

        if (lf == NOLOCKF)
                return (0);
#ifdef LOCKF_DEBUGGING
        if (unlock->lf_type != F_UNLCK)
                panic("lf_clearlock: bad type");
        if (lockf_debug & LF_DBG_LOCKOP)
                lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUGGING */
        prev = head;
        while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap)) != OVERLAP_NONE) {
                /*
                 * Wakeup the list of locks to be retried.
                 */
                lf_wakelock(overlap, FALSE);
#if IMPORTANCE_INHERITANCE
                if (overlap->lf_boosted == LF_BOOSTED) {
                        lf_drop_assertion(overlap);
                }
#endif /* IMPORTANCE_INHERITANCE */

                switch (ovcase) {
                case OVERLAP_NONE:      /* satisfy compiler enum/switch */
                        break;

                case OVERLAP_EQUALS_LOCK:
                        *prev = overlap->lf_next;
                        FREE(overlap, M_LOCKF);
                        break;

                case OVERLAP_CONTAINS_LOCK: /* split it */
                        if (overlap->lf_start == unlock->lf_start) {
                                overlap->lf_start = unlock->lf_end + 1;
                                break;
                        }
                        /*
                         * If we can't split the lock, we can't grant it.
                         * Claim a system limit for the resource shortage.
                         */
                        if (lf_split(overlap, unlock))
                                return (ENOLCK);
                        overlap->lf_next = unlock->lf_next;
                        break;

                case OVERLAP_CONTAINED_BY_LOCK:
                        *prev = overlap->lf_next;
                        lf = overlap->lf_next;
                        FREE(overlap, M_LOCKF);
                        continue;

                case OVERLAP_STARTS_BEFORE_LOCK:
                        overlap->lf_end = unlock->lf_start - 1;
                        prev = &overlap->lf_next;
                        lf = overlap->lf_next;
                        continue;

                case OVERLAP_ENDS_AFTER_LOCK:
                        overlap->lf_start = unlock->lf_end + 1;
                        break;
                }
                break;
        }
#ifdef LOCKF_DEBUGGING
        if (lockf_debug & LF_DBG_LOCKOP)
                lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUGGING */
        return (0);
}

/*
 * lf_getlock
 *
 * Description: Check whether there is a blocking lock, and if so return
 *              its process identifier into the lock being requested.
 *
 * Parameters:  lock                    Pointer to lock to test for blocks
 *              fl                      Pointer to flock structure to receive
 *                                      the blocking lock information, if a
 *                                      blocking lock is found.
 *              matchpid                -1, or pid value to match in lookup.
 *
 * Returns:     0                       Success
 *
 * Implicit Returns:
 *              *fl                     Contents modified to reflect the
 *                                      blocking lock, if one is found; not
 *                                      modified otherwise
 *
 * Notes:       fl->l_pid will be (-1) for file locks and will only be set to
 *              the blocking process ID for advisory record locks.
 */
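/*
 * Illustrative userland sketch (added commentary, not part of the original
 * source): probing for a conflicting lock with F_GETLK; "fd" is a
 * hypothetical open descriptor.  If no blocker exists, l_type comes back
 * as F_UNLCK, per the code below.
 *
 *      struct flock fl = {
 *              .l_start = 0, .l_len = 0, .l_whence = SEEK_SET,
 *              .l_type = F_WRLCK,
 *      };
 *      if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
 *              printf("blocked by pid %d\n", (int)fl.l_pid);
 */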
static int
lf_getlock(struct lockf *lock, struct flock *fl, pid_t matchpid)
{
        struct lockf *block;

#ifdef LOCKF_DEBUGGING
        if (lockf_debug & LF_DBG_LOCKOP)
                lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUGGING */

        if ((block = lf_getblock(lock, matchpid))) {
                fl->l_type = block->lf_type;
                fl->l_whence = SEEK_SET;
                fl->l_start = block->lf_start;
                if (block->lf_end == -1)
                        fl->l_len = 0;
                else
                        fl->l_len = block->lf_end - block->lf_start + 1;
                if (NULL != block->lf_owner) {
                        /*
                         * lf_owner is only non-NULL when the lock
                         * "owner" can be unambiguously determined
                         */
                        fl->l_pid = proc_pid(block->lf_owner);
                } else
                        fl->l_pid = -1;
        } else {
                fl->l_type = F_UNLCK;
        }
        return (0);
}

/*
 * lf_getblock
 *
 * Description: Walk the list of locks for an inode and return the first
 *              blocking lock.  A lock is considered blocking if we are not
 *              the lock owner; otherwise, we are permitted to upgrade or
 *              downgrade it, and it's not considered blocking.
 *
 * Parameters:  lock                    The lock for which we are interested
 *                                      in obtaining the blocking lock, if any
 *              matchpid                -1, or pid value to match in lookup.
 *
 * Returns:     NOLOCKF                 No blocking lock exists
 *              !NOLOCKF                The address of the blocking lock's
 *                                      struct lockf.
 */
static struct lockf *
lf_getblock(struct lockf *lock, pid_t matchpid)
{
        struct lockf **prev, *overlap, *lf = *(lock->lf_head);

        for (prev = lock->lf_head;
            lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != OVERLAP_NONE;
            lf = overlap->lf_next) {
                /*
                 * Found an overlap.
                 *
                 * If we're matching pids, and it's a record lock,
                 * or it's an OFD lock on a process-confined fd,
                 * but the pid doesn't match, then keep on looking ..
                 */
                if (matchpid != -1 &&
                    (overlap->lf_flags & (F_POSIX|F_OFD_LOCK)) != 0 &&
                    proc_pid(overlap->lf_owner) != matchpid)
                        continue;

                /*
                 * does it block us?
                 */
                if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
                        return (overlap);
        }
        return (NOLOCKF);
}

/*
 * lf_findoverlap
 *
 * Description: Walk the list of locks to find an overlapping lock (if any).
 *
 * Parameters:  lf                      First lock on lock list
 *              lock                    The lock we are checking for an overlap
 *              check                   Check type
 *              prev                    pointer to pointer pointer to contain
 *                                      address of pointer to previous lock
 *                                      pointer to overlapping lock, if overlap
 *              overlap                 pointer to pointer to contain address
 *                                      of overlapping lock
 *
 * Returns:     OVERLAP_NONE
 *              OVERLAP_EQUALS_LOCK
 *              OVERLAP_CONTAINS_LOCK
 *              OVERLAP_CONTAINED_BY_LOCK
 *              OVERLAP_STARTS_BEFORE_LOCK
 *              OVERLAP_ENDS_AFTER_LOCK
 *
 * Implicit Returns:
 *              *prev                   The address of the next pointer in the
 *                                      lock previous to the overlapping lock;
 *                                      this is generally used to relink the
 *                                      lock list, avoiding a second iteration.
 *              *overlap                The pointer to the overlapping lock
 *                                      itself; this is used to return data in
 *                                      the check == OTHERS case, and for the
 *                                      caller to modify the overlapping lock,
 *                                      in the check == SELF case
 *
 * Note:        This returns only the FIRST overlapping lock.  There may be
 *              more than one.  lf_getlock will return the first blocking lock,
 *              while lf_setlock will iterate over all overlapping locks to
 *              perform the requested operation.
 *
 *              The check parameter can be SELF, meaning we are looking for
 *              overlapping locks owned by us, or it can be OTHERS, meaning
 *              we are looking for overlapping locks owned by someone else so
 *              we can report a blocking lock on an F_GETLK request.
 *
 *              The value of *overlap and *prev are modified, even if there is
 *              no overlapping lock found; always check the return code.
 */
static overlap_t
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf ***prev, struct lockf **overlap)
{
        off_t start, end;
        int found_self = 0;

        *overlap = lf;
        if (lf == NOLOCKF)
                return (0);
#ifdef LOCKF_DEBUGGING
        if (lockf_debug & LF_DBG_LIST)
                lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUGGING */
        start = lock->lf_start;
        end = lock->lf_end;
        while (lf != NOLOCKF) {
                if (((type & SELF) && lf->lf_id != lock->lf_id) ||
                    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
                        /*
                         * Locks belonging to one process are adjacent on the
                         * list, so if we've found any locks belonging to us,
                         * and we're now seeing something else, then we've
                         * examined all "self" locks.  Note that bailing out
                         * here is quite important; for coalescing, we assume
                         * numerically adjacent locks from the same owner to
                         * be adjacent on the list.
                         */
                        if ((type & SELF) && found_self) {
                                return OVERLAP_NONE;
                        }

                        *prev = &lf->lf_next;
                        *overlap = lf = lf->lf_next;
                        continue;
                }

                if ((type & SELF)) {
                        found_self = 1;
                }

#ifdef LOCKF_DEBUGGING
                if (lockf_debug & LF_DBG_LIST)
                        lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUGGING */
                /*
                 * OK, check for overlap
                 */
                if ((lf->lf_end != -1 && start > lf->lf_end) ||
                    (end != -1 && lf->lf_start > end)) {
                        /* Case 0 */
                        LOCKF_DEBUG(LF_DBG_LIST, "no overlap\n");

                        /*
                         * NOTE: assumes that locks for the same process are
                         * nonintersecting and ordered.
                         */
                        if ((type & SELF) && end != -1 && lf->lf_start > end)
                                return (OVERLAP_NONE);
                        *prev = &lf->lf_next;
                        *overlap = lf = lf->lf_next;
                        continue;
                }
                if ((lf->lf_start == start) && (lf->lf_end == end)) {
                        LOCKF_DEBUG(LF_DBG_LIST, "overlap == lock\n");
                        return (OVERLAP_EQUALS_LOCK);
                }
                if ((lf->lf_start <= start) &&
                    (end != -1) &&
                    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
                        LOCKF_DEBUG(LF_DBG_LIST, "overlap contains lock\n");
                        return (OVERLAP_CONTAINS_LOCK);
                }
                if (start <= lf->lf_start &&
                    (end == -1 ||
                    (lf->lf_end != -1 && end >= lf->lf_end))) {
                        LOCKF_DEBUG(LF_DBG_LIST, "lock contains overlap\n");
                        return (OVERLAP_CONTAINED_BY_LOCK);
                }
                if ((lf->lf_start < start) &&
                    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
                        LOCKF_DEBUG(LF_DBG_LIST, "overlap starts before lock\n");
                        return (OVERLAP_STARTS_BEFORE_LOCK);
                }
                if ((lf->lf_start > start) &&
                    (end != -1) &&
                    ((lf->lf_end > end) || (lf->lf_end == -1))) {
                        LOCKF_DEBUG(LF_DBG_LIST, "overlap ends after lock\n");
                        return (OVERLAP_ENDS_AFTER_LOCK);
                }
                panic("lf_findoverlap: default");
        }
        return (OVERLAP_NONE);
}

/*
 * lf_split
 *
 * Description: Split a lock and a contained region into two or three locks
 *              as necessary.
 *
 * Parameters:  lock1                   Lock to split
 *              lock2                   Overlapping lock region requiring the
 *                                      split (upgrade/downgrade/unlock)
 *
 * Returns:     0                       Success
 *              ENOLCK                  No memory for new lock
 *
 * Implicit Returns:
 *              *lock1                  Modified original lock
 *              *lock2                  Overlapping lock (inserted into list)
 *              (new lock)              Potential new lock inserted into list
 *                                      if split results in 3 locks
 *
 * Notes:       This operation can only fail if the split would result in three
 *              locks, and there is insufficient memory to allocate the third
 *              lock; in that case, neither of the locks will be modified.
 */
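/*
 * Illustrative example (added commentary, not part of the original source):
 * unlocking [40, 59] out of a held lock on [0, 99] requires a three-way
 * split: lock1 shrinks to [0, 39], lock2 is the unlocked middle, and a
 * newly allocated "splitlock" covers [60, 99].  Unlocking a region flush
 * against either end of lock1 only shrinks it, so no allocation is needed
 * and the operation cannot fail.
 */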
static int
lf_split(struct lockf *lock1, struct lockf *lock2)
{
        struct lockf *splitlock;

#ifdef LOCKF_DEBUGGING
        if (lockf_debug & LF_DBG_LIST) {
                lf_print("lf_split", lock1);
                lf_print("splitting from", lock2);
        }
#endif /* LOCKF_DEBUGGING */
        /*
         * Check to see if splitting into only two pieces.
         */
        if (lock1->lf_start == lock2->lf_start) {
                lock1->lf_start = lock2->lf_end + 1;
                lock2->lf_next = lock1;
                return (0);
        }
        if (lock1->lf_end == lock2->lf_end) {
                lock1->lf_end = lock2->lf_start - 1;
                lock2->lf_next = lock1->lf_next;
                lock1->lf_next = lock2;
                return (0);
        }
        /*
         * Make a new lock consisting of the last part of
         * the encompassing lock
         */
        MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
        if (splitlock == NULL)
                return (ENOLCK);
        bcopy(lock1, splitlock, sizeof *splitlock);
        splitlock->lf_start = lock2->lf_end + 1;
        TAILQ_INIT(&splitlock->lf_blkhd);
        lock1->lf_end = lock2->lf_start - 1;
        /*
         * OK, now link it in
         */
        splitlock->lf_next = lock1->lf_next;
        lock2->lf_next = splitlock;
        lock1->lf_next = lock2;

        return (0);
}

/*
 * lf_wakelock
 *
 * Wakeup a blocklist in the case of a downgrade or unlock, since others
 * waiting on the lock may now be able to acquire it.
 *
 * Parameters:  listhead                Lock list head on which waiters may
 *                                      have pending locks
 *
 * Returns:     <void>
 *
 * Notes:       This function iterates a list of locks and wakes all waiters,
 *              rather than only waiters for the contended regions.  Because
 *              of this, for heavily contended files, this can result in a
 *              "thundering herd" situation.  Refactoring the code could make
 *              this operation more efficient, if heavy contention ever results
 *              in a real-world performance problem.
 */
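/*
 * Illustrative note (added commentary, not part of the original source):
 * force_all == FALSE is only honored when the list head carries
 * F_WAKE1_SAFE, which lf_advlock() sets for flock(2)-style whole-file
 * locks and lf_setlock() clears again once a non-flock waiter queues up.
 * Whole-file locks all contend for the same range, so waking a single
 * waiter and handing it the remaining block list is safe; byte-range
 * waiters may cover disjoint ranges, so everyone is woken to re-evaluate.
 */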
static void
lf_wakelock(struct lockf *listhead, boolean_t force_all)
{
        struct lockf *wakelock;
        boolean_t wake_all = TRUE;

        if (force_all == FALSE && (listhead->lf_flags & F_WAKE1_SAFE))
                wake_all = FALSE;

        while (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
                wakelock = TAILQ_FIRST(&listhead->lf_blkhd);
                TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);

                wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUGGING
                if (lockf_debug & LF_DBG_LOCKOP)
                        lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUGGING */
                if (wake_all == FALSE) {
                        /*
                         * If there are items on the list head block list,
                         * move them to the wakelock list instead, and then
                         * correct their lf_next pointers.
                         */
                        if (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
                                TAILQ_CONCAT(&wakelock->lf_blkhd, &listhead->lf_blkhd, lf_block);

                                struct lockf *tlock;

                                TAILQ_FOREACH(tlock, &wakelock->lf_blkhd, lf_block) {
                                        if (TAILQ_NEXT(tlock, lf_block) == tlock) {
                                                /* See rdar://10887303 */
                                                panic("cycle in wakelock list");
                                        }
                                        tlock->lf_next = wakelock;
                                }
                        }
                }
                wakeup(wakelock);

                if (wake_all == FALSE)
                        break;
        }
}

#ifdef LOCKF_DEBUGGING
#define GET_LF_OWNER_PID(lf)    (proc_pid((lf)->lf_owner))

/*
 * lf_print DEBUG
 *
 * Print out a lock; lock information is prefixed by the string in 'tag'
 *
 * Parameters:  tag                     A string tag for debugging
 *              lock                    The lock whose information should be
 *                                      displayed
 *
 * Returns:     <void>
 */
void
lf_print(const char *tag, struct lockf *lock)
{
        printf("%s: lock %p for ", tag, (void *)lock);
        if (lock->lf_flags & F_POSIX)
                printf("proc %p (owner %d)",
                    lock->lf_id, GET_LF_OWNER_PID(lock));
        else if (lock->lf_flags & F_OFD_LOCK)
                printf("fg %p (owner %d)",
                    lock->lf_id, GET_LF_OWNER_PID(lock));
        else
                printf("id %p", (void *)lock->lf_id);
        if (lock->lf_vnode != 0)
                printf(" in vno %p, %s, start 0x%016llx, end 0x%016llx",
                    lock->lf_vnode,
                    lock->lf_type == F_RDLCK ? "shared" :
                    lock->lf_type == F_WRLCK ? "exclusive" :
                    lock->lf_type == F_UNLCK ? "unlock" : "unknown",
                    (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
        else
                printf(" %s, start 0x%016llx, end 0x%016llx",
                    lock->lf_type == F_RDLCK ? "shared" :
                    lock->lf_type == F_WRLCK ? "exclusive" :
                    lock->lf_type == F_UNLCK ? "unlock" : "unknown",
                    (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
        if (!TAILQ_EMPTY(&lock->lf_blkhd))
                printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd));
        else
                printf("\n");
}


/*
 * lf_printlist DEBUG
 *
 * Print out a lock list for the vnode associated with 'lock'; lock information
 * is prefixed by the string in 'tag'
 *
 * Parameters:  tag                     A string tag for debugging
 *              lock                    The lock whose vnode's lock list should
 *                                      be displayed
 *
 * Returns:     <void>
 */
void
lf_printlist(const char *tag, struct lockf *lock)
{
        struct lockf *lf, *blk;

        if (lock->lf_vnode == 0)
                return;

        printf("%s: Lock list for vno %p:\n",
            tag, lock->lf_vnode);
        for (lf = lock->lf_vnode->v_lockf; lf; lf = lf->lf_next) {
                printf("\tlock %p for ", (void *)lf);
                if (lf->lf_flags & F_POSIX)
                        printf("proc %p (owner %d)",
                            lf->lf_id, GET_LF_OWNER_PID(lf));
                else if (lf->lf_flags & F_OFD_LOCK)
                        printf("fg %p (owner %d)",
                            lf->lf_id, GET_LF_OWNER_PID(lf));
                else
                        printf("id %p", (void *)lf->lf_id);
                printf(", %s, start 0x%016llx, end 0x%016llx",
                    lf->lf_type == F_RDLCK ? "shared" :
                    lf->lf_type == F_WRLCK ? "exclusive" :
                    lf->lf_type == F_UNLCK ? "unlock" :
                    "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
                TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
                        printf("\n\t\tlock request %p for ", (void *)blk);
                        if (blk->lf_flags & F_POSIX)
                                printf("proc %p (owner %d)",
                                    blk->lf_id, GET_LF_OWNER_PID(blk));
                        else if (blk->lf_flags & F_OFD_LOCK)
                                printf("fg %p (owner %d)",
                                    blk->lf_id, GET_LF_OWNER_PID(blk));
                        else
                                printf("id %p", (void *)blk->lf_id);
                        printf(", %s, start 0x%016llx, end 0x%016llx",
                            blk->lf_type == F_RDLCK ? "shared" :
                            blk->lf_type == F_WRLCK ? "exclusive" :
                            blk->lf_type == F_UNLCK ? "unlock" :
                            "unknown", (intmax_t)blk->lf_start,
                            (intmax_t)blk->lf_end);
                        if (!TAILQ_EMPTY(&blk->lf_blkhd))
                                panic("lf_printlist: bad list");
                }
                printf("\n");
        }
}
#endif /* LOCKF_DEBUGGING */

#if IMPORTANCE_INHERITANCE

/*
 * lf_hold_assertion
 *
 * Call task importance hold assertion on the owner of the lock.
 *
 * Parameters:  block_task              Owner of the lock blocking
 *                                      current thread.
 *
 *              block                   Lock on which the current thread
 *                                      is blocking.
 *
 * Returns:     <void>
 *
 * Notes:       A task reference on block_task need not be held, since the
 *              current thread holds the vnode lock and block_task holds a
 *              file lock; removing that file lock at exit requires
 *              block_task to grab the vnode lock first.
 */
static void
lf_hold_assertion(task_t block_task, struct lockf *block)
{
        if (task_importance_hold_file_lock_assertion(block_task, 1) == 0) {
                block->lf_boosted = LF_BOOSTED;
                LOCKF_DEBUG(LF_DBG_IMPINH,
                    "lf: importance hold file lock assert on pid %d lock %p\n",
                    proc_pid(block->lf_owner), block);
        }
}


/*
 * lf_jump_to_queue_head
 *
 * Jump the lock from the tail of the block queue to the head of
 * the queue.
 *
 * Parameters:  block                   lockf struct containing the
 *                                      block queue.
 *              lock                    lockf struct to be jumped to the
 *                                      front.
 *
 * Returns:     <void>
 */
static void
lf_jump_to_queue_head(struct lockf *block, struct lockf *lock)
{
        /* Move the lock to the head of the block queue. */
        TAILQ_REMOVE(&block->lf_blkhd, lock, lf_block);
        TAILQ_INSERT_HEAD(&block->lf_blkhd, lock, lf_block);
}


/*
 * lf_drop_assertion
 *
 * Drops the task hold assertion.
 *
 * Parameters:  block                   lockf struct holding the assertion.
 *
 * Returns:     <void>
 */
static void
lf_drop_assertion(struct lockf *block)
{
        LOCKF_DEBUG(LF_DBG_IMPINH, "lf: %d: dropping assertion for lock %p\n",
            proc_pid(block->lf_owner), block);

        task_t current_task = proc_task(block->lf_owner);
        task_importance_drop_file_lock_assertion(current_task, 1);
        block->lf_boosted = LF_NOT_BOOSTED;
}

/*
 * lf_adjust_assertion
 *
 * Adjusts importance assertion of file lock. Goes through
 * all the blocking locks and checks if the file lock needs
 * to be boosted anymore.
 *
 * Parameters:  block                   lockf structure which needs to be adjusted.
 *
 * Returns:     <void>
 */
static void
lf_adjust_assertion(struct lockf *block)
{
        boolean_t drop_boost = TRUE;
        struct lockf *next;

        /* Return if the lock is not boosted */
        if (block->lf_boosted == LF_NOT_BOOSTED) {
                return;
        }

        TAILQ_FOREACH(next, &block->lf_blkhd, lf_block) {
                /* Check if block and next are same type of locks */
                if (((block->lf_flags & next->lf_flags & F_POSIX) != 0) ||
                    ((block->lf_flags & next->lf_flags & F_OFD_LOCK) &&
                    (block->lf_owner != next->lf_owner) &&
                    (NULL != block->lf_owner && NULL != next->lf_owner))) {

                        /* Check if next would be boosting block */
                        if (task_is_importance_donor(proc_task(next->lf_owner)) &&
                            task_is_importance_receiver_type(proc_task(block->lf_owner))) {
                                /* Found a lock boosting block */
                                drop_boost = FALSE;
                                break;
                        }
                }
        }

        if (drop_boost) {
                lf_drop_assertion(block);
        }
}

static void
lf_boost_blocking_proc(struct lockf *lock, struct lockf *block)
{
        task_t ltask = proc_task(lock->lf_owner);
        task_t btask = proc_task(block->lf_owner);

        /*
         * Check if ltask can donate importance. The
         * check of imp_donor bit is done without holding
         * any lock. The value may change after you read it,
         * but it is ok to boost a task while someone else is
         * unboosting you.
         *
         * TODO: Support live inheritance on file locks.
         */
        if (task_is_importance_donor(ltask)) {
                LOCKF_DEBUG(LF_DBG_IMPINH,
                    "lf: %d: attempt to boost pid %d that holds lock %p\n",
                    proc_pid(lock->lf_owner), proc_pid(block->lf_owner), block);

                if (block->lf_boosted != LF_BOOSTED &&
                    task_is_importance_receiver_type(btask)) {
                        lf_hold_assertion(btask, block);
                }
                lf_jump_to_queue_head(block, lock);
        }
}
#endif /* IMPORTANCE_INHERITANCE */