1 /*
2 * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1989, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * This code is derived from software contributed to Berkeley by
33 * Scooter Morris at Genentech Inc.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 * 4. Neither the name of the University nor the names of its contributors
44 * may be used to endorse or promote products derived from this software
45 * without specific prior written permission.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
48 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57 * SUCH DAMAGE.
58 *
59 * @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94
60 */
61
62 #include <sys/cdefs.h>
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/kernel.h>
66 #include <sys/lock.h>
67 #include <sys/mount.h>
68 #include <sys/proc.h>
69 #include <sys/signalvar.h>
70 #include <sys/unistd.h>
71 #include <sys/user.h>
72 #include <sys/vnode.h>
73 #include <sys/vnode_internal.h>
74 #include <sys/vnode_if.h>
75 #include <sys/malloc.h>
76 #include <sys/fcntl.h>
77 #include <sys/lockf.h>
78 #include <sys/sdt.h>
79 #include <kern/policy_internal.h>
80
81 #include <sys/file_internal.h>
82
83 #if (DEVELOPMENT || DEBUG)
84 #define LOCKF_DEBUGGING 1
85 #endif
86
87 #ifdef LOCKF_DEBUGGING
88 #include <sys/sysctl.h>
89 void lf_print(const char *tag, struct lockf *lock);
90 void lf_printlist(const char *tag, struct lockf *lock);
91
92 #define LF_DBG_LOCKOP (1 << 0) /* setlk, getlk, clearlk */
93 #define LF_DBG_LIST (1 << 1) /* split, coalesce */
94 #define LF_DBG_IMPINH (1 << 2) /* importance inheritance */
95 #define LF_DBG_TRACE (1 << 3) /* errors, exit */
96 #define LF_DBG_DEADLOCK (1 << 4) /* deadlock detection */
97
98 static int lockf_debug = 0; /* was 2, could be 3 ;-) */
99 SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW | CTLFLAG_LOCKED, &lockf_debug, 0, "");
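/*
 * Illustrative note (not part of the code itself): lockf_debug is a
 * bitmask of the LF_DBG_* selectors above and, on DEVELOPMENT/DEBUG
 * kernels, can be changed at run time through the sysctl declared
 * above.  For example, a hypothetical invocation such as
 * "sysctl -w debug.lockf_debug=17" would enable LF_DBG_LOCKOP and
 * LF_DBG_DEADLOCK ((1 << 0) | (1 << 4)).
 */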
100
101 /*
102 * If the selector is set, then output the debugging diagnostic.
103 */
104 #define LOCKF_DEBUG(mask, ...) \
105 do { \
106 if ((mask) & lockf_debug) { \
107 printf("%s>", __FUNCTION__); \
108 printf(__VA_ARGS__); \
109 } \
110 } while(0)
111
112 #define LOCKF_DEBUGP(mask) \
113 ({ \
114 ((mask) & lockf_debug); \
115 })
116 #else /* !LOCKF_DEBUGGING */
117 #define LOCKF_DEBUG(mask, ...) /* mask */
118 #endif /* !LOCKF_DEBUGGING */
119
120 MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");
121
122 #define NOLOCKF (struct lockf *)0
123 #define SELF 0x1
124 #define OTHERS 0x2
125 #define OFF_MAX 0x7fffffffffffffffULL /* max off_t */
126
127 /*
128 * Overlapping lock states
129 */
130 typedef enum {
131 OVERLAP_NONE = 0,
132 OVERLAP_EQUALS_LOCK,
133 OVERLAP_CONTAINS_LOCK,
134 OVERLAP_CONTAINED_BY_LOCK,
135 OVERLAP_STARTS_BEFORE_LOCK,
136 OVERLAP_ENDS_AFTER_LOCK
137 } overlap_t;
138
139 static int lf_clearlock(struct lockf *);
140 static overlap_t lf_findoverlap(struct lockf *,
141 struct lockf *, int, struct lockf ***, struct lockf **);
142 static struct lockf *lf_getblock(struct lockf *, pid_t);
143 static int lf_getlock(struct lockf *, struct flock *, pid_t);
144 static int lf_setlock(struct lockf *, struct timespec *);
145 static int lf_split(struct lockf *, struct lockf *);
146 static void lf_wakelock(struct lockf *, boolean_t);
147 #if IMPORTANCE_INHERITANCE
148 static void lf_hold_assertion(task_t, struct lockf *);
149 static void lf_jump_to_queue_head(struct lockf *, struct lockf *);
150 static void lf_drop_assertion(struct lockf *);
151 static void lf_boost_blocking_proc(struct lockf *, struct lockf *);
152 static void lf_adjust_assertion(struct lockf *block);
153 #endif /* IMPORTANCE_INHERITANCE */
154
155 static lck_mtx_t lf_dead_lock;
156 static lck_grp_t *lf_dead_lock_grp;
157
158 void
159 lf_init(void)
160 {
161 lf_dead_lock_grp = lck_grp_alloc_init("lf_dead_lock", LCK_GRP_ATTR_NULL);
162 lck_mtx_init(&lf_dead_lock, lf_dead_lock_grp, LCK_ATTR_NULL);
163 }
164
165 /*
166 * lf_advlock
167 *
168 * Description: Advisory record locking support
169 *
170 * Parameters: ap Argument pointer to a vnop_advlock_args
171 * argument descriptor structure for the
172 * lock operation to be attempted.
173 *
174 * Returns: 0 Success
175 * EOVERFLOW
176 * EINVAL
177 * ENOLCK Number of locked regions exceeds limit
178 * lf_setlock:EAGAIN
179 * lf_setlock:EDEADLK
180 * lf_setlock:EINTR
181 * lf_setlock:ENOLCK
182 * lf_setlock:ETIMEDOUT
183 * lf_clearlock:ENOLCK
184 * vnode_size:???
185 *
186 * Notes: We return ENOLCK when we run out of memory to support locks; as
187 * such, there is no specific expectation limit other than the
188 * amount of available resources.
189 */
190 int
191 lf_advlock(struct vnop_advlock_args *ap)
192 {
193 struct vnode *vp = ap->a_vp;
194 struct flock *fl = ap->a_fl;
195 vfs_context_t context = ap->a_context;
196 struct lockf *lock;
197 off_t start, end, oadd;
198 u_quad_t size;
199 int error;
200 struct lockf **head = &vp->v_lockf;
201
202 /* XXX HFS may need a !vnode_isreg(vp) EISDIR error here */
203
204 /*
205 * Avoid the common case of unlocking when inode has no locks.
206 */
207 if (*head == (struct lockf *)0) {
208 if (ap->a_op != F_SETLK) {
209 fl->l_type = F_UNLCK;
210 LOCKF_DEBUG(LF_DBG_TRACE,
211 "lf_advlock: '%s' unlock without lock\n",
212 vfs_context_proc(context)->p_comm);
213 return 0;
214 }
215 }
216
217 /*
218 * Convert the flock structure into a start and end.
219 */
220 switch (fl->l_whence) {
221 case SEEK_SET:
222 case SEEK_CUR:
223 /*
224 * Caller is responsible for adding any necessary offset
225 * when SEEK_CUR is used.
226 */
227 start = fl->l_start;
228 break;
229
230 case SEEK_END:
231
232 /*
233 * It's OK to cast the u_quad_t to an off_t here, since they
234 * are the same storage size, and the value of the returned
235 * contents will never overflow into the sign bit. We need to
236 * do this because we will use size to force range checks.
237 */
238 if ((error = vnode_size(vp, (off_t *)&size, context))) {
239 LOCKF_DEBUG(LF_DBG_TRACE,
240 "lf_advlock: vnode_getattr failed: %d\n", error);
241 return error;
242 }
243
244 if (size > OFF_MAX ||
245 (fl->l_start > 0 &&
246 size > (u_quad_t)(OFF_MAX - fl->l_start))) {
247 return EOVERFLOW;
248 }
249 start = size + fl->l_start;
250 break;
251
252 default:
253 LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: unknown whence %d\n",
254 fl->l_whence);
255 return EINVAL;
256 }
257 if (start < 0) {
258 LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: start < 0 (%qd)\n",
259 start);
260 return EINVAL;
261 }
262 if (fl->l_len < 0) {
263 if (start == 0) {
264 LOCKF_DEBUG(LF_DBG_TRACE,
265 "lf_advlock: len < 0 & start == 0\n");
266 return EINVAL;
267 }
268 end = start - 1;
269 start += fl->l_len;
270 if (start < 0) {
271 LOCKF_DEBUG(LF_DBG_TRACE,
272 "lf_advlock: start < 0 (%qd)\n", start);
273 return EINVAL;
274 }
275 } else if (fl->l_len == 0) {
276 end = -1;
277 } else {
278 oadd = fl->l_len - 1;
279 if (oadd > (off_t)(OFF_MAX - start)) {
280 LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: overflow\n");
281 return EOVERFLOW;
282 }
283 end = start + oadd;
284 }
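/*
 * Worked example of the conversion above (illustrative values only):
 * l_whence = SEEK_SET, l_start = 100, l_len = 50 describes bytes
 * [100, 149], so start = 100 and end = start + (l_len - 1) = 149.
 * l_len = 0 means "to end of file and beyond", giving end = -1.
 * A negative length, e.g. l_start = 100, l_len = -50, describes the
 * 50 bytes immediately before l_start: end = 99, start = 50.
 */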
285 /*
286 * Create the lockf structure
287 */
288 MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
289 if (lock == NULL) {
290 return ENOLCK;
291 }
292 lock->lf_start = start;
293 lock->lf_end = end;
294 lock->lf_id = ap->a_id;
295 lock->lf_vnode = vp;
296 lock->lf_type = fl->l_type;
297 lock->lf_head = head;
298 lock->lf_next = (struct lockf *)0;
299 TAILQ_INIT(&lock->lf_blkhd);
300 lock->lf_flags = (short)ap->a_flags;
301 #if IMPORTANCE_INHERITANCE
302 lock->lf_boosted = LF_NOT_BOOSTED;
303 #endif
304 if (ap->a_flags & F_POSIX) {
305 lock->lf_owner = (struct proc *)lock->lf_id;
306 } else {
307 lock->lf_owner = NULL;
308 }
309
310 if (ap->a_flags & F_FLOCK) {
311 lock->lf_flags |= F_WAKE1_SAFE;
312 }
313
314 lck_mtx_lock(&vp->v_lock); /* protect the lockf list */
315 /*
316 * Do the requested operation.
317 */
318 switch (ap->a_op) {
319 case F_SETLK:
320 /*
321 * For F_OFD_* locks, lf_id is the fileglob.
322 * Record an "lf_owner" iff this is a confined fd
323 * i.e. it cannot escape this process and will be
324 * F_UNLCKed before the owner exits. (This is
325 * the implicit guarantee needed to ensure lf_owner
326 * remains a valid reference here.)
327 */
328 if (ap->a_flags & F_OFD_LOCK) {
329 struct fileglob *fg = (void *)lock->lf_id;
330 if (fg->fg_lflags & FG_CONFINED) {
331 lock->lf_owner = current_proc();
332 }
333 }
334 error = lf_setlock(lock, ap->a_timeout);
335 break;
336
337 case F_UNLCK:
338 error = lf_clearlock(lock);
339 FREE(lock, M_LOCKF);
340 break;
341
342 case F_GETLK:
343 error = lf_getlock(lock, fl, -1);
344 FREE(lock, M_LOCKF);
345 break;
346
347 case F_GETLKPID:
348 error = lf_getlock(lock, fl, fl->l_pid);
349 FREE(lock, M_LOCKF);
350 break;
351
352 default:
353 FREE(lock, M_LOCKF);
354 error = EINVAL;
355 break;
356 }
357 lck_mtx_unlock(&vp->v_lock); /* done manipulating the list */
358
359 LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: normal exit: %d\n", error);
360 return error;
361 }
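/*
 * Illustrative user-space path into lf_advlock() (a sketch, not kernel
 * code; the descriptor 'fd' is hypothetical): an F_SETLKW request on a
 * vnode-backed descriptor is turned by the fcntl(2) layer into a
 * VNOP_ADVLOCK call with op F_SETLK and the F_WAIT flag set, which a
 * filesystem may service with this routine.  A zero l_len requests the
 * whole file, so the lock's end becomes -1.
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,
 *	};
 *	if (fcntl(fd, F_SETLKW, &fl) == -1)
 *		err(1, "fcntl(F_SETLKW)");
 */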
362
363 /*
364 * Empty the queue of msleeping requests for a lock on the given vnode.
365 * Called with the vnode already locked. Used for forced unmount, where
366 * a flock(2) invoker sleeping on a blocked lock holds an iocount reference
367 * that prevents the vnode from ever being drained. Force unmounting wins.
368 */
369 void
370 lf_abort_advlocks(vnode_t vp)
371 {
372 struct lockf *lock;
373
374 if ((lock = vp->v_lockf) == NULL) {
375 return;
376 }
377
378 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
379
380 if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
381 struct lockf *tlock;
382
383 TAILQ_FOREACH(tlock, &lock->lf_blkhd, lf_block) {
384 /*
385 * Setting this flag should cause all
386 * currently blocked F_SETLK requests to
387 * return to userland with an errno.
388 */
389 tlock->lf_flags |= F_ABORT;
390 }
391 lf_wakelock(lock, TRUE);
392 }
393 }
394
395 /*
396 * Take any lock attempts which are currently blocked by a given lock ("from")
397 * and mark them as blocked by a different lock ("to"). Used in the case
398 * where a byte range currently occupied by "from" is to be occupied by "to."
399 */
400 static void
401 lf_move_blocked(struct lockf *to, struct lockf *from)
402 {
403 struct lockf *tlock;
404
405 TAILQ_FOREACH(tlock, &from->lf_blkhd, lf_block) {
406 tlock->lf_next = to;
407 }
408
409 TAILQ_CONCAT(&to->lf_blkhd, &from->lf_blkhd, lf_block);
410 }
411
412 /*
413 * lf_coalesce_adjacent
414 *
415 * Description: Helper function: when setting a lock, coalesce adjacent
416 * locks. Needed because adjacent locks are not overlapping,
417 * but POSIX requires that they be coalesced.
418 *
419 * Parameters: lock The new lock which may be adjacent
420 * to already locked regions, and which
421 * should therefore be coalesced with them
422 *
423 * Returns: <void>
424 */
425 static void
426 lf_coalesce_adjacent(struct lockf *lock)
427 {
428 struct lockf **lf = lock->lf_head;
429
430 while (*lf != NOLOCKF) {
431 /* reject locks that obviously could not be coalesced */
432 if ((*lf == lock) ||
433 ((*lf)->lf_id != lock->lf_id) ||
434 ((*lf)->lf_type != lock->lf_type)) {
435 lf = &(*lf)->lf_next;
436 continue;
437 }
438
439 /*
440 * NOTE: Assumes that if two locks are adjacent on the number line
441 * and belong to the same owner, then they are adjacent on the list.
442 */
443 if ((*lf)->lf_end != -1 &&
444 ((*lf)->lf_end + 1) == lock->lf_start) {
445 struct lockf *adjacent = *lf;
446
447 LOCKF_DEBUG(LF_DBG_LIST, "lf_coalesce_adjacent: coalesce adjacent previous\n");
448 lock->lf_start = (*lf)->lf_start;
449 *lf = lock;
450 lf = &(*lf)->lf_next;
451
452 lf_move_blocked(lock, adjacent);
453
454 FREE(adjacent, M_LOCKF);
455 continue;
456 }
457 /* If the lock starts adjacent to us, we can coalesce it */
458 if (lock->lf_end != -1 &&
459 (lock->lf_end + 1) == (*lf)->lf_start) {
460 struct lockf *adjacent = *lf;
461
462 LOCKF_DEBUG(LF_DBG_LIST, "lf_coalesce_adjacent: coalesce adjacent following\n");
463 lock->lf_end = (*lf)->lf_end;
464 lock->lf_next = (*lf)->lf_next;
465 lf = &lock->lf_next;
466
467 lf_move_blocked(lock, adjacent);
468
469 FREE(adjacent, M_LOCKF);
470 continue;
471 }
472
473 /* no matching conditions; go on to next lock */
474 lf = &(*lf)->lf_next;
475 }
476 }
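/*
 * Worked example (hypothetical ranges): if this owner already holds a
 * write lock on [0, 99] and lf_setlock() grants it a new write lock on
 * [100, 199], the two regions are numerically adjacent but do not
 * overlap; this routine merges them into a single lock covering
 * [0, 199], moving any waiters from the absorbed entry onto the
 * surviving one via lf_move_blocked() before freeing it.
 */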
477
478 /*
479 * lf_setlock
480 *
481 * Description: Set a byte-range lock.
482 *
483 * Parameters: lock The lock structure describing the lock
484 * to be set; allocated by the caller, it
485 * will be linked into the lock list if
486 * the set is successful, and freed if the
487 * set is unsuccessful.
488 *
489 * timeout Timeout specified in the case of
490 * SETLKWTIMEOUT.
491 *
492 * Returns: 0 Success
493 * EAGAIN
494 * EDEADLK
495 * lf_split:ENOLCK
496 * lf_clearlock:ENOLCK
497 * msleep:EINTR
498 * msleep:ETIMEDOUT
499 *
500 * Notes: We add the lock to the provisional lock list. We do not
501 * coalesce at this time; this has implications for other lock
502 * requestors in the blocker search mechanism.
503 */
504 static int
505 lf_setlock(struct lockf *lock, struct timespec *timeout)
506 {
507 struct lockf *block;
508 struct lockf **head = lock->lf_head;
509 struct lockf **prev, *overlap;
510 static const char lockstr[] = "lockf";
511 int priority, needtolink, error;
512 struct vnode *vp = lock->lf_vnode;
513 overlap_t ovcase;
514
515 #ifdef LOCKF_DEBUGGING
516 if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
517 lf_print("lf_setlock", lock);
518 lf_printlist("lf_setlock(in)", lock);
519 }
520 #endif /* LOCKF_DEBUGGING */
521 LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p Looking for deadlock, vnode %p\n", lock, lock->lf_vnode);
522
523 /*
524 * Set the priority
525 */
526 priority = PLOCK;
527 if (lock->lf_type == F_WRLCK) {
528 priority += 4;
529 }
530 priority |= PCATCH;
531 scan:
532 /*
533 * Scan lock list for this file looking for locks that would block us.
534 */
535 while ((block = lf_getblock(lock, -1))) {
536 /*
537 * Free the structure and return if nonblocking.
538 */
539 if ((lock->lf_flags & F_WAIT) == 0) {
540 DTRACE_FSINFO(advlock__nowait, vnode_t, vp);
541 FREE(lock, M_LOCKF);
542 return EAGAIN;
543 }
544
545 LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p found blocking lock %p\n", lock, block);
546
547 /*
548 * We are blocked. Since flock style locks cover
549 * the whole file, there is no chance for deadlock.
550 *
551 * OFD byte-range locks currently do NOT support
552 * deadlock detection.
553 *
554 * For POSIX byte-range locks we must check for deadlock.
555 *
556 * Deadlock detection is done by looking through the
557 * wait channels to see if there are any cycles that
558 * involve us.
559 */
560 if ((lock->lf_flags & F_POSIX) &&
561 (block->lf_flags & F_POSIX)) {
562 lck_mtx_lock(&lf_dead_lock);
563
564 /* The blocked process is waiting on something */
565 struct proc *wproc = block->lf_owner;
566 proc_lock(wproc);
567
568 LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p owned by pid %d\n", lock, proc_pid(wproc));
569
570 struct uthread *ut;
571 TAILQ_FOREACH(ut, &wproc->p_uthlist, uu_list) {
572 /*
573 * If the thread is (a) asleep (uu_wchan != 0)
574 * and (b) in this code (uu_wmesg == lockstr)
575 * then check to see if the lock is blocked behind
576 * someone blocked behind us.
577 *
578 * Note: (i) vp->v_lock is held, preventing other
579 * threads from mutating the blocking list for our vnode.
580 * and (ii) the proc_lock is held, i.e. the thread list
581 * is stable.
582 *
583 * HOWEVER some thread in wproc might be sleeping on a lockf
584 * structure for a different vnode, and be woken at any
585 * time. Thus the waitblock list could mutate while
586 * it's being inspected by this thread, and what
587 * ut->uu_wchan was just pointing at could even be freed.
588 *
589 * Nevertheless this is safe here because of lf_dead_lock; if
590 * any thread blocked with uu_wmesg == lockstr wakes (see below)
591 * it will try to acquire lf_dead_lock which is already held
592 * here. Holding that lock prevents the lockf structure being
593 * pointed at by ut->uu_wchan from going away. Thus the vnode
594 * involved can be found and locked, and the corresponding
595 * blocking chain can then be examined safely.
596 */
597 const struct lockf *waitblock = (const void *)ut->uu_wchan;
598 if ((waitblock != NULL) && (ut->uu_wmesg == lockstr)) {
599 LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p which is also blocked on lock %p vnode %p\n", lock, waitblock, waitblock->lf_vnode);
600
601 vnode_t othervp = NULL;
602 if (waitblock->lf_vnode != vp) {
603 /*
604 * This thread in wproc is waiting for a lock
605 * on a different vnode; grab the lock on it
606 * that protects lf_next while we examine it.
607 */
608 othervp = waitblock->lf_vnode;
609 if (!lck_mtx_try_lock(&othervp->v_lock)) {
610 /*
611 * avoid kernel deadlock: drop all
612 * locks, pause for a bit to let the
613 * other thread do what it needs to do,
614 * then (because we drop and retake
615 * v_lock) retry the scan.
616 */
617 proc_unlock(wproc);
618 lck_mtx_unlock(&lf_dead_lock);
619 static struct timespec ts = {
620 .tv_sec = 0,
621 .tv_nsec = 2 * NSEC_PER_MSEC,
622 };
623 static const char pausestr[] = "lockf:pause";
624 (void) msleep(lock, &vp->v_lock, priority, pausestr, &ts);
625 LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p contention for vp %p => restart\n", lock, othervp);
626 goto scan;
627 }
628 }
629
630 /*
631 * Get the lock blocking the lock
632 * which would block us, and make
633 * certain it hasn't become unblocked
634 * (been granted, e.g. between the time
635 * we called lf_getblock, and the time
636 * we successfully acquired the
637 * proc_lock).
638 */
639 const struct lockf *nextblock = waitblock->lf_next;
640 if (nextblock == NULL) {
641 if (othervp) {
642 lck_mtx_unlock(&othervp->v_lock);
643 }
644 LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p with waitblock %p and no lf_next; othervp %p\n", lock, waitblock, othervp);
645 continue;
646 }
647 LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p which is also blocked on lock %p vnode %p\n", lock, nextblock, nextblock->lf_vnode);
648
649 /*
650 * Make sure it's an advisory range
651 * lock and not any other kind of lock;
652 * if we mix lock types, it's our own
653 * fault.
654 */
655 if ((nextblock->lf_flags & F_POSIX) == 0) {
656 if (othervp) {
657 lck_mtx_unlock(&othervp->v_lock);
658 }
659 continue;
660 }
661
662 /*
663 * If the owner of the lock that's
664 * blocking the lock that's blocking us
665 * is ourselves, then we would deadlock
666 * trying to get the requested lock, so error out.
667 */
668 struct proc *bproc = nextblock->lf_owner;
669 const boolean_t deadlocked = bproc == lock->lf_owner;
670
671 if (othervp) {
672 lck_mtx_unlock(&othervp->v_lock);
673 }
674 LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p owned by pid %d\n", lock, proc_pid(bproc));
675 if (deadlocked) {
676 LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p which is me, so EDEADLK\n", lock);
677 proc_unlock(wproc);
678 lck_mtx_unlock(&lf_dead_lock);
679 FREE(lock, M_LOCKF);
680 return EDEADLK;
681 }
682 }
683 LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p bottom of thread loop\n", lock);
684 }
685 proc_unlock(wproc);
686 lck_mtx_unlock(&lf_dead_lock);
687 }
688
689 /*
690 * For flock type locks, we must first remove
691 * any shared locks that we hold before we sleep
692 * waiting for an exclusive lock.
693 */
694 if ((lock->lf_flags & F_FLOCK) &&
695 lock->lf_type == F_WRLCK) {
696 lock->lf_type = F_UNLCK;
697 if ((error = lf_clearlock(lock)) != 0) {
698 FREE(lock, M_LOCKF);
699 return error;
700 }
701 lock->lf_type = F_WRLCK;
702 }
703 /*
704 * Add our lock to the blocked list and sleep until we're free.
705 * Remember who blocked us (for deadlock detection).
706 */
707 lock->lf_next = block;
708 TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
709
710 if (!(lock->lf_flags & F_FLOCK)) {
711 block->lf_flags &= ~F_WAKE1_SAFE;
712 }
713
714 #if IMPORTANCE_INHERITANCE
715 /*
716 * Importance donation is done only for cases where the
717 * owning task can be unambiguously determined.
718 *
719 * POSIX type locks are not inherited by child processes;
720 * we maintain a 1:1 mapping between a lock and its owning
721 * process.
722 *
723 * Flock type locks are inherited across fork() and there is
724 * no 1:1 mapping in the general case. However, the fileglobs
725 * used by OFD locks *may* be confined to the process that
726 * created them, and thus have an "owner", in which case
727 * we also attempt importance donation.
728 */
729 if ((lock->lf_flags & block->lf_flags & F_POSIX) != 0) {
730 lf_boost_blocking_proc(lock, block);
731 } else if ((lock->lf_flags & block->lf_flags & F_OFD_LOCK) &&
732 lock->lf_owner != block->lf_owner &&
733 NULL != lock->lf_owner && NULL != block->lf_owner) {
734 lf_boost_blocking_proc(lock, block);
735 }
736 #endif /* IMPORTANCE_INHERITANCE */
737
738 #ifdef LOCKF_DEBUGGING
739 if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
740 lf_print("lf_setlock: blocking on", block);
741 lf_printlist("lf_setlock(block)", block);
742 }
743 #endif /* LOCKF_DEBUGGING */
744 DTRACE_FSINFO(advlock__wait, vnode_t, vp);
745
746 if (lock->lf_flags & F_POSIX) {
747 error = msleep(lock, &vp->v_lock, priority, lockstr, timeout);
748 /*
749 * Ensure that 'lock' doesn't get mutated or freed if a
750 * wakeup occurs while hunting for deadlocks (and holding
751 * lf_dead_lock - see above)
752 */
753 lck_mtx_lock(&lf_dead_lock);
754 lck_mtx_unlock(&lf_dead_lock);
755 } else {
756 static const char lockstr_np[] = "lockf:np";
757 error = msleep(lock, &vp->v_lock, priority, lockstr_np, timeout);
758 }
759
760 if (error == 0 && (lock->lf_flags & F_ABORT) != 0) {
761 error = EBADF;
762 }
763
764 if (lock->lf_next) {
765 /*
766 * lf_wakelock() always sets wakelock->lf_next to
767 * NULL before a wakeup; so we've been woken early
768 * - perhaps by a debugger, signal or other event.
769 *
770 * Remove 'lock' from the block list (avoids double-add
771 * in the spurious case, which would create a cycle)
772 */
773 TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
774 #if IMPORTANCE_INHERITANCE
775 /*
776 * Adjust the boost on lf_next.
777 */
778 lf_adjust_assertion(lock->lf_next);
779 #endif /* IMPORTANCE_INHERITANCE */
780 lock->lf_next = NULL;
781
782 if (error == 0) {
783 /*
784 * If this was a spurious wakeup, retry
785 */
786 printf("%s: spurious wakeup, retrying lock\n",
787 __func__);
788 continue;
789 }
790 }
791
792 if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
793 if ((block = lf_getblock(lock, -1)) != NULL) {
794 lf_move_blocked(block, lock);
795 }
796 }
797
798 if (error) {
799 if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
800 lf_wakelock(lock, TRUE);
801 }
802 FREE(lock, M_LOCKF);
803 /* Return ETIMEDOUT if timeout occurred. */
804 if (error == EWOULDBLOCK) {
805 error = ETIMEDOUT;
806 }
807 return error;
808 }
809 }
810
811 /*
812 * No blocks!! Add the lock. Note that we will
813 * downgrade or upgrade any overlapping locks this
814 * process already owns.
815 *
816 * Skip over locks owned by other processes.
817 * Handle any locks that overlap and are owned by ourselves.
818 */
819 prev = head;
820 block = *head;
821 needtolink = 1;
822 for (;;) {
823 ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
824 if (ovcase) {
825 block = overlap->lf_next;
826 }
827 /*
828 * Six cases:
829 * 0) no overlap
830 * 1) overlap == lock
831 * 2) overlap contains lock
832 * 3) lock contains overlap
833 * 4) overlap starts before lock
834 * 5) overlap ends after lock
835 */
836 switch (ovcase) {
837 case OVERLAP_NONE:
838 if (needtolink) {
839 *prev = lock;
840 lock->lf_next = overlap;
841 }
842 break;
843
844 case OVERLAP_EQUALS_LOCK:
845 /*
846 * If downgrading lock, others may be
847 * able to acquire it.
848 */
849 if (lock->lf_type == F_RDLCK &&
850 overlap->lf_type == F_WRLCK) {
851 lf_wakelock(overlap, TRUE);
852 }
853 overlap->lf_type = lock->lf_type;
854 lf_move_blocked(overlap, lock);
855 FREE(lock, M_LOCKF);
856 lock = overlap; /* for lf_coalesce_adjacent() */
857 break;
858
859 case OVERLAP_CONTAINS_LOCK:
860 /*
861 * Check for common starting point and different types.
862 */
863 if (overlap->lf_type == lock->lf_type) {
864 lf_move_blocked(overlap, lock);
865 FREE(lock, M_LOCKF);
866 lock = overlap; /* for lf_coalesce_adjacent() */
867 break;
868 }
869 if (overlap->lf_start == lock->lf_start) {
870 *prev = lock;
871 lock->lf_next = overlap;
872 overlap->lf_start = lock->lf_end + 1;
873 } else {
874 /*
875 * If we can't split the lock, we can't
876 * grant it. Claim a system limit for the
877 * resource shortage.
878 */
879 if (lf_split(overlap, lock)) {
880 FREE(lock, M_LOCKF);
881 return ENOLCK;
882 }
883 }
884 lf_wakelock(overlap, TRUE);
885 break;
886
887 case OVERLAP_CONTAINED_BY_LOCK:
888 /*
889 * If downgrading lock, others may be able to
890 * acquire it, otherwise take the list.
891 */
892 if (lock->lf_type == F_RDLCK &&
893 overlap->lf_type == F_WRLCK) {
894 lf_wakelock(overlap, TRUE);
895 } else {
896 lf_move_blocked(lock, overlap);
897 }
898 /*
899 * Add the new lock if necessary and delete the overlap.
900 */
901 if (needtolink) {
902 *prev = lock;
903 lock->lf_next = overlap->lf_next;
904 prev = &lock->lf_next;
905 needtolink = 0;
906 } else {
907 *prev = overlap->lf_next;
908 }
909 FREE(overlap, M_LOCKF);
910 continue;
911
912 case OVERLAP_STARTS_BEFORE_LOCK:
913 /*
914 * Add lock after overlap on the list.
915 */
916 lock->lf_next = overlap->lf_next;
917 overlap->lf_next = lock;
918 overlap->lf_end = lock->lf_start - 1;
919 prev = &lock->lf_next;
920 lf_wakelock(overlap, TRUE);
921 needtolink = 0;
922 continue;
923
924 case OVERLAP_ENDS_AFTER_LOCK:
925 /*
926 * Add the new lock before overlap.
927 */
928 if (needtolink) {
929 *prev = lock;
930 lock->lf_next = overlap;
931 }
932 overlap->lf_start = lock->lf_end + 1;
933 lf_wakelock(overlap, TRUE);
934 break;
935 }
936 break;
937 }
938 /* Coalesce adjacent locks with identical attributes */
939 lf_coalesce_adjacent(lock);
940 #ifdef LOCKF_DEBUGGING
941 if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
942 lf_print("lf_setlock: got the lock", lock);
943 lf_printlist("lf_setlock(out)", lock);
944 }
945 #endif /* LOCKF_DEBUGGING */
946 return 0;
947 }
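/*
 * Deadlock example for the detection logic above (hypothetical
 * processes and ranges): process A holds a POSIX write lock on [0, 9]
 * and sleeps requesting [10, 19]; process B holds [10, 19] and then
 * requests [0, 9].  B's scan finds its blocker's owner (A) asleep in
 * this code, blocked behind a lock whose owner is B itself, so B's
 * request fails with EDEADLK instead of sleeping forever.
 */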
948
949
950 /*
951 * lf_clearlock
952 *
953 * Description: Remove a byte-range lock on a vnode. Generally, find the
954 * lock (or an overlap to that lock) and remove it (or shrink
955 * it), then wake up anyone we can.
956 *
957 * Parameters: unlock The lock to clear
958 *
959 * Returns: 0 Success
960 * lf_split:ENOLCK
961 *
962 * Notes: A caller may unlock all the locks owned by the caller by
963 * specifying the entire file range; locks owned by other
964 * callers are not affected by this operation.
965 */
966 static int
967 lf_clearlock(struct lockf *unlock)
968 {
969 struct lockf **head = unlock->lf_head;
970 struct lockf *lf = *head;
971 struct lockf *overlap, **prev;
972 overlap_t ovcase;
973
974 if (lf == NOLOCKF) {
975 return 0;
976 }
977 #ifdef LOCKF_DEBUGGING
978 if (unlock->lf_type != F_UNLCK) {
979 panic("lf_clearlock: bad type");
980 }
981 if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
982 lf_print("lf_clearlock", unlock);
983 }
984 #endif /* LOCKF_DEBUGGING */
985 prev = head;
986 while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap)) != OVERLAP_NONE) {
987 /*
988 * Wakeup the list of locks to be retried.
989 */
990 lf_wakelock(overlap, FALSE);
991 #if IMPORTANCE_INHERITANCE
992 if (overlap->lf_boosted == LF_BOOSTED) {
993 lf_drop_assertion(overlap);
994 }
995 #endif /* IMPORTANCE_INHERITANCE */
996
997 switch (ovcase) {
998 case OVERLAP_NONE: /* satisfy compiler enum/switch */
999 break;
1000
1001 case OVERLAP_EQUALS_LOCK:
1002 *prev = overlap->lf_next;
1003 FREE(overlap, M_LOCKF);
1004 break;
1005
1006 case OVERLAP_CONTAINS_LOCK: /* split it */
1007 if (overlap->lf_start == unlock->lf_start) {
1008 overlap->lf_start = unlock->lf_end + 1;
1009 break;
1010 }
1011 /*
1012 * If we can't split the lock, we can't grant it.
1013 * Claim a system limit for the resource shortage.
1014 */
1015 if (lf_split(overlap, unlock)) {
1016 return ENOLCK;
1017 }
1018 overlap->lf_next = unlock->lf_next;
1019 break;
1020
1021 case OVERLAP_CONTAINED_BY_LOCK:
1022 *prev = overlap->lf_next;
1023 lf = overlap->lf_next;
1024 FREE(overlap, M_LOCKF);
1025 continue;
1026
1027 case OVERLAP_STARTS_BEFORE_LOCK:
1028 overlap->lf_end = unlock->lf_start - 1;
1029 prev = &overlap->lf_next;
1030 lf = overlap->lf_next;
1031 continue;
1032
1033 case OVERLAP_ENDS_AFTER_LOCK:
1034 overlap->lf_start = unlock->lf_end + 1;
1035 break;
1036 }
1037 break;
1038 }
1039 #ifdef LOCKF_DEBUGGING
1040 if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
1041 lf_printlist("lf_clearlock", unlock);
1042 }
1043 #endif /* LOCKF_DEBUGGING */
1044 return 0;
1045 }
1046
1047
1048 /*
1049 * lf_getlock
1050 *
1051 * Description: Check whether there is a blocking lock, and if so return
1052 * its process identifier into the lock being requested.
1053 *
1054 * Parameters: lock Pointer to lock to test for blocks
1055 * fl Pointer to flock structure to receive
1056 * the blocking lock information, if a
1057 * blocking lock is found.
1058 * matchpid -1, or pid value to match in lookup.
1059 *
1060 * Returns: 0 Success
1061 *
1062 * Implicit Returns:
1063 * *fl Contents modified to reflect the
1064 * blocking lock, if one is found; not
1065 * modified otherwise
1066 *
1067 * Notes: fl->l_pid will be (-1) for file locks and will only be set to
1068 * the blocking process ID for advisory record locks.
1069 */
1070 static int
1071 lf_getlock(struct lockf *lock, struct flock *fl, pid_t matchpid)
1072 {
1073 struct lockf *block;
1074
1075 #ifdef LOCKF_DEBUGGING
1076 if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
1077 lf_print("lf_getlock", lock);
1078 }
1079 #endif /* LOCKF_DEBUGGING */
1080
1081 if ((block = lf_getblock(lock, matchpid))) {
1082 fl->l_type = block->lf_type;
1083 fl->l_whence = SEEK_SET;
1084 fl->l_start = block->lf_start;
1085 if (block->lf_end == -1) {
1086 fl->l_len = 0;
1087 } else {
1088 fl->l_len = block->lf_end - block->lf_start + 1;
1089 }
1090 if (NULL != block->lf_owner) {
1091 /*
1092 * lf_owner is only non-NULL when the lock
1093 * "owner" can be unambiguously determined
1094 */
1095 fl->l_pid = proc_pid(block->lf_owner);
1096 } else {
1097 fl->l_pid = -1;
1098 }
1099 } else {
1100 fl->l_type = F_UNLCK;
1101 }
1102 return 0;
1103 }
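/*
 * Illustrative F_GETLK usage from user space (a sketch; 'fd' and the
 * range are hypothetical).  The probe takes no lock: if a conflicting
 * record lock exists, lf_getlock() rewrites the flock with the
 * blocker's type, range and (when unambiguous) owning pid; otherwise
 * it sets l_type to F_UNLCK.
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 100,
 *	};
 *	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
 *		printf("range is locked by pid %d\n", (int)fl.l_pid);
 */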
1104
1105 /*
1106 * lf_getblock
1107 *
1108 * Description: Walk the list of locks for an inode and return the first
1109 * blocking lock. A lock is considered blocking if we are not
1110 * the lock owner; otherwise, we are permitted to upgrade or
1111 * downgrade it, and it's not considered blocking.
1112 *
1113 * Parameters: lock The lock for which we are interested
1114 * in obtaining the blocking lock, if any
1115 * matchpid -1, or pid value to match in lookup.
1116 *
1117 * Returns: NOLOCKF No blocking lock exists
1118 * !NOLOCKF The address of the blocking lock's
1119 * struct lockf.
1120 */
1121 static struct lockf *
1122 lf_getblock(struct lockf *lock, pid_t matchpid)
1123 {
1124 struct lockf **prev, *overlap, *lf = *(lock->lf_head);
1125
1126 for (prev = lock->lf_head;
1127 lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != OVERLAP_NONE;
1128 lf = overlap->lf_next) {
1129 /*
1130 * Found an overlap.
1131 *
1132 * If we're matching pids, and it's a record lock,
1133 * or it's an OFD lock on a process-confined fd,
1134 * but the pid doesn't match, then keep on looking ..
1135 */
1136 if (matchpid != -1 &&
1137 (overlap->lf_flags & (F_POSIX | F_OFD_LOCK)) != 0 &&
1138 proc_pid(overlap->lf_owner) != matchpid) {
1139 continue;
1140 }
1141
1142 /*
1143 * does it block us?
1144 */
1145 if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK)) {
1146 return overlap;
1147 }
1148 }
1149 return NOLOCKF;
1150 }
1151
1152
1153 /*
1154 * lf_findoverlap
1155 *
1156 * Description: Walk the list of locks to find an overlapping lock (if any).
1157 *
1158 * Parameters: lf First lock on lock list
1159 * lock The lock we are checking for an overlap
1160 * check Check type
1161 * prev pointer to a pointer used to return the
1162 * address of the pointer in the previous
1163 * lock that points to the overlapping lock
1164 * overlap pointer to pointer to contain address
1165 * of overlapping lock
1166 *
1167 * Returns: OVERLAP_NONE
1168 * OVERLAP_EQUALS_LOCK
1169 * OVERLAP_CONTAINS_LOCK
1170 * OVERLAP_CONTAINED_BY_LOCK
1171 * OVERLAP_STARTS_BEFORE_LOCK
1172 * OVERLAP_ENDS_AFTER_LOCK
1173 *
1174 * Implicit Returns:
1175 * *prev The address of the next pointer in the
1176 * lock previous to the overlapping lock;
1177 * this is generally used to relink the
1178 * lock list, avoiding a second iteration.
1179 * *overlap The pointer to the overlapping lock
1180 * itself; this is used to return data in
1181 * the check == OTHERS case, and for the
1182 * caller to modify the overlapping lock,
1183 * in the check == SELF case
1184 *
1185 * Note: This returns only the FIRST overlapping lock. There may be
1186 * more than one. lf_getlock will return the first blocking lock,
1187 * while lf_setlock will iterate over and handle all overlapping locks.
1188 *
1189 * The check parameter can be SELF, meaning we are looking for
1190 * overlapping locks owned by us, or it can be OTHERS, meaning
1191 * we are looking for overlapping locks owned by someone else so
1192 * we can report a blocking lock on an F_GETLK request.
1193 *
1194 * The values of *overlap and *prev are modified, even if there is
1195 * no overlapping lock found; always check the return code.
1196 */
1197 static overlap_t
1198 lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
1199 struct lockf ***prev, struct lockf **overlap)
1200 {
1201 off_t start, end;
1202 int found_self = 0;
1203
1204 *overlap = lf;
1205 if (lf == NOLOCKF) {
1206 return 0;
1207 }
1208 #ifdef LOCKF_DEBUGGING
1209 if (LOCKF_DEBUGP(LF_DBG_LIST)) {
1210 lf_print("lf_findoverlap: looking for overlap in", lock);
1211 }
1212 #endif /* LOCKF_DEBUGGING */
1213 start = lock->lf_start;
1214 end = lock->lf_end;
1215 while (lf != NOLOCKF) {
1216 if (((type & SELF) && lf->lf_id != lock->lf_id) ||
1217 ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
1218 /*
1219 * Locks belonging to one process are adjacent on the
1220 * list, so if we've found any locks belonging to us,
1221 * and we're now seeing something else, then we've
1222 * examined all "self" locks. Note that bailing out
1223 * here is quite important; for coalescing, we assume
1224 * numerically adjacent locks from the same owner to
1225 * be adjacent on the list.
1226 */
1227 if ((type & SELF) && found_self) {
1228 return OVERLAP_NONE;
1229 }
1230
1231 *prev = &lf->lf_next;
1232 *overlap = lf = lf->lf_next;
1233 continue;
1234 }
1235
1236 if ((type & SELF)) {
1237 found_self = 1;
1238 }
1239
1240 #ifdef LOCKF_DEBUGGING
1241 if (LOCKF_DEBUGP(LF_DBG_LIST)) {
1242 lf_print("\tchecking", lf);
1243 }
1244 #endif /* LOCKF_DEBUGGING */
1245 /*
1246 * OK, check for overlap
1247 */
1248 if ((lf->lf_end != -1 && start > lf->lf_end) ||
1249 (end != -1 && lf->lf_start > end)) {
1250 /* Case 0 */
1251 LOCKF_DEBUG(LF_DBG_LIST, "no overlap\n");
1252
1253 /*
1254 * NOTE: assumes that locks for the same process are
1255 * nonintersecting and ordered.
1256 */
1257 if ((type & SELF) && end != -1 && lf->lf_start > end) {
1258 return OVERLAP_NONE;
1259 }
1260 *prev = &lf->lf_next;
1261 *overlap = lf = lf->lf_next;
1262 continue;
1263 }
1264 if ((lf->lf_start == start) && (lf->lf_end == end)) {
1265 LOCKF_DEBUG(LF_DBG_LIST, "overlap == lock\n");
1266 return OVERLAP_EQUALS_LOCK;
1267 }
1268 if ((lf->lf_start <= start) &&
1269 (end != -1) &&
1270 ((lf->lf_end >= end) || (lf->lf_end == -1))) {
1271 LOCKF_DEBUG(LF_DBG_LIST, "overlap contains lock\n");
1272 return OVERLAP_CONTAINS_LOCK;
1273 }
1274 if (start <= lf->lf_start &&
1275 (end == -1 ||
1276 (lf->lf_end != -1 && end >= lf->lf_end))) {
1277 LOCKF_DEBUG(LF_DBG_LIST, "lock contains overlap\n");
1278 return OVERLAP_CONTAINED_BY_LOCK;
1279 }
1280 if ((lf->lf_start < start) &&
1281 ((lf->lf_end >= start) || (lf->lf_end == -1))) {
1282 LOCKF_DEBUG(LF_DBG_LIST, "overlap starts before lock\n");
1283 return OVERLAP_STARTS_BEFORE_LOCK;
1284 }
1285 if ((lf->lf_start > start) &&
1286 (end != -1) &&
1287 ((lf->lf_end > end) || (lf->lf_end == -1))) {
1288 LOCKF_DEBUG(LF_DBG_LIST, "overlap ends after lock\n");
1289 return OVERLAP_ENDS_AFTER_LOCK;
1290 }
1291 panic("lf_findoverlap: default");
1292 }
1293 return OVERLAP_NONE;
1294 }
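/*
 * Worked examples of the overlap cases, for a candidate lock covering
 * [100, 199] (hypothetical ranges; an end of -1 means "to EOF"):
 *
 *	existing [100, 199]	-> OVERLAP_EQUALS_LOCK
 *	existing [ 50, 299]	-> OVERLAP_CONTAINS_LOCK
 *	existing [120, 179]	-> OVERLAP_CONTAINED_BY_LOCK
 *	existing [ 50, 149]	-> OVERLAP_STARTS_BEFORE_LOCK
 *	existing [150,  -1]	-> OVERLAP_ENDS_AFTER_LOCK
 *	existing [200, 299]	-> OVERLAP_NONE (adjacent, not overlapping)
 */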
1295
1296
1297 /*
1298 * lf_split
1299 *
1300 * Description: Split a lock and a contained region into two or three locks
1301 * as necessary.
1302 *
1303 * Parameters: lock1 Lock to split
1304 * lock2 Overlapping lock region requiring the
1305 * split (upgrade/downgrade/unlock)
1306 *
1307 * Returns: 0 Success
1308 * ENOLCK No memory for new lock
1309 *
1310 * Implicit Returns:
1311 * *lock1 Modified original lock
1312 * *lock2 Overlapping lock (inserted into list)
1313 * (new lock) Potential new lock inserted into list
1314 * if split results in 3 locks
1315 *
1316 * Notes: This operation can only fail if the split would result in three
1317 * locks, and there is insufficient memory to allocate the third
1318 * lock; in that case, neither of the locks will be modified.
1319 */
1320 static int
1321 lf_split(struct lockf *lock1, struct lockf *lock2)
1322 {
1323 struct lockf *splitlock;
1324
1325 #ifdef LOCKF_DEBUGGING
1326 if (LOCKF_DEBUGP(LF_DBG_LIST)) {
1327 lf_print("lf_split", lock1);
1328 lf_print("splitting from", lock2);
1329 }
1330 #endif /* LOCKF_DEBUGGING */
1331 /*
1332 * Check to see if splitting into only two pieces.
1333 */
1334 if (lock1->lf_start == lock2->lf_start) {
1335 lock1->lf_start = lock2->lf_end + 1;
1336 lock2->lf_next = lock1;
1337 return 0;
1338 }
1339 if (lock1->lf_end == lock2->lf_end) {
1340 lock1->lf_end = lock2->lf_start - 1;
1341 lock2->lf_next = lock1->lf_next;
1342 lock1->lf_next = lock2;
1343 return 0;
1344 }
1345 /*
1346 * Make a new lock consisting of the last part of
1347 * the encompassing lock
1348 */
1349 MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
1350 if (splitlock == NULL) {
1351 return ENOLCK;
1352 }
1353 bcopy(lock1, splitlock, sizeof *splitlock);
1354 splitlock->lf_start = lock2->lf_end + 1;
1355 TAILQ_INIT(&splitlock->lf_blkhd);
1356 lock1->lf_end = lock2->lf_start - 1;
1357 /*
1358 * OK, now link it in
1359 */
1360 splitlock->lf_next = lock1->lf_next;
1361 lock2->lf_next = splitlock;
1362 lock1->lf_next = lock2;
1363
1364 return 0;
1365 }
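/*
 * Worked example (hypothetical ranges): unlocking [50, 99] out of a
 * held lock on [0, 199] takes the three-piece path above: lock1 is
 * trimmed to [0, 49], the newly allocated splitlock covers [100, 199],
 * and lock2 ([50, 99]) is linked between them.  Had the unlock started
 * at offset 0 or ended at 199, only two pieces would result and no
 * allocation would be needed.
 */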
1366
1367
1368 /*
1369 * lf_wakelock
1370 *
1371 * Wake up a block list in the case of a downgrade or unlock, since others
1372 * waiting on the lock may now be able to acquire it.
1373 *
1374 * Parameters: listhead Lock list head on which waiters may
1375 * have pending locks
1376 *
1377 * Returns: <void>
1378 *
1379 * Notes: This function iterates a list of locks and wakes all waiters,
1380 * rather than only waiters for the contended regions. Because
1381 * of this, for heavily contended files, this can result in a
1382 * "thundering herd" situation. Refactoring the code could make
1383 * this operation more efficient, if heavy contention ever results
1384 * in a real-world performance problem.
1385 */
1386 static void
1387 lf_wakelock(struct lockf *listhead, boolean_t force_all)
1388 {
1389 struct lockf *wakelock;
1390 boolean_t wake_all = TRUE;
1391
1392 if (force_all == FALSE && (listhead->lf_flags & F_WAKE1_SAFE)) {
1393 wake_all = FALSE;
1394 }
1395
1396 while (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
1397 wakelock = TAILQ_FIRST(&listhead->lf_blkhd);
1398 TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
1399
1400 wakelock->lf_next = NOLOCKF;
1401 #ifdef LOCKF_DEBUGGING
1402 if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
1403 lf_print("lf_wakelock: awakening", wakelock);
1404 }
1405 #endif /* LOCKF_DEBUGGING */
1406 if (wake_all == FALSE) {
1407 /*
1408 * If there are items on the list head block list,
1409 * move them to the wakelock list instead, and then
1410 * correct their lf_next pointers.
1411 */
1412 if (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
1413 TAILQ_CONCAT(&wakelock->lf_blkhd, &listhead->lf_blkhd, lf_block);
1414
1415 struct lockf *tlock;
1416
1417 TAILQ_FOREACH(tlock, &wakelock->lf_blkhd, lf_block) {
1418 if (TAILQ_NEXT(tlock, lf_block) == tlock) {
1419 /* See rdar://10887303 */
1420 panic("cycle in wakelock list");
1421 }
1422 tlock->lf_next = wakelock;
1423 }
1424 }
1425 }
1426 wakeup(wakelock);
1427
1428 if (wake_all == FALSE) {
1429 break;
1430 }
1431 }
1432 }
1433
1434
1435 #ifdef LOCKF_DEBUGGING
1436 #define GET_LF_OWNER_PID(lf) (proc_pid((lf)->lf_owner))
1437
1438 /*
1439 * lf_print DEBUG
1440 *
1441 * Print out a lock; lock information is prefixed by the string in 'tag'
1442 *
1443 * Parameters: tag A string tag for debugging
1444 * lock The lock whose information should be
1445 * displayed
1446 *
1447 * Returns: <void>
1448 */
1449 void
1450 lf_print(const char *tag, struct lockf *lock)
1451 {
1452 printf("%s: lock %p for ", tag, (void *)lock);
1453 if (lock->lf_flags & F_POSIX) {
1454 printf("proc %p (owner %d)",
1455 lock->lf_id, GET_LF_OWNER_PID(lock));
1456 } else if (lock->lf_flags & F_OFD_LOCK) {
1457 printf("fg %p (owner %d)",
1458 lock->lf_id, GET_LF_OWNER_PID(lock));
1459 } else {
1460 printf("id %p", (void *)lock->lf_id);
1461 }
1462 if (lock->lf_vnode != 0) {
1463 printf(" in vno %p, %s, start 0x%016llx, end 0x%016llx",
1464 lock->lf_vnode,
1465 lock->lf_type == F_RDLCK ? "shared" :
1466 lock->lf_type == F_WRLCK ? "exclusive" :
1467 lock->lf_type == F_UNLCK ? "unlock" : "unknown",
1468 (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
1469 } else {
1470 printf(" %s, start 0x%016llx, end 0x%016llx",
1471 lock->lf_type == F_RDLCK ? "shared" :
1472 lock->lf_type == F_WRLCK ? "exclusive" :
1473 lock->lf_type == F_UNLCK ? "unlock" : "unknown",
1474 (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
1475 }
1476 if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
1477 printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd));
1478 } else {
1479 printf("\n");
1480 }
1481 }
1482
1483
1484 /*
1485 * lf_printlist DEBUG
1486 *
1487 * Print out a lock list for the vnode associated with 'lock'; lock information
1488 * is prefixed by the string in 'tag'
1489 *
1490 * Parameters: tag A string tag for debugging
1491 * lock The lock whose vnode's lock list should
1492 * be displayed
1493 *
1494 * Returns: <void>
1495 */
1496 void
1497 lf_printlist(const char *tag, struct lockf *lock)
1498 {
1499 struct lockf *lf, *blk;
1500
1501 if (lock->lf_vnode == 0) {
1502 return;
1503 }
1504
1505 printf("%s: Lock list for vno %p:\n",
1506 tag, lock->lf_vnode);
1507 for (lf = lock->lf_vnode->v_lockf; lf; lf = lf->lf_next) {
1508 printf("\tlock %p for ", (void *)lf);
1509 if (lf->lf_flags & F_POSIX) {
1510 printf("proc %p (owner %d)",
1511 lf->lf_id, GET_LF_OWNER_PID(lf));
1512 } else if (lf->lf_flags & F_OFD_LOCK) {
1513 printf("fg %p (owner %d)",
1514 lf->lf_id, GET_LF_OWNER_PID(lf));
1515 } else {
1516 printf("id %p", (void *)lf->lf_id);
1517 }
1518 printf(", %s, start 0x%016llx, end 0x%016llx",
1519 lf->lf_type == F_RDLCK ? "shared" :
1520 lf->lf_type == F_WRLCK ? "exclusive" :
1521 lf->lf_type == F_UNLCK ? "unlock" :
1522 "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
1523 TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
1524 printf("\n\t\tlock request %p for ", (void *)blk);
1525 if (blk->lf_flags & F_POSIX) {
1526 printf("proc %p (owner %d)",
1527 blk->lf_id, GET_LF_OWNER_PID(blk));
1528 } else if (blk->lf_flags & F_OFD_LOCK) {
1529 printf("fg %p (owner %d)",
1530 blk->lf_id, GET_LF_OWNER_PID(blk));
1531 } else {
1532 printf("id %p", (void *)blk->lf_id);
1533 }
1534 printf(", %s, start 0x%016llx, end 0x%016llx",
1535 blk->lf_type == F_RDLCK ? "shared" :
1536 blk->lf_type == F_WRLCK ? "exclusive" :
1537 blk->lf_type == F_UNLCK ? "unlock" :
1538 "unknown", (intmax_t)blk->lf_start,
1539 (intmax_t)blk->lf_end);
1540 if (!TAILQ_EMPTY(&blk->lf_blkhd)) {
1541 panic("lf_printlist: bad list");
1542 }
1543 }
1544 printf("\n");
1545 }
1546 }
1547 #endif /* LOCKF_DEBUGGING */
1548
1549 #if IMPORTANCE_INHERITANCE
1550
1551 /*
1552 * lf_hold_assertion
1553 *
1554 * Call task importance hold assertion on the owner of the lock.
1555 *
1556 * Parameters: block_task Owner of the lock blocking the
1557 * current thread.
1558 *
1559 * block The lock on which the current thread
1560 * is blocked.
1561 *
1562 * Returns: <void>
1563 *
1564 * Notes: A task reference on block_task does not need to be held,
1565 * since the current thread holds the vnode lock and block_task
1566 * holds a file lock; removing that file lock at exit requires
1567 * block_task to grab the vnode lock.
1568 */
1569 static void
1570 lf_hold_assertion(task_t block_task, struct lockf *block)
1571 {
1572 if (task_importance_hold_file_lock_assertion(block_task, 1) == 0) {
1573 block->lf_boosted = LF_BOOSTED;
1574 LOCKF_DEBUG(LF_DBG_IMPINH,
1575 "lf: importance hold file lock assert on pid %d lock %p\n",
1576 proc_pid(block->lf_owner), block);
1577 }
1578 }
1579
1580
1581 /*
1582 * lf_jump_to_queue_head
1583 *
1584 * Jump the lock from the tail of the block queue to the head of
1585 * the queue.
1586 *
1587 * Parameters: block lockf struct containing the
1588 * block queue.
1589 * lock lockf struct to be jumped to the
1590 * front.
1591 *
1592 * Returns: <void>
1593 */
1594 static void
1595 lf_jump_to_queue_head(struct lockf *block, struct lockf *lock)
1596 {
1597 /* Move the lock to the head of the block queue. */
1598 TAILQ_REMOVE(&block->lf_blkhd, lock, lf_block);
1599 TAILQ_INSERT_HEAD(&block->lf_blkhd, lock, lf_block);
1600 }
1601
1602
1603 /*
1604 * lf_drop_assertion
1605 *
1606 * Drops the task hold assertion.
1607 *
1608 * Parameters: block lockf struct holding the assertion.
1609 *
1610 * Returns: <void>
1611 */
1612 static void
1613 lf_drop_assertion(struct lockf *block)
1614 {
1615 LOCKF_DEBUG(LF_DBG_IMPINH, "lf: %d: dropping assertion for lock %p\n",
1616 proc_pid(block->lf_owner), block);
1617
1618 task_t current_task = proc_task(block->lf_owner);
1619 task_importance_drop_file_lock_assertion(current_task, 1);
1620 block->lf_boosted = LF_NOT_BOOSTED;
1621 }
1622
1623 /*
1624 * lf_adjust_assertion
1625 *
1626 * Adjusts the importance assertion of a file lock: goes through
1627 * all the waiting locks and checks whether the file lock still
1628 * needs to be boosted.
1629 *
1630 * Parameters: block lockf structure which needs to be adjusted.
1631 *
1632 * Returns: <void>
1633 */
1634 static void
1635 lf_adjust_assertion(struct lockf *block)
1636 {
1637 boolean_t drop_boost = TRUE;
1638 struct lockf *next;
1639
1640 /* Return if the lock is not boosted */
1641 if (block->lf_boosted == LF_NOT_BOOSTED) {
1642 return;
1643 }
1644
1645 TAILQ_FOREACH(next, &block->lf_blkhd, lf_block) {
1646 /* Check if block and next are same type of locks */
1647 if (((block->lf_flags & next->lf_flags & F_POSIX) != 0) ||
1648 ((block->lf_flags & next->lf_flags & F_OFD_LOCK) &&
1649 (block->lf_owner != next->lf_owner) &&
1650 (NULL != block->lf_owner && NULL != next->lf_owner))) {
1651 /* Check if next would be boosting block */
1652 if (task_is_importance_donor(proc_task(next->lf_owner)) &&
1653 task_is_importance_receiver_type(proc_task(block->lf_owner))) {
1654 /* Found a lock boosting block */
1655 drop_boost = FALSE;
1656 break;
1657 }
1658 }
1659 }
1660
1661 if (drop_boost) {
1662 lf_drop_assertion(block);
1663 }
1664 }
1665
1666 static void
1667 lf_boost_blocking_proc(struct lockf *lock, struct lockf *block)
1668 {
1669 task_t ltask = proc_task(lock->lf_owner);
1670 task_t btask = proc_task(block->lf_owner);
1671
1672 /*
1673 * Check if ltask can donate importance. The
1674 * check of imp_donor bit is done without holding
1675 * any lock. The value may change after it is read,
1676 * but it is OK to boost a task while someone else is
1677 * unboosting it.
1678 *
1679 * TODO: Support live inheritance on file locks.
1680 */
1681 if (task_is_importance_donor(ltask)) {
1682 LOCKF_DEBUG(LF_DBG_IMPINH,
1683 "lf: %d: attempt to boost pid %d that holds lock %p\n",
1684 proc_pid(lock->lf_owner), proc_pid(block->lf_owner), block);
1685
1686 if (block->lf_boosted != LF_BOOSTED &&
1687 task_is_importance_receiver_type(btask)) {
1688 lf_hold_assertion(btask, block);
1689 }
1690 lf_jump_to_queue_head(block, lock);
1691 }
1692 }
1693 #endif /* IMPORTANCE_INHERITANCE */