/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/unistd.h>
#include <sys/user.h>
#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/vnode_if.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/sdt.h>
#include <kern/policy_internal.h>

#include <sys/file_internal.h>
#if (DEVELOPMENT || DEBUG)
#define LOCKF_DEBUGGING 1
#endif

#ifdef LOCKF_DEBUGGING
#include <sys/sysctl.h>
void lf_print(const char *tag, struct lockf *lock);
void lf_printlist(const char *tag, struct lockf *lock);

#define LF_DBG_LOCKOP   (1 << 0)        /* setlk, getlk, clearlk */
#define LF_DBG_LIST     (1 << 1)        /* split, coalesce */
#define LF_DBG_IMPINH   (1 << 2)        /* importance inheritance */
#define LF_DBG_TRACE    (1 << 3)        /* errors, exit */
#define LF_DBG_DEADLOCK (1 << 4)        /* deadlock detection */

static int lockf_debug = 0;     /* was 2, could be 3 ;-) */
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW | CTLFLAG_LOCKED, &lockf_debug, 0, "");

/*
 * If there is no mask bit selector, or there is one, and the selector is
 * set, then output the debugging diagnostic.
 */
#define LOCKF_DEBUG(mask, ...)                                  \
	do {                                                    \
	        if (!(mask) || ((mask) & lockf_debug)) {        \
	                printf("%s>", __FUNCTION__);            \
	                printf(__VA_ARGS__);                    \
	        }                                               \
	} while (0)

#define LOCKF_DEBUGP(mask)                                      \
	({                                                      \
	        ((mask) & lockf_debug);                         \
	})
#else   /* !LOCKF_DEBUGGING */
#define LOCKF_DEBUG(mask, ...)  /* mask */
#endif  /* !LOCKF_DEBUGGING */
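
/*
 * Usage note (added for exposition): on LOCKF_DEBUGGING builds the trace
 * output above is gated by the debug.lockf_debug sysctl registered above;
 * e.g. "sysctl debug.lockf_debug=0x1f" enables all five LF_DBG_* categories,
 * while 0 (the default) suppresses all masked diagnostics.
 */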
MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

#define NOLOCKF (struct lockf *)0
#define SELF    0x1
#define OTHERS  0x2
#define OFF_MAX 0x7fffffffffffffffULL   /* max off_t */

/*
 * Overlapping lock states
 */
typedef enum {
	OVERLAP_NONE = 0,
	OVERLAP_EQUALS_LOCK,
	OVERLAP_CONTAINS_LOCK,
	OVERLAP_CONTAINED_BY_LOCK,
	OVERLAP_STARTS_BEFORE_LOCK,
	OVERLAP_ENDS_AFTER_LOCK
} overlap_t;
static int       lf_coalesce_adjacent(struct lockf *);
static int       lf_clearlock(struct lockf *);
static overlap_t lf_findoverlap(struct lockf *,
    struct lockf *, int, struct lockf ***, struct lockf **);
static struct lockf *lf_getblock(struct lockf *, pid_t);
static int       lf_getlock(struct lockf *, struct flock *, pid_t);
static int       lf_setlock(struct lockf *, struct timespec *);
static int       lf_split(struct lockf *, struct lockf *);
static void      lf_wakelock(struct lockf *, boolean_t);
#if IMPORTANCE_INHERITANCE
static void      lf_hold_assertion(task_t, struct lockf *);
static void      lf_jump_to_queue_head(struct lockf *, struct lockf *);
static void      lf_drop_assertion(struct lockf *);
static void      lf_boost_blocking_proc(struct lockf *, struct lockf *);
static void      lf_adjust_assertion(struct lockf *block);
#endif /* IMPORTANCE_INHERITANCE */
static lck_mtx_t lf_dead_lock;
static lck_grp_t *lf_dead_lock_grp;

void
lf_init(void)
{
	lf_dead_lock_grp = lck_grp_alloc_init("lf_dead_lock", LCK_GRP_ATTR_NULL);
	lck_mtx_init(&lf_dead_lock, lf_dead_lock_grp, LCK_ATTR_NULL);
}

/*
 * lf_advlock
 *
 * Description:	Advisory record locking support
 *
 * Parameters:	ap			Argument pointer to a vnop_advlock_args
 *					argument descriptor structure for the
 *					lock operation to be attempted.
 *
 * Returns:	0			Success
 *		EINVAL
 *		EOVERFLOW
 *		ENOLCK			Number of locked regions exceeds limit
 *	lf_setlock:EAGAIN
 *	lf_setlock:EDEADLK
 *	lf_setlock:ETIMEDOUT
 *	lf_setlock:ENOLCK
 *	lf_clearlock:ENOLCK
 *
 * Notes:	We return ENOLCK when we run out of memory to support locks; as
 *		such, there is no specific expectation limit other than the
 *		amount of available resources.
 */
int
lf_advlock(struct vnop_advlock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct flock *fl = ap->a_fl;
	vfs_context_t context = ap->a_context;
	struct lockf *lock;
	off_t start, end, oadd;
	u_quad_t size;
	int error;
	struct lockf **head = &vp->v_lockf;

	/* XXX HFS may need a !vnode_isreg(vp) EISDIR error here */

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			LOCKF_DEBUG(LF_DBG_TRACE,
			    "lf_advlock: '%s' unlock without lock\n",
			    vfs_context_proc(context)->p_comm);
			return 0;
		}
	}

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		/*
		 * It's OK to cast the u_quad_t to an off_t here, since they
		 * are the same storage size, and the value of the returned
		 * contents will never overflow into the sign bit.  We need to
		 * do this because we will use size to force range checks.
		 */
		if ((error = vnode_size(vp, (off_t *)&size, context))) {
			LOCKF_DEBUG(LF_DBG_TRACE,
			    "lf_advlock: vnode_getattr failed: %d\n", error);
			return error;
		}

		if (size > OFF_MAX ||
		    (fl->l_start > 0 &&
		    size > (u_quad_t)(OFF_MAX - fl->l_start))) {
			return EOVERFLOW;
		}
		start = size + fl->l_start;
		break;

	default:
		LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: unknown whence %d\n",
		    fl->l_whence);
		return EINVAL;
	}
	if (start < 0) {
		LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: start < 0 (%qd)\n",
		    start);
		return EINVAL;
	}
	if (fl->l_len < 0) {
		if (start == 0) {
			LOCKF_DEBUG(LF_DBG_TRACE,
			    "lf_advlock: len < 0 & start == 0\n");
			return EINVAL;
		}
		end = start - 1;
		start += fl->l_len;
		if (start < 0) {
			LOCKF_DEBUG(LF_DBG_TRACE,
			    "lf_advlock: start < 0 (%qd)\n", start);
			return EINVAL;
		}
	} else if (fl->l_len == 0) {
		end = -1;
	} else {
		oadd = fl->l_len - 1;
		if (oadd > (off_t)(OFF_MAX - start)) {
			LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: overflow\n");
			return EOVERFLOW;
		}
		end = start + oadd;
	}
	/*
	 * Create the lockf structure
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	if (lock == NULL) {
		return ENOLCK;
	}
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = ap->a_id;
	lock->lf_vnode = vp;
	lock->lf_type = fl->l_type;
	lock->lf_head = head;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
#if IMPORTANCE_INHERITANCE
	lock->lf_boosted = LF_NOT_BOOSTED;
#endif
	if (ap->a_flags & F_POSIX) {
		lock->lf_owner = (struct proc *)lock->lf_id;
	} else {
		lock->lf_owner = NULL;
	}

	if (ap->a_flags & F_FLOCK) {
		lock->lf_flags |= F_WAKE1_SAFE;
	}

	lck_mtx_lock(&vp->v_lock);      /* protect the lockf list */
	/*
	 * Do the requested operation.
	 */
	switch (ap->a_op) {
	case F_SETLK:
		/*
		 * For F_OFD_* locks, lf_id is the fileglob.
		 * Record an "lf_owner" iff this is a confined fd
		 * i.e. it cannot escape this process and will be
		 * F_UNLCKed before the owner exits.  (This is
		 * the implicit guarantee needed to ensure lf_owner
		 * remains a valid reference here.)
		 */
		if (ap->a_flags & F_OFD_LOCK) {
			struct fileglob *fg = (void *)lock->lf_id;
			if (fg->fg_lflags & FG_CONFINED) {
				lock->lf_owner = current_proc();
			}
		}
		error = lf_setlock(lock, ap->a_timeout);
		break;

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		break;

	case F_GETLK:
		error = lf_getlock(lock, fl, -1);
		FREE(lock, M_LOCKF);
		break;

	case F_GETLKPID:
		error = lf_getlock(lock, fl, fl->l_pid);
		FREE(lock, M_LOCKF);
		break;

	default:
		FREE(lock, M_LOCKF);
		error = EINVAL;
		break;
	}
	lck_mtx_unlock(&vp->v_lock);    /* done manipulating the list */

	LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: normal exit: %d\n", error);
	return error;
}
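
/*
 * Illustrative sketch (not part of this file's build): how a userland
 * fcntl(2) request maps onto the start/end computation above.  With
 * l_whence = SEEK_SET, l_start = 100 and l_len = 50, the kernel computes
 * lf_start = 100 and lf_end = 149; l_len = 0 means "through end of file"
 * and becomes lf_end = -1.  The file path below is hypothetical.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int
lock_range_example(void)
{
	int fd = open("/tmp/example.dat", O_RDWR | O_CREAT, 0644);
	if (fd < 0) {
		return -1;
	}

	struct flock fl = {
		.l_type   = F_WRLCK,    /* exclusive record lock */
		.l_whence = SEEK_SET,
		.l_start  = 100,        /* lf_start = 100 */
		.l_len    = 50,         /* lf_end = 149 */
	};

	/* F_SETLK is the non-blocking path: EAGAIN instead of sleeping. */
	int rc = fcntl(fd, F_SETLK, &fl);

	/* l_len = 0 locks from l_start to EOF (lf_end = -1 in the kernel). */
	fl.l_start = 0;
	fl.l_len = 0;
	if (rc == 0) {
		rc = fcntl(fd, F_SETLKW, &fl);  /* blocking variant */
	}

	close(fd);      /* closing drops this process's POSIX record locks */
	return rc;
}
#endif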

/*
 * Empty the queue of msleeping requests for a lock on the given vnode.
 * Called with the vnode already locked.  Used for forced unmount, where
 * a flock(2) invoker sleeping on a blocked lock holds an iocount reference
 * that prevents the vnode from ever being drained.  Force unmounting wins.
 */
void
lf_abort_advlocks(vnode_t vp)
{
	struct lockf *lock;

	if ((lock = vp->v_lockf) == NULL) {
		return;
	}

	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);

	if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
		struct lockf *tlock;

		TAILQ_FOREACH(tlock, &lock->lf_blkhd, lf_block) {
			/*
			 * Setting this flag should cause all
			 * currently blocked F_SETLK requests to
			 * return to userland with an errno.
			 */
			tlock->lf_flags |= F_ABORT;
		}
		lf_wakelock(lock, TRUE);
	}
}

/*
 * Take any lock attempts which are currently blocked by a given lock ("from")
 * and mark them as blocked by a different lock ("to").  Used in the case
 * where a byte range currently occupied by "from" is to be occupied by "to."
 */
static void
lf_move_blocked(struct lockf *to, struct lockf *from)
{
	struct lockf *tlock;

	TAILQ_FOREACH(tlock, &from->lf_blkhd, lf_block) {
		tlock->lf_next = to;
	}

	TAILQ_CONCAT(&to->lf_blkhd, &from->lf_blkhd, lf_block);
}

/*
 * lf_coalesce_adjacent
 *
 * Description:	Helper function: when setting a lock, coalesce adjacent
 *		locks.  Needed because adjacent locks are not overlapping,
 *		but POSIX requires that they be coalesced.
 *
 * Parameters:	lock			The new lock which may be adjacent
 *					to already locked regions, and which
 *					should therefore be coalesced with them
 *
 * Returns:	<void>
 */
static void
lf_coalesce_adjacent(struct lockf *lock)
{
	struct lockf **lf = lock->lf_head;

	while (*lf != NOLOCKF) {
		/* reject locks that obviously could not be coalesced */
		if ((*lf == lock) ||
		    ((*lf)->lf_id != lock->lf_id) ||
		    ((*lf)->lf_type != lock->lf_type)) {
			lf = &(*lf)->lf_next;
			continue;
		}

		/*
		 * NOTE: Assumes that if two locks are adjacent on the number line
		 * and belong to the same owner, then they are adjacent on the list.
		 */

		/* If the lock ends adjacent to us, we can coalesce it */
		if ((*lf)->lf_end != -1 &&
		    ((*lf)->lf_end + 1) == lock->lf_start) {
			struct lockf *adjacent = *lf;

			LOCKF_DEBUG(LF_DBG_LIST, "lf_coalesce_adjacent: coalesce adjacent previous\n");
			lock->lf_start = (*lf)->lf_start;
			*lf = lock;
			lf = &(*lf)->lf_next;

			lf_move_blocked(lock, adjacent);

			FREE(adjacent, M_LOCKF);
			continue;
		}
		/* If the lock starts adjacent to us, we can coalesce it */
		if (lock->lf_end != -1 &&
		    (lock->lf_end + 1) == (*lf)->lf_start) {
			struct lockf *adjacent = *lf;

			LOCKF_DEBUG(LF_DBG_LIST, "lf_coalesce_adjacent: coalesce adjacent following\n");
			lock->lf_end = (*lf)->lf_end;
			lock->lf_next = (*lf)->lf_next;
			lf = &lock->lf_next;

			lf_move_blocked(lock, adjacent);

			FREE(adjacent, M_LOCKF);
			continue;
		}

		/* no matching conditions; go on to next lock */
		lf = &(*lf)->lf_next;
	}
}
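
/*
 * Illustrative sketch (not part of this file's build): the adjacency test
 * used above, pulled out into a standalone helper.  Two same-owner,
 * same-type ranges coalesce only when one ends exactly one byte before the
 * other begins; for example [0, 99] and [100, 199] merge into [0, 199],
 * while [0, 99] and [101, 199] do not.
 */
#if 0
#include <stdbool.h>
#include <sys/types.h>

static bool
ranges_are_adjacent(off_t a_end, off_t b_start)
{
	/* an end of -1 means "through EOF" and can never have a following neighbour */
	return a_end != -1 && a_end + 1 == b_start;
}
#endif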

/*
 * lf_setlock
 *
 * Description:	Set a byte-range lock.
 *
 * Parameters:	lock			The lock structure describing the lock
 *					to be set; allocated by the caller, it
 *					will be linked into the lock list if
 *					the set is successful, and freed if the
 *					set is unsuccessful.
 *
 *		timeout			Timeout specified in the case of
 *					F_SETLKWTIMEOUT.
 *
 * Returns:	0			Success
 *		EAGAIN
 *		EDEADLK
 *		ETIMEDOUT
 *	lf_split:ENOLCK
 *	lf_clearlock:ENOLCK
 *
 * Notes:	We add the lock to the provisional lock list.  We do not
 *		coalesce at this time; this has implications for other lock
 *		requestors in the blocker search mechanism.
 */
static int
lf_setlock(struct lockf *lock, struct timespec *timeout)
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static const char lockstr[] = "lockf";
	int priority, needtolink, error;
	struct vnode *vp = lock->lf_vnode;
	overlap_t ovcase;

#ifdef LOCKF_DEBUGGING
	if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
		lf_print("lf_setlock", lock);
		lf_printlist("lf_setlock(in)", lock);
	}
#endif /* LOCKF_DEBUGGING */
	LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p Looking for deadlock, vnode %p\n", lock, lock->lf_vnode);

	/*
	 * Set the priority
	 */
	priority = PZERO + 1;
	if (lock->lf_type == F_WRLCK) {
		priority += 4;
	}
	priority |= PCATCH;
scan:
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock, -1))) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			DTRACE_FSINFO(advlock__nowait, vnode_t, vp);
			FREE(lock, M_LOCKF);
			return EAGAIN;
		}

		LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p found blocking lock %p\n", lock, block);

		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 *
		 * OFD byte-range locks currently do NOT support
		 * deadlock detection.
		 *
		 * For POSIX byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			lck_mtx_lock(&lf_dead_lock);

			/* The blocked process is waiting on something */
			struct proc *wproc = block->lf_owner;
			proc_lock(wproc);

			LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p owned by pid %d\n", lock, proc_pid(wproc));

			struct uthread *ut;
			TAILQ_FOREACH(ut, &wproc->p_uthlist, uu_list) {
				/*
				 * If the thread is (a) asleep (uu_wchan != 0)
				 * and (b) in this code (uu_wmesg == lockstr)
				 * then check to see if the lock is blocked behind
				 * someone blocked behind us.
				 *
				 * Note: (i) vp->v_lock is held, preventing other
				 * threads from mutating the blocking list for our vnode.
				 * and (ii) the proc_lock is held i.e the thread list
				 * is immutable.
				 *
				 * HOWEVER some thread in wproc might be sleeping on a lockf
				 * structure for a different vnode, and be woken at any
				 * time. Thus the waitblock list could mutate while
				 * it's being inspected by this thread, and what
				 * ut->uu_wchan was just pointing at could even be freed.
				 *
				 * Nevertheless this is safe here because of lf_dead_lock; if
				 * any thread blocked with uu_wmesg == lockstr wakes (see below)
				 * it will try to acquire lf_dead_lock which is already held
				 * here. Holding that lock prevents the lockf structure being
				 * pointed at by ut->uu_wchan from going away. Thus the vnode
				 * involved can be found and locked, and the corresponding
				 * blocking chain can then be examined safely.
				 */
				const struct lockf *waitblock = (const void *)ut->uu_wchan;
				if ((waitblock != NULL) && (ut->uu_wmesg == lockstr)) {
					LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p which is also blocked on lock %p vnode %p\n", lock, waitblock, waitblock->lf_vnode);

					vnode_t othervp = NULL;
					if (waitblock->lf_vnode != vp) {
						/*
						 * This thread in wproc is waiting for a lock
						 * on a different vnode; grab the lock on it
						 * that protects lf_next while we examine it.
						 */
						othervp = waitblock->lf_vnode;
						if (!lck_mtx_try_lock(&othervp->v_lock)) {
							/*
							 * avoid kernel deadlock: drop all
							 * locks, pause for a bit to let the
							 * other thread do what it needs to do,
							 * then (because we drop and retake
							 * v_lock) retry the scan.
							 */
							proc_unlock(wproc);
							lck_mtx_unlock(&lf_dead_lock);
							static struct timespec ts = {
								.tv_sec = 0,
								.tv_nsec = 2 * NSEC_PER_MSEC,
							};
							static const char pausestr[] = "lockf:pause";
							(void) msleep(lock, &vp->v_lock, priority, pausestr, &ts);
							LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p contention for vp %p => restart\n", lock, othervp);
							goto scan;
						}
					}

					/*
					 * Get the lock blocking the lock
					 * which would block us, and make
					 * certain it hasn't become unblocked
					 * (been granted, e.g. between the time
					 * we called lf_getblock, and the time
					 * we successfully acquired the
					 * proc_lock).
					 */
					const struct lockf *nextblock = waitblock->lf_next;
					if (nextblock == NULL) {
						if (othervp) {
							lck_mtx_unlock(&othervp->v_lock);
						}
						LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p with waitblock %p and no lf_next; othervp %p\n", lock, waitblock, othervp);
						continue;
					}
					LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p which is also blocked on lock %p vnode %p\n", lock, nextblock, nextblock->lf_vnode);

					/*
					 * Make sure it's an advisory range
					 * lock and not any other kind of lock;
					 * if we mix lock types, it's our own
					 * fault.
					 */
					if ((nextblock->lf_flags & F_POSIX) == 0) {
						if (othervp) {
							lck_mtx_unlock(&othervp->v_lock);
						}
						continue;
					}

					/*
					 * If the owner of the lock that's
					 * blocking a lock that's blocking us
					 * getting the requested lock, then we
					 * would deadlock, so error out.
					 */
					struct proc *bproc = nextblock->lf_owner;
					const boolean_t deadlocked = bproc == lock->lf_owner;

					if (othervp) {
						lck_mtx_unlock(&othervp->v_lock);
					}
					LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p owned by pid %d\n", lock, proc_pid(bproc));
					if (deadlocked) {
						LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p which is me, so EDEADLK\n", lock);
						proc_unlock(wproc);
						lck_mtx_unlock(&lf_dead_lock);
						FREE(lock, M_LOCKF);
						return EDEADLK;
					}
				}
				LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p bottom of thread loop\n", lock);
			}
			proc_unlock(wproc);
			lck_mtx_unlock(&lf_dead_lock);
		}

		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			if ((error = lf_clearlock(lock)) != 0) {
				FREE(lock, M_LOCKF);
				return error;
			}
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);

		if (!(lock->lf_flags & F_FLOCK)) {
			block->lf_flags &= ~F_WAKE1_SAFE;
		}

#if IMPORTANCE_INHERITANCE
		/*
		 * Importance donation is done only for cases where the
		 * owning task can be unambiguously determined.
		 *
		 * POSIX type locks are not inherited by child processes;
		 * we maintain a 1:1 mapping between a lock and its owning
		 * process.
		 *
		 * Flock type locks are inherited across fork() and there is
		 * no 1:1 mapping in the general case.  However, the fileglobs
		 * used by OFD locks *may* be confined to the process that
		 * created them, and thus have an "owner", in which case
		 * we also attempt importance donation.
		 */
		if ((lock->lf_flags & block->lf_flags & F_POSIX) != 0) {
			lf_boost_blocking_proc(lock, block);
		} else if ((lock->lf_flags & block->lf_flags & F_OFD_LOCK) &&
		    lock->lf_owner != block->lf_owner &&
		    NULL != lock->lf_owner && NULL != block->lf_owner) {
			lf_boost_blocking_proc(lock, block);
		}
#endif /* IMPORTANCE_INHERITANCE */

#ifdef LOCKF_DEBUGGING
		if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock(block)", block);
		}
#endif /* LOCKF_DEBUGGING */
		DTRACE_FSINFO(advlock__wait, vnode_t, vp);

		if (lock->lf_flags & F_POSIX) {
			error = msleep(lock, &vp->v_lock, priority, lockstr, timeout);
			/*
			 * Ensure that 'lock' doesn't get mutated or freed if a
			 * wakeup occurs while hunting for deadlocks (and holding
			 * lf_dead_lock - see above)
			 */
			lck_mtx_lock(&lf_dead_lock);
			lck_mtx_unlock(&lf_dead_lock);
		} else {
			static const char lockstr_np[] = "lockf:np";
			error = msleep(lock, &vp->v_lock, priority, lockstr_np, timeout);
		}

		if (error == 0 && (lock->lf_flags & F_ABORT) != 0) {
			error = EBADF;
		}

		if (lock->lf_next) {
			/*
			 * lf_wakelock() always sets wakelock->lf_next to
			 * NULL before a wakeup; so we've been woken early
			 * - perhaps by a debugger, signal or other event.
			 *
			 * Remove 'lock' from the block list (avoids double-add
			 * in the spurious case, which would create a cycle)
			 */
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
#if IMPORTANCE_INHERITANCE
			/*
			 * Adjust the boost on lf_next.
			 */
			lf_adjust_assertion(lock->lf_next);
#endif /* IMPORTANCE_INHERITANCE */
			lock->lf_next = NULL;

			if (error == 0) {
				/*
				 * If this was a spurious wakeup, retry
				 */
				printf("%s: spurious wakeup, retrying lock\n",
				    __func__);
				continue;
			}
		}

		if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
			if ((block = lf_getblock(lock, -1)) != NULL) {
				lf_move_blocked(block, lock);
			}
		}

		if (error) {
			if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
				lf_wakelock(lock, TRUE);
			}
			FREE(lock, M_LOCKF);
			/* Return ETIMEDOUT if timeout occurred. */
			if (error == EWOULDBLOCK) {
				error = ETIMEDOUT;
			}
			return error;
		}
	}

	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase) {
			block = overlap->lf_next;
		}
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case OVERLAP_NONE:
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case OVERLAP_EQUALS_LOCK:
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap, TRUE);
			}
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for lf_coalesce_adjacent() */
			break;

		case OVERLAP_CONTAINS_LOCK:
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				FREE(lock, M_LOCKF);
				lock = overlap; /* for lf_coalesce_adjacent() */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else {
				/*
				 * If we can't split the lock, we can't
				 * grant it.  Claim a system limit for the
				 * resource shortage.
				 */
				if (lf_split(overlap, lock)) {
					FREE(lock, M_LOCKF);
					return ENOLCK;
				}
			}
			lf_wakelock(overlap, TRUE);
			break;

		case OVERLAP_CONTAINED_BY_LOCK:
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap, TRUE);
			} else {
				while (!TAILQ_EMPTY(&overlap->lf_blkhd)) {
					ltmp = TAILQ_FIRST(&overlap->lf_blkhd);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
					ltmp->lf_next = lock;
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else {
				*prev = overlap->lf_next;
			}
			FREE(overlap, M_LOCKF);
			continue;

		case OVERLAP_STARTS_BEFORE_LOCK:
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap, TRUE);
			needtolink = 0;
			continue;

		case OVERLAP_ENDS_AFTER_LOCK:
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap, TRUE);
			break;
		}
		break;
	}
	/* Coalesce adjacent locks with identical attributes */
	lf_coalesce_adjacent(lock);
#ifdef LOCKF_DEBUGGING
	if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock(out)", lock);
	}
#endif /* LOCKF_DEBUGGING */
	return 0;
}
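
/*
 * Illustrative sketch (not part of this file's build): the classic two-lock
 * deadlock that the POSIX-lock cycle walk above is meant to catch.  Each
 * process takes one byte and then blocks (F_SETLKW) on the byte held by the
 * other; when the cycle closes, one of the blocked fcntl() calls fails with
 * EDEADLK instead of sleeping forever.  The path and the sleep()-based
 * ordering below are hypothetical and only for exposition.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int
lock_byte(int fd, off_t which, int cmd)
{
	struct flock fl = {
		.l_type   = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start  = which,
		.l_len    = 1,
	};
	return fcntl(fd, cmd, &fl);
}

static void
deadlock_example(void)
{
	int fd = open("/tmp/example.dat", O_RDWR | O_CREAT, 0644);

	if (fork() == 0) {
		lock_byte(fd, 0, F_SETLKW);     /* child: hold byte 0 */
		sleep(1);
		lock_byte(fd, 1, F_SETLKW);     /* ...then wait for byte 1 */
		_exit(0);
	}
	lock_byte(fd, 1, F_SETLKW);             /* parent: hold byte 1 */
	sleep(1);
	/* One of the two blocked F_SETLKW calls returns -1 with errno == EDEADLK. */
	lock_byte(fd, 0, F_SETLKW);
	close(fd);
}
#endif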

/*
 * lf_clearlock
 *
 * Description:	Remove a byte-range lock on a vnode.  Generally, find the
 *		lock (or an overlap to that lock) and remove it (or shrink
 *		it), then wakeup anyone we can.
 *
 * Parameters:	unlock			The lock to clear
 *
 * Returns:	0			Success
 *	lf_split:ENOLCK
 *
 * Notes:	A caller may unlock all the locks owned by the caller by
 *		specifying the entire file range; locks owned by other
 *		callers are not affected by this operation.
 */
static int
lf_clearlock(struct lockf *unlock)
{
	struct lockf **head = unlock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	overlap_t ovcase;

	if (lf == NOLOCKF) {
		return 0;
	}
#ifdef LOCKF_DEBUGGING
	if (unlock->lf_type != F_UNLCK) {
		panic("lf_clearlock: bad type");
	}
	if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
		lf_print("lf_clearlock", unlock);
	}
#endif /* LOCKF_DEBUGGING */
	while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap)) != OVERLAP_NONE) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap, FALSE);
#if IMPORTANCE_INHERITANCE
		if (overlap->lf_boosted == LF_BOOSTED) {
			lf_drop_assertion(overlap);
		}
#endif /* IMPORTANCE_INHERITANCE */

		switch (ovcase) {
		case OVERLAP_NONE:      /* satisfy compiler enum/switch */
			break;

		case OVERLAP_EQUALS_LOCK:
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case OVERLAP_CONTAINS_LOCK: /* split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			/*
			 * If we can't split the lock, we can't grant it.
			 * Claim a system limit for the resource shortage.
			 */
			if (lf_split(overlap, unlock)) {
				return ENOLCK;
			}
			overlap->lf_next = unlock->lf_next;
			break;

		case OVERLAP_CONTAINED_BY_LOCK:
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			continue;

		case OVERLAP_STARTS_BEFORE_LOCK:
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case OVERLAP_ENDS_AFTER_LOCK:
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUGGING
	if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
		lf_printlist("lf_clearlock", unlock);
	}
#endif /* LOCKF_DEBUGGING */
	return 0;
}
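
/*
 * Usage note (added for exposition): a single F_UNLCK over the entire file
 * (l_start = 0, l_len = 0) walks every overlap owned by the caller through
 * the switch above, so "release everything I hold" needs no bookkeeping in
 * userland; locks owned by other callers are never touched.
 */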

/*
 * lf_getlock
 *
 * Description:	Check whether there is a blocking lock, and if so return
 *		its process identifier into the lock being requested.
 *
 * Parameters:	lock			Pointer to lock to test for blocks
 *		fl			Pointer to flock structure to receive
 *					the blocking lock information, if a
 *					blocking lock is found.
 *		matchpid		-1, or pid value to match in lookup.
 *
 * Returns:	0			Success
 *
 * Implicit Returns:
 *		*fl			Contents modified to reflect the
 *					blocking lock, if one is found; not
 *					modified otherwise
 *
 * Notes:	fl->l_pid will be (-1) for file locks and will only be set to
 *		the blocking process ID for advisory record locks.
 */
static int
lf_getlock(struct lockf *lock, struct flock *fl, pid_t matchpid)
{
	struct lockf *block;

#ifdef LOCKF_DEBUGGING
	if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
		lf_print("lf_getlock", lock);
	}
#endif /* LOCKF_DEBUGGING */

	if ((block = lf_getblock(lock, matchpid))) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1) {
			fl->l_len = 0;
		} else {
			fl->l_len = block->lf_end - block->lf_start + 1;
		}
		if (NULL != block->lf_owner) {
			/*
			 * lf_owner is only non-NULL when the lock
			 * "owner" can be unambiguously determined
			 */
			fl->l_pid = proc_pid(block->lf_owner);
		} else {
			fl->l_pid = -1;
		}
	} else {
		fl->l_type = F_UNLCK;
	}

	return 0;
}
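
/*
 * Illustrative sketch (not part of this file's build): probing for a blocker
 * with F_GETLK, which lands in lf_getlock() above.  On return, l_type is
 * either F_UNLCK (the range could be locked right now) or describes the
 * first blocking lock, with l_pid filled in only when the owner is
 * unambiguous (POSIX locks, or OFD locks on process-confined fds).
 */
#if 0
#include <fcntl.h>
#include <stdio.h>

static void
probe_lock(int fd)
{
	struct flock fl = {
		.l_type   = F_WRLCK,    /* "could I take an exclusive lock?" */
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 0,          /* whole file */
	};

	if (fcntl(fd, F_GETLK, &fl) == 0) {
		if (fl.l_type == F_UNLCK) {
			printf("range is free\n");
		} else {
			printf("blocked by pid %d over [%lld, +%lld)\n",
			    (int)fl.l_pid, (long long)fl.l_start,
			    (long long)fl.l_len);
		}
	}
}
#endif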

/*
 * lf_getblock
 *
 * Description:	Walk the list of locks for an inode and return the first
 *		blocking lock.  A lock is considered blocking if we are not
 *		the lock owner; otherwise, we are permitted to upgrade or
 *		downgrade it, and it's not considered blocking.
 *
 * Parameters:	lock			The lock for which we are interested
 *					in obtaining the blocking lock, if any
 *		matchpid		-1, or pid value to match in lookup.
 *
 * Returns:	NOLOCKF			No blocking lock exists
 *		!NOLOCKF		The address of the blocking lock's
 *					struct lockf.
 */
static struct lockf *
lf_getblock(struct lockf *lock, pid_t matchpid)
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);

	for (prev = lock->lf_head;
	    lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != OVERLAP_NONE;
	    lf = overlap->lf_next) {
		/*
		 * Found an overlap.
		 *
		 * If we're matching pids, and it's a record lock,
		 * or it's an OFD lock on a process-confined fd,
		 * but the pid doesn't match, then keep on looking ..
		 */
		if (matchpid != -1 &&
		    (overlap->lf_flags & (F_POSIX | F_OFD_LOCK)) != 0 &&
		    proc_pid(overlap->lf_owner) != matchpid) {
			continue;
		}

		/*
		 * does it block us?
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK)) {
			return overlap;
		}
	}
	return NOLOCKF;
}

/*
 * lf_findoverlap
 *
 * Description:	Walk the list of locks to find an overlapping lock (if any).
 *
 * Parameters:	lf			First lock on lock list
 *		lock			The lock we are checking for an overlap
 *		type			Check type: SELF or OTHERS
 *		prev			pointer to pointer pointer to contain
 *					address of pointer to previous lock
 *					pointer to overlapping lock, if overlap
 *		overlap			pointer to pointer to contain address
 *					of overlapping lock
 *
 * Returns:	OVERLAP_NONE
 *		OVERLAP_EQUALS_LOCK
 *		OVERLAP_CONTAINS_LOCK
 *		OVERLAP_CONTAINED_BY_LOCK
 *		OVERLAP_STARTS_BEFORE_LOCK
 *		OVERLAP_ENDS_AFTER_LOCK
 *
 * Implicit Returns:
 *		*prev			The address of the next pointer in the
 *					lock previous to the overlapping lock;
 *					this is generally used to relink the
 *					lock list, avoiding a second iteration.
 *		*overlap		The pointer to the overlapping lock
 *					itself; this is used to return data in
 *					the check == OTHERS case, and for the
 *					caller to modify the overlapping lock,
 *					in the check == SELF case
 *
 * Note:	This returns only the FIRST overlapping lock.  There may be
 *		more than one.  lf_getlock will return the first blocking lock,
 *		while lf_setlock will iterate over all overlapping locks to
 *		obtain all the blocking locks.
 *
 *		The check parameter can be SELF, meaning we are looking for
 *		overlapping locks owned by us, or it can be OTHERS, meaning
 *		we are looking for overlapping locks owned by someone else so
 *		we can report a blocking lock on an F_GETLK request.
 *
 *		The value of *overlap and *prev are modified, even if there is
 *		no overlapping lock found; always check the return code.
 */
static overlap_t
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf ***prev, struct lockf **overlap)
{
	off_t start, end;
	int found_self = 0;

	*overlap = lf;
	if (lf == NOLOCKF) {
		return 0;
	}
#ifdef LOCKF_DEBUGGING
	if (LOCKF_DEBUGP(LF_DBG_LIST)) {
		lf_print("lf_findoverlap: looking for overlap in", lock);
	}
#endif /* LOCKF_DEBUGGING */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			/*
			 * Locks belonging to one process are adjacent on the
			 * list, so if we've found any locks belonging to us,
			 * and we're now seeing something else, then we've
			 * examined all "self" locks.  Note that bailing out
			 * here is quite important; for coalescing, we assume
			 * numerically adjacent locks from the same owner to
			 * be adjacent on the list.
			 */
			if ((type & SELF) && found_self) {
				return OVERLAP_NONE;
			}

			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}

		if ((type & SELF)) {
			found_self = 1;
		}

#ifdef LOCKF_DEBUGGING
		if (LOCKF_DEBUGP(LF_DBG_LIST)) {
			lf_print("\tchecking", lf);
		}
#endif /* LOCKF_DEBUGGING */
		/*
		 * OK, check for overlap
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
			LOCKF_DEBUG(LF_DBG_LIST, "no overlap\n");

			/*
			 * NOTE: assumes that locks for the same process are
			 * nonintersecting and ordered.
			 */
			if ((type & SELF) && end != -1 && lf->lf_start > end) {
				return OVERLAP_NONE;
			}
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			LOCKF_DEBUG(LF_DBG_LIST, "overlap == lock\n");
			return OVERLAP_EQUALS_LOCK;
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			LOCKF_DEBUG(LF_DBG_LIST, "overlap contains lock\n");
			return OVERLAP_CONTAINS_LOCK;
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		    (lf->lf_end != -1 && end >= lf->lf_end))) {
			LOCKF_DEBUG(LF_DBG_LIST, "lock contains overlap\n");
			return OVERLAP_CONTAINED_BY_LOCK;
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			LOCKF_DEBUG(LF_DBG_LIST, "overlap starts before lock\n");
			return OVERLAP_STARTS_BEFORE_LOCK;
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			LOCKF_DEBUG(LF_DBG_LIST, "overlap ends after lock\n");
			return OVERLAP_ENDS_AFTER_LOCK;
		}

		/* Control flow should never reach here; all cases are covered above. */
		panic("lf_findoverlap: default");
	}
	return OVERLAP_NONE;
}
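
/*
 * Illustrative picture (added for exposition, not in the original): the five
 * non-trivial returns of lf_findoverlap(), with 'lock' as the request and
 * 'lf' as the existing lock being tested ('-' marks each extent):
 *
 *	OVERLAP_EQUALS_LOCK		lock:      |-------|
 *					lf:        |-------|
 *
 *	OVERLAP_CONTAINS_LOCK		lock:        |---|
 *					lf:        |-------|
 *
 *	OVERLAP_CONTAINED_BY_LOCK	lock:      |-------|
 *					lf:          |---|
 *
 *	OVERLAP_STARTS_BEFORE_LOCK	lock:        |-------|
 *					lf:        |-----|
 *
 *	OVERLAP_ENDS_AFTER_LOCK		lock:      |-----|
 *					lf:          |-------|
 *
 * An lf_end of -1 behaves as "extends to EOF" in all of the comparisons.
 */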

/*
 * lf_split
 *
 * Description:	Split a lock and a contained region into two or three locks
 *		as necessary.
 *
 * Parameters:	lock1			Lock to split
 *		lock2			Overlapping lock region requiring the
 *					split (upgrade/downgrade/unlock)
 *
 * Returns:	0			Success
 *		ENOLCK			No memory for new lock
 *
 * Implicit Returns:
 *		*lock1			Modified original lock
 *		*lock2			Overlapping lock (inserted into list)
 *		(new lock)		Potential new lock inserted into list
 *					if split results in 3 locks
 *
 * Notes:	This operation can only fail if the split would result in three
 *		locks, and there is insufficient memory to allocate the third
 *		lock; in that case, neither of the locks will be modified.
 */
static int
lf_split(struct lockf *lock1, struct lockf *lock2)
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUGGING
	if (LOCKF_DEBUGP(LF_DBG_LIST)) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUGGING */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return 0;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return 0;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	if (splitlock == NULL) {
		return ENOLCK;
	}
	bcopy(lock1, splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;

	return 0;
}
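
/*
 * Worked example (added for exposition, not in the original): unlocking the
 * middle of a held range is the three-lock case above.  If lock1 covers
 * [0, 99] and lock2 describes [40, 59], lock1 is trimmed to [0, 39] and a
 * newly allocated 'splitlock' covers [60, 99]; only this case can fail,
 * returning ENOLCK when the third lock cannot be allocated.
 */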

/*
 * lf_wakelock
 *
 * Wakeup a blocklist in the case of a downgrade or unlock, since others
 * waiting on the lock may now be able to acquire it.
 *
 * Parameters:	listhead		Lock list head on which waiters may
 *					have pending locks
 *
 * Returns:	<void>
 *
 * Notes:	This function iterates a list of locks and wakes all waiters,
 *		rather than only waiters for the contended regions.  Because
 *		of this, for heavily contended files, this can result in a
 *		"thundering herd" situation.  Refactoring the code could make
 *		this operation more efficient, if heavy contention ever results
 *		in a real-world performance problem.
 */
static void
lf_wakelock(struct lockf *listhead, boolean_t force_all)
{
	struct lockf *wakelock;
	boolean_t wake_all = TRUE;

	if (force_all == FALSE && (listhead->lf_flags & F_WAKE1_SAFE)) {
		wake_all = FALSE;
	}

	while (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
		wakelock = TAILQ_FIRST(&listhead->lf_blkhd);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);

		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUGGING
		if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
			lf_print("lf_wakelock: awakening", wakelock);
		}
#endif /* LOCKF_DEBUGGING */
		if (wake_all == FALSE) {
			/*
			 * If there are items on the list head block list,
			 * move them to the wakelock list instead, and then
			 * correct their lf_next pointers.
			 */
			if (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
				TAILQ_CONCAT(&wakelock->lf_blkhd, &listhead->lf_blkhd, lf_block);

				struct lockf *tlock;

				TAILQ_FOREACH(tlock, &wakelock->lf_blkhd, lf_block) {
					if (TAILQ_NEXT(tlock, lf_block) == tlock) {
						/* See rdar://10887303 */
						panic("cycle in wakelock list");
					}
					tlock->lf_next = wakelock;
				}
			}
		}
		wakeup(wakelock);

		if (wake_all == FALSE) {
			break;
		}
	}
}
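
/*
 * Usage note (added for exposition): when force_all is FALSE and every
 * waiter is flock(2)-style (F_WAKE1_SAFE still set on the list head), only
 * the first waiter is woken; the remaining waiters are handed to it via the
 * TAILQ_CONCAT above, which is what keeps a queue of whole-file locks from
 * producing the thundering herd described in the Notes.
 */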

#ifdef LOCKF_DEBUGGING
#define GET_LF_OWNER_PID(lf)    (proc_pid((lf)->lf_owner))

/*
 * lf_print DEBUG
 *
 * Print out a lock; lock information is prefixed by the string in 'tag'
 *
 * Parameters:	tag			A string tag for debugging
 *		lock			The lock whose information should be
 *					displayed
 *
 * Returns:	<void>
 */
void
lf_print(const char *tag, struct lockf *lock)
{
	printf("%s: lock %p for ", tag, (void *)lock);
	if (lock->lf_flags & F_POSIX) {
		printf("proc %p (owner %d)",
		    lock->lf_id, GET_LF_OWNER_PID(lock));
	} else if (lock->lf_flags & F_OFD_LOCK) {
		printf("fg %p (owner %d)",
		    lock->lf_id, GET_LF_OWNER_PID(lock));
	} else {
		printf("id %p", (void *)lock->lf_id);
	}
	if (lock->lf_vnode != 0) {
		printf(" in vno %p, %s, start 0x%016llx, end 0x%016llx",
		    lock->lf_vnode,
		    lock->lf_type == F_RDLCK ? "shared" :
		    lock->lf_type == F_WRLCK ? "exclusive" :
		    lock->lf_type == F_UNLCK ? "unlock" : "unknown",
		    (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
	} else {
		printf(" %s, start 0x%016llx, end 0x%016llx",
		    lock->lf_type == F_RDLCK ? "shared" :
		    lock->lf_type == F_WRLCK ? "exclusive" :
		    lock->lf_type == F_UNLCK ? "unlock" : "unknown",
		    (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
	}
	if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
		printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd));
	} else {
		printf("\n");
	}
}

/*
 * lf_printlist DEBUG
 *
 * Print out a lock list for the vnode associated with 'lock'; lock information
 * is prefixed by the string in 'tag'
 *
 * Parameters:	tag			A string tag for debugging
 *		lock			The lock whose vnode's lock list should
 *					be displayed
 *
 * Returns:	<void>
 */
void
lf_printlist(const char *tag, struct lockf *lock)
{
	struct lockf *lf, *blk;

	if (lock->lf_vnode == 0) {
		return;
	}

	printf("%s: Lock list for vno %p:\n",
	    tag, lock->lf_vnode);
	for (lf = lock->lf_vnode->v_lockf; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", (void *)lf);
		if (lf->lf_flags & F_POSIX) {
			printf("proc %p (owner %d)",
			    lf->lf_id, GET_LF_OWNER_PID(lf));
		} else if (lf->lf_flags & F_OFD_LOCK) {
			printf("fg %p (owner %d)",
			    lf->lf_id, GET_LF_OWNER_PID(lf));
		} else {
			printf("id %p", (void *)lf->lf_id);
		}
		printf(", %s, start 0x%016llx, end 0x%016llx",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			printf("\n\t\tlock request %p for ", (void *)blk);
			if (blk->lf_flags & F_POSIX) {
				printf("proc %p (owner %d)",
				    blk->lf_id, GET_LF_OWNER_PID(blk));
			} else if (blk->lf_flags & F_OFD_LOCK) {
				printf("fg %p (owner %d)",
				    blk->lf_id, GET_LF_OWNER_PID(blk));
			} else {
				printf("id %p", (void *)blk->lf_id);
			}
			printf(", %s, start 0x%016llx, end 0x%016llx",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", (intmax_t)blk->lf_start,
			    (intmax_t)blk->lf_end);
			if (!TAILQ_EMPTY(&blk->lf_blkhd)) {
				panic("lf_printlist: bad list");
			}
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUGGING */

#if IMPORTANCE_INHERITANCE

/*
 * lf_hold_assertion
 *
 * Call task importance hold assertion on the owner of the lock.
 *
 * Parameters:	block_task		Owner of the lock blocking
 *					current thread.
 *
 *		block			lock on which the current thread
 *					is blocking on.
 *
 * Returns:	<void>
 *
 * Notes:	The task reference on block_task does not need to be held,
 *		since the current thread holds the vnode lock and block_task
 *		holds a file lock; removing the file lock in exit therefore
 *		requires block_task to grab the vnode lock first.
 */
static void
lf_hold_assertion(task_t block_task, struct lockf *block)
{
	if (task_importance_hold_file_lock_assertion(block_task, 1) == 0) {
		block->lf_boosted = LF_BOOSTED;
		LOCKF_DEBUG(LF_DBG_IMPINH,
		    "lf: importance hold file lock assert on pid %d lock %p\n",
		    proc_pid(block->lf_owner), block);
	}
}

/*
 * lf_jump_to_queue_head
 *
 * Jump the lock from the tail of the block queue to the head of
 * the queue.
 *
 * Parameters:	block			lockf struct containing the
 *					block queue.
 *		lock			lockf struct to be jumped to the
 *					head.
 *
 * Returns:	<void>
 */
static void
lf_jump_to_queue_head(struct lockf *block, struct lockf *lock)
{
	/* Move the lock to the head of the block queue. */
	TAILQ_REMOVE(&block->lf_blkhd, lock, lf_block);
	TAILQ_INSERT_HEAD(&block->lf_blkhd, lock, lf_block);
}

/*
 * lf_drop_assertion
 *
 * Drops the task hold assertion.
 *
 * Parameters:	block			lockf struct holding the assertion.
 *
 * Returns:	<void>
 */
static void
lf_drop_assertion(struct lockf *block)
{
	LOCKF_DEBUG(LF_DBG_IMPINH, "lf: %d: dropping assertion for lock %p\n",
	    proc_pid(block->lf_owner), block);

	task_t current_task = proc_task(block->lf_owner);
	task_importance_drop_file_lock_assertion(current_task, 1);
	block->lf_boosted = LF_NOT_BOOSTED;
}

/*
 * lf_adjust_assertion
 *
 * Adjusts importance assertion of file lock. Goes through
 * all the blocking locks and checks if the file lock needs
 * to be boosted anymore.
 *
 * Parameters:	block			lockf structure which needs to be adjusted.
 *
 * Returns:	<void>
 */
static void
lf_adjust_assertion(struct lockf *block)
{
	boolean_t drop_boost = TRUE;
	struct lockf *next;

	/* Return if the lock is not boosted */
	if (block->lf_boosted == LF_NOT_BOOSTED) {
		return;
	}

	TAILQ_FOREACH(next, &block->lf_blkhd, lf_block) {
		/* Check if block and next are same type of locks */
		if (((block->lf_flags & next->lf_flags & F_POSIX) != 0) ||
		    ((block->lf_flags & next->lf_flags & F_OFD_LOCK) &&
		    (block->lf_owner != next->lf_owner) &&
		    (NULL != block->lf_owner && NULL != next->lf_owner))) {
			/* Check if next would be boosting block */
			if (task_is_importance_donor(proc_task(next->lf_owner)) &&
			    task_is_importance_receiver_type(proc_task(block->lf_owner))) {
				/* Found a lock boosting block */
				drop_boost = FALSE;
				break;
			}
		}
	}

	if (drop_boost) {
		lf_drop_assertion(block);
	}
}
static void
lf_boost_blocking_proc(struct lockf *lock, struct lockf *block)
{
	task_t ltask = proc_task(lock->lf_owner);
	task_t btask = proc_task(block->lf_owner);

	/*
	 * Check if ltask can donate importance. The
	 * check of imp_donor bit is done without holding
	 * any lock. The value may change after you read it,
	 * but it is ok to boost a task while someone else is
	 * unboosting you.
	 *
	 * TODO: Support live inheritance on file locks.
	 */
	if (task_is_importance_donor(ltask)) {
		LOCKF_DEBUG(LF_DBG_IMPINH,
		    "lf: %d: attempt to boost pid %d that holds lock %p\n",
		    proc_pid(lock->lf_owner), proc_pid(block->lf_owner), block);

		if (block->lf_boosted != LF_BOOSTED &&
		    task_is_importance_receiver_type(btask)) {
			lf_hold_assertion(btask, block);
		}
		lf_jump_to_queue_head(block, lock);
	}
}
#endif /* IMPORTANCE_INHERITANCE */