1 /*
2 * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1989, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * This code is derived from software contributed to Berkeley by
33 * Scooter Morris at Genentech Inc.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 * 4. Neither the name of the University nor the names of its contributors
44 * may be used to endorse or promote products derived from this software
45 * without specific prior written permission.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
48 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57 * SUCH DAMAGE.
58 *
59 * @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94
60 */
61
62 #include <sys/cdefs.h>
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/kernel.h>
66 #include <sys/lock.h>
67 #include <sys/mount.h>
68 #include <sys/proc.h>
69 #include <sys/signalvar.h>
70 #include <sys/unistd.h>
71 #include <sys/user.h>
72 #include <sys/vnode.h>
73 #include <sys/vnode_internal.h>
74 #include <sys/vnode_if.h>
75 #include <sys/malloc.h>
76 #include <sys/fcntl.h>
77 #include <sys/lockf.h>
78 #include <sys/sdt.h>
79 #include <kern/policy_internal.h>
80
81 #include <sys/file_internal.h>
82
83 #if (DEVELOPMENT || DEBUG)
84 #define LOCKF_DEBUGGING 1
85 #endif
86
87 #ifdef LOCKF_DEBUGGING
88 #include <sys/sysctl.h>
89 void lf_print(const char *tag, struct lockf *lock);
90 void lf_printlist(const char *tag, struct lockf *lock);
91
92 #define LF_DBG_LOCKOP (1 << 0) /* setlk, getlk, clearlk */
93 #define LF_DBG_LIST (1 << 1) /* split, coalesce */
94 #define LF_DBG_IMPINH (1 << 2) /* importance inheritance */
95 #define LF_DBG_TRACE (1 << 3) /* errors, exit */
96 #define LF_DBG_DEADLOCK (1 << 4) /* deadlock detection */
97
98 static int lockf_debug = 0; /* was 2, could be 3 ;-) */
99 SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW | CTLFLAG_LOCKED, &lockf_debug, 0, "");
100
101 /*
102 * If the selector is set, then output the debugging diagnostic.
103 */
104 #define LOCKF_DEBUG(mask, ...) \
105 do { \
106 if ((mask) & lockf_debug) { \
107 printf("%s>", __FUNCTION__); \
108 printf(__VA_ARGS__); \
109 } \
110 } while(0)
111
112 #define LOCKF_DEBUGP(mask) \
113 ({ \
114 ((mask) & lockf_debug); \
115 })
116 #else /* !LOCKF_DEBUGGING */
117 #define LOCKF_DEBUG(mask, ...) /* mask */
118 #endif /* !LOCKF_DEBUGGING */
119
120 /*
121 * If you need accounting for KM_LOCKF consider using
122 * ZONE_VIEW_DEFINE to define a view.
123 */
124 #define KM_LOCKF KHEAP_DEFAULT
125
126 #define NOLOCKF (struct lockf *)0
127 #define SELF 0x1
128 #define OTHERS 0x2
129 #define OFF_MAX 0x7fffffffffffffffULL /* max off_t */
130
131 /*
132 * Overlapping lock states
133 */
134 typedef enum {
135 OVERLAP_NONE = 0,
136 OVERLAP_EQUALS_LOCK,
137 OVERLAP_CONTAINS_LOCK,
138 OVERLAP_CONTAINED_BY_LOCK,
139 OVERLAP_STARTS_BEFORE_LOCK,
140 OVERLAP_ENDS_AFTER_LOCK
141 } overlap_t;
142
143 static int lf_clearlock(struct lockf *);
144 static overlap_t lf_findoverlap(struct lockf *,
145 struct lockf *, int, struct lockf ***, struct lockf **);
146 static struct lockf *lf_getblock(struct lockf *, pid_t);
147 static int lf_getlock(struct lockf *, struct flock *, pid_t);
148 static int lf_setlock(struct lockf *, struct timespec *);
149 static int lf_split(struct lockf *, struct lockf *);
150 static void lf_wakelock(struct lockf *, boolean_t);
151 #if IMPORTANCE_INHERITANCE
152 static void lf_hold_assertion(task_t, struct lockf *);
153 static void lf_jump_to_queue_head(struct lockf *, struct lockf *);
154 static void lf_drop_assertion(struct lockf *);
155 static void lf_boost_blocking_proc(struct lockf *, struct lockf *);
156 static void lf_adjust_assertion(struct lockf *block);
157 #endif /* IMPORTANCE_INHERITANCE */
158
159 static LCK_GRP_DECLARE(lf_dead_lock_grp, "lf_dead_lock");
160 static LCK_MTX_DECLARE(lf_dead_lock, &lf_dead_lock_grp);
161
162 /*
163 * lf_advlock
164 *
165 * Description: Advisory record locking support
166 *
167 * Parameters: ap Argument pointer to a vnop_advlock_args
168 * argument descriptor structure for the
169 * lock operation to be attempted.
170 *
171 * Returns: 0 Success
172 * EOVERFLOW
173 * EINVAL
174 * ENOLCK Number of locked regions exceeds limit
175 * lf_setlock:EAGAIN
176 * lf_setlock:EDEADLK
177 * lf_setlock:EINTR
178 * lf_setlock:ENOLCK
179 * lf_setlock:ETIMEDOUT
180 * lf_clearlock:ENOLCK
181 * vnode_size:???
182 *
183 * Notes: We return ENOLCK when we run out of memory to support locks; as
184 * such, there is no specific limit on the number of locks other
185 * than the amount of available resources.
186 */
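/*
 * Illustrative userland usage (a minimal sketch, not part of this file;
 * the path name is hypothetical): an F_SETLKW request like the one below
 * typically reaches lf_advlock() via fcntl(2) and the filesystem's
 * VNOP_ADVLOCK implementation.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int
 *	lock_first_page(void)
 *	{
 *		int fd = open("/tmp/example.dat", O_RDWR);
 *		struct flock fl = {
 *			.l_whence = SEEK_SET,
 *			.l_start  = 0,
 *			.l_len    = 4096,	// byte range [0, 4095]
 *			.l_type   = F_WRLCK,	// exclusive record lock
 *		};
 *		if (fd < 0 || fcntl(fd, F_SETLKW, &fl) == -1) {	// sleeps if blocked
 *			return -1;
 *		}
 *		return fd;	// caller unlocks with l_type = F_UNLCK, or via close(2)
 *	}
 */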
187 int
188 lf_advlock(struct vnop_advlock_args *ap)
189 {
190 struct vnode *vp = ap->a_vp;
191 struct flock *fl = ap->a_fl;
192 vfs_context_t context = ap->a_context;
193 struct lockf *lock;
194 off_t start, end, oadd;
195 u_quad_t size;
196 int error;
197 struct lockf **head = &vp->v_lockf;
198
199 /* XXX HFS may need a !vnode_isreg(vp) EISDIR error here */
200
201 /*
202 * Avoid the common case of unlocking when the vnode has no locks.
203 */
204 if (*head == (struct lockf *)0) {
205 if (ap->a_op != F_SETLK) {
206 fl->l_type = F_UNLCK;
207 LOCKF_DEBUG(LF_DBG_TRACE,
208 "lf_advlock: '%s' unlock without lock\n",
209 vfs_context_proc(context)->p_comm);
210 return 0;
211 }
212 }
213
214 /*
215 * Convert the flock structure into a start and end.
216 */
217 switch (fl->l_whence) {
218 case SEEK_SET:
219 case SEEK_CUR:
220 /*
221 * Caller is responsible for adding any necessary offset
222 * when SEEK_CUR is used.
223 */
224 start = fl->l_start;
225 break;
226
227 case SEEK_END:
228
229 /*
230 * It's OK to cast the u_quad_t to an off_t here, since they
231 * are the same storage size, and the value of the returned
232 * contents will never overflow into the sign bit. We need to
233 * do this because we will use size to force range checks.
234 */
235 if ((error = vnode_size(vp, (off_t *)&size, context))) {
236 LOCKF_DEBUG(LF_DBG_TRACE,
237 "lf_advlock: vnode_getattr failed: %d\n", error);
238 return error;
239 }
240
241 if (size > OFF_MAX ||
242 (fl->l_start > 0 &&
243 size > (u_quad_t)(OFF_MAX - fl->l_start))) {
244 return EOVERFLOW;
245 }
246 start = size + fl->l_start;
247 break;
248
249 default:
250 LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: unknown whence %d\n",
251 fl->l_whence);
252 return EINVAL;
253 }
254 if (start < 0) {
255 LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: start < 0 (%qd)\n",
256 start);
257 return EINVAL;
258 }
259 if (fl->l_len < 0) {
260 if (start == 0) {
261 LOCKF_DEBUG(LF_DBG_TRACE,
262 "lf_advlock: len < 0 & start == 0\n");
263 return EINVAL;
264 }
265 end = start - 1;
266 start += fl->l_len;
267 if (start < 0) {
268 LOCKF_DEBUG(LF_DBG_TRACE,
269 "lf_advlock: start < 0 (%qd)\n", start);
270 return EINVAL;
271 }
272 } else if (fl->l_len == 0) {
273 end = -1;
274 } else {
275 oadd = fl->l_len - 1;
276 if (oadd > (off_t)(OFF_MAX - start)) {
277 LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: overflow\n");
278 return EOVERFLOW;
279 }
280 end = start + oadd;
281 }
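/*
 * Worked examples of the conversion above (illustrative):
 *	l_whence=SEEK_SET, l_start=100, l_len=50  -> start=100, end=149
 *	l_whence=SEEK_SET, l_start=100, l_len=0   -> start=100, end=-1 (no upper bound)
 *	l_whence=SEEK_SET, l_start=100, l_len=-50 -> start=50,  end=99
 */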
282 /*
283 * Create the lockf structure
284 */
285 lock = kheap_alloc(KM_LOCKF, sizeof(struct lockf), Z_WAITOK);
286 if (lock == NULL) {
287 return ENOLCK;
288 }
289 lock->lf_start = start;
290 lock->lf_end = end;
291 lock->lf_id = ap->a_id;
292 lock->lf_vnode = vp;
293 lock->lf_type = fl->l_type;
294 lock->lf_head = head;
295 lock->lf_next = (struct lockf *)0;
296 TAILQ_INIT(&lock->lf_blkhd);
297 lock->lf_flags = (short)ap->a_flags;
298 #if IMPORTANCE_INHERITANCE
299 lock->lf_boosted = LF_NOT_BOOSTED;
300 #endif
301 if (ap->a_flags & F_POSIX) {
302 lock->lf_owner = (struct proc *)lock->lf_id;
303 } else {
304 lock->lf_owner = NULL;
305 }
306
307 if (ap->a_flags & F_FLOCK) {
308 lock->lf_flags |= F_WAKE1_SAFE;
309 }
310
311 lck_mtx_lock(&vp->v_lock); /* protect the lockf list */
312 /*
313 * Do the requested operation.
314 */
315 switch (ap->a_op) {
316 case F_SETLK:
317 /*
318 * For F_OFD_* locks, lf_id is the fileglob.
319 * Record an "lf_owner" iff this is a confined fd
320 * i.e. it cannot escape this process and will be
321 * F_UNLCKed before the owner exits. (This is
322 * the implicit guarantee needed to ensure lf_owner
323 * remains a valid reference here.)
324 */
325 if (ap->a_flags & F_OFD_LOCK) {
326 struct fileglob *fg = (void *)lock->lf_id;
327 if (fg->fg_lflags & FG_CONFINED) {
328 lock->lf_owner = current_proc();
329 }
330 }
331 error = lf_setlock(lock, ap->a_timeout);
332 break;
333
334 case F_UNLCK:
335 error = lf_clearlock(lock);
336 kheap_free(KM_LOCKF, lock, sizeof(struct lockf));
337 break;
338
339 case F_GETLK:
340 error = lf_getlock(lock, fl, -1);
341 kheap_free(KM_LOCKF, lock, sizeof(struct lockf));
342 break;
343
344 case F_GETLKPID:
345 error = lf_getlock(lock, fl, fl->l_pid);
346 kheap_free(KM_LOCKF, lock, sizeof(struct lockf));
347 break;
348
349 default:
350 kheap_free(KM_LOCKF, lock, sizeof(struct lockf));
351 error = EINVAL;
352 break;
353 }
354 lck_mtx_unlock(&vp->v_lock); /* done manipulating the list */
355
356 LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: normal exit: %d\n", error);
357 return error;
358 }
359
360 /*
361 * Empty the queue of msleeping requests for a lock on the given vnode.
362 * Called with the vnode already locked. Used for forced unmount, where
363 * a flock(2) invoker sleeping on a blocked lock holds an iocount reference
364 * that prevents the vnode from ever being drained. Force unmounting wins.
365 */
366 void
367 lf_abort_advlocks(vnode_t vp)
368 {
369 struct lockf *lock;
370
371 if ((lock = vp->v_lockf) == NULL) {
372 return;
373 }
374
375 lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
376
377 if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
378 struct lockf *tlock;
379
380 TAILQ_FOREACH(tlock, &lock->lf_blkhd, lf_block) {
381 /*
382 * Setting this flag should cause all
383 * currently blocked F_SETLK requests to
384 * return to userland with an errno.
385 */
386 tlock->lf_flags |= F_ABORT;
387 }
388 lf_wakelock(lock, TRUE);
389 }
390 }
391
392 /*
393 * Take any lock attempts which are currently blocked by a given lock ("from")
394 * and mark them as blocked by a different lock ("to"). Used in the case
395 * where a byte range currently occupied by "from" is to be occupied by "to."
396 */
397 static void
398 lf_move_blocked(struct lockf *to, struct lockf *from)
399 {
400 struct lockf *tlock;
401
402 TAILQ_FOREACH(tlock, &from->lf_blkhd, lf_block) {
403 tlock->lf_next = to;
404 }
405
406 TAILQ_CONCAT(&to->lf_blkhd, &from->lf_blkhd, lf_block);
407 }
408
409 /*
410 * lf_coalesce_adjacent
411 *
412 * Description: Helper function: when setting a lock, coalesce adjacent
413 * locks. Needed because adjacent locks are not overlapping,
414 * but POSIX requires that they be coalesced.
415 *
416 * Parameters: lock The new lock which may be adjacent
417 * to already locked regions, and which
418 * should therefore be coalesced with them
419 *
420 * Returns: <void>
421 */
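/*
 * Illustrative example: if the same owner already holds a read lock on
 * bytes [0, 99] and then sets a read lock on [100, 199], the two regions
 * do not overlap but are numerically adjacent, so the routine below merges
 * them into a single lock covering [0, 199] and frees the absorbed entry.
 */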
422 static void
423 lf_coalesce_adjacent(struct lockf *lock)
424 {
425 struct lockf **lf = lock->lf_head;
426
427 while (*lf != NOLOCKF) {
428 /* reject locks that obviously could not be coalesced */
429 if ((*lf == lock) ||
430 ((*lf)->lf_id != lock->lf_id) ||
431 ((*lf)->lf_type != lock->lf_type)) {
432 lf = &(*lf)->lf_next;
433 continue;
434 }
435
436 /*
437 * NOTE: Assumes that if two locks are adjacent on the number line
438 * and belong to the same owner, then they are adjacent on the list.
439 */
440 if ((*lf)->lf_end != -1 &&
441 ((*lf)->lf_end + 1) == lock->lf_start) {
442 struct lockf *adjacent = *lf;
443
444 LOCKF_DEBUG(LF_DBG_LIST, "lf_coalesce_adjacent: coalesce adjacent previous\n");
445 lock->lf_start = (*lf)->lf_start;
446 *lf = lock;
447 lf = &(*lf)->lf_next;
448
449 lf_move_blocked(lock, adjacent);
450
451 kheap_free(KM_LOCKF, adjacent, sizeof(struct lockf));
452 continue;
453 }
454 /* If the lock starts adjacent to us, we can coalesce it */
455 if (lock->lf_end != -1 &&
456 (lock->lf_end + 1) == (*lf)->lf_start) {
457 struct lockf *adjacent = *lf;
458
459 LOCKF_DEBUG(LF_DBG_LIST, "lf_coalesce_adjacent: coalesce adjacent following\n");
460 lock->lf_end = (*lf)->lf_end;
461 lock->lf_next = (*lf)->lf_next;
462 lf = &lock->lf_next;
463
464 lf_move_blocked(lock, adjacent);
465
466 kheap_free(KM_LOCKF, adjacent, sizeof(struct lockf));
467 continue;
468 }
469
470 /* no matching conditions; go on to next lock */
471 lf = &(*lf)->lf_next;
472 }
473 }
474
475 /*
476 * lf_setlock
477 *
478 * Description: Set a byte-range lock.
479 *
480 * Parameters: lock The lock structure describing the lock
481 * to be set; allocated by the caller, it
482 * will be linked into the lock list if
483 * the set is successful, and freed if the
484 * set is unsuccessful.
485 *
486 * timeout Timeout specified in the case of
487 * SETLKWTIMEOUT.
488 *
489 * Returns: 0 Success
490 * EAGAIN
491 * EDEADLK
492 * lf_split:ENOLCK
493 * lf_clearlock:ENOLCK
494 * msleep:EINTR
495 * msleep:ETIMEDOUT
496 *
497 * Notes: We add the lock to the provisional lock list. We do not
498 * coalesce at this time; this has implications for other lock
499 * requestors in the blocker search mechanism.
500 */
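/*
 * Illustrative userland counterpart (a sketch; fd is assumed to be an open
 * descriptor): a non-blocking F_SETLK that hits a conflict takes the
 * "return EAGAIN" path at the top of the scan loop below, whereas F_SETLKW
 * sets F_WAIT and sleeps on the blocking lock instead.
 *
 *	struct flock fl = { .l_whence = SEEK_SET, .l_start = 0,
 *	    .l_len = 0, .l_type = F_WRLCK };
 *	if (fcntl(fd, F_SETLK, &fl) == -1 && errno == EAGAIN) {
 *		// conflicting lock held elsewhere: give up, or retry
 *		// with F_SETLKW to block until it is released
 *	}
 */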
501 static int
502 lf_setlock(struct lockf *lock, struct timespec *timeout)
503 {
504 struct lockf *block;
505 struct lockf **head = lock->lf_head;
506 struct lockf **prev, *overlap;
507 static const char lockstr[] = "lockf";
508 int priority, needtolink, error;
509 struct vnode *vp = lock->lf_vnode;
510 overlap_t ovcase;
511
512 #ifdef LOCKF_DEBUGGING
513 if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
514 lf_print("lf_setlock", lock);
515 lf_printlist("lf_setlock(in)", lock);
516 }
517 #endif /* LOCKF_DEBUGGING */
518 LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p Looking for deadlock, vnode %p\n", lock, lock->lf_vnode);
519
520 /*
521 * Set the priority
522 */
523 priority = PLOCK;
524 if (lock->lf_type == F_WRLCK) {
525 priority += 4;
526 }
527 priority |= PCATCH;
528 scan:
529 /*
530 * Scan lock list for this file looking for locks that would block us.
531 */
532 while ((block = lf_getblock(lock, -1))) {
533 /*
534 * Free the structure and return if nonblocking.
535 */
536 if ((lock->lf_flags & F_WAIT) == 0) {
537 DTRACE_FSINFO(advlock__nowait, vnode_t, vp);
538 kheap_free(KM_LOCKF, lock, sizeof(struct lockf));
539 return EAGAIN;
540 }
541
542 LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p found blocking lock %p\n", lock, block);
543
544 /*
545 * We are blocked. Since flock style locks cover
546 * the whole file, there is no chance for deadlock.
547 *
548 * OFD byte-range locks currently do NOT support
549 * deadlock detection.
550 *
551 * For POSIX byte-range locks we must check for deadlock.
552 *
553 * Deadlock detection is done by looking through the
554 * wait channels to see if there are any cycles that
555 * involve us.
556 */
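/*
 * Illustrative deadlock (hypothetical processes A and B): A holds
 * [0, 99] and is asleep waiting for [100, 199]; B holds [100, 199]
 * and now requests [0, 99]. Following A's wait channel leads to a
 * lock owned by B itself, so the request below fails with EDEADLK
 * rather than sleeping forever.
 */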
557 if ((lock->lf_flags & F_POSIX) &&
558 (block->lf_flags & F_POSIX)) {
559 lck_mtx_lock(&lf_dead_lock);
560
561 /* The blocked process is waiting on something */
562 struct proc *wproc = block->lf_owner;
563 proc_lock(wproc);
564
565 LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p owned by pid %d\n", lock, proc_pid(wproc));
566
567 struct uthread *ut;
568 TAILQ_FOREACH(ut, &wproc->p_uthlist, uu_list) {
569 /*
570 * If the thread is (a) asleep (uu_wchan != 0)
571 * and (b) in this code (uu_wmesg == lockstr)
572 * then check to see if the lock is blocked behind
573 * someone blocked behind us.
574 *
575 * Note: (i) vp->v_lock is held, preventing other
576 * threads from mutating the blocking list for our vnode.
577 * and (ii) the proc_lock is held i.e the thread list
578 * is stable.
579 *
580 * HOWEVER some thread in wproc might be sleeping on a lockf
581 * structure for a different vnode, and be woken at any
582 * time. Thus the waitblock list could mutate while
583 * it's being inspected by this thread, and what
584 * ut->uu_wchan was just pointing at could even be freed.
585 *
586 * Nevertheless this is safe here because of lf_dead_lock; if
587 * any thread blocked with uu_wmesg == lockstr wakes (see below)
588 * it will try to acquire lf_dead_lock which is already held
589 * here. Holding that lock prevents the lockf structure being
590 * pointed at by ut->uu_wchan from going away. Thus the vnode
591 * involved can be found and locked, and the corresponding
592 * blocking chain can then be examined safely.
593 */
594 const struct lockf *waitblock = (const void *)ut->uu_wchan;
595 if ((waitblock != NULL) && (ut->uu_wmesg == lockstr)) {
596 LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p which is also blocked on lock %p vnode %p\n", lock, waitblock, waitblock->lf_vnode);
597
598 vnode_t othervp = NULL;
599 if (waitblock->lf_vnode != vp) {
600 /*
601 * This thread in wproc is waiting for a lock
602 * on a different vnode; grab the lock on it
603 * that protects lf_next while we examine it.
604 */
605 othervp = waitblock->lf_vnode;
606 if (!lck_mtx_try_lock(&othervp->v_lock)) {
607 /*
608 * avoid kernel deadlock: drop all
609 * locks, pause for a bit to let the
610 * other thread do what it needs to do,
611 * then (because we drop and retake
612 * v_lock) retry the scan.
613 */
614 proc_unlock(wproc);
615 lck_mtx_unlock(&lf_dead_lock);
616 static struct timespec ts = {
617 .tv_sec = 0,
618 .tv_nsec = 2 * NSEC_PER_MSEC,
619 };
620 static const char pausestr[] = "lockf:pause";
621 (void) msleep(lock, &vp->v_lock, priority, pausestr, &ts);
622 LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p contention for vp %p => restart\n", lock, othervp);
623 goto scan;
624 }
625 }
626
627 /*
628 * Get the lock blocking the lock
629 * which would block us, and make
630 * certain it hasn't become unblocked
631 * (been granted, e.g. between the time
632 * we called lf_getblock, and the time
633 * we successfully acquired the
634 * proc_lock).
635 */
636 const struct lockf *nextblock = waitblock->lf_next;
637 if (nextblock == NULL) {
638 if (othervp) {
639 lck_mtx_unlock(&othervp->v_lock);
640 }
641 LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p with waitblock %p and no lf_next; othervp %p\n", lock, waitblock, othervp);
642 continue;
643 }
644 LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p which is also blocked on lock %p vnode %p\n", lock, nextblock, nextblock->lf_vnode);
645
646 /*
647 * Make sure it's an advisory range
648 * lock and not any other kind of lock;
649 * if we mix lock types, it's our own
650 * fault.
651 */
652 if ((nextblock->lf_flags & F_POSIX) == 0) {
653 if (othervp) {
654 lck_mtx_unlock(&othervp->v_lock);
655 }
656 continue;
657 }
658
659 /*
660 * If the owner of the lock that's
661 * blocking the lock that's blocking us
662 * is ourselves, then granting the requested
663 * lock would deadlock, so error out.
664 */
665 struct proc *bproc = nextblock->lf_owner;
666 const boolean_t deadlocked = bproc == lock->lf_owner;
667
668 if (othervp) {
669 lck_mtx_unlock(&othervp->v_lock);
670 }
671 LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p owned by pid %d\n", lock, proc_pid(bproc));
672 if (deadlocked) {
673 LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p which is me, so EDEADLK\n", lock);
674 proc_unlock(wproc);
675 lck_mtx_unlock(&lf_dead_lock);
676 kheap_free(KM_LOCKF, lock, sizeof(struct lockf));
677 return EDEADLK;
678 }
679 }
680 LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p bottom of thread loop\n", lock);
681 }
682 proc_unlock(wproc);
683 lck_mtx_unlock(&lf_dead_lock);
684 }
685
686 /*
687 * For flock type locks, we must first remove
688 * any shared locks that we hold before we sleep
689 * waiting for an exclusive lock.
690 */
691 if ((lock->lf_flags & F_FLOCK) &&
692 lock->lf_type == F_WRLCK) {
693 lock->lf_type = F_UNLCK;
694 if ((error = lf_clearlock(lock)) != 0) {
695 kheap_free(KM_LOCKF, lock, sizeof(struct lockf));
696 return error;
697 }
698 lock->lf_type = F_WRLCK;
699 }
700 /*
701 * Add our lock to the blocked list and sleep until we're free.
702 * Remember who blocked us (for deadlock detection).
703 */
704 lock->lf_next = block;
705 TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
706
707 if (!(lock->lf_flags & F_FLOCK)) {
708 block->lf_flags &= ~F_WAKE1_SAFE;
709 }
710
711 #if IMPORTANCE_INHERITANCE
712 /*
713 * Importance donation is done only for cases where the
714 * owning task can be unambiguously determined.
715 *
716 * POSIX type locks are not inherited by child processes;
717 * we maintain a 1:1 mapping between a lock and its owning
718 * process.
719 *
720 * Flock type locks are inherited across fork() and there is
721 * no 1:1 mapping in the general case. However, the fileglobs
722 * used by OFD locks *may* be confined to the process that
723 * created them, and thus have an "owner", in which case
724 * we also attempt importance donation.
725 */
726 if ((lock->lf_flags & block->lf_flags & F_POSIX) != 0) {
727 lf_boost_blocking_proc(lock, block);
728 } else if ((lock->lf_flags & block->lf_flags & F_OFD_LOCK) &&
729 lock->lf_owner != block->lf_owner &&
730 NULL != lock->lf_owner && NULL != block->lf_owner) {
731 lf_boost_blocking_proc(lock, block);
732 }
733 #endif /* IMPORTANCE_INHERITANCE */
734
735 #ifdef LOCKF_DEBUGGING
736 if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
737 lf_print("lf_setlock: blocking on", block);
738 lf_printlist("lf_setlock(block)", block);
739 }
740 #endif /* LOCKF_DEBUGGING */
741 DTRACE_FSINFO(advlock__wait, vnode_t, vp);
742
743 if (lock->lf_flags & F_POSIX) {
744 error = msleep(lock, &vp->v_lock, priority, lockstr, timeout);
745 /*
746 * Ensure that 'lock' doesn't get mutated or freed if a
747 * wakeup occurs while hunting for deadlocks (and holding
748 * lf_dead_lock - see above)
749 */
750 lck_mtx_lock(&lf_dead_lock);
751 lck_mtx_unlock(&lf_dead_lock);
752 } else {
753 static const char lockstr_np[] = "lockf:np";
754 error = msleep(lock, &vp->v_lock, priority, lockstr_np, timeout);
755 }
756
757 if (error == 0 && (lock->lf_flags & F_ABORT) != 0) {
758 error = EBADF;
759 }
760
761 if (lock->lf_next) {
762 /*
763 * lf_wakelock() always sets wakelock->lf_next to
764 * NULL before a wakeup; so we've been woken early
765 * - perhaps by a debugger, signal or other event.
766 *
767 * Remove 'lock' from the block list (avoids double-add
768 * in the spurious case, which would create a cycle)
769 */
770 TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
771 #if IMPORTANCE_INHERITANCE
772 /*
773 * Adjust the boost on lf_next.
774 */
775 lf_adjust_assertion(lock->lf_next);
776 #endif /* IMPORTANCE_INHERITANCE */
777 lock->lf_next = NULL;
778
779 if (error == 0) {
780 /*
781 * If this was a spurious wakeup, retry
782 */
783 printf("%s: spurious wakeup, retrying lock\n",
784 __func__);
785 continue;
786 }
787 }
788
789 if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
790 if ((block = lf_getblock(lock, -1)) != NULL) {
791 lf_move_blocked(block, lock);
792 }
793 }
794
795 if (error) {
796 if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
797 lf_wakelock(lock, TRUE);
798 }
799 kheap_free(KM_LOCKF, lock, sizeof(struct lockf));
800 /* Return ETIMEDOUT if timeout occurred. */
801 if (error == EWOULDBLOCK) {
802 error = ETIMEDOUT;
803 }
804 return error;
805 }
806 }
807
808 /*
809 * No blocks!! Add the lock. Note that we will
810 * downgrade or upgrade any overlapping locks this
811 * process already owns.
812 *
813 * Skip over locks owned by other processes.
814 * Handle any locks that overlap and are owned by ourselves.
815 */
816 prev = head;
817 block = *head;
818 needtolink = 1;
819 for (;;) {
820 ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
821 if (ovcase) {
822 block = overlap->lf_next;
823 }
824 /*
825 * Six cases:
826 * 0) no overlap
827 * 1) overlap == lock
828 * 2) overlap contains lock
829 * 3) lock contains overlap
830 * 4) overlap starts before lock
831 * 5) overlap ends after lock
832 */
833 switch (ovcase) {
834 case OVERLAP_NONE:
835 if (needtolink) {
836 *prev = lock;
837 lock->lf_next = overlap;
838 }
839 break;
840
841 case OVERLAP_EQUALS_LOCK:
842 /*
843 * If downgrading lock, others may be
844 * able to acquire it.
845 */
846 if (lock->lf_type == F_RDLCK &&
847 overlap->lf_type == F_WRLCK) {
848 lf_wakelock(overlap, TRUE);
849 }
850 overlap->lf_type = lock->lf_type;
851 lf_move_blocked(overlap, lock);
852 kheap_free(KM_LOCKF, lock, sizeof(struct lockf));
853 lock = overlap; /* for lf_coalesce_adjacent() */
854 break;
855
856 case OVERLAP_CONTAINS_LOCK:
857 /*
858 * Check for common starting point and different types.
859 */
860 if (overlap->lf_type == lock->lf_type) {
861 lf_move_blocked(overlap, lock);
862 kheap_free(KM_LOCKF, lock, sizeof(struct lockf));
863 lock = overlap; /* for lf_coalesce_adjacent() */
864 break;
865 }
866 if (overlap->lf_start == lock->lf_start) {
867 *prev = lock;
868 lock->lf_next = overlap;
869 overlap->lf_start = lock->lf_end + 1;
870 } else {
871 /*
872 * If we can't split the lock, we can't
873 * grant it. Claim a system limit for the
874 * resource shortage.
875 */
876 if (lf_split(overlap, lock)) {
877 kheap_free(KM_LOCKF, lock, sizeof(struct lockf));
878 return ENOLCK;
879 }
880 }
881 lf_wakelock(overlap, TRUE);
882 break;
883
884 case OVERLAP_CONTAINED_BY_LOCK:
885 /*
886 * If downgrading lock, others may be able to
887 * acquire it, otherwise take the list.
888 */
889 if (lock->lf_type == F_RDLCK &&
890 overlap->lf_type == F_WRLCK) {
891 lf_wakelock(overlap, TRUE);
892 } else {
893 lf_move_blocked(lock, overlap);
894 }
895 /*
896 * Add the new lock if necessary and delete the overlap.
897 */
898 if (needtolink) {
899 *prev = lock;
900 lock->lf_next = overlap->lf_next;
901 prev = &lock->lf_next;
902 needtolink = 0;
903 } else {
904 *prev = overlap->lf_next;
905 }
906 kheap_free(KM_LOCKF, overlap, sizeof(struct lockf));
907 continue;
908
909 case OVERLAP_STARTS_BEFORE_LOCK:
910 /*
911 * Add lock after overlap on the list.
912 */
913 lock->lf_next = overlap->lf_next;
914 overlap->lf_next = lock;
915 overlap->lf_end = lock->lf_start - 1;
916 prev = &lock->lf_next;
917 lf_wakelock(overlap, TRUE);
918 needtolink = 0;
919 continue;
920
921 case OVERLAP_ENDS_AFTER_LOCK:
922 /*
923 * Add the new lock before overlap.
924 */
925 if (needtolink) {
926 *prev = lock;
927 lock->lf_next = overlap;
928 }
929 overlap->lf_start = lock->lf_end + 1;
930 lf_wakelock(overlap, TRUE);
931 break;
932 }
933 break;
934 }
935 /* Coalesce adjacent locks with identical attributes */
936 lf_coalesce_adjacent(lock);
937 #ifdef LOCKF_DEBUGGING
938 if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
939 lf_print("lf_setlock: got the lock", lock);
940 lf_printlist("lf_setlock(out)", lock);
941 }
942 #endif /* LOCKF_DEBUGGING */
943 return 0;
944 }
945
946
947 /*
948 * lf_clearlock
949 *
950 * Description: Remove a byte-range lock on a vnode. Generally, find the
951 * lock (or an overlap to that lock) and remove it (or shrink
952 * it), then wakeup anyone we can.
953 *
954 * Parameters: unlock The lock to clear
955 *
956 * Returns: 0 Success
957 * lf_split:ENOLCK
958 *
959 * Notes: A caller may unlock all the locks owned by the caller by
960 * specifying the entire file range; locks owned by other
961 * callers are not affected by this operation.
962 */
963 static int
964 lf_clearlock(struct lockf *unlock)
965 {
966 struct lockf **head = unlock->lf_head;
967 struct lockf *lf = *head;
968 struct lockf *overlap, **prev;
969 overlap_t ovcase;
970
971 if (lf == NOLOCKF) {
972 return 0;
973 }
974 #ifdef LOCKF_DEBUGGING
975 if (unlock->lf_type != F_UNLCK) {
976 panic("lf_clearlock: bad type");
977 }
978 if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
979 lf_print("lf_clearlock", unlock);
980 }
981 #endif /* LOCKF_DEBUGGING */
982 prev = head;
983 while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap)) != OVERLAP_NONE) {
984 /*
985 * Wakeup the list of locks to be retried.
986 */
987 lf_wakelock(overlap, FALSE);
988 #if IMPORTANCE_INHERITANCE
989 if (overlap->lf_boosted == LF_BOOSTED) {
990 lf_drop_assertion(overlap);
991 }
992 #endif /* IMPORTANCE_INHERITANCE */
993
994 switch (ovcase) {
995 case OVERLAP_NONE: /* satisfy compiler enum/switch */
996 break;
997
998 case OVERLAP_EQUALS_LOCK:
999 *prev = overlap->lf_next;
1000 kheap_free(KM_LOCKF, overlap, sizeof(struct lockf));
1001 break;
1002
1003 case OVERLAP_CONTAINS_LOCK: /* split it */
1004 if (overlap->lf_start == unlock->lf_start) {
1005 overlap->lf_start = unlock->lf_end + 1;
1006 break;
1007 }
1008 /*
1009 * If we can't split the lock, we can't grant it.
1010 * Claim a system limit for the resource shortage.
1011 */
1012 if (lf_split(overlap, unlock)) {
1013 return ENOLCK;
1014 }
1015 overlap->lf_next = unlock->lf_next;
1016 break;
1017
1018 case OVERLAP_CONTAINED_BY_LOCK:
1019 *prev = overlap->lf_next;
1020 lf = overlap->lf_next;
1021 kheap_free(KM_LOCKF, overlap, sizeof(struct lockf));
1022 continue;
1023
1024 case OVERLAP_STARTS_BEFORE_LOCK:
1025 overlap->lf_end = unlock->lf_start - 1;
1026 prev = &overlap->lf_next;
1027 lf = overlap->lf_next;
1028 continue;
1029
1030 case OVERLAP_ENDS_AFTER_LOCK:
1031 overlap->lf_start = unlock->lf_end + 1;
1032 break;
1033 }
1034 break;
1035 }
1036 #ifdef LOCKF_DEBUGGING
1037 if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
1038 lf_printlist("lf_clearlock", unlock);
1039 }
1040 #endif /* LOCKF_DEBUGGING */
1041 return 0;
1042 }
1043
1044
1045 /*
1046 * lf_getlock
1047 *
1048 * Description: Check whether there is a blocking lock, and if so return
1049 * its process identifier into the lock being requested.
1050 *
1051 * Parameters: lock Pointer to lock to test for blocks
1052 * fl Pointer to flock structure to receive
1053 * the blocking lock information, if a
1054 * blocking lock is found.
1055 * matchpid -1, or pid value to match in lookup.
1056 *
1057 * Returns: 0 Success
1058 *
1059 * Implicit Returns:
1060 * *fl Contents modified to reflect the
1061 * blocking lock, if one is found; not
1062 * modified otherwise
1063 *
1064 * Notes: fl->l_pid will be (-1) for file locks and will only be set to
1065 * the blocking process ID for advisory record locks.
1066 */
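/*
 * Illustrative userland view (a sketch; fd is assumed to be an open
 * descriptor): F_GETLK asks "could I place this lock?", and on conflict
 * the flock structure is overwritten with the first blocking lock, just
 * as lf_getlock() does below.
 *
 *	struct flock fl = { .l_whence = SEEK_SET, .l_start = 0,
 *	    .l_len = 0, .l_type = F_WRLCK };
 *	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK) {
 *		// fl.l_start, fl.l_len and fl.l_pid describe the blocker
 *	}
 */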
1067 static int
1068 lf_getlock(struct lockf *lock, struct flock *fl, pid_t matchpid)
1069 {
1070 struct lockf *block;
1071
1072 #ifdef LOCKF_DEBUGGING
1073 if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
1074 lf_print("lf_getlock", lock);
1075 }
1076 #endif /* LOCKF_DEBUGGING */
1077
1078 if ((block = lf_getblock(lock, matchpid))) {
1079 fl->l_type = block->lf_type;
1080 fl->l_whence = SEEK_SET;
1081 fl->l_start = block->lf_start;
1082 if (block->lf_end == -1) {
1083 fl->l_len = 0;
1084 } else {
1085 fl->l_len = block->lf_end - block->lf_start + 1;
1086 }
1087 if (NULL != block->lf_owner) {
1088 /*
1089 * lf_owner is only non-NULL when the lock
1090 * "owner" can be unambiguously determined
1091 */
1092 fl->l_pid = proc_pid(block->lf_owner);
1093 } else {
1094 fl->l_pid = -1;
1095 }
1096 } else {
1097 fl->l_type = F_UNLCK;
1098 }
1099 return 0;
1100 }
1101
1102 /*
1103 * lf_getblock
1104 *
1105 * Description: Walk the list of locks for a vnode and return the first
1106 * blocking lock. A lock is considered blocking if we are not
1107 * the lock owner; otherwise, we are permitted to upgrade or
1108 * downgrade it, and it's not considered blocking.
1109 *
1110 * Parameters: lock The lock for which we are interested
1111 * in obtaining the blocking lock, if any
1112 * matchpid -1, or pid value to match in lookup.
1113 *
1114 * Returns: NOLOCKF No blocking lock exists
1115 * !NOLOCKF The address of the blocking lock's
1116 * struct lockf.
1117 */
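/*
 * Conflict matrix applied below (illustrative): two shared locks never
 * block each other; a conflict requires a writer on at least one side.
 *
 *	requested \ held	F_RDLCK		F_WRLCK
 *	F_RDLCK			no block	blocks
 *	F_WRLCK			blocks		blocks
 */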
1118 static struct lockf *
1119 lf_getblock(struct lockf *lock, pid_t matchpid)
1120 {
1121 struct lockf **prev, *overlap, *lf = *(lock->lf_head);
1122
1123 for (prev = lock->lf_head;
1124 lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != OVERLAP_NONE;
1125 lf = overlap->lf_next) {
1126 /*
1127 * Found an overlap.
1128 *
1129 * If we're matching pids, and it's a record lock,
1130 * or it's an OFD lock on a process-confined fd,
1131 * but the pid doesn't match, then keep on looking ..
1132 */
1133 if (matchpid != -1 &&
1134 (overlap->lf_flags & (F_POSIX | F_OFD_LOCK)) != 0 &&
1135 proc_pid(overlap->lf_owner) != matchpid) {
1136 continue;
1137 }
1138
1139 /*
1140 * does it block us?
1141 */
1142 if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK)) {
1143 return overlap;
1144 }
1145 }
1146 return NOLOCKF;
1147 }
1148
1149
1150 /*
1151 * lf_findoverlap
1152 *
1153 * Description: Walk the list of locks to find an overlapping lock (if any).
1154 *
1155 * Parameters: lf First lock on lock list
1156 * lock The lock we are checking for an overlap
1157 * check Check type
1158 * prev pointer to a pointer that will receive
1159 * the address of the next pointer in the
1160 * lock preceding the overlapping lock, if any
1161 * overlap pointer to pointer to contain address
1162 * of overlapping lock
1163 *
1164 * Returns: OVERLAP_NONE
1165 * OVERLAP_EQUALS_LOCK
1166 * OVERLAP_CONTAINS_LOCK
1167 * OVERLAP_CONTAINED_BY_LOCK
1168 * OVERLAP_STARTS_BEFORE_LOCK
1169 * OVERLAP_ENDS_AFTER_LOCK
1170 *
1171 * Implicit Returns:
1172 * *prev The address of the next pointer in the
1173 * lock previous to the overlapping lock;
1174 * this is generally used to relink the
1175 * lock list, avoiding a second iteration.
1176 * *overlap The pointer to the overlapping lock
1177 * itself; this is used to return data in
1178 * the check == OTHERS case, and for the
1179 * caller to modify the overlapping lock,
1180 * in the check == SELF case
1181 *
1182 * Note: This returns only the FIRST overlapping lock. There may be
1183 * more than one. lf_getlock will return the first blocking lock,
1184 * while lf_setlock will iterate over all overlapping locks to handle each of them.
1185 *
1186 * The check parameter can be SELF, meaning we are looking for
1187 * overlapping locks owned by us, or it can be OTHERS, meaning
1188 * we are looking for overlapping locks owned by someone else so
1189 * we can report a blocking lock on an F_GETLK request.
1190 *
1191 * The values of *overlap and *prev are modified, even if there is
1192 * no overlapping lock found; always check the return code.
1193 */
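/*
 * Sketch of the overlap cases returned below, with "lock" as the request
 * being tested and "lf" as the existing lock (ranges are inclusive):
 *
 *	OVERLAP_EQUALS_LOCK		lf:	|=======|
 *					lock:	|=======|
 *	OVERLAP_CONTAINS_LOCK		lf:	|===========|
 *					lock:	   |=====|
 *	OVERLAP_CONTAINED_BY_LOCK	lf:	   |=====|
 *					lock:	|===========|
 *	OVERLAP_STARTS_BEFORE_LOCK	lf:	|=======|
 *					lock:	    |=======|
 *	OVERLAP_ENDS_AFTER_LOCK		lf:	    |=======|
 *					lock:	|=======|
 */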
1194 static overlap_t
1195 lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
1196 struct lockf ***prev, struct lockf **overlap)
1197 {
1198 off_t start, end;
1199 int found_self = 0;
1200
1201 *overlap = lf;
1202 if (lf == NOLOCKF) {
1203 return 0;
1204 }
1205 #ifdef LOCKF_DEBUGGING
1206 if (LOCKF_DEBUGP(LF_DBG_LIST)) {
1207 lf_print("lf_findoverlap: looking for overlap in", lock);
1208 }
1209 #endif /* LOCKF_DEBUGGING */
1210 start = lock->lf_start;
1211 end = lock->lf_end;
1212 while (lf != NOLOCKF) {
1213 if (((type & SELF) && lf->lf_id != lock->lf_id) ||
1214 ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
1215 /*
1216 * Locks belonging to one process are adjacent on the
1217 * list, so if we've found any locks belonging to us,
1218 * and we're now seeing something else, then we've
1219 * examined all "self" locks. Note that bailing out
1220 * here is quite important; for coalescing, we assume
1221 * numerically adjacent locks from the same owner to
1222 * be adjacent on the list.
1223 */
1224 if ((type & SELF) && found_self) {
1225 return OVERLAP_NONE;
1226 }
1227
1228 *prev = &lf->lf_next;
1229 *overlap = lf = lf->lf_next;
1230 continue;
1231 }
1232
1233 if ((type & SELF)) {
1234 found_self = 1;
1235 }
1236
1237 #ifdef LOCKF_DEBUGGING
1238 if (LOCKF_DEBUGP(LF_DBG_LIST)) {
1239 lf_print("\tchecking", lf);
1240 }
1241 #endif /* LOCKF_DEBUGGING */
1242 /*
1243 * OK, check for overlap
1244 */
1245 if ((lf->lf_end != -1 && start > lf->lf_end) ||
1246 (end != -1 && lf->lf_start > end)) {
1247 /* Case 0 */
1248 LOCKF_DEBUG(LF_DBG_LIST, "no overlap\n");
1249
1250 /*
1251 * NOTE: assumes that locks for the same process are
1252 * nonintersecting and ordered.
1253 */
1254 if ((type & SELF) && end != -1 && lf->lf_start > end) {
1255 return OVERLAP_NONE;
1256 }
1257 *prev = &lf->lf_next;
1258 *overlap = lf = lf->lf_next;
1259 continue;
1260 }
1261 if ((lf->lf_start == start) && (lf->lf_end == end)) {
1262 LOCKF_DEBUG(LF_DBG_LIST, "overlap == lock\n");
1263 return OVERLAP_EQUALS_LOCK;
1264 }
1265 if ((lf->lf_start <= start) &&
1266 (end != -1) &&
1267 ((lf->lf_end >= end) || (lf->lf_end == -1))) {
1268 LOCKF_DEBUG(LF_DBG_LIST, "overlap contains lock\n");
1269 return OVERLAP_CONTAINS_LOCK;
1270 }
1271 if (start <= lf->lf_start &&
1272 (end == -1 ||
1273 (lf->lf_end != -1 && end >= lf->lf_end))) {
1274 LOCKF_DEBUG(LF_DBG_LIST, "lock contains overlap\n");
1275 return OVERLAP_CONTAINED_BY_LOCK;
1276 }
1277 if ((lf->lf_start < start) &&
1278 ((lf->lf_end >= start) || (lf->lf_end == -1))) {
1279 LOCKF_DEBUG(LF_DBG_LIST, "overlap starts before lock\n");
1280 return OVERLAP_STARTS_BEFORE_LOCK;
1281 }
1282 if ((lf->lf_start > start) &&
1283 (end != -1) &&
1284 ((lf->lf_end > end) || (lf->lf_end == -1))) {
1285 LOCKF_DEBUG(LF_DBG_LIST, "overlap ends after lock\n");
1286 return OVERLAP_ENDS_AFTER_LOCK;
1287 }
1288 panic("lf_findoverlap: default");
1289 }
1290 return OVERLAP_NONE;
1291 }
1292
1293
1294 /*
1295 * lf_split
1296 *
1297 * Description: Split a lock and a contained region into two or three locks
1298 * as necessary.
1299 *
1300 * Parameters: lock1 Lock to split
1301 * lock2 Overlapping lock region requiring the
1302 * split (upgrade/downgrade/unlock)
1303 *
1304 * Returns: 0 Success
1305 * ENOLCK No memory for new lock
1306 *
1307 * Implicit Returns:
1308 * *lock1 Modified original lock
1309 * *lock2 Overlapping lock (inserted into list)
1310 * (new lock) Potential new lock inserted into list
1311 * if split results in 3 locks
1312 *
1313 * Notes: This operation can only fail if the split would result in three
1314 * locks, and there is insufficient memory to allocate the third
1315 * lock; in that case, neither of the locks will be modified.
1316 */
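/*
 * Worked example (illustrative): splitting a held lock covering [0, 199]
 * around an overlapping region [50, 99] takes the three-piece path below;
 * lock1 is trimmed to [0, 49], a newly allocated lock covers [100, 199],
 * and lock2 is linked in between for the middle region [50, 99] (the
 * caller then keeps it, as in lf_setlock, or removes it, as in
 * lf_clearlock).
 */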
1317 static int
1318 lf_split(struct lockf *lock1, struct lockf *lock2)
1319 {
1320 struct lockf *splitlock;
1321
1322 #ifdef LOCKF_DEBUGGING
1323 if (LOCKF_DEBUGP(LF_DBG_LIST)) {
1324 lf_print("lf_split", lock1);
1325 lf_print("splitting from", lock2);
1326 }
1327 #endif /* LOCKF_DEBUGGING */
1328 /*
1329 * Check to see if splitting into only two pieces.
1330 */
1331 if (lock1->lf_start == lock2->lf_start) {
1332 lock1->lf_start = lock2->lf_end + 1;
1333 lock2->lf_next = lock1;
1334 return 0;
1335 }
1336 if (lock1->lf_end == lock2->lf_end) {
1337 lock1->lf_end = lock2->lf_start - 1;
1338 lock2->lf_next = lock1->lf_next;
1339 lock1->lf_next = lock2;
1340 return 0;
1341 }
1342 /*
1343 * Make a new lock consisting of the last part of
1344 * the encompassing lock
1345 */
1346 splitlock = kheap_alloc(KM_LOCKF, sizeof(struct lockf), Z_WAITOK);
1347 if (splitlock == NULL) {
1348 return ENOLCK;
1349 }
1350 bcopy(lock1, splitlock, sizeof *splitlock);
1351 splitlock->lf_start = lock2->lf_end + 1;
1352 TAILQ_INIT(&splitlock->lf_blkhd);
1353 lock1->lf_end = lock2->lf_start - 1;
1354 /*
1355 * OK, now link it in
1356 */
1357 splitlock->lf_next = lock1->lf_next;
1358 lock2->lf_next = splitlock;
1359 lock1->lf_next = lock2;
1360
1361 return 0;
1362 }
1363
1364
1365 /*
1366 * lf_wakelock
1367 *
1368 * Wakeup a blocklist in the case of a downgrade or unlock, since others
1369 * waiting on the lock may now be able to acquire it.
1370 *
1371 * Parameters: listhead Lock list head on which waiters may
1372 * have pending locks
1373 *
1374 * Returns: <void>
1375 *
1376 * Notes: This function iterates a list of locks and wakes all waiters,
1377 * rather than only waiters for the contended regions. Because
1378 * of this, for heavily contended files, this can result in a
1379 * "thundering herd" situation. Refactoring the code could make
1380 * this operation more efficient, if heavy contention ever results
1381 * in a real-world performance problem.
1382 */
1383 static void
1384 lf_wakelock(struct lockf *listhead, boolean_t force_all)
1385 {
1386 struct lockf *wakelock;
1387 boolean_t wake_all = TRUE;
1388
1389 if (force_all == FALSE && (listhead->lf_flags & F_WAKE1_SAFE)) {
1390 wake_all = FALSE;
1391 }
1392
1393 while (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
1394 wakelock = TAILQ_FIRST(&listhead->lf_blkhd);
1395 TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
1396
1397 wakelock->lf_next = NOLOCKF;
1398 #ifdef LOCKF_DEBUGGING
1399 if (LOCKF_DEBUGP(LF_DBG_LOCKOP)) {
1400 lf_print("lf_wakelock: awakening", wakelock);
1401 }
1402 #endif /* LOCKF_DEBUGGING */
1403 if (wake_all == FALSE) {
1404 /*
1405 * If there are items on the list head block list,
1406 * move them to the wakelock list instead, and then
1407 * correct their lf_next pointers.
1408 */
1409 if (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
1410 TAILQ_CONCAT(&wakelock->lf_blkhd, &listhead->lf_blkhd, lf_block);
1411
1412 struct lockf *tlock;
1413
1414 TAILQ_FOREACH(tlock, &wakelock->lf_blkhd, lf_block) {
1415 if (TAILQ_NEXT(tlock, lf_block) == tlock) {
1416 /* See rdar://10887303 */
1417 panic("cycle in wakelock list");
1418 }
1419 tlock->lf_next = wakelock;
1420 }
1421 }
1422 }
1423 wakeup(wakelock);
1424
1425 if (wake_all == FALSE) {
1426 break;
1427 }
1428 }
1429 }
1430
1431
1432 #ifdef LOCKF_DEBUGGING
1433 #define GET_LF_OWNER_PID(lf) (proc_pid((lf)->lf_owner))
1434
1435 /*
1436 * lf_print DEBUG
1437 *
1438 * Print out a lock; lock information is prefixed by the string in 'tag'
1439 *
1440 * Parameters: tag A string tag for debugging
1441 * lock The lock whose information should be
1442 * displayed
1443 *
1444 * Returns: <void>
1445 */
1446 void
1447 lf_print(const char *tag, struct lockf *lock)
1448 {
1449 printf("%s: lock %p for ", tag, (void *)lock);
1450 if (lock->lf_flags & F_POSIX) {
1451 printf("proc %p (owner %d)",
1452 lock->lf_id, GET_LF_OWNER_PID(lock));
1453 } else if (lock->lf_flags & F_OFD_LOCK) {
1454 printf("fg %p (owner %d)",
1455 lock->lf_id, GET_LF_OWNER_PID(lock));
1456 } else {
1457 printf("id %p", (void *)lock->lf_id);
1458 }
1459 if (lock->lf_vnode != 0) {
1460 printf(" in vno %p, %s, start 0x%016llx, end 0x%016llx",
1461 lock->lf_vnode,
1462 lock->lf_type == F_RDLCK ? "shared" :
1463 lock->lf_type == F_WRLCK ? "exclusive" :
1464 lock->lf_type == F_UNLCK ? "unlock" : "unknown",
1465 (uint64_t)lock->lf_start, (uint64_t)lock->lf_end);
1466 } else {
1467 printf(" %s, start 0x%016llx, end 0x%016llx",
1468 lock->lf_type == F_RDLCK ? "shared" :
1469 lock->lf_type == F_WRLCK ? "exclusive" :
1470 lock->lf_type == F_UNLCK ? "unlock" : "unknown",
1471 (uint64_t)lock->lf_start, (uint64_t)lock->lf_end);
1472 }
1473 if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
1474 printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd));
1475 } else {
1476 printf("\n");
1477 }
1478 }
1479
1480
1481 /*
1482 * lf_printlist DEBUG
1483 *
1484 * Print out a lock list for the vnode associated with 'lock'; lock information
1485 * is prefixed by the string in 'tag'
1486 *
1487 * Parameters: tag A string tag for debugging
1488 * lock The lock whose vnode's lock list should
1489 * be displayed
1490 *
1491 * Returns: <void>
1492 */
1493 void
1494 lf_printlist(const char *tag, struct lockf *lock)
1495 {
1496 struct lockf *lf, *blk;
1497
1498 if (lock->lf_vnode == 0) {
1499 return;
1500 }
1501
1502 printf("%s: Lock list for vno %p:\n",
1503 tag, lock->lf_vnode);
1504 for (lf = lock->lf_vnode->v_lockf; lf; lf = lf->lf_next) {
1505 printf("\tlock %p for ", (void *)lf);
1506 if (lf->lf_flags & F_POSIX) {
1507 printf("proc %p (owner %d)",
1508 lf->lf_id, GET_LF_OWNER_PID(lf));
1509 } else if (lf->lf_flags & F_OFD_LOCK) {
1510 printf("fg %p (owner %d)",
1511 lf->lf_id, GET_LF_OWNER_PID(lf));
1512 } else {
1513 printf("id %p", (void *)lf->lf_id);
1514 }
1515 printf(", %s, start 0x%016llx, end 0x%016llx",
1516 lf->lf_type == F_RDLCK ? "shared" :
1517 lf->lf_type == F_WRLCK ? "exclusive" :
1518 lf->lf_type == F_UNLCK ? "unlock" :
1519 "unknown", (uint64_t)lf->lf_start, (uint64_t)lf->lf_end);
1520 TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
1521 printf("\n\t\tlock request %p for ", (void *)blk);
1522 if (blk->lf_flags & F_POSIX) {
1523 printf("proc %p (owner %d)",
1524 blk->lf_id, GET_LF_OWNER_PID(blk));
1525 } else if (blk->lf_flags & F_OFD_LOCK) {
1526 printf("fg %p (owner %d)",
1527 blk->lf_id, GET_LF_OWNER_PID(blk));
1528 } else {
1529 printf("id %p", (void *)blk->lf_id);
1530 }
1531 printf(", %s, start 0x%016llx, end 0x%016llx",
1532 blk->lf_type == F_RDLCK ? "shared" :
1533 blk->lf_type == F_WRLCK ? "exclusive" :
1534 blk->lf_type == F_UNLCK ? "unlock" :
1535 "unknown", (uint64_t)blk->lf_start,
1536 (uint64_t)blk->lf_end);
1537 if (!TAILQ_EMPTY(&blk->lf_blkhd)) {
1538 panic("lf_printlist: bad list");
1539 }
1540 }
1541 printf("\n");
1542 }
1543 }
1544 #endif /* LOCKF_DEBUGGING */
1545
1546 #if IMPORTANCE_INHERITANCE
1547
1548 /*
1549 * lf_hold_assertion
1550 *
1551 * Call task importance hold assertion on the owner of the lock.
1552 *
1553 * Parameters: block_task Owner of the lock blocking
1554 * current thread.
1555 *
1556 * block The lock on which the current thread
1557 * is blocking.
1558 *
1559 * Returns: <void>
1560 *
1561 * Notes: A task reference on block_task need not be held, since the
1562 * current thread holds the vnode lock and block_task holds a
1563 * file lock; removing that file lock at exit requires block_task
1564 * to grab the vnode lock.
1565 */
1566 static void
1567 lf_hold_assertion(task_t block_task, struct lockf *block)
1568 {
1569 if (task_importance_hold_file_lock_assertion(block_task, 1) == 0) {
1570 block->lf_boosted = LF_BOOSTED;
1571 LOCKF_DEBUG(LF_DBG_IMPINH,
1572 "lf: importance hold file lock assert on pid %d lock %p\n",
1573 proc_pid(block->lf_owner), block);
1574 }
1575 }
1576
1577
1578 /*
1579 * lf_jump_to_queue_head
1580 *
1581 * Jump the lock from the tail of the block queue to the head of
1582 * the queue.
1583 *
1584 * Parameters: block lockf struct containing the
1585 * block queue.
1586 * lock lockf struct to be jumped to the
1587 * front.
1588 *
1589 * Returns: <void>
1590 */
1591 static void
1592 lf_jump_to_queue_head(struct lockf *block, struct lockf *lock)
1593 {
1594 /* Move the lock to the head of the block queue. */
1595 TAILQ_REMOVE(&block->lf_blkhd, lock, lf_block);
1596 TAILQ_INSERT_HEAD(&block->lf_blkhd, lock, lf_block);
1597 }
1598
1599
1600 /*
1601 * lf_drop_assertion
1602 *
1603 * Drops the task hold assertion.
1604 *
1605 * Parameters: block lockf struct holding the assertion.
1606 *
1607 * Returns: <void>
1608 */
1609 static void
1610 lf_drop_assertion(struct lockf *block)
1611 {
1612 LOCKF_DEBUG(LF_DBG_IMPINH, "lf: %d: dropping assertion for lock %p\n",
1613 proc_pid(block->lf_owner), block);
1614
1615 task_t current_task = proc_task(block->lf_owner);
1616 task_importance_drop_file_lock_assertion(current_task, 1);
1617 block->lf_boosted = LF_NOT_BOOSTED;
1618 }
1619
1620 /*
1621 * lf_adjust_assertion
1622 *
1623 * Adjusts the importance assertion of a file lock. Goes through
1624 * all the blocking locks and checks whether the file lock still
1625 * needs to be boosted.
1626 *
1627 * Parameters: block lockf structure which needs to be adjusted.
1628 *
1629 * Returns: <void>
1630 */
1631 static void
1632 lf_adjust_assertion(struct lockf *block)
1633 {
1634 boolean_t drop_boost = TRUE;
1635 struct lockf *next;
1636
1637 /* Return if the lock is not boosted */
1638 if (block->lf_boosted == LF_NOT_BOOSTED) {
1639 return;
1640 }
1641
1642 TAILQ_FOREACH(next, &block->lf_blkhd, lf_block) {
1643 /* Check if block and next are same type of locks */
1644 if (((block->lf_flags & next->lf_flags & F_POSIX) != 0) ||
1645 ((block->lf_flags & next->lf_flags & F_OFD_LOCK) &&
1646 (block->lf_owner != next->lf_owner) &&
1647 (NULL != block->lf_owner && NULL != next->lf_owner))) {
1648 /* Check if next would be boosting block */
1649 if (task_is_importance_donor(proc_task(next->lf_owner)) &&
1650 task_is_importance_receiver_type(proc_task(block->lf_owner))) {
1651 /* Found a lock boosting block */
1652 drop_boost = FALSE;
1653 break;
1654 }
1655 }
1656 }
1657
1658 if (drop_boost) {
1659 lf_drop_assertion(block);
1660 }
1661 }
1662
1663 static void
1664 lf_boost_blocking_proc(struct lockf *lock, struct lockf *block)
1665 {
1666 task_t ltask = proc_task(lock->lf_owner);
1667 task_t btask = proc_task(block->lf_owner);
1668
1669 /*
1670 * Check if ltask can donate importance. The
1671 * check of imp_donor bit is done without holding
1672 * any lock. The value may change after you read it,
1673 * but it is ok to boost a task while someone else is
1674 * unboosting you.
1675 *
1676 * TODO: Support live inheritance on file locks.
1677 */
1678 if (task_is_importance_donor(ltask)) {
1679 LOCKF_DEBUG(LF_DBG_IMPINH,
1680 "lf: %d: attempt to boost pid %d that holds lock %p\n",
1681 proc_pid(lock->lf_owner), proc_pid(block->lf_owner), block);
1682
1683 if (block->lf_boosted != LF_BOOSTED &&
1684 task_is_importance_receiver_type(btask)) {
1685 lf_hold_assertion(btask, block);
1686 }
1687 lf_jump_to_queue_head(block, lock);
1688 }
1689 }
1690 #endif /* IMPORTANCE_INHERITANCE */