/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/signalvar.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/vnode_if.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

#include <kern/task.h>
/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
static int maxlockdepth = MAXDEPTH;
#ifdef LOCKF_DEBUGGING
#include <sys/sysctl.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
void lf_print(const char *tag, struct lockf *lock);
void lf_printlist(const char *tag, struct lockf *lock);
static int      lockf_debug = 2;
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW | CTLFLAG_LOCKED, &lockf_debug, 0, "");
/*
 * If there is no mask bit selector, or there is one, and the selector is
 * set, then output the debugging diagnostic.
 */
#define LOCKF_DEBUG(mask, ...)                                  \
        do {                                                    \
                if( !(mask) || ((mask) & lockf_debug)) {        \
                        printf(__VA_ARGS__);                    \
                }                                               \
        } while(0)
#else   /* !LOCKF_DEBUGGING */
#define LOCKF_DEBUG(mask, ...)          /* mask */
#endif  /* !LOCKF_DEBUGGING */
MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

#define NOLOCKF (struct lockf *)0
#define SELF    0x1
#define OTHERS  0x2
#define OFF_MAX 0x7fffffffffffffffULL   /* max off_t */
/*
 * Overlapping lock states
 */
typedef enum {
        OVERLAP_NONE = 0,
        OVERLAP_EQUALS_LOCK,
        OVERLAP_CONTAINS_LOCK,
        OVERLAP_CONTAINED_BY_LOCK,
        OVERLAP_STARTS_BEFORE_LOCK,
        OVERLAP_ENDS_AFTER_LOCK
} overlap_t;
static int       lf_clearlock(struct lockf *);
static overlap_t lf_findoverlap(struct lockf *,
            struct lockf *, int, struct lockf ***, struct lockf **);
static struct lockf *lf_getblock(struct lockf *, pid_t);
static int       lf_getlock(struct lockf *, struct flock *, pid_t);
static int       lf_setlock(struct lockf *, struct timespec *);
static int       lf_split(struct lockf *, struct lockf *);
static void      lf_wakelock(struct lockf *, boolean_t);

#if IMPORTANCE_INHERITANCE
static void      lf_hold_assertion(task_t, struct lockf *);
static void      lf_jump_to_queue_head(struct lockf *, struct lockf *);
static void      lf_drop_assertion(struct lockf *);
#endif /* IMPORTANCE_INHERITANCE */
/*
 * lf_advlock
 *
 * Description: Advisory record locking support
 *
 * Parameters:  ap                      Argument pointer to a vnop_advlock_args
 *                                      argument descriptor structure for the
 *                                      lock operation to be attempted.
 *
 * Returns:     0                       Success
 *              ENOLCK                  Number of locked regions exceeds limit
 *      lf_setlock:ETIMEDOUT
 *      lf_clearlock:ENOLCK
 *
 * Notes:       We return ENOLCK when we run out of memory to support locks; as
 *              such, there is no specific expectation limit other than the
 *              amount of available resources.
 */
int
lf_advlock(struct vnop_advlock_args *ap)
{
        struct vnode *vp = ap->a_vp;
        struct flock *fl = ap->a_fl;
        vfs_context_t context = ap->a_context;
        struct lockf *lock;
        off_t start, end, oadd;
        u_quad_t size;
        int error;
        struct lockf **head = &vp->v_lockf;

        /* XXX HFS may need a !vnode_isreg(vp) EISDIR error here */
        /*
         * Avoid the common case of unlocking when inode has no locks.
         */
        if (*head == (struct lockf *)0) {
                if (ap->a_op != F_SETLK) {
                        fl->l_type = F_UNLCK;
                        LOCKF_DEBUG(0, "lf_advlock: '%s' unlock without lock\n", vfs_context_proc(context)->p_comm);
                        return (0);
                }
        }
        /*
         * Convert the flock structure into a start and end.
         */
        switch (fl->l_whence) {

        case SEEK_SET:
        case SEEK_CUR:
                /*
                 * Caller is responsible for adding any necessary offset
                 * when SEEK_CUR is used.
                 */
                start = fl->l_start;
                break;

        case SEEK_END:

                /*
                 * It's OK to cast the u_quad_t to an off_t here, since they
                 * are the same storage size, and the value of the returned
                 * contents will never overflow into the sign bit.  We need to
                 * do this because we will use size to force range checks.
                 */
                if ((error = vnode_size(vp, (off_t *)&size, context))) {
                        LOCKF_DEBUG(0, "lf_advlock: vnode_getattr failed: %d\n", error);
                        return (error);
                }

                if (size > OFF_MAX ||
                    (fl->l_start > 0 &&
                     size > (u_quad_t)(OFF_MAX - fl->l_start)))
                        return (EOVERFLOW);
                start = size + fl->l_start;
                break;

        default:
                LOCKF_DEBUG(0, "lf_advlock: unknown whence %d\n", fl->l_whence);
                return (EINVAL);
        }
        if (start < 0) {
                LOCKF_DEBUG(0, "lf_advlock: start < 0 (%qd)\n", start);
                return (EINVAL);
        }
        if (fl->l_len < 0) {
                if (start == 0) {
                        LOCKF_DEBUG(0, "lf_advlock: len < 0 & start == 0\n");
                        return (EINVAL);
                }
                end = start - 1;
                start += fl->l_len;
                if (start < 0) {
                        LOCKF_DEBUG(0, "lf_advlock: start < 0 (%qd)\n", start);
                        return (EINVAL);
                }
        } else if (fl->l_len == 0)
                end = -1;
        else {
                oadd = fl->l_len - 1;
                if (oadd > (off_t)(OFF_MAX - start)) {
                        LOCKF_DEBUG(0, "lf_advlock: overflow\n");
                        return (EOVERFLOW);
                }
                end = start + oadd;
        }
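
        /*
         * Worked examples of the conversion above (illustrative only, not
         * from the original sources):
         *
         *      l_whence = SEEK_SET, l_start = 100, l_len = 10
         *              -> start = 100, end = 109
         *      l_whence = SEEK_SET, l_start = 100, l_len = 0
         *              -> start = 100, end = -1   (lock to end-of-file)
         *      l_whence = SEEK_SET, l_start = 100, l_len = -10
         *              -> start = 90,  end = 99   (range before l_start)
         */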
        /*
         * Create the lockf structure
         */
        MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
        if (lock == NULL)
                return (ENOLCK);
        lock->lf_start = start;
        lock->lf_end = end;
        lock->lf_id = ap->a_id;
        lock->lf_vnode = vp;
        lock->lf_type = fl->l_type;
        lock->lf_head = head;
        lock->lf_next = (struct lockf *)0;
        TAILQ_INIT(&lock->lf_blkhd);
        lock->lf_flags = ap->a_flags;
#if IMPORTANCE_INHERITANCE
        lock->lf_boosted = LF_NOT_BOOSTED;
#endif /* IMPORTANCE_INHERITANCE */

        if (ap->a_flags & F_FLOCK)
                lock->lf_flags |= F_WAKE1_SAFE;

        lck_mtx_lock(&vp->v_lock);      /* protect the lockf list */
        /*
         * Do the requested operation.
         */
        switch(ap->a_op) {
        case F_SETLK:
                error = lf_setlock(lock, ap->a_timeout);
                break;

        case F_UNLCK:
                error = lf_clearlock(lock);
                FREE(lock, M_LOCKF);
                break;

        case F_GETLK:
                error = lf_getlock(lock, fl, -1);
                FREE(lock, M_LOCKF);
                break;

        default:
                FREE(lock, M_LOCKF);
                error = EINVAL;
                break;
        }
        lck_mtx_unlock(&vp->v_lock);    /* done manipulating the list */

        LOCKF_DEBUG(0, "lf_advlock: normal exit: %d\n\n", error);
        return (error);
}
/*
 * Empty the queue of msleeping requests for a lock on the given vnode.
 * Called with the vnode already locked.  Used for forced unmount, where
 * a flock(2) invoker sleeping on a blocked lock holds an iocount reference
 * that prevents the vnode from ever being drained.  Force unmounting wins.
 */
void
lf_abort_advlocks(vnode_t vp)
{
        struct lockf *lock;

        if ((lock = vp->v_lockf) == NULL)
                return;

        lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);

        if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
                struct lockf *tlock;

                TAILQ_FOREACH(tlock, &lock->lf_blkhd, lf_block) {
                        /*
                         * Setting this flag should cause all
                         * currently blocked F_SETLK requests to
                         * return to userland with an errno.
                         */
                        tlock->lf_flags |= F_ABORT;
                }
                lf_wakelock(lock, TRUE);
        }
}
/*
 * Take any lock attempts which are currently blocked by a given lock ("from")
 * and mark them as blocked by a different lock ("to").  Used in the case
 * where a byte range currently occupied by "from" is to be occupied by "to."
 */
static void
lf_move_blocked(struct lockf *to, struct lockf *from)
{
        struct lockf *tlock;

        TAILQ_FOREACH(tlock, &from->lf_blkhd, lf_block) {
                tlock->lf_next = to;
        }

        TAILQ_CONCAT(&to->lf_blkhd, &from->lf_blkhd, lf_block);
}
/*
 * lf_coalesce_adjacent
 *
 * Description: Helper function: when setting a lock, coalesce adjacent
 *              locks.  Needed because adjacent locks are not overlapping,
 *              but POSIX requires that they be coalesced.
 *
 * Parameters:  lock                    The new lock which may be adjacent
 *                                      to already locked regions, and which
 *                                      should therefore be coalesced with them
 *
 * Returns:     <void>
 */
static void
lf_coalesce_adjacent(struct lockf *lock)
{
        struct lockf **lf = lock->lf_head;

        while (*lf != NOLOCKF) {
                /* reject locks that obviously could not be coalesced */
                if ((*lf == lock) ||
                    ((*lf)->lf_id != lock->lf_id) ||
                    ((*lf)->lf_type != lock->lf_type)) {
                        lf = &(*lf)->lf_next;
                        continue;
                }

                /*
                 * NOTE: Assumes that if two locks are adjacent on the number line
                 * and belong to the same owner, then they are adjacent on the list.
                 */

                /* If the lock ends adjacent to us, we can coalesce it */
                if ((*lf)->lf_end != -1 &&
                    ((*lf)->lf_end + 1) == lock->lf_start) {
                        struct lockf *adjacent = *lf;

                        LOCKF_DEBUG(0, "lf_coalesce_adjacent: coalesce adjacent previous\n");
                        lock->lf_start = (*lf)->lf_start;
                        *lf = lock;
                        lf = &(*lf)->lf_next;

                        lf_move_blocked(lock, adjacent);

                        FREE(adjacent, M_LOCKF);
                        continue;
                }
                /* If the lock starts adjacent to us, we can coalesce it */
                if (lock->lf_end != -1 &&
                    (lock->lf_end + 1) == (*lf)->lf_start) {
                        struct lockf *adjacent = *lf;

                        LOCKF_DEBUG(0, "lf_coalesce_adjacent: coalesce adjacent following\n");
                        lock->lf_end = (*lf)->lf_end;
                        lock->lf_next = (*lf)->lf_next;
                        lf = &lock->lf_next;

                        lf_move_blocked(lock, adjacent);

                        FREE(adjacent, M_LOCKF);
                        continue;
                }

                /* no matching conditions; go on to next lock */
                lf = &(*lf)->lf_next;
        }
}
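
/*
 * Illustrative example of the coalescing behavior above (userland view,
 * a sketch, not part of this file's build): two back-to-back ranges of the
 * same type taken by the same process end up as one lock record.
 *
 *      struct flock a = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *                         .l_start = 0,   .l_len = 100 };  // [0, 99]
 *      struct flock b = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *                         .l_start = 100, .l_len = 100 };  // [100, 199]
 *      fcntl(fd, F_SETLK, &a);
 *      fcntl(fd, F_SETLK, &b);
 *      // lf_coalesce_adjacent() leaves a single lockf record for [0, 199].
 */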
/*
 * lf_setlock
 *
 * Description: Set a byte-range lock.
 *
 * Parameters:  lock                    The lock structure describing the lock
 *                                      to be set; allocated by the caller, it
 *                                      will be linked into the lock list if
 *                                      the set is successful, and freed if the
 *                                      set is unsuccessful.
 *
 *              timeout                 Timeout specified in the case of
 *                                      SETLKWTIMEOUT.
 *
 * Returns:     0                       Success
 *      lf_clearlock:ENOLCK
 *
 * Notes:       We add the lock to the provisional lock list.  We do not
 *              coalesce at this time; this has implications for other lock
 *              requestors in the blocker search mechanism.
 */
static int
lf_setlock(struct lockf *lock, struct timespec *timeout)
{
        struct lockf *block;
        struct lockf **head = lock->lf_head;
        struct lockf **prev, *overlap, *ltmp;
        static char lockstr[] = "lockf";
        int priority, needtolink, error;
        struct vnode *vp = lock->lf_vnode;
        overlap_t ovcase;
#if IMPORTANCE_INHERITANCE
        task_t boosting_task, block_task;
#endif /* IMPORTANCE_INHERITANCE */

#ifdef LOCKF_DEBUGGING
        if (lockf_debug & 1) {
                lf_print("lf_setlock", lock);
                lf_printlist("lf_setlock(in)", lock);
        }
#endif /* LOCKF_DEBUGGING */

        /*
         * Set the priority
         */
        priority = PZERO + 1;
        if (lock->lf_type == F_WRLCK)
                priority += 4;
        priority |= PCATCH;
        /*
         * Scan lock list for this file looking for locks that would block us.
         */
        while ((block = lf_getblock(lock, -1))) {
                /*
                 * Free the structure and return if nonblocking.
                 */
                if ((lock->lf_flags & F_WAIT) == 0) {
                        DTRACE_FSINFO(advlock__nowait, vnode_t, vp);
                        FREE(lock, M_LOCKF);
                        return (EAGAIN);
                }

                /*
                 * We are blocked. Since flock style locks cover
                 * the whole file, there is no chance for deadlock.
                 * For byte-range locks we must check for deadlock.
                 *
                 * Deadlock detection is done by looking through the
                 * wait channels to see if there are any cycles that
                 * involve us. MAXDEPTH is set just to make sure we
                 * do not go off into neverland.
                 */
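
                /*
                 * Illustrative deadlock (a sketch of the cycle the walk
                 * below detects; not from the original sources):
                 *
                 *      process A holds [0, 99]    and sleeps for [100, 199]
                 *      process B holds [100, 199] and sleeps for [0, 99]
                 *
                 * Following B's wait channel leads to a lock owned by the
                 * requester (us), so the attempt fails with EDEADLK rather
                 * than sleeping forever.
                 */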
                if ((lock->lf_flags & F_POSIX) &&
                    (block->lf_flags & F_POSIX)) {
                        struct proc *wproc, *bproc;
                        struct uthread *ut;
                        struct lockf *waitblock;
                        int i = 0;

                        /* The block is waiting on something */
                        wproc = (struct proc *)block->lf_id;
                        proc_lock(wproc);
                        TAILQ_FOREACH(ut, &wproc->p_uthlist, uu_list) {
                                /*
                                 * While the thread is asleep (uu_wchan != 0)
                                 * in this code (uu_wmesg == lockstr)
                                 * and we have not exceeded the maximum cycle
                                 * depth (i < maxlockdepth), then check for a
                                 * cycle to see if the lock is blocked behind
                                 * someone blocked behind us.
                                 */
                                while (((waitblock = (struct lockf *)ut->uu_wchan) != NULL) &&
                                    ut->uu_wmesg == lockstr &&
                                    (i++ < maxlockdepth)) {
                                        waitblock = (struct lockf *)ut->uu_wchan;
                                        /*
                                         * Get the lock blocking the lock
                                         * which would block us, and make
                                         * certain it hasn't come unblocked
                                         * (been granted, e.g. between the time
                                         * we called lf_getblock, and the time
                                         * we successfully acquired the
                                         * proc_lock).
                                         */
                                        waitblock = waitblock->lf_next;
                                        if (waitblock == NULL)
                                                break;

                                        /*
                                         * Make sure it's an advisory range
                                         * lock and not an overall file lock;
                                         * if we mix lock types, it's our own
                                         * fault.
                                         */
                                        if ((waitblock->lf_flags & F_POSIX) == 0)
                                                break;

                                        /*
                                         * If the owner of the lock that's
                                         * blocking a lock that's blocking us
                                         * is us, then we would deadlock, so
                                         * error out.
                                         */
                                        bproc = (struct proc *)waitblock->lf_id;
                                        if (bproc == (struct proc *)lock->lf_id) {
                                                proc_unlock(wproc);
                                                FREE(lock, M_LOCKF);
                                                return (EDEADLK);
                                        }
                                }
                        }
                        proc_unlock(wproc);
                }
                /*
                 * For flock type locks, we must first remove
                 * any shared locks that we hold before we sleep
                 * waiting for an exclusive lock.
                 */
                if ((lock->lf_flags & F_FLOCK) &&
                    lock->lf_type == F_WRLCK) {
                        lock->lf_type = F_UNLCK;
                        if ((error = lf_clearlock(lock)) != 0) {
                                FREE(lock, M_LOCKF);
                                return (error);
                        }
                        lock->lf_type = F_WRLCK;
                }
                /*
                 * Add our lock to the blocked list and sleep until we're free.
                 * Remember who blocked us (for deadlock detection).
                 */
                lock->lf_next = block;
                TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);

                if ( !(lock->lf_flags & F_FLOCK))
                        block->lf_flags &= ~F_WAKE1_SAFE;

#ifdef LOCKF_DEBUGGING
                if (lockf_debug & 1) {
                        lf_print("lf_setlock: blocking on", block);
                        lf_printlist("lf_setlock(block)", block);
                }
#endif /* LOCKF_DEBUGGING */
                DTRACE_FSINFO(advlock__wait, vnode_t, vp);
#if IMPORTANCE_INHERITANCE
                /*
                 * POSIX type locks are not inherited by child processes and
                 * maintain a one-to-one mapping between a lock and its owner,
                 * while flock type locks are inherited across fork() and do
                 * not maintain any one-to-one mapping between a lock and the
                 * lock owner.  Thus importance donation is done only for
                 * POSIX type locks.
                 */
                if ((lock->lf_flags & F_POSIX) && (block->lf_flags & F_POSIX)) {
                        block_task = proc_task((proc_t) block->lf_id);
                        boosting_task = proc_task((proc_t) lock->lf_id);

                        /* Check if current task can donate importance. The
                         * check of imp_donor bit is done without holding
                         * any lock. The value may change after you read it,
                         * but it is ok to boost a task while someone else is
                         * unboosting you.
                         *
                         * TODO: Support live inheritance on file locks.
                         */
                        if (task_is_importance_donor(boosting_task)) {
                                if (block->lf_boosted != LF_BOOSTED &&
                                    task_is_importance_receiver_type(block_task)) {
                                        lf_hold_assertion(block_task, block);
                                }
                                lf_jump_to_queue_head(block, lock);
                        }
                }
#endif /* IMPORTANCE_INHERITANCE */
                error = msleep(lock, &vp->v_lock, priority, lockstr, timeout);

                if (error == 0 && (lock->lf_flags & F_ABORT) != 0)
                        error = EBADF;

                if (lock->lf_next) {
                        /*
                         * lf_wakelock() always sets wakelock->lf_next to
                         * NULL before a wakeup; so we've been woken early
                         * - perhaps by a debugger, signal or other event.
                         *
                         * Remove 'lock' from the block list (avoids double-add
                         * in the spurious case, which would create a cycle)
                         */
                        TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
                        lock->lf_next = NULL;

                        if (error == 0) {
                                /*
                                 * If this was a spurious wakeup, retry
                                 */
                                printf("%s: spurious wakeup, retrying lock\n",
                                    __func__);
                                continue;
                        }
                }

                if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
                        if ((block = lf_getblock(lock, -1)) != NULL)
                                lf_move_blocked(block, lock);
                }

                if (error) {
                        if (!TAILQ_EMPTY(&lock->lf_blkhd))
                                lf_wakelock(lock, TRUE);
                        FREE(lock, M_LOCKF);
                        /* Return ETIMEDOUT if timeout occurred. */
                        if (error == EWOULDBLOCK) {
                                error = ETIMEDOUT;
                        }
                        return (error);
                }
        }
        /*
         * No blocks!!  Add the lock.  Note that we will
         * downgrade or upgrade any overlapping locks this
         * process already owns.
         *
         * Skip over locks owned by other processes.
         * Handle any locks that overlap and are owned by ourselves.
         */
        prev = head;
        block = *head;
        needtolink = 1;
        for (;;) {
                ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
                if (ovcase)
                        block = overlap->lf_next;
                /*
                 * Six cases:
                 *      0) no overlap
                 *      1) overlap == lock
                 *      2) overlap contains lock
                 *      3) lock contains overlap
                 *      4) overlap starts before lock
                 *      5) overlap ends after lock
                 */
                switch (ovcase) {
                case OVERLAP_NONE:
                        if (needtolink) {
                                *prev = lock;
                                lock->lf_next = overlap;
                        }
                        break;

                case OVERLAP_EQUALS_LOCK:
                        /*
                         * If downgrading lock, others may be
                         * able to acquire it.
                         */
                        if (lock->lf_type == F_RDLCK &&
                            overlap->lf_type == F_WRLCK)
                                lf_wakelock(overlap, TRUE);
                        overlap->lf_type = lock->lf_type;
                        FREE(lock, M_LOCKF);
                        lock = overlap; /* for lf_coalesce_adjacent() */
                        break;
                case OVERLAP_CONTAINS_LOCK:
                        /*
                         * Check for common starting point and different types.
                         */
                        if (overlap->lf_type == lock->lf_type) {
                                FREE(lock, M_LOCKF);
                                lock = overlap; /* for lf_coalesce_adjacent() */
                                break;
                        }
                        if (overlap->lf_start == lock->lf_start) {
                                *prev = lock;
                                lock->lf_next = overlap;
                                overlap->lf_start = lock->lf_end + 1;
                        } else {
                                /*
                                 * If we can't split the lock, we can't
                                 * grant it.  Claim a system limit for the
                                 * resource shortage.
                                 */
                                if (lf_split(overlap, lock)) {
                                        FREE(lock, M_LOCKF);
                                        return (ENOLCK);
                                }
                        }
                        lf_wakelock(overlap, TRUE);
                        break;
                case OVERLAP_CONTAINED_BY_LOCK:
                        /*
                         * If downgrading lock, others may be able to
                         * acquire it, otherwise take the list.
                         */
                        if (lock->lf_type == F_RDLCK &&
                            overlap->lf_type == F_WRLCK) {
                                lf_wakelock(overlap, TRUE);
                        } else {
                                while (!TAILQ_EMPTY(&overlap->lf_blkhd)) {
                                        ltmp = TAILQ_FIRST(&overlap->lf_blkhd);
                                        TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
                                            lf_block);
                                        TAILQ_INSERT_TAIL(&lock->lf_blkhd,
                                            ltmp, lf_block);
                                        ltmp->lf_next = lock;
                                }
                        }
                        /*
                         * Add the new lock if necessary and delete the overlap.
                         */
                        if (needtolink) {
                                *prev = lock;
                                lock->lf_next = overlap->lf_next;
                                prev = &lock->lf_next;
                                needtolink = 0;
                        } else
                                *prev = overlap->lf_next;
                        FREE(overlap, M_LOCKF);
                        continue;
                case OVERLAP_STARTS_BEFORE_LOCK:
                        /*
                         * Add lock after overlap on the list.
                         */
                        lock->lf_next = overlap->lf_next;
                        overlap->lf_next = lock;
                        overlap->lf_end = lock->lf_start - 1;
                        prev = &lock->lf_next;
                        lf_wakelock(overlap, TRUE);
                        needtolink = 0;
                        continue;
                case OVERLAP_ENDS_AFTER_LOCK:
                        /*
                         * Add the new lock before overlap.
                         */
                        if (needtolink) {
                                *prev = lock;
                                lock->lf_next = overlap;
                        }
                        overlap->lf_start = lock->lf_end + 1;
                        lf_wakelock(overlap, TRUE);
                        break;
                }
                break;
        }
        /* Coalesce adjacent locks with identical attributes */
        lf_coalesce_adjacent(lock);
#ifdef LOCKF_DEBUGGING
        if (lockf_debug & 1) {
                lf_print("lf_setlock: got the lock", lock);
                lf_printlist("lf_setlock(out)", lock);
        }
#endif /* LOCKF_DEBUGGING */
        return (0);
}
/*
 * lf_clearlock
 *
 * Description: Remove a byte-range lock on a vnode.  Generally, find the
 *              lock (or an overlap to that lock) and remove it (or shrink
 *              it), then wakeup anyone we can.
 *
 * Parameters:  unlock                  The lock to clear
 *
 * Returns:     0                       Success
 *      lf_split:ENOLCK
 *
 * Notes:       A caller may unlock all the locks owned by the caller by
 *              specifying the entire file range; locks owned by other
 *              callers are not affected by this operation.
 */
static int
lf_clearlock(struct lockf *unlock)
{
        struct lockf **head = unlock->lf_head;
        struct lockf *lf = *head;
        struct lockf *overlap, **prev;
        overlap_t ovcase;

        if (lf == NOLOCKF)
                return (0);
#ifdef LOCKF_DEBUGGING
        if (unlock->lf_type != F_UNLCK)
                panic("lf_clearlock: bad type");
        if (lockf_debug & 1)
                lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUGGING */
        while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap)) != OVERLAP_NONE) {
                /*
                 * Wakeup the list of locks to be retried.
                 */
                lf_wakelock(overlap, FALSE);
#if IMPORTANCE_INHERITANCE
                if (overlap->lf_boosted == LF_BOOSTED) {
                        lf_drop_assertion(overlap);
                }
#endif /* IMPORTANCE_INHERITANCE */
                switch (ovcase) {
                case OVERLAP_NONE:      /* satisfy compiler enum/switch */
                        break;

                case OVERLAP_EQUALS_LOCK:
                        *prev = overlap->lf_next;
                        FREE(overlap, M_LOCKF);
                        break;

                case OVERLAP_CONTAINS_LOCK: /* split it */
                        if (overlap->lf_start == unlock->lf_start) {
                                overlap->lf_start = unlock->lf_end + 1;
                                break;
                        }
                        /*
                         * If we can't split the lock, we can't grant it.
                         * Claim a system limit for the resource shortage.
                         */
                        if (lf_split(overlap, unlock))
                                return (ENOLCK);
                        overlap->lf_next = unlock->lf_next;
                        break;

                case OVERLAP_CONTAINED_BY_LOCK:
                        *prev = overlap->lf_next;
                        lf = overlap->lf_next;
                        FREE(overlap, M_LOCKF);
                        continue;

                case OVERLAP_STARTS_BEFORE_LOCK:
                        overlap->lf_end = unlock->lf_start - 1;
                        prev = &overlap->lf_next;
                        lf = overlap->lf_next;
                        continue;

                case OVERLAP_ENDS_AFTER_LOCK:
                        overlap->lf_start = unlock->lf_end + 1;
                        break;
                }
                break;
        }
#ifdef LOCKF_DEBUGGING
        if (lockf_debug & 1)
                lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUGGING */
        return (0);
}
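
/*
 * Illustrative example (userland view, a sketch, not part of this file's
 * build): unlocking the whole file releases every record lock the caller
 * owns on it, per the whole-range note in the block comment above.
 *
 *      struct flock fl = { .l_type = F_UNLCK, .l_whence = SEEK_SET,
 *                          .l_start = 0, .l_len = 0 };  // len 0 = to EOF
 *      fcntl(fd, F_SETLK, &fl);  // clears all of this process's locks
 */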
/*
 * lf_getlock
 *
 * Description: Check whether there is a blocking lock, and if so return
 *              its process identifier into the lock being requested.
 *
 * Parameters:  lock                    Pointer to lock to test for blocks
 *              fl                      Pointer to flock structure to receive
 *                                      the blocking lock information, if a
 *                                      blocking lock is found.
 *              matchpid                -1, or pid value to match in lookup.
 *
 * Returns:     0                       Success
 *
 * Implicit Returns:
 *              *fl                     Contents modified to reflect the
 *                                      blocking lock, if one is found; not
 *                                      otherwise modified.
 *
 * Notes:       fl->l_pid will be (-1) for file locks and will only be set to
 *              the blocking process ID for advisory record locks.
 */
static int
lf_getlock(struct lockf *lock, struct flock *fl, pid_t matchpid)
{
        struct lockf *block;

#ifdef LOCKF_DEBUGGING
        if (lockf_debug & 1)
                lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUGGING */

        if ((block = lf_getblock(lock, matchpid))) {
                fl->l_type = block->lf_type;
                fl->l_whence = SEEK_SET;
                fl->l_start = block->lf_start;
                if (block->lf_end == -1)
                        fl->l_len = 0;
                else
                        fl->l_len = block->lf_end - block->lf_start + 1;
                if (block->lf_flags & F_POSIX)
                        fl->l_pid = proc_pid((struct proc *)(block->lf_id));
                else
                        fl->l_pid = -1;
        } else {
                fl->l_type = F_UNLCK;
        }
        return (0);
}
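
/*
 * Illustrative F_GETLK probe (userland view, a sketch, not part of this
 * file's build): on return, l_type is F_UNLCK if the range could be locked,
 * or describes the first blocking lock found via lf_getblock() above.
 *
 *      struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *                          .l_start = 0, .l_len = 4096 };
 *      if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
 *              printf("blocked by pid %d over [%lld, +%lld)\n",
 *                  (int)fl.l_pid, (long long)fl.l_start,
 *                  (long long)fl.l_len);
 */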
/*
 * lf_getblock
 *
 * Description: Walk the list of locks for an inode and return the first
 *              blocking lock.  A lock is considered blocking if we are not
 *              the lock owner; otherwise, we are permitted to upgrade or
 *              downgrade it, and it's not considered blocking.
 *
 * Parameters:  lock                    The lock for which we are interested
 *                                      in obtaining the blocking lock, if any
 *              matchpid                -1, or pid value to match in lookup.
 *
 * Returns:     NOLOCKF                 No blocking lock exists
 *              !NOLOCKF                The address of the blocking lock's
 *                                      struct lockf.
 */
static struct lockf *
lf_getblock(struct lockf *lock, pid_t matchpid)
{
        struct lockf **prev, *overlap, *lf = *(lock->lf_head);

        for (prev = lock->lf_head;
            lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != OVERLAP_NONE;
            lf = overlap->lf_next) {
                /*
                 * Found an overlap.
                 *
                 * If we're matching pids, and it's a record lock,
                 * but the pid doesn't match, then keep on looking ..
                 */
                if (matchpid != -1 &&
                    (overlap->lf_flags & F_POSIX) != 0 &&
                    proc_pid((struct proc *)(overlap->lf_id)) != matchpid)
                        continue;

                /*
                 * does it block us?
                 */
                if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
                        return (overlap);
        }
        return (NOLOCKF);
}
/*
 * lf_findoverlap
 *
 * Description: Walk the list of locks to find an overlapping lock (if any).
 *
 * Parameters:  lf                      First lock on lock list
 *              lock                    The lock we are checking for an overlap
 *              type                    Check type (SELF or OTHERS)
 *              prev                    pointer to a pointer-to-pointer, to
 *                                      receive the address of the pointer to
 *                                      the lock previous to the overlapping
 *                                      lock, if an overlap is found
 *              overlap                 pointer to pointer to contain address
 *                                      of overlapping lock
 *
 * Returns:     OVERLAP_NONE
 *              OVERLAP_EQUALS_LOCK
 *              OVERLAP_CONTAINS_LOCK
 *              OVERLAP_CONTAINED_BY_LOCK
 *              OVERLAP_STARTS_BEFORE_LOCK
 *              OVERLAP_ENDS_AFTER_LOCK
 *
 * Implicit Returns:
 *              *prev                   The address of the next pointer in the
 *                                      lock previous to the overlapping lock;
 *                                      this is generally used to relink the
 *                                      lock list, avoiding a second iteration.
 *              *overlap                The pointer to the overlapping lock
 *                                      itself; this is used to return data in
 *                                      the check == OTHERS case, and for the
 *                                      caller to modify the overlapping lock,
 *                                      in the check == SELF case
 *
 * Note:        This returns only the FIRST overlapping lock.  There may be
 *              more than one.  lf_getlock will return the first blocking lock,
 *              while lf_setlock will iterate over all overlapping locks to
 *              handle each of them in turn.
 *
 *              The check parameter can be SELF, meaning we are looking for
 *              overlapping locks owned by us, or it can be OTHERS, meaning
 *              we are looking for overlapping locks owned by someone else so
 *              we can report a blocking lock on an F_GETLK request.
 *
 *              The values of *overlap and *prev are modified, even if there is
 *              no overlapping lock found; always check the return code.
 */
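
/*
 * A sketch of the five overlap cases (illustrative, not from the original
 * sources), with "lock" the incoming request and "lf" the list entry being
 * tested; ranges are inclusive:
 *
 *      OVERLAP_EQUALS_LOCK             lf:   |=======|
 *                                      lock: |=======|
 *
 *      OVERLAP_CONTAINS_LOCK           lf:   |===========|
 *                                      lock:    |=====|
 *
 *      OVERLAP_CONTAINED_BY_LOCK       lf:      |=====|
 *                                      lock: |===========|
 *
 *      OVERLAP_STARTS_BEFORE_LOCK      lf:   |=======|
 *                                      lock:      |=======|
 *
 *      OVERLAP_ENDS_AFTER_LOCK         lf:        |=======|
 *                                      lock: |=======|
 */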
static overlap_t
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
               struct lockf ***prev, struct lockf **overlap)
{
        off_t start, end;
        int found_self = 0;

        *overlap = lf;
        if (lf == NOLOCKF)
                return (0);
#ifdef LOCKF_DEBUGGING
        if (lockf_debug & 2)
                lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUGGING */
        start = lock->lf_start;
        end = lock->lf_end;
        while (lf != NOLOCKF) {
                if (((type & SELF) && lf->lf_id != lock->lf_id) ||
                    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
                        /*
                         * Locks belonging to one process are adjacent on the
                         * list, so if we've found any locks belonging to us,
                         * and we're now seeing something else, then we've
                         * examined all "self" locks.  Note that bailing out
                         * here is quite important; for coalescing, we assume
                         * numerically adjacent locks from the same owner to
                         * be adjacent on the list.
                         */
                        if ((type & SELF) && found_self) {
                                return OVERLAP_NONE;
                        }

                        *prev = &lf->lf_next;
                        *overlap = lf = lf->lf_next;
                        continue;
                }

                if ((type & SELF)) {
                        found_self = 1;
                }

#ifdef LOCKF_DEBUGGING
                if (lockf_debug & 2)
                        lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUGGING */
                /*
                 * OK, check for overlap
                 */
                if ((lf->lf_end != -1 && start > lf->lf_end) ||
                    (end != -1 && lf->lf_start > end)) {
                        /* Case 0 */
                        LOCKF_DEBUG(2, "no overlap\n");

                        /*
                         * NOTE: assumes that locks for the same process are
                         * nonintersecting and ordered.
                         */
                        if ((type & SELF) && end != -1 && lf->lf_start > end)
                                return (OVERLAP_NONE);
                        *prev = &lf->lf_next;
                        *overlap = lf = lf->lf_next;
                        continue;
                }
                if ((lf->lf_start == start) && (lf->lf_end == end)) {
                        LOCKF_DEBUG(2, "overlap == lock\n");
                        return (OVERLAP_EQUALS_LOCK);
                }
                if ((lf->lf_start <= start) &&
                    (end != -1) &&
                    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
                        LOCKF_DEBUG(2, "overlap contains lock\n");
                        return (OVERLAP_CONTAINS_LOCK);
                }
                if (start <= lf->lf_start &&
                           (end == -1 ||
                           (lf->lf_end != -1 && end >= lf->lf_end))) {
                        LOCKF_DEBUG(2, "lock contains overlap\n");
                        return (OVERLAP_CONTAINED_BY_LOCK);
                }
                if ((lf->lf_start < start) &&
                        ((lf->lf_end >= start) || (lf->lf_end == -1))) {
                        LOCKF_DEBUG(2, "overlap starts before lock\n");
                        return (OVERLAP_STARTS_BEFORE_LOCK);
                }
                if ((lf->lf_start > start) &&
                        (end != -1) &&
                        ((lf->lf_end > end) || (lf->lf_end == -1))) {
                        LOCKF_DEBUG(2, "overlap ends after lock\n");
                        return (OVERLAP_ENDS_AFTER_LOCK);
                }
                panic("lf_findoverlap: default");
        }
        return (OVERLAP_NONE);
}
/*
 * lf_split
 *
 * Description: Split a lock and a contained region into two or three locks
 *              as necessary.
 *
 * Parameters:  lock1                   Lock to split
 *              lock2                   Overlapping lock region requiring the
 *                                      split (upgrade/downgrade/unlock)
 *
 * Returns:     0                       Success
 *              ENOLCK                  No memory for new lock
 *
 * Implicit Returns:
 *              *lock1                  Modified original lock
 *              *lock2                  Overlapping lock (inserted into list)
 *              (new lock)              Potential new lock inserted into list
 *                                      if split results in 3 locks
 *
 * Notes:       This operation can only fail if the split would result in three
 *              locks, and there is insufficient memory to allocate the third
 *              lock; in that case, neither of the locks will be modified.
 */
static int
lf_split(struct lockf *lock1, struct lockf *lock2)
{
        struct lockf *splitlock;

#ifdef LOCKF_DEBUGGING
        if (lockf_debug & 2) {
                lf_print("lf_split", lock1);
                lf_print("splitting from", lock2);
        }
#endif /* LOCKF_DEBUGGING */
        /*
         * Check to see if splitting into only two pieces.
         */
        if (lock1->lf_start == lock2->lf_start) {
                lock1->lf_start = lock2->lf_end + 1;
                lock2->lf_next = lock1;
                return (0);
        }
        if (lock1->lf_end == lock2->lf_end) {
                lock1->lf_end = lock2->lf_start - 1;
                lock2->lf_next = lock1->lf_next;
                lock1->lf_next = lock2;
                return (0);
        }
        /*
         * Make a new lock consisting of the last part of
         * the encompassing lock
         */
        MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
        if (splitlock == NULL)
                return (ENOLCK);
        bcopy(lock1, splitlock, sizeof *splitlock);
        splitlock->lf_start = lock2->lf_end + 1;
        TAILQ_INIT(&splitlock->lf_blkhd);
        lock1->lf_end = lock2->lf_start - 1;
        /*
         * OK, now link it in
         */
        splitlock->lf_next = lock1->lf_next;
        lock2->lf_next = splitlock;
        lock1->lf_next = lock2;

        return (0);
}
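
/*
 * Illustrative example (a sketch, not part of this file's build): unlocking
 * or retyping the middle of a held range is the three-lock case above.
 *
 *      lock1 (held):     [0, 299]
 *      lock2 (overlap):  [100, 199]
 *
 * lock1 is trimmed to [0, 99], the new "splitlock" copy covers [200, 299],
 * and lock2 is linked in between them; if the splitlock allocation fails,
 * ENOLCK is returned and neither lock is modified.
 */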
/*
 * lf_wakelock
 *
 * Wakeup a blocklist in the case of a downgrade or unlock, since others
 * waiting on the lock may now be able to acquire it.
 *
 * Parameters:  listhead                Lock list head on which waiters may
 *                                      have pending locks
 *
 * Returns:     <void>
 *
 * Notes:       This function iterates a list of locks and wakes all waiters,
 *              rather than only waiters for the contended regions.  Because
 *              of this, for heavily contended files, this can result in a
 *              "thundering herd" situation.  Refactoring the code could make
 *              this operation more efficient, if heavy contention ever results
 *              in a real-world performance problem.
 */
static void
lf_wakelock(struct lockf *listhead, boolean_t force_all)
{
        struct lockf *wakelock;
        boolean_t wake_all = TRUE;

        if (force_all == FALSE && (listhead->lf_flags & F_WAKE1_SAFE))
                wake_all = FALSE;

        while (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
                wakelock = TAILQ_FIRST(&listhead->lf_blkhd);
                TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);

                wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUGGING
                if (lockf_debug & 2)
                        lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUGGING */
                if (wake_all == FALSE) {
                        /*
                         * If there are items on the list head block list,
                         * move them to the wakelock list instead, and then
                         * correct their lf_next pointers.
                         */
                        if (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
                                TAILQ_CONCAT(&wakelock->lf_blkhd, &listhead->lf_blkhd, lf_block);

                                struct lockf *tlock;

                                TAILQ_FOREACH(tlock, &wakelock->lf_blkhd, lf_block) {
                                        if (TAILQ_NEXT(tlock, lf_block) == tlock) {
                                                /* See rdar://10887303 */
                                                panic("cycle in wakelock list");
                                        }
                                        tlock->lf_next = wakelock;
                                }
                        }
                }

                wakeup(wakelock);

                if (wake_all == FALSE)
                        break;
        }
}
#ifdef LOCKF_DEBUGGING
/*
 * lf_print DEBUG
 *
 * Print out a lock; lock information is prefixed by the string in 'tag'
 *
 * Parameters:  tag                     A string tag for debugging
 *              lock                    The lock whose information should be
 *                                      displayed
 *
 * Returns:     <void>
 */
void
lf_print(const char *tag, struct lockf *lock)
{
        printf("%s: lock %p for ", tag, (void *)lock);
        if (lock->lf_flags & F_POSIX)
                printf("proc %ld", (long)((struct proc *)lock->lf_id)->p_pid);
        else
                printf("id %p", (void *)lock->lf_id);
        if (lock->lf_vnode != 0)
                printf(" in vno %p, %s, start 0x%016llx, end 0x%016llx",
                    lock->lf_vnode,
                    lock->lf_type == F_RDLCK ? "shared" :
                    lock->lf_type == F_WRLCK ? "exclusive" :
                    lock->lf_type == F_UNLCK ? "unlock" : "unknown",
                    (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
        else
                printf(" %s, start 0x%016llx, end 0x%016llx",
                    lock->lf_type == F_RDLCK ? "shared" :
                    lock->lf_type == F_WRLCK ? "exclusive" :
                    lock->lf_type == F_UNLCK ? "unlock" : "unknown",
                    (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
        if (!TAILQ_EMPTY(&lock->lf_blkhd))
                printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd));
        else
                printf("\n");
}
/*
 * lf_printlist DEBUG
 *
 * Print out a lock list for the vnode associated with 'lock'; lock information
 * is prefixed by the string in 'tag'
 *
 * Parameters:  tag                     A string tag for debugging
 *              lock                    The lock whose vnode's lock list should
 *                                      be displayed
 *
 * Returns:     <void>
 */
void
lf_printlist(const char *tag, struct lockf *lock)
{
        struct lockf *lf, *blk;

        if (lock->lf_vnode == 0)
                return;

        printf("%s: Lock list for vno %p:\n",
            tag, lock->lf_vnode);
        for (lf = lock->lf_vnode->v_lockf; lf; lf = lf->lf_next) {
                printf("\tlock %p for ",(void *)lf);
                if (lf->lf_flags & F_POSIX)
                        printf("proc %ld",
                            (long)((struct proc *)lf->lf_id)->p_pid);
                else
                        printf("id %p", (void *)lf->lf_id);
                printf(", %s, start 0x%016llx, end 0x%016llx",
                    lf->lf_type == F_RDLCK ? "shared" :
                    lf->lf_type == F_WRLCK ? "exclusive" :
                    lf->lf_type == F_UNLCK ? "unlock" :
                    "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
                TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
                        printf("\n\t\tlock request %p for ", (void *)blk);
                        if (blk->lf_flags & F_POSIX)
                                printf("proc %ld",
                                    (long)((struct proc *)blk->lf_id)->p_pid);
                        else
                                printf("id %p", (void *)blk->lf_id);
                        printf(", %s, start 0x%016llx, end 0x%016llx",
                            blk->lf_type == F_RDLCK ? "shared" :
                            blk->lf_type == F_WRLCK ? "exclusive" :
                            blk->lf_type == F_UNLCK ? "unlock" :
                            "unknown", (intmax_t)blk->lf_start,
                            (intmax_t)blk->lf_end);
                        if (!TAILQ_EMPTY(&blk->lf_blkhd))
                                panic("lf_printlist: bad list");
                }
                printf("\n");
        }
}
#endif /* LOCKF_DEBUGGING */
#if IMPORTANCE_INHERITANCE

/*
 * lf_hold_assertion
 *
 * Call task importance hold assertion on the owner of the lock.
 *
 * Parameters: block_task               Owner of the lock blocking
 *                                      the current thread.
 *
 *             block                    lock on which the current thread
 *                                      is blocked.
 *
 * Returns: <void>
 *
 * Notes: The task reference on block_task does not need to be held, since
 *        the current thread has the vnode lock and block_task has a file
 *        lock; removing the file lock at exit requires block_task to
 *        grab the vnode lock.
 */
static void
lf_hold_assertion(task_t block_task, struct lockf *block)
{
        if (task_importance_hold_file_lock_assertion(block_task, 1)) {
                block->lf_boosted = LF_BOOSTED;
        }
}
/*
 * lf_jump_to_queue_head
 *
 * Jump the lock from the tail of the block queue to the head of
 * the queue.
 *
 * Parameters: block                    lockf struct containing the
 *                                      block queue.
 *
 *             lock                     lockf struct to be jumped to the
 *                                      head of the queue.
 *
 * Returns: <void>
 */
static void
lf_jump_to_queue_head(struct lockf *block, struct lockf *lock)
{
        /* Move the lock to the head of the block queue. */
        TAILQ_REMOVE(&block->lf_blkhd, lock, lf_block);
        TAILQ_INSERT_HEAD(&block->lf_blkhd, lock, lf_block);
}
/*
 * lf_drop_assertion
 *
 * Drops the task hold assertion.
 *
 * Parameters: block                    lockf struct holding the assertion.
 *
 * Returns: <void>
 */
static void
lf_drop_assertion(struct lockf *block)
{
        task_t current_task;

        current_task = proc_task((proc_t) block->lf_id);
        task_importance_drop_file_lock_assertion(current_task, 1);
        block->lf_boosted = LF_NOT_BOOSTED;
}

#endif /* IMPORTANCE_INHERITANCE */