/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_ldebug.h>

#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach_debug/lockgroup_info.h>

#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>

#include <sys/kdebug.h>
/*
 * We need only enough declarations from the BSD-side to be able to
 * test if our probe is active, and to call __dtrace_probe().  Setting
 * NEED_DTRACE_DEFS gets a local copy of those definitions pulled in.
 */
#define NEED_DTRACE_DEFS
#include <../bsd/sys/lockstat.h>
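/*
 * Editorial sketch (not in the original source): the probe definitions pulled
 * in above are used further down in the pattern of testing lockstat_probemap[]
 * before paying for a timestamp, then firing LOCKSTAT_RECORD() with the
 * measured block time only when the probe is actually enabled, roughly:
 *
 *	uint64_t sleep_start = 0;
 *
 *	if (lockstat_probemap[LS_LCK_MTX_LOCK_BLOCK])
 *		sleep_start = mach_absolute_time();
 *	... block on the lock ...
 *	if (sleep_start)
 *		LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_BLOCK, lck,
 *		    mach_absolute_time() - sleep_start);
 */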
#define	LCK_MTX_SLEEP_CODE		0
#define	LCK_MTX_SLEEP_DEADLINE_CODE	1
#define	LCK_MTX_LCK_WAIT_CODE		2
#define	LCK_MTX_UNLCK_WAKEUP_CODE	3
static queue_head_t	lck_grp_queue;
static unsigned int	lck_grp_cnt;

decl_lck_mtx_data(static,lck_grp_lock)
static lck_mtx_ext_t lck_grp_lock_ext;

lck_grp_attr_t	LockDefaultGroupAttr;
lck_grp_t	LockCompatGroup;
lck_attr_t	LockDefaultLckAttr;
/*
 * Routine:	lck_mod_init
 */

void
lck_mod_init(
	void)
{
	/*
	 * Obtain "lcks" options: this currently controls lock statistics
	 */
	if (!PE_parse_boot_argn("lcks", &LcksOpts, sizeof (LcksOpts)))
		LcksOpts = 0;

	queue_init(&lck_grp_queue);

	/*
	 * Need to bootstrap the LockCompatGroup instead of calling lck_grp_init() here. This avoids
	 * grabbing the lck_grp_lock before it is initialized.
	 */
	bzero(&LockCompatGroup, sizeof(lck_grp_t));
	(void) strncpy(LockCompatGroup.lck_grp_name, "Compatibility APIs", LCK_GRP_MAX_NAME);

	if (LcksOpts & enaLkStat)
		LockCompatGroup.lck_grp_attr = LCK_GRP_ATTR_STAT;
	else
		LockCompatGroup.lck_grp_attr = LCK_ATTR_NONE;

	LockCompatGroup.lck_grp_refcnt = 1;

	enqueue_tail(&lck_grp_queue, (queue_entry_t)&LockCompatGroup);
	lck_grp_cnt = 1;

	lck_grp_attr_setdefault(&LockDefaultGroupAttr);
	lck_attr_setdefault(&LockDefaultLckAttr);

	lck_mtx_init_ext(&lck_grp_lock, &lck_grp_lock_ext, &LockCompatGroup, &LockDefaultLckAttr);
}
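/*
 * Illustrative sketch (editorial, not part of the original source): once
 * lck_mod_init() has set up the default group and attributes above, a
 * subsystem typically layers its own group and locks on top of them.  The
 * names "my_grp" and "my_lock" are hypothetical.
 *
 *	lck_grp_t *my_grp  = lck_grp_alloc_init("my-subsystem", LCK_GRP_ATTR_NULL);
 *	lck_mtx_t *my_lock = lck_mtx_alloc_init(my_grp, LCK_ATTR_NULL);
 *	...
 *	lck_mtx_free(my_lock, my_grp);
 *	lck_grp_free(my_grp);
 */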
/*
 * Routine:	lck_grp_attr_alloc_init
 */

lck_grp_attr_t *
lck_grp_attr_alloc_init(
	void)
{
	lck_grp_attr_t	*attr;

	if ((attr = (lck_grp_attr_t *)kalloc(sizeof(lck_grp_attr_t))) != 0)
		lck_grp_attr_setdefault(attr);

	return(attr);
}
/*
 * Routine:	lck_grp_attr_setdefault
 */

void
lck_grp_attr_setdefault(
	lck_grp_attr_t	*attr)
{
	if (LcksOpts & enaLkStat)
		attr->grp_attr_val = LCK_GRP_ATTR_STAT;
	else
		attr->grp_attr_val = 0;
}
/*
 * Routine:	lck_grp_attr_setstat
 */

void
lck_grp_attr_setstat(
	lck_grp_attr_t	*attr)
{
	(void)hw_atomic_or(&attr->grp_attr_val, LCK_GRP_ATTR_STAT);
}
/*
 * Routine:	lck_grp_attr_free
 */

void
lck_grp_attr_free(
	lck_grp_attr_t	*attr)
{
	kfree(attr, sizeof(lck_grp_attr_t));
}
/*
 * Routine:	lck_grp_alloc_init
 */

lck_grp_t *
lck_grp_alloc_init(
	const char*	grp_name,
	lck_grp_attr_t	*attr)
{
	lck_grp_t	*grp;

	if ((grp = (lck_grp_t *)kalloc(sizeof(lck_grp_t))) != 0)
		lck_grp_init(grp, grp_name, attr);

	return(grp);
}
/*
 * Routine:	lck_grp_init
 */

void
lck_grp_init(
	lck_grp_t	*grp,
	const char*	grp_name,
	lck_grp_attr_t	*attr)
{
	bzero((void *)grp, sizeof(lck_grp_t));

	(void) strncpy(grp->lck_grp_name, grp_name, LCK_GRP_MAX_NAME);

	if (attr != LCK_GRP_ATTR_NULL)
		grp->lck_grp_attr = attr->grp_attr_val;
	else if (LcksOpts & enaLkStat)
		grp->lck_grp_attr = LCK_GRP_ATTR_STAT;
	else
		grp->lck_grp_attr = LCK_ATTR_NONE;

	grp->lck_grp_refcnt = 1;

	lck_mtx_lock(&lck_grp_lock);
	enqueue_tail(&lck_grp_queue, (queue_entry_t)grp);
	lck_grp_cnt++;
	lck_mtx_unlock(&lck_grp_lock);
}
/*
 * Routine:	lck_grp_free
 */

void
lck_grp_free(
	lck_grp_t	*grp)
{
	lck_mtx_lock(&lck_grp_lock);
	lck_grp_cnt--;
	(void)remque((queue_entry_t)grp);
	lck_mtx_unlock(&lck_grp_lock);
	lck_grp_deallocate(grp);
}
/*
 * Routine:	lck_grp_reference
 */

void
lck_grp_reference(
	lck_grp_t	*grp)
{
	(void)hw_atomic_add(&grp->lck_grp_refcnt, 1);
}
/*
 * Routine:	lck_grp_deallocate
 */

void
lck_grp_deallocate(
	lck_grp_t	*grp)
{
	if (hw_atomic_sub(&grp->lck_grp_refcnt, 1) == 0)
		kfree(grp, sizeof(lck_grp_t));
}
/*
 * Routine:	lck_grp_lckcnt_incr
 */

void
lck_grp_lckcnt_incr(
	lck_grp_t	*grp,
	lck_type_t	lck_type)
{
	unsigned int	*lckcnt;

	switch (lck_type) {
	case LCK_TYPE_SPIN:
		lckcnt = &grp->lck_grp_spincnt;
		break;
	case LCK_TYPE_MTX:
		lckcnt = &grp->lck_grp_mtxcnt;
		break;
	case LCK_TYPE_RW:
		lckcnt = &grp->lck_grp_rwcnt;
		break;
	default:
		return panic("lck_grp_lckcnt_incr(): invalid lock type: %d\n", lck_type);
	}

	(void)hw_atomic_add(lckcnt, 1);
}
/*
 * Routine:	lck_grp_lckcnt_decr
 */

void
lck_grp_lckcnt_decr(
	lck_grp_t	*grp,
	lck_type_t	lck_type)
{
	unsigned int	*lckcnt;

	switch (lck_type) {
	case LCK_TYPE_SPIN:
		lckcnt = &grp->lck_grp_spincnt;
		break;
	case LCK_TYPE_MTX:
		lckcnt = &grp->lck_grp_mtxcnt;
		break;
	case LCK_TYPE_RW:
		lckcnt = &grp->lck_grp_rwcnt;
		break;
	default:
		return panic("lck_grp_lckcnt_decr(): invalid lock type: %d\n", lck_type);
	}

	(void)hw_atomic_sub(lckcnt, 1);
}
/*
 * Routine:	lck_attr_alloc_init
 */

lck_attr_t *
lck_attr_alloc_init(
	void)
{
	lck_attr_t	*attr;

	if ((attr = (lck_attr_t *)kalloc(sizeof(lck_attr_t))) != 0)
		lck_attr_setdefault(attr);

	return(attr);
}
/*
 * Routine:	lck_attr_setdefault
 */

void
lck_attr_setdefault(
	lck_attr_t	*attr)
{
#if   __i386__ || __x86_64__
#if	!DEBUG
	if (LcksOpts & enaLkDeb)
		attr->lck_attr_val =  LCK_ATTR_DEBUG;
	else
		attr->lck_attr_val =  LCK_ATTR_NONE;
#else
	attr->lck_attr_val =  LCK_ATTR_DEBUG;
#endif	/* !DEBUG */
#else
#error Unknown architecture.
#endif
}
/*
 * Routine:	lck_attr_setdebug
 */

void
lck_attr_setdebug(
	lck_attr_t	*attr)
{
	(void)hw_atomic_or(&attr->lck_attr_val, LCK_ATTR_DEBUG);
}
/*
 * Routine:	lck_attr_cleardebug
 */

void
lck_attr_cleardebug(
	lck_attr_t	*attr)
{
	(void)hw_atomic_and(&attr->lck_attr_val, ~LCK_ATTR_DEBUG);
}
/*
 * Routine:	lck_attr_rw_shared_priority
 */

void
lck_attr_rw_shared_priority(
	lck_attr_t	*attr)
{
	(void)hw_atomic_or(&attr->lck_attr_val, LCK_ATTR_RW_SHARED_PRIORITY);
}
/*
 * Routine:	lck_attr_free
 */

void
lck_attr_free(
	lck_attr_t	*attr)
{
	kfree(attr, sizeof(lck_attr_t));
}
/*
 * Routine:	lck_spin_sleep
 */
wait_result_t
lck_spin_sleep(
	lck_spin_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_spin_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_spin_lock(lck);
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_spin_unlock(lck);

	return res;
}
/*
 * Routine:	lck_spin_sleep_deadline
 */
wait_result_t
lck_spin_sleep_deadline(
	lck_spin_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_spin_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_spin_lock(lck);
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_spin_unlock(lck);

	return res;
}
/*
 * Routine:	lck_mtx_sleep
 */
wait_result_t
lck_mtx_sleep(
	lck_mtx_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_START,
		     (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_mtx_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if ((lck_sleep_action & LCK_SLEEP_SPIN))
				lck_mtx_lock_spin(lck);
			else
				lck_mtx_lock(lck);
		}
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_mtx_unlock(lck);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);

	return res;
}
/*
 * Routine:	lck_mtx_sleep_deadline
 */
wait_result_t
lck_mtx_sleep_deadline(
	lck_mtx_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_START,
		     (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_mtx_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if ((lck_sleep_action & LCK_SLEEP_SPIN))
				lck_mtx_lock_spin(lck);
			else
				lck_mtx_lock(lck);
		}
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_mtx_unlock(lck);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);

	return res;
}
/*
 * Routine:	lck_mtx_lock_wait
 *
 * Invoked in order to wait on contention.
 *
 * Called with the interlock locked and
 * returns it unlocked.
 */
void
lck_mtx_lock_wait (
	lck_mtx_t	*lck,
	thread_t	holder)
{
	thread_t	self = current_thread();
	lck_mtx_t	*mutex;
	integer_t	priority;
	spl_t		s = splsched();
	uint64_t	sleep_start = 0;

	if (lockstat_probemap[LS_LCK_MTX_LOCK_BLOCK] || lockstat_probemap[LS_LCK_MTX_EXT_LOCK_BLOCK]) {
		sleep_start = mach_absolute_time();
	}

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

	priority = self->sched_pri;
	if (priority < self->priority)
		priority = self->priority;
	if (priority < BASEPRI_DEFAULT)
		priority = BASEPRI_DEFAULT;

	thread_lock(holder);
	if (mutex->lck_mtx_pri == 0)
		holder->promotions++;
	holder->sched_flags |= TH_SFLAG_PROMOTED;
	if (		mutex->lck_mtx_pri < priority	&&
			holder->sched_pri < priority		) {
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
				holder->sched_pri, priority, holder, lck, 0);

		set_sched_pri(holder, priority);
	}
	thread_unlock(holder);
	splx(s);

	if (mutex->lck_mtx_pri < priority)
		mutex->lck_mtx_pri = priority;
	if (self->pending_promoter[self->pending_promoter_index] == NULL) {
		self->pending_promoter[self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}
	else
	if (self->pending_promoter[self->pending_promoter_index] != mutex) {
		self->pending_promoter[++self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}

	assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_mtx_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
	lck_mtx_ilk_unlock(mutex);

	thread_block(THREAD_CONTINUE_NULL);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);

	/*
	 * Record the Dtrace lockstat probe for blocking, block time
	 * measured from when we were entered.
	 */
	if (sleep_start) {
		if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) {
			LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_BLOCK, lck,
			    mach_absolute_time() - sleep_start);
		} else {
			LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_BLOCK, lck,
			    mach_absolute_time() - sleep_start);
		}
	}
}
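/*
 * Worked example (editorial, not part of the original source): assume
 * BASEPRI_DEFAULT is 31, the waiter's sched_pri is 31 and its base priority is
 * 40.  lck_mtx_lock_wait() then computes priority = max(31, 40, 31) = 40 and
 * promotes the holder to 40, but only while both the mutex's recorded ceiling
 * (lck_mtx_pri) and the holder's current sched_pri are below 40; the promotion
 * is undone in lck_mtx_unlock_wakeup() once the holder drops the lock.
 */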
/*
 * Routine:	lck_mtx_lock_acquire
 *
 * Invoked on acquiring the mutex when there is
 * contention.
 *
 * Returns the current number of waiters.
 *
 * Called with the interlock locked.
 */
int
lck_mtx_lock_acquire(
	lck_mtx_t	*lck)
{
	thread_t	thread = current_thread();
	lck_mtx_t	*mutex;

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	if (thread->pending_promoter[thread->pending_promoter_index] == mutex) {
		thread->pending_promoter[thread->pending_promoter_index] = NULL;
		if (thread->pending_promoter_index > 0)
			thread->pending_promoter_index--;
		mutex->lck_mtx_waiters--;
	}

	if (mutex->lck_mtx_waiters > 0) {
		integer_t	priority = mutex->lck_mtx_pri;
		spl_t		s = splsched();

		thread_lock(thread);
		thread->promotions++;
		thread->sched_flags |= TH_SFLAG_PROMOTED;
		if (thread->sched_pri < priority) {
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
					thread->sched_pri, priority, 0, lck, 0);

			set_sched_pri(thread, priority);
		}
		thread_unlock(thread);
		splx(s);
	}
	else
		mutex->lck_mtx_pri = 0;

	return (mutex->lck_mtx_waiters);
}
/*
 * Routine:	lck_mtx_unlock_wakeup
 *
 * Invoked on unlock when there is contention.
 *
 * Called with the interlock locked.
 */
void
lck_mtx_unlock_wakeup (
	lck_mtx_t	*lck,
	thread_t	holder)
{
	thread_t	thread = current_thread();
	lck_mtx_t	*mutex;

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	if (thread != holder)
		panic("lck_mtx_unlock_wakeup: mutex %p holder %p\n", mutex, holder);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

	assert(mutex->lck_mtx_waiters > 0);
	thread_wakeup_one((event_t)(((unsigned int*)lck)+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)));

	if (thread->promotions > 0) {
		spl_t		s = splsched();

		thread_lock(thread);
		if (	--thread->promotions == 0			&&
				(thread->sched_flags & TH_SFLAG_PROMOTED)	) {
			thread->sched_flags &= ~TH_SFLAG_PROMOTED;
			if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
				KERNEL_DEBUG_CONSTANT(
					MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
						  thread->sched_pri, DEPRESSPRI, 0, lck, 0);

				set_sched_pri(thread, DEPRESSPRI);
			}
			else {
				if (thread->priority < thread->sched_pri) {
					KERNEL_DEBUG_CONSTANT(
						MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) |
									DBG_FUNC_NONE,
							thread->sched_pri, thread->priority,
								0, lck, 0);
				}

				SCHED(compute_priority)(thread, FALSE);
			}
		}
		thread_unlock(thread);
		splx(s);
	}

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
}
void
lck_mtx_unlockspin_wakeup (
	lck_mtx_t	*lck)
{
	assert(lck->lck_mtx_waiters > 0);
	thread_wakeup_one((event_t)(((unsigned int*)lck)+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)));

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_NONE, (int)lck, 0, 0, 1, 0);

	/*
	 * When there are waiters, we skip the hot-patch spot in the
	 * fastpath, so we record it here.
	 */
	LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, lck, 0);
}
/*
 * Routine:	mutex_pause
 *
 * Called by former callers of simple_lock_pause().
 */
#define MAX_COLLISION_COUNTS	32
#define MAX_COLLISION	8

unsigned int max_collision_count[MAX_COLLISION_COUNTS];

uint32_t collision_backoffs[MAX_COLLISION] = {
	10, 50, 100, 200, 400, 600, 800, 1000
};


void
mutex_pause(uint32_t collisions)
{
	wait_result_t wait_result;
	uint32_t	back_off;

	if (collisions >= MAX_COLLISION_COUNTS)
		collisions = MAX_COLLISION_COUNTS - 1;
	max_collision_count[collisions]++;

	if (collisions >= MAX_COLLISION)
		collisions = MAX_COLLISION - 1;
	back_off = collision_backoffs[collisions];

	wait_result = assert_wait_timeout((event_t)mutex_pause, THREAD_UNINT, back_off, NSEC_PER_USEC);
	assert(wait_result == THREAD_WAITING);

	wait_result = thread_block(THREAD_CONTINUE_NULL);
	assert(wait_result == THREAD_TIMED_OUT);
}
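/*
 * Worked example (editorial, not part of the original source): a caller that
 * has collided three times invokes mutex_pause(3); 3 is below both caps, so
 * max_collision_count[3] is bumped and the thread parks for
 * collision_backoffs[3] = 200 microseconds before retrying.  Any collision
 * count of MAX_COLLISION (8) or more clamps to the last slot and waits the
 * full 1000 microseconds.
 */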
unsigned int mutex_yield_wait = 0;
unsigned int mutex_yield_no_wait = 0;

void
lck_mtx_yield (
	lck_mtx_t	*lck)
{
	int	waiters;

	lck_mtx_assert(lck, LCK_MTX_ASSERT_OWNED);

	if (lck->lck_mtx_tag == LCK_MTX_TAG_INDIRECT)
		waiters = lck->lck_mtx_ptr->lck_mtx.lck_mtx_waiters;
	else
		waiters = lck->lck_mtx_waiters;

	if ( !waiters) {
		mutex_yield_no_wait++;
	} else {
		mutex_yield_wait++;
		lck_mtx_unlock(lck);
		mutex_pause(0);
		lck_mtx_lock(lck);
	}
}
/*
 * Routine:	lck_rw_sleep
 */
wait_result_t
lck_rw_sleep(
	lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;
	lck_rw_type_t	lck_rw_type;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_rw_type = lck_rw_done(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
				lck_rw_lock(lck, lck_rw_type);
			else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
				lck_rw_lock_exclusive(lck);
			else
				lck_rw_lock_shared(lck);
		}
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		(void)lck_rw_done(lck);

	return res;
}
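/*
 * Illustrative sketch (editorial, not part of the original source): sleeping
 * while holding a read-write lock; LCK_SLEEP_EXCLUSIVE asks lck_rw_sleep() to
 * re-take the lock in exclusive mode after the wait.  "my_rw" and "my_flag"
 * are hypothetical.
 *
 *	lck_rw_lock_exclusive(my_rw);
 *	while (!my_flag)
 *		(void) lck_rw_sleep(my_rw, LCK_SLEEP_EXCLUSIVE,
 *		    (event_t)&my_flag, THREAD_UNINT);
 *	...
 *	lck_rw_done(my_rw);
 */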
/*
 * Routine:	lck_rw_sleep_deadline
 */
wait_result_t
lck_rw_sleep_deadline(
	lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;
	lck_rw_type_t	lck_rw_type;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_rw_type = lck_rw_done(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
				lck_rw_lock(lck, lck_rw_type);
			else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
				lck_rw_lock_exclusive(lck);
			else
				lck_rw_lock_shared(lck);
		}
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		(void)lck_rw_done(lck);

	return res;
}
/*
 * Routine:	host_lockgroup_info
 */
kern_return_t
host_lockgroup_info(
	host_t			host,
	lockgroup_info_array_t	*lockgroup_infop,
	mach_msg_type_number_t	*lockgroup_infoCntp)
{
	lockgroup_info_t	*lockgroup_info_base;
	lockgroup_info_t	*lockgroup_info;
	vm_offset_t		lockgroup_info_addr;
	vm_size_t		lockgroup_info_size;
	lck_grp_t		*lck_grp;
	unsigned int		i;
	vm_size_t		used;
	vm_map_copy_t		copy;
	kern_return_t		kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	lck_mtx_lock(&lck_grp_lock);

	lockgroup_info_size = round_page(lck_grp_cnt * sizeof *lockgroup_info);
	kr = kmem_alloc_pageable(ipc_kernel_map,
				 &lockgroup_info_addr, lockgroup_info_size);
	if (kr != KERN_SUCCESS) {
		lck_mtx_unlock(&lck_grp_lock);
		return(kr);
	}

	lockgroup_info_base = (lockgroup_info_t *) lockgroup_info_addr;
	lck_grp = (lck_grp_t *)queue_first(&lck_grp_queue);
	lockgroup_info = lockgroup_info_base;

	for (i = 0; i < lck_grp_cnt; i++) {

		lockgroup_info->lock_spin_cnt = lck_grp->lck_grp_spincnt;
		lockgroup_info->lock_spin_util_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_util_cnt;
		lockgroup_info->lock_spin_held_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cnt;
		lockgroup_info->lock_spin_miss_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_miss_cnt;
		lockgroup_info->lock_spin_held_max = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_max;
		lockgroup_info->lock_spin_held_cum = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cum;

		lockgroup_info->lock_mtx_cnt = lck_grp->lck_grp_mtxcnt;
		lockgroup_info->lock_mtx_util_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_util_cnt;
		lockgroup_info->lock_mtx_held_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cnt;
		lockgroup_info->lock_mtx_miss_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_miss_cnt;
		lockgroup_info->lock_mtx_wait_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cnt;
		lockgroup_info->lock_mtx_held_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_max;
		lockgroup_info->lock_mtx_held_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cum;
		lockgroup_info->lock_mtx_wait_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_max;
		lockgroup_info->lock_mtx_wait_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cum;

		lockgroup_info->lock_rw_cnt = lck_grp->lck_grp_rwcnt;
		lockgroup_info->lock_rw_util_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt;
		lockgroup_info->lock_rw_held_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cnt;
		lockgroup_info->lock_rw_miss_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt;
		lockgroup_info->lock_rw_wait_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt;
		lockgroup_info->lock_rw_held_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_max;
		lockgroup_info->lock_rw_held_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cum;
		lockgroup_info->lock_rw_wait_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_max;
		lockgroup_info->lock_rw_wait_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cum;

		(void) strncpy(lockgroup_info->lockgroup_name, lck_grp->lck_grp_name, LOCKGROUP_MAX_NAME);

		lck_grp = (lck_grp_t *)(queue_next((queue_entry_t)(lck_grp)));
		lockgroup_info++;
	}

	*lockgroup_infoCntp = lck_grp_cnt;
	lck_mtx_unlock(&lck_grp_lock);

	used = (*lockgroup_infoCntp) * sizeof *lockgroup_info;

	if (used != lockgroup_info_size)
		bzero((char *) lockgroup_info, lockgroup_info_size - used);

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)lockgroup_info_addr,
			   (vm_map_size_t)lockgroup_info_size, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*lockgroup_infop = (lockgroup_info_t *) copy;

	return(KERN_SUCCESS);
}
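/*
 * Illustrative user-space sketch (editorial, not part of the original source):
 * the MIG counterpart of the routine above can be used to dump the per-group
 * counters.  Error handling and vm_deallocate() of the returned array are
 * omitted, and the exact field widths come from <mach_debug/lockgroup_info.h>.
 *
 *	lockgroup_info_array_t	info;
 *	mach_msg_type_number_t	count;
 *	unsigned int		i;
 *
 *	if (host_lockgroup_info(mach_host_self(), &info, &count) == KERN_SUCCESS) {
 *		for (i = 0; i < count; i++)
 *			printf("%s: mtx util %llu\n", info[i].lockgroup_name,
 *			    (unsigned long long)info[i].lock_mtx_util_cnt);
 *	}
 */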
/*
 * Compatibility module
 */

extern lck_rw_t		*lock_alloc_EXT( boolean_t can_sleep, unsigned short  tag0, unsigned short  tag1);
extern void		lock_done_EXT(lck_rw_t *lock);
extern void		lock_free_EXT(lck_rw_t *lock);
extern void		lock_init_EXT(lck_rw_t *lock, boolean_t can_sleep, unsigned short tag0, unsigned short tag1);
extern void		lock_read_EXT(lck_rw_t *lock);
extern boolean_t	lock_read_to_write_EXT(lck_rw_t *lock);
extern void		lock_write_EXT(lck_rw_t *lock);
extern void		lock_write_to_read_EXT(lck_rw_t *lock);
extern wait_result_t	thread_sleep_lock_write_EXT(
				event_t event, lck_rw_t *lock, wait_interrupt_t interruptible);

extern void		usimple_lock_EXT(lck_spin_t *lock);
extern void		usimple_lock_init_EXT(lck_spin_t *lock, unsigned short tag);
extern unsigned int	usimple_lock_try_EXT(lck_spin_t *lock);
extern void		usimple_unlock_EXT(lck_spin_t *lock);
extern wait_result_t	thread_sleep_usimple_lock_EXT(event_t event, lck_spin_t *lock, wait_interrupt_t interruptible);

lck_mtx_t*		mutex_alloc_EXT(__unused unsigned short tag);
void			mutex_free_EXT(lck_mtx_t *mutex);
void			mutex_init_EXT(lck_mtx_t *mutex, __unused unsigned short tag);
wait_result_t		thread_sleep_mutex_EXT(event_t event, lck_mtx_t *mutex, wait_interrupt_t interruptible);
wait_result_t		thread_sleep_mutex_deadline_EXT(event_t event, lck_mtx_t *mutex, uint64_t deadline, wait_interrupt_t interruptible);
lck_rw_t *
lock_alloc_EXT(
	__unused boolean_t	can_sleep,
	__unused unsigned short	tag0,
	__unused unsigned short	tag1)
{
	return( lck_rw_alloc_init( &LockCompatGroup, LCK_ATTR_NULL));
}

void
lock_done_EXT(
	lck_rw_t	*lock)
{
	(void) lck_rw_done(lock);
}

void
lock_free_EXT(
	lck_rw_t	*lock)
{
	lck_rw_free(lock, &LockCompatGroup);
}

void
lock_init_EXT(
	lck_rw_t		*lock,
	__unused boolean_t	can_sleep,
	__unused unsigned short	tag0,
	__unused unsigned short	tag1)
{
	lck_rw_init(lock, &LockCompatGroup, LCK_ATTR_NULL);
}

void
lock_read_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_shared( lock);
}

boolean_t
lock_read_to_write_EXT(
	lck_rw_t	*lock)
{
	return( lck_rw_lock_shared_to_exclusive(lock));
}

void
lock_write_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_exclusive(lock);
}

void
lock_write_to_read_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_exclusive_to_shared(lock);
}

wait_result_t
thread_sleep_lock_write_EXT(
	event_t			event,
	lck_rw_t		*lock,
	wait_interrupt_t	interruptible)
{
	return( lck_rw_sleep(lock, LCK_SLEEP_EXCLUSIVE, event, interruptible));
}

void
usimple_lock_EXT(
	lck_spin_t	*lock)
{
	lck_spin_lock(lock);
}

void
usimple_lock_init_EXT(
	lck_spin_t		*lock,
	__unused unsigned short	tag)
{
	lck_spin_init(lock, &LockCompatGroup, LCK_ATTR_NULL);
}

unsigned int
usimple_lock_try_EXT(
	lck_spin_t	*lock)
{
	return(lck_spin_try_lock(lock));
}

void
usimple_unlock_EXT(
	lck_spin_t	*lock)
{
	lck_spin_unlock(lock);
}

wait_result_t
thread_sleep_usimple_lock_EXT(
	event_t			event,
	lck_spin_t		*lock,
	wait_interrupt_t	interruptible)
{
	return( lck_spin_sleep(lock, LCK_SLEEP_DEFAULT, event, interruptible));
}

lck_mtx_t *
mutex_alloc_EXT(
	__unused unsigned short	tag)
{
	return(lck_mtx_alloc_init(&LockCompatGroup, LCK_ATTR_NULL));
}

void
mutex_free_EXT(
	lck_mtx_t	*mutex)
{
	lck_mtx_free(mutex, &LockCompatGroup);
}

void
mutex_init_EXT(
	lck_mtx_t		*mutex,
	__unused unsigned short	tag)
{
	lck_mtx_init(mutex, &LockCompatGroup, LCK_ATTR_NULL);
}

wait_result_t
thread_sleep_mutex_EXT(
	event_t			event,
	lck_mtx_t		*mutex,
	wait_interrupt_t	interruptible)
{
	return( lck_mtx_sleep(mutex, LCK_SLEEP_DEFAULT, event, interruptible));
}

wait_result_t
thread_sleep_mutex_deadline_EXT(
	event_t			event,
	lck_mtx_t		*mutex,
	uint64_t		deadline,
	wait_interrupt_t	interruptible)
{
	return( lck_mtx_sleep_deadline(mutex, LCK_SLEEP_DEFAULT, event, interruptible, deadline));
}