/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_ldebug.h>

#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach_debug/lockgroup_info.h>

#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>
#include <string.h>

#include <sys/kdebug.h>

#define	LCK_MTX_SLEEP_CODE		0
#define	LCK_MTX_SLEEP_DEADLINE_CODE	1
#define	LCK_MTX_LCK_WAIT_CODE		2
#define	LCK_MTX_UNLCK_WAKEUP_CODE	3
static queue_head_t	lck_grp_queue;
static unsigned int	lck_grp_cnt;

decl_mutex_data(static,lck_grp_lock)

lck_grp_attr_t	LockDefaultGroupAttr;
lck_grp_t	LockCompatGroup;
lck_attr_t	LockDefaultLckAttr;
/*
 * Routine:	lck_mod_init
 */
void
lck_mod_init(
	void)
{
	queue_init(&lck_grp_queue);
	mutex_init(&lck_grp_lock, 0);
	lck_grp_cnt = 0;
	lck_grp_attr_setdefault(&LockDefaultGroupAttr);
	lck_grp_init(&LockCompatGroup, "Compatibility APIs", LCK_GRP_ATTR_NULL);
	lck_attr_setdefault(&LockDefaultLckAttr);
}
/*
 * Routine:	lck_grp_attr_alloc_init
 */
lck_grp_attr_t *
lck_grp_attr_alloc_init(
	void)
{
	lck_grp_attr_t	*attr;

	if ((attr = (lck_grp_attr_t *)kalloc(sizeof(lck_grp_attr_t))) != 0)
		lck_grp_attr_setdefault(attr);

	return(attr);
}
/*
 * Routine:	lck_grp_attr_setdefault
 */
void
lck_grp_attr_setdefault(
	lck_grp_attr_t	*attr)
{
	if (LcksOpts & enaLkStat)
		attr->grp_attr_val = LCK_GRP_ATTR_STAT;
	else
		attr->grp_attr_val = 0;
}
/*
 * Routine:	lck_grp_attr_setstat
 */
void
lck_grp_attr_setstat(
	lck_grp_attr_t	*attr)
{
	(void)hw_atomic_or((uint32_t *)&attr->grp_attr_val, LCK_GRP_ATTR_STAT);
}
/*
 * Routine:	lck_grp_attr_free
 */
void
lck_grp_attr_free(
	lck_grp_attr_t	*attr)
{
	kfree(attr, sizeof(lck_grp_attr_t));
}
/*
 * Routine:	lck_grp_alloc_init
 */
lck_grp_t *
lck_grp_alloc_init(
	const char*	grp_name,
	lck_grp_attr_t	*attr)
{
	lck_grp_t	*grp;

	if ((grp = (lck_grp_t *)kalloc(sizeof(lck_grp_t))) != 0)
		lck_grp_init(grp, grp_name, attr);

	return(grp);
}
/*
 * Routine:	lck_grp_init
 */
void
lck_grp_init(
	lck_grp_t	*grp,
	const char*	grp_name,
	lck_grp_attr_t	*attr)
{
	bzero((void *)grp, sizeof(lck_grp_t));

	(void) strncpy(grp->lck_grp_name, grp_name, LCK_GRP_MAX_NAME);

	if (attr != LCK_GRP_ATTR_NULL)
		grp->lck_grp_attr = attr->grp_attr_val;
	else if (LcksOpts & enaLkStat)
		grp->lck_grp_attr = LCK_GRP_ATTR_STAT;
	else
		grp->lck_grp_attr = LCK_ATTR_NONE;

	grp->lck_grp_refcnt = 1;

	mutex_lock(&lck_grp_lock);
	enqueue_tail(&lck_grp_queue, (queue_entry_t)grp);
	lck_grp_cnt++;
	mutex_unlock(&lck_grp_lock);
}
/*
 * Routine:	lck_grp_free
 */
void
lck_grp_free(
	lck_grp_t	*grp)
{
	mutex_lock(&lck_grp_lock);
	lck_grp_cnt--;
	(void)remque((queue_entry_t)grp);
	mutex_unlock(&lck_grp_lock);
	lck_grp_deallocate(grp);
}
/*
 * Routine:	lck_grp_reference
 */
void
lck_grp_reference(
	lck_grp_t	*grp)
{
	(void)hw_atomic_add((uint32_t *)(&grp->lck_grp_refcnt), 1);
}
/*
 * Routine:	lck_grp_deallocate
 */
void
lck_grp_deallocate(
	lck_grp_t	*grp)
{
	if (hw_atomic_sub((uint32_t *)(&grp->lck_grp_refcnt), 1) == 0)
		kfree(grp, sizeof(lck_grp_t));
}
/*
 * Routine:	lck_grp_lckcnt_incr
 */
void
lck_grp_lckcnt_incr(
	lck_grp_t	*grp,
	lck_type_t	lck_type)
{
	unsigned int	*lckcnt;

	switch (lck_type) {
	case LCK_TYPE_SPIN:
		lckcnt = &grp->lck_grp_spincnt;
		break;
	case LCK_TYPE_MTX:
		lckcnt = &grp->lck_grp_mtxcnt;
		break;
	case LCK_TYPE_RW:
		lckcnt = &grp->lck_grp_rwcnt;
		break;
	default:
		return panic("lck_grp_lckcnt_incr(): invalid lock type: %d\n", lck_type);
	}

	(void)hw_atomic_add((uint32_t *)lckcnt, 1);
}
/*
 * Routine:	lck_grp_lckcnt_decr
 */
void
lck_grp_lckcnt_decr(
	lck_grp_t	*grp,
	lck_type_t	lck_type)
{
	unsigned int	*lckcnt;

	switch (lck_type) {
	case LCK_TYPE_SPIN:
		lckcnt = &grp->lck_grp_spincnt;
		break;
	case LCK_TYPE_MTX:
		lckcnt = &grp->lck_grp_mtxcnt;
		break;
	case LCK_TYPE_RW:
		lckcnt = &grp->lck_grp_rwcnt;
		break;
	default:
		return panic("lck_grp_lckcnt_decr(): invalid lock type: %d\n", lck_type);
	}

	(void)hw_atomic_sub((uint32_t *)lckcnt, 1);
}
/*
 * Routine:	lck_attr_alloc_init
 */
lck_attr_t *
lck_attr_alloc_init(
	void)
{
	lck_attr_t	*attr;

	if ((attr = (lck_attr_t *)kalloc(sizeof(lck_attr_t))) != 0)
		lck_attr_setdefault(attr);

	return(attr);
}
/*
 * Routine:	lck_attr_setdefault
 */
void
lck_attr_setdefault(
	lck_attr_t	*attr)
{
#if	!DEBUG
	if (LcksOpts & enaLkDeb)
		attr->lck_attr_val = LCK_ATTR_DEBUG;
	else
		attr->lck_attr_val = LCK_ATTR_NONE;
#else
	attr->lck_attr_val = LCK_ATTR_DEBUG;
#endif
}
/*
 * Routine:	lck_attr_setdebug
 */
void
lck_attr_setdebug(
	lck_attr_t	*attr)
{
	(void)hw_atomic_or((uint32_t *)&attr->lck_attr_val, LCK_ATTR_DEBUG);
}
/*
 * Routine:	lck_attr_free
 */
void
lck_attr_free(
	lck_attr_t	*attr)
{
	kfree(attr, sizeof(lck_attr_t));
}
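
/*
 * Illustrative sketch (not part of the original module): typical client-side
 * setup using the group/attribute routines above.  The names example_grp,
 * example_attr and example_mtx are hypothetical; lck_mtx_alloc_init is the
 * machine-dependent allocator declared alongside these routines.
 */
#if 0	/* example only */
static lck_grp_attr_t	*example_grp_attr;
static lck_grp_t	*example_grp;
static lck_attr_t	*example_attr;
static lck_mtx_t	*example_mtx;

static void
example_locks_setup(void)
{
	example_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_attr_setstat(example_grp_attr);		/* opt this group into statistics */
	example_grp = lck_grp_alloc_init("example", example_grp_attr);
	example_attr = lck_attr_alloc_init();
	example_mtx = lck_mtx_alloc_init(example_grp, example_attr);
}
#endif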
/*
 * Routine:	lck_spin_sleep
 */
wait_result_t
lck_spin_sleep(
	lck_spin_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_spin_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_spin_lock(lck);
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_spin_unlock(lck);

	return res;
}
/*
 * Routine:	lck_spin_sleep_deadline
 */
wait_result_t
lck_spin_sleep_deadline(
	lck_spin_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_spin_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_spin_lock(lck);
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_spin_unlock(lck);

	return res;
}
/*
 * Routine:	lck_mtx_sleep
 */
wait_result_t
lck_mtx_sleep(
	lck_mtx_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_START,
		     (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_mtx_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_mtx_lock(lck);
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_mtx_unlock(lck);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);

	return res;
}
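
/*
 * Illustrative sketch (not part of the original module): the usual pattern for
 * lck_mtx_sleep() is a wait loop that re-checks its condition after waking,
 * since the mutex is re-acquired before return unless LCK_SLEEP_UNLOCK is
 * passed.  example_mtx and example_ready are hypothetical.
 */
#if 0	/* example only */
	lck_mtx_lock(example_mtx);
	while (!example_ready)
		(void) lck_mtx_sleep(example_mtx, LCK_SLEEP_DEFAULT,
				     (event_t)&example_ready, THREAD_UNINT);
	lck_mtx_unlock(example_mtx);
#endif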
/*
 * Routine:	lck_mtx_sleep_deadline
 */
wait_result_t
lck_mtx_sleep_deadline(
	lck_mtx_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_START,
		     (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_mtx_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_mtx_lock(lck);
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_mtx_unlock(lck);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);

	return res;
}
/*
 * Routine:	lck_mtx_lock_wait
 *
 * Invoked in order to wait on contention.
 *
 * Called with the interlock locked and
 * returns it unlocked.
 */
void
lck_mtx_lock_wait (
	lck_mtx_t	*lck,
	thread_t	holder)
{
	thread_t	self = current_thread();
	lck_mtx_t	*mutex;
	integer_t	priority;
	spl_t		s = splsched();

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

	priority = self->sched_pri;
	if (priority < self->priority)
		priority = self->priority;
	if (priority > MINPRI_KERNEL)
		priority = MINPRI_KERNEL;
	else
	if (priority < BASEPRI_DEFAULT)
		priority = BASEPRI_DEFAULT;

	thread_lock(holder);
	if (mutex->lck_mtx_pri == 0)
		holder->promotions++;
	if (holder->priority < MINPRI_KERNEL) {
		holder->sched_mode |= TH_MODE_PROMOTED;
		if (	mutex->lck_mtx_pri < priority	&&
			holder->sched_pri < priority		) {
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
					holder->sched_pri, priority, (int)holder, (int)lck, 0);

			set_sched_pri(holder, priority);
		}
	}
	thread_unlock(holder);
	splx(s);

	if (mutex->lck_mtx_pri < priority)
		mutex->lck_mtx_pri = priority;
	if (self->pending_promoter[self->pending_promoter_index] == NULL) {
		self->pending_promoter[self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}
	else
	if (self->pending_promoter[self->pending_promoter_index] != mutex) {
		self->pending_promoter[++self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}

	assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_mtx_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
	lck_mtx_ilk_unlock(mutex);

	thread_block(THREAD_CONTINUE_NULL);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
}
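
/*
 * Note on the routine above (summary added for clarity): a contending thread
 * derives a promotion priority from the larger of its scheduled and base
 * priorities, clamped into [BASEPRI_DEFAULT, MINPRI_KERNEL].  If the holder's
 * base priority is below MINPRI_KERNEL it is marked TH_MODE_PROMOTED and its
 * scheduled priority is raised when both the mutex's recorded priority and
 * the holder's scheduled priority fall below the new value.  The waiter then
 * records the mutex in its pending_promoter[] slot, bumps the waiter count,
 * and blocks on an event derived from the last word of the lock;
 * lck_mtx_lock_acquire and lck_mtx_unlock_wakeup below settle the promotion.
 */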
/*
 * Routine:	lck_mtx_lock_acquire
 *
 * Invoked on acquiring the mutex when there is
 * contention.
 *
 * Returns the current number of waiters.
 *
 * Called with the interlock locked.
 */
int
lck_mtx_lock_acquire(
	lck_mtx_t	*lck)
{
	thread_t	thread = current_thread();
	lck_mtx_t	*mutex;

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	if (thread->pending_promoter[thread->pending_promoter_index] == mutex) {
		thread->pending_promoter[thread->pending_promoter_index] = NULL;
		if (thread->pending_promoter_index > 0)
			thread->pending_promoter_index--;
		mutex->lck_mtx_waiters--;
	}

	if (mutex->lck_mtx_waiters > 0) {
		integer_t	priority = mutex->lck_mtx_pri;
		spl_t		s = splsched();

		thread_lock(thread);
		thread->promotions++;
		if (thread->priority < MINPRI_KERNEL) {
			thread->sched_mode |= TH_MODE_PROMOTED;
			if (thread->sched_pri < priority) {
				KERNEL_DEBUG_CONSTANT(
					MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
						thread->sched_pri, priority, 0, (int)lck, 0);

				set_sched_pri(thread, priority);
			}
		}
		thread_unlock(thread);
		splx(s);
	}
	else
		mutex->lck_mtx_pri = 0;

	return (mutex->lck_mtx_waiters);
}
/*
 * Routine:	lck_mtx_unlock_wakeup
 *
 * Invoked on unlock when there is contention.
 *
 * Called with the interlock locked.
 */
void
lck_mtx_unlock_wakeup (
	lck_mtx_t	*lck,
	thread_t	holder)
{
	thread_t	thread = current_thread();
	lck_mtx_t	*mutex;

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

	if (thread != holder)
		panic("lck_mtx_unlock_wakeup: mutex %x holder %x\n", mutex, holder);

	if (thread->promotions > 0) {
		spl_t	s = splsched();

		thread_lock(thread);
		if (	--thread->promotions == 0			&&
			(thread->sched_mode & TH_MODE_PROMOTED)	) {
			thread->sched_mode &= ~TH_MODE_PROMOTED;
			if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
				KERNEL_DEBUG_CONSTANT(
					MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
						thread->sched_pri, DEPRESSPRI, 0, (int)lck, 0);

				set_sched_pri(thread, DEPRESSPRI);
			}
			else {
				if (thread->priority < thread->sched_pri) {
					KERNEL_DEBUG_CONSTANT(
						MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
							thread->sched_pri, thread->priority, 0, (int)lck, 0);
				}

				compute_priority(thread, FALSE);
			}
		}
		thread_unlock(thread);
		splx(s);
	}

	assert(mutex->lck_mtx_waiters > 0);
	thread_wakeup_one((event_t)(((unsigned int*)lck)+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)));

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
}
/*
 * Routine:	mutex_pause
 *
 * Called by former callers of simple_lock_pause().
 */
void
mutex_pause(void)
{
	wait_result_t	wait_result;

	wait_result = assert_wait_timeout((event_t)mutex_pause, THREAD_UNINT, 1, 1000*NSEC_PER_USEC);
	assert(wait_result == THREAD_WAITING);

	wait_result = thread_block(THREAD_CONTINUE_NULL);
	assert(wait_result == THREAD_TIMED_OUT);
}
/*
 * Routine:	lck_rw_sleep
 */
wait_result_t
lck_rw_sleep(
	lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;
	lck_rw_type_t	lck_rw_type;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_rw_type = lck_rw_done(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
				lck_rw_lock(lck, lck_rw_type);
			else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
				lck_rw_lock_exclusive(lck);
			else
				lck_rw_lock_shared(lck);
		}
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		(void)lck_rw_done(lck);

	return res;
}
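
/*
 * Illustrative sketch (not part of the original module): lck_rw_sleep() drops
 * the rw lock while blocked and, by default, re-takes it in the mode it held
 * before sleeping; LCK_SLEEP_SHARED / LCK_SLEEP_EXCLUSIVE force the re-lock
 * mode instead.  example_rw and example_cond are hypothetical.
 */
#if 0	/* example only */
	lck_rw_lock_exclusive(example_rw);
	while (!example_cond)
		(void) lck_rw_sleep(example_rw, LCK_SLEEP_EXCLUSIVE,
				    (event_t)&example_cond, THREAD_UNINT);
	(void) lck_rw_done(example_rw);
#endif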
/*
 * Routine:	lck_rw_sleep_deadline
 */
wait_result_t
lck_rw_sleep_deadline(
	lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;
	lck_rw_type_t	lck_rw_type;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_rw_type = lck_rw_done(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
				lck_rw_lock(lck, lck_rw_type);
			else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
				lck_rw_lock_exclusive(lck);
			else
				lck_rw_lock_shared(lck);
		}
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		(void)lck_rw_done(lck);

	return res;
}
/*
 * Routine:	host_lockgroup_info
 */
kern_return_t
host_lockgroup_info(
	host_t				host,
	lockgroup_info_array_t		*lockgroup_infop,
	mach_msg_type_number_t		*lockgroup_infoCntp)
{
	lockgroup_info_t	*lockgroup_info_base;
	lockgroup_info_t	*lockgroup_info;
	vm_offset_t		lockgroup_info_addr;
	vm_size_t		lockgroup_info_size;
	lck_grp_t		*lck_grp;
	unsigned int		i;
	vm_size_t		used;
	vm_map_copy_t		copy;
	kern_return_t		kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	mutex_lock(&lck_grp_lock);

	lockgroup_info_size = round_page(lck_grp_cnt * sizeof *lockgroup_info);
	kr = kmem_alloc_pageable(ipc_kernel_map,
				 &lockgroup_info_addr, lockgroup_info_size);
	if (kr != KERN_SUCCESS) {
		mutex_unlock(&lck_grp_lock);
		return(kr);
	}

	lockgroup_info_base = (lockgroup_info_t *) lockgroup_info_addr;
	lck_grp = (lck_grp_t *)queue_first(&lck_grp_queue);
	lockgroup_info = lockgroup_info_base;

	for (i = 0; i < lck_grp_cnt; i++) {

		lockgroup_info->lock_spin_cnt = lck_grp->lck_grp_spincnt;
		lockgroup_info->lock_spin_util_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_util_cnt;
		lockgroup_info->lock_spin_held_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cnt;
		lockgroup_info->lock_spin_miss_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_miss_cnt;
		lockgroup_info->lock_spin_held_max = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_max;
		lockgroup_info->lock_spin_held_cum = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cum;

		lockgroup_info->lock_mtx_cnt = lck_grp->lck_grp_mtxcnt;
		lockgroup_info->lock_mtx_util_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_util_cnt;
		lockgroup_info->lock_mtx_held_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cnt;
		lockgroup_info->lock_mtx_miss_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_miss_cnt;
		lockgroup_info->lock_mtx_wait_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cnt;
		lockgroup_info->lock_mtx_held_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_max;
		lockgroup_info->lock_mtx_held_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cum;
		lockgroup_info->lock_mtx_wait_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_max;
		lockgroup_info->lock_mtx_wait_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cum;

		lockgroup_info->lock_rw_cnt = lck_grp->lck_grp_rwcnt;
		lockgroup_info->lock_rw_util_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt;
		lockgroup_info->lock_rw_held_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cnt;
		lockgroup_info->lock_rw_miss_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt;
		lockgroup_info->lock_rw_wait_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt;
		lockgroup_info->lock_rw_held_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_max;
		lockgroup_info->lock_rw_held_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cum;
		lockgroup_info->lock_rw_wait_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_max;
		lockgroup_info->lock_rw_wait_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cum;

		(void) strncpy(lockgroup_info->lockgroup_name, lck_grp->lck_grp_name, LOCKGROUP_MAX_NAME);

		lck_grp = (lck_grp_t *)(queue_next((queue_entry_t)(lck_grp)));
		lockgroup_info++;
	}

	*lockgroup_infoCntp = lck_grp_cnt;
	mutex_unlock(&lck_grp_lock);

	used = (*lockgroup_infoCntp) * sizeof *lockgroup_info;

	if (used != lockgroup_info_size)
		bzero((char *) lockgroup_info, lockgroup_info_size - used);

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)lockgroup_info_addr,
			   (vm_map_size_t)lockgroup_info_size, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*lockgroup_infop = (lockgroup_info_t *) copy;

	return(KERN_SUCCESS);
}
/*
 * Compatibility module
 */

extern lck_rw_t *lock_alloc_EXT(boolean_t can_sleep, unsigned short tag0, unsigned short tag1);
extern void lock_done_EXT(lck_rw_t *lock);
extern void lock_free_EXT(lck_rw_t *lock);
extern void lock_init_EXT(lck_rw_t *lock, boolean_t can_sleep, unsigned short tag0, unsigned short tag1);
extern void lock_read_EXT(lck_rw_t *lock);
extern boolean_t lock_read_to_write_EXT(lck_rw_t *lock);
extern void lock_write_EXT(lck_rw_t *lock);
extern void lock_write_to_read_EXT(lck_rw_t *lock);
extern wait_result_t thread_sleep_lock_write_EXT(
	event_t event, lck_rw_t *lock, wait_interrupt_t interruptible);

extern lck_mtx_t *mutex_alloc_EXT(unsigned short tag);
extern void mutex_free_EXT(lck_mtx_t *mutex);
extern void mutex_init_EXT(lck_mtx_t *mutex, unsigned short tag);
extern void mutex_lock_EXT(lck_mtx_t *mutex);
extern boolean_t mutex_try_EXT(lck_mtx_t *mutex);
extern void mutex_unlock_EXT(lck_mtx_t *mutex);
extern wait_result_t thread_sleep_mutex_EXT(
	event_t event, lck_mtx_t *mutex, wait_interrupt_t interruptible);
extern wait_result_t thread_sleep_mutex_deadline_EXT(
	event_t event, lck_mtx_t *mutex, uint64_t deadline, wait_interrupt_t interruptible);

extern void usimple_lock_EXT(lck_spin_t *lock);
extern void usimple_lock_init_EXT(lck_spin_t *lock, unsigned short tag);
extern unsigned int usimple_lock_try_EXT(lck_spin_t *lock);
extern void usimple_unlock_EXT(lck_spin_t *lock);
extern wait_result_t thread_sleep_usimple_lock_EXT(
	event_t event, lck_spin_t *lock, wait_interrupt_t interruptible);
lck_rw_t *
lock_alloc_EXT(
	__unused boolean_t	can_sleep,
	__unused unsigned short	tag0,
	__unused unsigned short	tag1)
{
	return( lck_rw_alloc_init( &LockCompatGroup, LCK_ATTR_NULL));
}

void
lock_done_EXT(
	lck_rw_t	*lock)
{
	(void) lck_rw_done(lock);
}

void
lock_free_EXT(
	lck_rw_t	*lock)
{
	lck_rw_free(lock, &LockCompatGroup);
}

void
lock_init_EXT(
	lck_rw_t	*lock,
	__unused boolean_t	can_sleep,
	__unused unsigned short	tag0,
	__unused unsigned short	tag1)
{
	lck_rw_init(lock, &LockCompatGroup, LCK_ATTR_NULL);
}

void
lock_read_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_shared(lock);
}

boolean_t
lock_read_to_write_EXT(
	lck_rw_t	*lock)
{
	return( lck_rw_lock_shared_to_exclusive(lock));
}

void
lock_write_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_exclusive(lock);
}

void
lock_write_to_read_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_exclusive_to_shared(lock);
}

wait_result_t
thread_sleep_lock_write_EXT(
	event_t			event,
	lck_rw_t		*lock,
	wait_interrupt_t	interruptible)
{
	return( lck_rw_sleep(lock, LCK_SLEEP_EXCLUSIVE, event, interruptible));
}

lck_mtx_t *
mutex_alloc_EXT(
	__unused unsigned short	tag)
{
	return(lck_mtx_alloc_init(&LockCompatGroup, LCK_ATTR_NULL));
}

void
mutex_free_EXT(
	lck_mtx_t	*mutex)
{
	lck_mtx_free(mutex, &LockCompatGroup);
}

void
mutex_init_EXT(
	lck_mtx_t	*mutex,
	__unused unsigned short	tag)
{
	lck_mtx_init(mutex, &LockCompatGroup, LCK_ATTR_NULL);
}

void
mutex_lock_EXT(
	lck_mtx_t	*mutex)
{
	lck_mtx_lock(mutex);
}

boolean_t
mutex_try_EXT(
	lck_mtx_t	*mutex)
{
	return(lck_mtx_try_lock(mutex));
}

void
mutex_unlock_EXT(
	lck_mtx_t	*mutex)
{
	lck_mtx_unlock(mutex);
}

wait_result_t
thread_sleep_mutex_EXT(
	event_t			event,
	lck_mtx_t		*mutex,
	wait_interrupt_t	interruptible)
{
	return( lck_mtx_sleep(mutex, LCK_SLEEP_DEFAULT, event, interruptible));
}

wait_result_t
thread_sleep_mutex_deadline_EXT(
	event_t			event,
	lck_mtx_t		*mutex,
	uint64_t		deadline,
	wait_interrupt_t	interruptible)
{
	return( lck_mtx_sleep_deadline(mutex, LCK_SLEEP_DEFAULT, event, interruptible, deadline));
}

void
usimple_lock_EXT(
	lck_spin_t	*lock)
{
	lck_spin_lock(lock);
}

void
usimple_lock_init_EXT(
	lck_spin_t	*lock,
	__unused unsigned short	tag)
{
	lck_spin_init(lock, &LockCompatGroup, LCK_ATTR_NULL);
}

unsigned int
usimple_lock_try_EXT(
	lck_spin_t	*lock)
{
	return lck_spin_try_lock(lock);
}

void
usimple_unlock_EXT(
	lck_spin_t	*lock)
{
	lck_spin_unlock(lock);
}

wait_result_t
thread_sleep_usimple_lock_EXT(
	event_t			event,
	lck_spin_t		*lock,
	wait_interrupt_t	interruptible)
{
	return( lck_spin_sleep(lock, LCK_SLEEP_DEFAULT, event, interruptible));
}
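
/*
 * Illustrative sketch (not part of the original module): legacy callers keep
 * using the old mutex API, which the _EXT shims above route onto the lck_mtx_*
 * primitives charged to LockCompatGroup.  example_mutex is hypothetical.
 */
#if 0	/* example only */
	lck_mtx_t *example_mutex = mutex_alloc_EXT(0);

	mutex_lock_EXT(example_mutex);
	/* ... critical section ... */
	mutex_unlock_EXT(example_mutex);
	mutex_free_EXT(example_mutex);
#endif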