/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Locking primitives implementation
 */
#include <mach_ldebug.h>

#include <kern/kalloc.h>
#include <kern/lock.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>

#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_print.h>

#include <ppc/Firmware.h>

#include <sys/kdebug.h>
/*
 * We need only enough declarations from the BSD-side to be able to
 * test if our probe is active, and to call __dtrace_probe().  Setting
 * NEED_DTRACE_DEFS gets a local copy of those definitions pulled in.
 *
 * Note that if CONFIG_DTRACE is off, the include file below stubs out
 * the code hooks here.
 */
#define NEED_DTRACE_DEFS
#include <../bsd/sys/lockstat.h>
#define	LCK_RW_LCK_EXCLUSIVE_CODE	0x100
#define	LCK_RW_LCK_EXCLUSIVE1_CODE	0x101
#define	LCK_RW_LCK_SHARED_CODE		0x102
#define	LCK_RW_LCK_SH_TO_EX_CODE	0x103
#define	LCK_RW_LCK_SH_TO_EX1_CODE	0x104
#define	LCK_RW_LCK_EX_TO_SH_CODE	0x105
#define	ANY_LOCK_DEBUG	(USLOCK_DEBUG || LOCK_DEBUG || MUTEX_DEBUG)

unsigned int lock_wait_time[2] = { (unsigned int)-1, 0 };

#if	USLOCK_DEBUG
/*
 *	Perform simple lock checks.
 */
int	uslock_check = 1;
int	max_lock_loops = 100000000;
decl_simple_lock_data(extern , printf_lock)
decl_simple_lock_data(extern , panic_lock)
#if	MACH_KDB
decl_simple_lock_data(extern , kdb_lock)
#endif	/* MACH_KDB */
#endif	/* USLOCK_DEBUG */
/*
 *	We often want to know the addresses of the callers
 *	of the various lock routines.  However, this information
 *	is only used for debugging and statistics.
 */
#define	INVALID_PC	((void *) VM_MAX_KERNEL_ADDRESS)
#define	INVALID_THREAD	((void *) VM_MAX_KERNEL_ADDRESS)
#if	ANY_LOCK_DEBUG
#define	OBTAIN_PC(pc,l)	((pc) = (void *) GET_RETURN_PC(&(l)))
#else	/* ANY_LOCK_DEBUG */
#ifdef	lint
/*
 *	Eliminate lint complaints about unused local pc variables.
 */
#define	OBTAIN_PC(pc,l)	++pc
#else	/* lint */
#define	OBTAIN_PC(pc,l)
#endif	/* lint */
#endif	/* USLOCK_DEBUG */
/*
 *	Portable lock package implementation of usimple_locks.
 */

#if	USLOCK_DEBUG
#define	USLDBG(stmt)	stmt
void		usld_lock_init(usimple_lock_t, unsigned short);
void		usld_lock_pre(usimple_lock_t, pc_t);
void		usld_lock_post(usimple_lock_t, pc_t);
void		usld_unlock(usimple_lock_t, pc_t);
void		usld_lock_try_pre(usimple_lock_t, pc_t);
void		usld_lock_try_post(usimple_lock_t, pc_t);
int		usld_lock_common_checks(usimple_lock_t, const char *);
#else	/* USLOCK_DEBUG */
#define	USLDBG(stmt)
#endif	/* USLOCK_DEBUG */
/*
 *      Routine:        lck_spin_alloc_init
 */
lck_spin_t *
lck_spin_alloc_init(
    lck_grp_t   *grp,
    lck_attr_t  *attr) {
    lck_spin_t  *lck;

    if ((lck = (lck_spin_t *)kalloc(sizeof(lck_spin_t))) != 0)
        lck_spin_init(lck, grp, attr);

    return(lck);
}

/*
 *      Routine:        lck_spin_free
 */
void
lck_spin_free(
    lck_spin_t  *lck,
    lck_grp_t   *grp) {
    lck_spin_destroy(lck, grp);
    kfree((void *)lck, sizeof(lck_spin_t));
}

/*
 *      Routine:        lck_spin_init
 */
void
lck_spin_init(
    lck_spin_t          *lck,
    lck_grp_t           *grp,
    __unused lck_attr_t *attr) {

    lck->interlock = 0;
    lck_grp_reference(grp);
    lck_grp_lckcnt_incr(grp, LCK_TYPE_SPIN);
}

/*
 *      Routine:        lck_spin_destroy
 */
void
lck_spin_destroy(
    lck_spin_t  *lck,
    lck_grp_t   *grp) {
    if (lck->interlock == LCK_SPIN_TAG_DESTROYED)
        return;
    lck->interlock = LCK_SPIN_TAG_DESTROYED;
    lck_grp_lckcnt_decr(grp, LCK_TYPE_SPIN);
    lck_grp_deallocate(grp);
    return;
}
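
/*
 *	Usage sketch (illustrative only; assumes a lock group created
 *	elsewhere with lck_grp_alloc_init(), and the lck_spin_lock()/
 *	lck_spin_unlock() entry points provided by the platform lock
 *	package, which are not defined in this file):
 *
 *		lck_grp_t  *grp = lck_grp_alloc_init("example", LCK_GRP_ATTR_NULL);
 *		lck_spin_t *sl  = lck_spin_alloc_init(grp, LCK_ATTR_NULL);
 *
 *		lck_spin_lock(sl);
 *		... short, non-blocking critical section ...
 *		lck_spin_unlock(sl);
 *
 *		lck_spin_free(sl, grp);
 */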
/*
 *	Initialize a usimple_lock.
 *
 *	No change in preemption state.
 */
void
usimple_lock_init(
    usimple_lock_t  l,
    unsigned short  tag)
{
#ifndef	MACHINE_SIMPLE_LOCK
    USLDBG(usld_lock_init(l, tag));
    hw_lock_init(&l->interlock);
#else
    simple_lock_init((simple_lock_t)l,tag);
#endif
}

/*
 *	Acquire a usimple_lock.
 *
 *	Returns with preemption disabled.  Note
 *	that the hw_lock routines are responsible for
 *	maintaining preemption state.
 */
void
usimple_lock(
    usimple_lock_t  l)
{
#ifndef	MACHINE_SIMPLE_LOCK
    pc_t    pc;

    OBTAIN_PC(pc, l);
    USLDBG(usld_lock_pre(l, pc));

    if(!hw_lock_to(&l->interlock, LockTimeOut))	/* Try to get the lock with a timeout */
        panic("simple lock deadlock detection - l=%p, cpu=%d, ret=%p", l, cpu_number(), pc);

    USLDBG(usld_lock_post(l, pc));
#else
    simple_lock((simple_lock_t)l);
#endif
}
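
/*
 *	Typical caller pattern for the routines above (an illustrative
 *	sketch; it assumes the usual usimple_lock_data_t declaration from
 *	the simple lock headers, which is not defined in this file):
 *
 *		usimple_lock_data_t example_lock;
 *
 *		usimple_lock_init(&example_lock, 0);
 *		usimple_lock(&example_lock);
 *		... critical section, runs with preemption disabled ...
 *		usimple_unlock(&example_lock);
 *
 *	usimple_lock_try() follows the same pattern, but the critical
 *	section must be guarded by its return value since the lock is held
 *	only on success.
 */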
/*
 *	Release a usimple_lock.
 *
 *	Returns with preemption enabled.  Note
 *	that the hw_lock routines are responsible for
 *	maintaining preemption state.
 */
void
usimple_unlock(
    usimple_lock_t  l)
{
#ifndef	MACHINE_SIMPLE_LOCK
    pc_t    pc;

    OBTAIN_PC(pc, l);
    USLDBG(usld_unlock(l, pc));
    hw_lock_unlock(&l->interlock);
#else
    simple_unlock_rwmb((simple_lock_t)l);
#endif
}
/*
 *	Conditionally acquire a usimple_lock.
 *
 *	On success, returns with preemption disabled.
 *	On failure, returns with preemption in the same state
 *	as when first invoked.  Note that the hw_lock routines
 *	are responsible for maintaining preemption state.
 *
 *	XXX No stats are gathered on a miss; I preserved this
 *	behavior from the original assembly-language code, but
 *	doesn't it make sense to log misses?  XXX
 */
unsigned int
usimple_lock_try(
    usimple_lock_t  l)
{
#ifndef	MACHINE_SIMPLE_LOCK
    pc_t            pc;
    unsigned int    success;

    OBTAIN_PC(pc, l);
    USLDBG(usld_lock_try_pre(l, pc));
    success = hw_lock_try(&l->interlock);
    if (success)
        USLDBG(usld_lock_try_post(l, pc));
    return success;
#else
    return(simple_lock_try((simple_lock_t)l));
#endif
}
#if	USLOCK_DEBUG
/*
 *	States of a usimple_lock.  The default when initializing
 *	a usimple_lock is setting it up for debug checking.
 */
#define	USLOCK_CHECKED		0x0001		/* lock is being checked */
#define	USLOCK_TAKEN		0x0002		/* lock has been taken */
#define	USLOCK_INIT		0xBAA0		/* lock has been initialized */
#define	USLOCK_INITIALIZED	(USLOCK_INIT|USLOCK_CHECKED)
#define	USLOCK_CHECKING(l)	(uslock_check &&			\
				 ((l)->debug.state & USLOCK_CHECKED))

/*
 *	Trace activities of a particularly interesting lock.
 */
void	usl_trace(usimple_lock_t, int, pc_t, const char *);
/*
 *	Initialize the debugging information contained
 *	in a usimple_lock.
 */
void
usld_lock_init(
    usimple_lock_t          l,
    __unused unsigned short tag)
{
    if (l == USIMPLE_LOCK_NULL)
        panic("lock initialization: null lock pointer");
    l->lock_type = USLOCK_TAG;
    l->debug.state = uslock_check ? USLOCK_INITIALIZED : 0;
    l->debug.lock_cpu = l->debug.unlock_cpu = 0;
    l->debug.lock_pc = l->debug.unlock_pc = INVALID_PC;
    l->debug.lock_thread = l->debug.unlock_thread = INVALID_THREAD;
    l->debug.duration[0] = l->debug.duration[1] = 0;
    l->debug.unlock_cpu = l->debug.unlock_cpu = 0;
    l->debug.unlock_pc = l->debug.unlock_pc = INVALID_PC;
    l->debug.unlock_thread = l->debug.unlock_thread = INVALID_THREAD;
}
/*
 *	These checks apply to all usimple_locks, not just
 *	those with USLOCK_CHECKED turned on.
 */
int
usld_lock_common_checks(usimple_lock_t l, const char *caller)
{
    if (l == USIMPLE_LOCK_NULL)
        panic("%s: null lock pointer", caller);
    if (l->lock_type != USLOCK_TAG)
        panic("%s: 0x%x is not a usimple lock", caller, (integer_t) l);
    if (!(l->debug.state & USLOCK_INIT))
        panic("%s: 0x%x is not an initialized lock",
              caller, (integer_t) l);
    return USLOCK_CHECKING(l);
}
/*
 *	Debug checks on a usimple_lock just before
 *	attempting to acquire it.
 */
void
usld_lock_pre(
    usimple_lock_t  l,
    pc_t            pc)
{
    const char *caller = "usimple_lock";

    if (!usld_lock_common_checks(l, caller))
        return;

/*
 *	Note that we have a weird case where we are getting a lock when we are
 *	in the process of putting the system to sleep. We are running with no
 *	current threads, therefore we can't tell if we are trying to retake a lock
 *	we have or someone on the other processor has it.  Therefore we just
 *	ignore this test if the locking thread is 0.
 */

    if ((l->debug.state & USLOCK_TAKEN) && l->debug.lock_thread &&
        l->debug.lock_thread == (void *) current_thread()) {
        printf("%s: lock 0x%x already locked (at %p) by",
               caller, (integer_t) l, l->debug.lock_pc);
        printf(" current thread %p (new attempt at pc %p)\n",
               l->debug.lock_thread, pc);
    }
    mp_disable_preemption();
    usl_trace(l, cpu_number(), pc, caller);
    mp_enable_preemption();
}
/*
 *	Debug checks on a usimple_lock just after acquiring it.
 *
 *	Pre-emption has been disabled at this point,
 *	so we are safe in using cpu_number.
 */
void
usld_lock_post(
    usimple_lock_t  l,
    pc_t            pc)
{
    register int    mycpu;
    const char *caller = "successful usimple_lock";

    if (!usld_lock_common_checks(l, caller))
        return;

    if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
        panic("%s: lock 0x%x became uninitialized",
              caller, (integer_t) l);
    if ((l->debug.state & USLOCK_TAKEN))
        panic("%s: lock 0x%x became TAKEN by someone else",
              caller, (integer_t) l);

    mycpu = cpu_number();
    l->debug.lock_thread = (void *)current_thread();
    l->debug.state |= USLOCK_TAKEN;
    l->debug.lock_pc = pc;
    l->debug.lock_cpu = mycpu;

    usl_trace(l, mycpu, pc, caller);
}
/*
 *	Debug checks on a usimple_lock just before
 *	releasing it.  Note that the caller has not
 *	yet released the hardware lock.
 *
 *	Preemption is still disabled, so there's
 *	no problem using cpu_number.
 */
void
usld_unlock(
    usimple_lock_t  l,
    pc_t            pc)
{
    register int    mycpu;
    const char *caller = "usimple_unlock";

    if (!usld_lock_common_checks(l, caller))
        return;

    mycpu = cpu_number();

    if (!(l->debug.state & USLOCK_TAKEN))
        panic("%s: lock 0x%x hasn't been taken",
              caller, (integer_t) l);
    if (l->debug.lock_thread != (void *) current_thread())
        panic("%s: unlocking lock 0x%x, owned by thread %p",
              caller, (integer_t) l, l->debug.lock_thread);
    if (l->debug.lock_cpu != mycpu) {
        printf("%s: unlocking lock 0x%x on cpu 0x%x",
               caller, (integer_t) l, mycpu);
        printf(" (acquired on cpu 0x%x)\n", l->debug.lock_cpu);
    }
    usl_trace(l, mycpu, pc, caller);

    l->debug.unlock_thread = l->debug.lock_thread;
    l->debug.lock_thread = INVALID_PC;
    l->debug.state &= ~USLOCK_TAKEN;
    l->debug.unlock_pc = pc;
    l->debug.unlock_cpu = mycpu;
}
/*
 *	Debug checks on a usimple_lock just before
 *	attempting to acquire it.
 *
 *	Preemption isn't guaranteed to be disabled.
 */
void
usld_lock_try_pre(
    usimple_lock_t  l,
    pc_t            pc)
{
    const char *caller = "usimple_lock_try";

    if (!usld_lock_common_checks(l, caller))
        return;
    mp_disable_preemption();
    usl_trace(l, cpu_number(), pc, caller);
    mp_enable_preemption();
}
/*
 *	Debug checks on a usimple_lock just after
 *	successfully attempting to acquire it.
 *
 *	Preemption has been disabled by the
 *	lock acquisition attempt, so it's safe
 *	to use cpu_number.
 */
void
usld_lock_try_post(
    usimple_lock_t  l,
    pc_t            pc)
{
    register int    mycpu;
    const char *caller = "successful usimple_lock_try";

    if (!usld_lock_common_checks(l, caller))
        return;

    if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
        panic("%s: lock 0x%x became uninitialized",
              caller, (integer_t) l);
    if ((l->debug.state & USLOCK_TAKEN))
        panic("%s: lock 0x%x became TAKEN by someone else",
              caller, (integer_t) l);

    mycpu = cpu_number();
    l->debug.lock_thread = (void *) current_thread();
    l->debug.state |= USLOCK_TAKEN;
    l->debug.lock_pc = pc;
    l->debug.lock_cpu = mycpu;

    usl_trace(l, mycpu, pc, caller);
}
/*
 *	For very special cases, set traced_lock to point to a
 *	specific lock of interest.  The result is a series of
 *	XPRs showing lock operations on that lock.  The lock_seq
 *	value is used to show the order of those operations.
 */
usimple_lock_t	traced_lock;
unsigned int	lock_seq;

void
usl_trace(
    usimple_lock_t  l,
    int             mycpu,
    pc_t            pc,
    const char *    op_name)
{
    if (traced_lock == l) {
        XPR(XPR_SLOCK,
            "seq %d, cpu %d, %s @ %x\n",
            (integer_t) lock_seq, (integer_t) mycpu,
            (integer_t) op_name, (integer_t) pc, 0);
        lock_seq++;
    }
}

#endif	/* USLOCK_DEBUG */
/*
 *	The C portion of the shared/exclusive locks package.
 */

void lck_rw_lock_exclusive_gen(lck_rw_t *lck);
lck_rw_type_t lck_rw_done_gen(lck_rw_t *lck);
void lck_rw_lock_shared_gen(lck_rw_t *lck);
boolean_t lck_rw_lock_shared_to_exclusive_gen(lck_rw_t *lck);
void lck_rw_lock_exclusive_to_shared_gen(lck_rw_t *lck);
boolean_t lck_rw_try_lock_exclusive_gen(lck_rw_t *lck);
boolean_t lck_rw_try_lock_shared_gen(lck_rw_t *lck);
void lck_rw_ext_init(lck_rw_ext_t *lck, lck_grp_t *grp, lck_attr_t *attr);
void lck_rw_ext_backtrace(lck_rw_ext_t *lck);
void lck_rw_lock_exclusive_ext(lck_rw_ext_t *lck, lck_rw_t *rlck);
lck_rw_type_t lck_rw_done_ext(lck_rw_ext_t *lck, lck_rw_t *rlck);
void lck_rw_lock_shared_ext(lck_rw_ext_t *lck, lck_rw_t *rlck);
boolean_t lck_rw_lock_shared_to_exclusive_ext(lck_rw_ext_t *lck, lck_rw_t *rlck);
void lck_rw_lock_exclusive_to_shared_ext(lck_rw_ext_t *lck, lck_rw_t *rlck);
boolean_t lck_rw_try_lock_exclusive_ext(lck_rw_ext_t *lck, lck_rw_t *rlck);
boolean_t lck_rw_try_lock_shared_ext(lck_rw_ext_t *lck, lck_rw_t *rlck);
/*
 *	Routine:	lock_alloc
 *	Function:
 *		Allocate a lock for external users who cannot
 *		hard-code the structure definition into their
 *		objects.
 *		For now just use kalloc, but a zone is probably
 *		warranted.
 */
lock_t *
lock_alloc(
    boolean_t               can_sleep,
    __unused unsigned short tag,
    __unused unsigned short tag1)
{
    lock_t  *lck;

    if ((lck = (lock_t *)kalloc(sizeof(lock_t))) != 0)
        lock_init(lck, can_sleep, tag, tag1);
    return(lck);
}
/*
 *	Routine:	lock_init
 *	Function:
 *		Initialize a lock; required before use.
 *		Note that clients declare the "struct lock"
 *		variables and then initialize them, rather
 *		than getting a new one from this module.
 */
void
lock_init(
    lock_t                  *lck,
    boolean_t               can_sleep,
    __unused unsigned short tag,
    __unused unsigned short tag1)
{
    if (!can_sleep)
        panic("lock_init: sleep mode must be set to TRUE\n");

    (void) memset((void *) lck, 0, sizeof(lock_t));
#if	MACH_LDEBUG
    lck->lck_rw_deb.type = RW_TAG;
    lck->lck_rw_attr |= (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_THREAD|LCK_RW_ATTR_DIS_MYLOCK);
    lck->lck_rw.lck_rw_priv_excl = TRUE;
#else
    lck->lck_rw_priv_excl = TRUE;
#endif
}
/*
 *	Routine:	lock_free
 *	Function:
 *		Free a lock allocated for external users.
 *		For now just use kfree, but a zone is probably
 *		warranted.
 */
void
lock_free(
    lock_t  *lck)
{
    kfree((void *)lck, sizeof(lock_t));
}

/*
 *	Routine:	lock_write
 */
void
lock_write(
    register lock_t *lck)
{
    lck_rw_lock_exclusive_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
}

/*
 *	Routine:	lock_done
 */
void
lock_done(
    register lock_t *lck)
{
    (void)lck_rw_done_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
}

/*
 *	Routine:	lock_read
 */
void
lock_read(
    register lock_t *lck)
{
    lck_rw_lock_shared_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
}

/*
 *	Routine:	lock_read_to_write
 */
boolean_t
lock_read_to_write(
    register lock_t *lck)
{
    return(lck_rw_lock_shared_to_exclusive_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck));
}

/*
 *	Routine:	lock_write_to_read
 */
void
lock_write_to_read(
    register lock_t *lck)
{
    lck_rw_lock_exclusive_to_shared_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
}
/*
 *      Routine:        lck_rw_alloc_init
 */
lck_rw_t *
lck_rw_alloc_init(
    lck_grp_t   *grp,
    lck_attr_t  *attr) {
    lck_rw_t    *lck;

    if ((lck = (lck_rw_t *)kalloc(sizeof(lck_rw_t))) != 0)
        lck_rw_init(lck, grp, attr);

    return(lck);
}

/*
 *      Routine:        lck_rw_free
 */
void
lck_rw_free(
    lck_rw_t    *lck,
    lck_grp_t   *grp) {
    lck_rw_destroy(lck, grp);
    kfree((void *)lck, sizeof(lck_rw_t));
}

/*
 *      Routine:        lck_rw_init
 */
void
lck_rw_init(
    lck_rw_t    *lck,
    lck_grp_t   *grp,
    lck_attr_t  *attr) {
    lck_rw_ext_t    *lck_ext;
    lck_attr_t      *lck_attr;

    if (attr != LCK_ATTR_NULL)
        lck_attr = attr;
    else
        lck_attr = &LockDefaultLckAttr;

    if ((lck_attr->lck_attr_val) & LCK_ATTR_DEBUG) {
        if ((lck_ext = (lck_rw_ext_t *)kalloc(sizeof(lck_rw_ext_t))) != 0) {
            lck_rw_ext_init(lck_ext, grp, lck_attr);
            lck->lck_rw_tag = LCK_RW_TAG_INDIRECT;
            lck->lck_rw_ptr = lck_ext;
        }
    } else {
        (void) memset((void *) lck, 0, sizeof(lck_rw_t));
        if ((lck_attr->lck_attr_val) & LCK_ATTR_RW_SHARED_PRIORITY)
            lck->lck_rw_priv_excl = FALSE;
        else
            lck->lck_rw_priv_excl = TRUE;
    }

    lck_grp_reference(grp);
    lck_grp_lckcnt_incr(grp, LCK_TYPE_RW);
}
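
/*
 *	Illustrative allocation/initialization sketch (assumes the caller
 *	owns a lock group created with lck_grp_alloc_init(), which lives
 *	outside this file, and tears everything down in reverse order):
 *
 *		lck_grp_t *grp = lck_grp_alloc_init("example-rw", LCK_GRP_ATTR_NULL);
 *		lck_rw_t  *rw  = lck_rw_alloc_init(grp, LCK_ATTR_NULL);
 *
 *		... use rw ...
 *
 *		lck_rw_free(rw, grp);
 *		lck_grp_free(grp);
 */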
/*
 *      Routine:        lck_rw_ext_init
 */
void
lck_rw_ext_init(
    lck_rw_ext_t    *lck,
    lck_grp_t       *grp,
    lck_attr_t      *attr) {

    bzero((void *)lck, sizeof(lck_rw_ext_t));
    if ((attr->lck_attr_val) & LCK_ATTR_RW_SHARED_PRIORITY)
        lck->lck_rw.lck_rw_priv_excl = FALSE;
    else
        lck->lck_rw.lck_rw_priv_excl = TRUE;

    if ((attr->lck_attr_val) & LCK_ATTR_DEBUG) {
        lck->lck_rw_deb.type = RW_TAG;
        lck->lck_rw_attr |= LCK_RW_ATTR_DEBUG;
    }

    lck->lck_rw_grp = grp;

    if (grp->lck_grp_attr & LCK_GRP_ATTR_STAT)
        lck->lck_rw_attr |= LCK_RW_ATTR_STAT;
}
/*
 *      Routine:        lck_rw_destroy
 */
void
lck_rw_destroy(
    lck_rw_t    *lck,
    lck_grp_t   *grp) {
    boolean_t lck_is_indirect;

    if (lck->lck_rw_tag == LCK_RW_TAG_DESTROYED)
        return;
    lck_is_indirect = (lck->lck_rw_tag == LCK_RW_TAG_INDIRECT);
    lck->lck_rw_tag = LCK_RW_TAG_DESTROYED;
    if (lck_is_indirect)
        kfree((void *)lck->lck_rw_ptr, sizeof(lck_rw_ext_t));

    lck_grp_lckcnt_decr(grp, LCK_TYPE_RW);
    lck_grp_deallocate(grp);
    return;
}
/*
 *	Routine:	lck_rw_lock
 */
void
lck_rw_lock(
    lck_rw_t        *lck,
    lck_rw_type_t   lck_rw_type)
{
    if (lck_rw_type == LCK_RW_TYPE_SHARED)
        lck_rw_lock_shared(lck);
    else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
        lck_rw_lock_exclusive(lck);
    else
        panic("lck_rw_lock(): Invalid RW lock type: %d\n", lck_rw_type);
}

/*
 *	Routine:	lck_rw_unlock
 */
void
lck_rw_unlock(
    lck_rw_t        *lck,
    lck_rw_type_t   lck_rw_type)
{
    if (lck_rw_type == LCK_RW_TYPE_SHARED)
        lck_rw_unlock_shared(lck);
    else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
        lck_rw_unlock_exclusive(lck);
    else
        panic("lck_rw_unlock(): Invalid RW lock type: %d\n", lck_rw_type);
}
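
/*
 *	The type-dispatch entry points above are used when the caller keeps
 *	the acquired type around, e.g. (illustrative sketch):
 *
 *		lck_rw_type_t mode = LCK_RW_TYPE_SHARED;
 *
 *		lck_rw_lock(rw, mode);
 *		... readers may run here concurrently ...
 *		lck_rw_unlock(rw, mode);
 *
 *	lck_rw_done() may be used instead of lck_rw_unlock(); it returns the
 *	mode that was actually released.
 */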
/*
 *	Routine:	lck_rw_unlock_shared
 */
void
lck_rw_unlock_shared(
    lck_rw_t    *lck)
{
    lck_rw_type_t   ret;

    ret = lck_rw_done(lck);

    if (ret != LCK_RW_TYPE_SHARED)
        panic("lck_rw_unlock(): lock held in mode: %d\n", ret);
}

/*
 *	Routine:	lck_rw_unlock_exclusive
 */
void
lck_rw_unlock_exclusive(
    lck_rw_t    *lck)
{
    lck_rw_type_t   ret;

    ret = lck_rw_done(lck);

    if (ret != LCK_RW_TYPE_EXCLUSIVE)
        panic("lck_rw_unlock_exclusive(): lock held in mode: %d\n", ret);
}
/*
 *	Routine:	lck_rw_try_lock
 */
boolean_t
lck_rw_try_lock(
    lck_rw_t        *lck,
    lck_rw_type_t   lck_rw_type)
{
    if (lck_rw_type == LCK_RW_TYPE_SHARED)
        return(lck_rw_try_lock_shared(lck));
    else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
        return(lck_rw_try_lock_exclusive(lck));
    else
        panic("lck_rw_try_lock(): Invalid rw lock type: %x\n", lck_rw_type);
    return(FALSE);
}
/*
 *      Routine:        lck_rw_lock_exclusive_gen
 */
void
lck_rw_lock_exclusive_gen(
    lck_rw_t    *lck)
{
    int             i;
    wait_result_t   res;
    uint64_t        wait_interval = 0;
    int             slept = 0;
    int             readers_at_sleep;

    lck_rw_ilk_lock(lck);
    readers_at_sleep = lck->lck_rw_shared_cnt;

    /*
     *	Try to acquire the lck_rw_want_excl bit.
     */
    while (lck->lck_rw_want_excl) {
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_START, (int)lck, 0, 0, 0, 0);

        if ((lockstat_probemap[LS_LCK_RW_LOCK_EXCL_SPIN] || lockstat_probemap[LS_LCK_RW_LOCK_EXCL_BLOCK]) && wait_interval == 0) {
            wait_interval = mach_absolute_time();
        } else {
            wait_interval = (unsigned) -1;
        }

        i = lock_wait_time[1];
        if (i != 0) {
            lck_rw_ilk_unlock(lck);
            while (--i != 0 && lck->lck_rw_want_excl)
                continue;
            lck_rw_ilk_lock(lck);
        }

        if (lck->lck_rw_want_excl) {
            lck->lck_rw_waiting = TRUE;
            res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
            if (res == THREAD_WAITING) {
                lck_rw_ilk_unlock(lck);
                res = thread_block(THREAD_CONTINUE_NULL);
                slept = 1;
                lck_rw_ilk_lock(lck);
            }
        }
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_END, (int)lck, res, 0, 0, 0);
    }
    lck->lck_rw_want_excl = TRUE;

    /* Wait for readers (and upgrades) to finish */

    while ((lck->lck_rw_shared_cnt != 0) || lck->lck_rw_want_upgrade) {
        i = lock_wait_time[1];

        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_START,
                     (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, i, 0);

        if ((lockstat_probemap[LS_LCK_RW_LOCK_EXCL_SPIN] || lockstat_probemap[LS_LCK_RW_LOCK_EXCL_BLOCK]) && wait_interval == 0) {
            wait_interval = mach_absolute_time();
        } else {
            wait_interval = (unsigned) -1;
        }

        if (i != 0) {
            lck_rw_ilk_unlock(lck);
            while (--i != 0 && (lck->lck_rw_shared_cnt != 0 ||
                                lck->lck_rw_want_upgrade))
                continue;
            lck_rw_ilk_lock(lck);
        }

        if (lck->lck_rw_shared_cnt != 0 || lck->lck_rw_want_upgrade) {
            lck->lck_rw_waiting = TRUE;
            res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
            if (res == THREAD_WAITING) {
                lck_rw_ilk_unlock(lck);
                res = thread_block(THREAD_CONTINUE_NULL);
                slept = 1;
                lck_rw_ilk_lock(lck);
            }
        }
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_END,
                     (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, res, 0);
    }

    lck_rw_ilk_unlock(lck);

    /*
     * Decide what latencies we suffered that are Dtrace events.
     * If we have set wait_interval, then we either spun or slept.
     * At least we get out from under the interlock before we record
     * which is the best we can do here to minimize the impact.
     */
    if (wait_interval != 0 && wait_interval != (unsigned) -1) {
        if (slept == 0) {
            LOCKSTAT_RECORD2(LS_LCK_RW_LOCK_EXCL_SPIN, lck,
                mach_absolute_time() - wait_interval, 1);
        } else {
            /*
             * For the blocking case, we also record if when we blocked
             * it was held for read or write, and how many readers.
             * Notice that above we recorded this before we dropped
             * the interlock so the count is accurate.
             */
            LOCKSTAT_RECORD4(LS_LCK_RW_LOCK_EXCL_BLOCK, lck,
                mach_absolute_time() - wait_interval, 1,
                (readers_at_sleep == 0 ? 1 : 0), readers_at_sleep);
        }
    }
    LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_ACQUIRE, lck, 1);
}
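
/*
 *	Note on the wait channel used above: assert_wait() and the matching
 *	thread_wakeup() in lck_rw_done_gen() must name the same event.  Both
 *	derive it from the address of the last 32-bit word of the lock, i.e.
 *	(illustrative restatement of the expression used in this file):
 *
 *		event_t ev = (event_t)(((unsigned int *)lck) +
 *				((sizeof(lck_rw_t)-1)/sizeof(unsigned int)));
 */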
/*
 *      Routine:        lck_rw_done_gen
 */
lck_rw_type_t
lck_rw_done_gen(
    lck_rw_t    *lck)
{
    boolean_t       do_wakeup = FALSE;
    lck_rw_type_t   lck_rw_type;

    lck_rw_ilk_lock(lck);

    if (lck->lck_rw_shared_cnt != 0) {
        lck_rw_type = LCK_RW_TYPE_SHARED;
        lck->lck_rw_shared_cnt--;
    }
    else {
        lck_rw_type = LCK_RW_TYPE_EXCLUSIVE;
        if (lck->lck_rw_want_upgrade)
            lck->lck_rw_want_upgrade = FALSE;
        else
            lck->lck_rw_want_excl = FALSE;
    }

    /*
     *	There is no reason to wakeup a lck_rw_waiting thread
     *	if the read-count is non-zero.  Consider:
     *		we must be dropping a read lock
     *		threads are waiting only if one wants a write lock
     *		if there are still readers, they can't proceed
     */

    if (lck->lck_rw_waiting && (lck->lck_rw_shared_cnt == 0)) {
        lck->lck_rw_waiting = FALSE;
        do_wakeup = TRUE;
    }

    lck_rw_ilk_unlock(lck);

    if (do_wakeup)
        thread_wakeup((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
    LOCKSTAT_RECORD(LS_LCK_RW_DONE_RELEASE, lck, lck_rw_type);
    return(lck_rw_type);
}
/*
 *      Routine:        lck_rw_lock_shared_gen
 */
void
lck_rw_lock_shared_gen(
    lck_rw_t    *lck)
{
    int             i;
    wait_result_t   res;
    uint64_t        wait_interval = 0;
    int             slept = 0;
    int             readers_at_sleep;

    lck_rw_ilk_lock(lck);
    readers_at_sleep = lck->lck_rw_shared_cnt;

    while ((lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) &&
           ((lck->lck_rw_shared_cnt == 0) || (lck->lck_rw_priv_excl))) {
        i = lock_wait_time[1];

        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_START,
                     (int)lck, lck->lck_rw_want_excl, lck->lck_rw_want_upgrade, i, 0);

        if ((lockstat_probemap[LS_LCK_RW_LOCK_SHARED_SPIN] || lockstat_probemap[LS_LCK_RW_LOCK_SHARED_BLOCK]) && wait_interval == 0) {
            wait_interval = mach_absolute_time();
        } else {
            wait_interval = (unsigned) -1;
        }

        if (i != 0) {
            lck_rw_ilk_unlock(lck);
            while (--i != 0 &&
                   (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) &&
                   ((lck->lck_rw_shared_cnt == 0) || (lck->lck_rw_priv_excl)))
                continue;
            lck_rw_ilk_lock(lck);
        }

        if ((lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) &&
            ((lck->lck_rw_shared_cnt == 0) || (lck->lck_rw_priv_excl))) {
            lck->lck_rw_waiting = TRUE;
            res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
            if (res == THREAD_WAITING) {
                lck_rw_ilk_unlock(lck);
                res = thread_block(THREAD_CONTINUE_NULL);
                slept = 1;
                lck_rw_ilk_lock(lck);
            }
        }
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_END,
                     (int)lck, lck->lck_rw_want_excl, lck->lck_rw_want_upgrade, res, 0);
    }

    lck->lck_rw_shared_cnt++;

    lck_rw_ilk_unlock(lck);

    if (wait_interval != 0 && wait_interval != (unsigned) -1) {
        if (slept == 0) {
            LOCKSTAT_RECORD2(LS_LCK_RW_LOCK_SHARED_SPIN, lck, mach_absolute_time() - wait_interval, 0);
        } else {
            LOCKSTAT_RECORD4(LS_LCK_RW_LOCK_SHARED_BLOCK, lck,
                mach_absolute_time() - wait_interval, 0,
                (readers_at_sleep == 0 ? 1 : 0), readers_at_sleep);
        }
    }
    LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_ACQUIRE, lck, 0);
}
/*
 *      Routine:        lck_rw_lock_shared_to_exclusive_gen
 *      Function:
 *		Improves a read-only lock to one with
 *		write permission.  If another reader has
 *		already requested an upgrade to a write lock,
 *		no lock is held upon return.
 *
 *		Returns FALSE if the upgrade *failed*.
 */

boolean_t
lck_rw_lock_shared_to_exclusive_gen(
    lck_rw_t    *lck)
{
    int             i;
    boolean_t       do_wakeup = FALSE;
    wait_result_t   res;
    uint64_t        wait_interval = 0;
    int             slept = 0;
    int             readers_at_sleep = 0;

    lck_rw_ilk_lock(lck);

    lck->lck_rw_shared_cnt--;

    if (lck->lck_rw_want_upgrade) {
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_START,
                     (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, 0, 0);

        /*
         *	Someone else has requested upgrade.
         *	Since we've released a read lock, wake up the waiter.
         */
        if (lck->lck_rw_waiting && (lck->lck_rw_shared_cnt == 0)) {
            lck->lck_rw_waiting = FALSE;
            do_wakeup = TRUE;
        }

        lck_rw_ilk_unlock(lck);

        if (do_wakeup)
            thread_wakeup((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));

        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_END,
                     (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, 0, 0);

        return (FALSE);
    }

    lck->lck_rw_want_upgrade = TRUE;

    while (lck->lck_rw_shared_cnt != 0) {
        i = lock_wait_time[1];

        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_START,
                     (int)lck, lck->lck_rw_shared_cnt, i, 0, 0);

        readers_at_sleep = lck->lck_rw_shared_cnt;
        if ((lockstat_probemap[LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN] || lockstat_probemap[LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK]) && wait_interval == 0) {
            wait_interval = mach_absolute_time();
        } else {
            wait_interval = (unsigned) -1;
        }

        if (i != 0) {
            lck_rw_ilk_unlock(lck);
            while (--i != 0 && lck->lck_rw_shared_cnt != 0)
                continue;
            lck_rw_ilk_lock(lck);
        }

        if (lck->lck_rw_shared_cnt != 0) {
            lck->lck_rw_waiting = TRUE;
            res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
            if (res == THREAD_WAITING) {
                lck_rw_ilk_unlock(lck);
                res = thread_block(THREAD_CONTINUE_NULL);
                slept = 1;
                lck_rw_ilk_lock(lck);
            }
        }
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_END,
                     (int)lck, lck->lck_rw_shared_cnt, 0, 0, 0);
    }

    lck_rw_ilk_unlock(lck);

    /*
     * We infer if we took a sleep or spin path by whether readers_at_sleep
     * was set.
     */
    if (wait_interval != 0 && wait_interval != (unsigned) -1 && readers_at_sleep) {
        if (slept == 0) {
            LOCKSTAT_RECORD2(LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN, lck, mach_absolute_time() - wait_interval, 0);
        } else {
            LOCKSTAT_RECORD4(LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK, lck,
                mach_absolute_time() - wait_interval, 1,
                (readers_at_sleep == 0 ? 1 : 0), readers_at_sleep);
        }
    }

    LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE, lck, 1);

    return (TRUE);
}
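
/*
 *	Illustrative caller pattern for an upgrade, following the contract
 *	documented above (on failure the shared hold has already been
 *	dropped, so the caller must reacquire if it still needs the lock
 *	and revalidate any state it examined under the shared hold):
 *
 *		lck_rw_lock_shared(rw);
 *		...
 *		if (!lck_rw_lock_shared_to_exclusive(rw)) {
 *			lck_rw_lock_exclusive(rw);
 *			... revalidate state that may have changed ...
 *		}
 *		... write side ...
 *		lck_rw_done(rw);
 */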
/*
 *      Routine:        lck_rw_lock_exclusive_to_shared_gen
 */
void
lck_rw_lock_exclusive_to_shared_gen(
    lck_rw_t    *lck)
{
    boolean_t   do_wakeup = FALSE;

    lck_rw_ilk_lock(lck);

    lck->lck_rw_shared_cnt++;
    if (lck->lck_rw_want_upgrade)
        lck->lck_rw_want_upgrade = FALSE;
    else
        lck->lck_rw_want_excl = FALSE;

    if (lck->lck_rw_waiting) {
        lck->lck_rw_waiting = FALSE;
        do_wakeup = TRUE;
    }

    lck_rw_ilk_unlock(lck);

    if (do_wakeup)
        thread_wakeup((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));

    LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE, lck, 0);
}
/*
 *      Routine:        lck_rw_try_lock_exclusive_gen
 *      Function:
 *		Tries to get a write lock.
 *
 *		Returns FALSE if the lock is not held on return.
 */

boolean_t
lck_rw_try_lock_exclusive_gen(
    lck_rw_t    *lck)
{
    lck_rw_ilk_lock(lck);

    if (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade || lck->lck_rw_shared_cnt) {
        lck_rw_ilk_unlock(lck);
        return(FALSE);
    }

    lck->lck_rw_want_excl = TRUE;

    lck_rw_ilk_unlock(lck);

    LOCKSTAT_RECORD(LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE, lck, 1);
    return(TRUE);
}
/*
 *      Routine:        lck_rw_try_lock_shared_gen
 *      Function:
 *		Tries to get a read lock.
 *
 *		Returns FALSE if the lock is not held on return.
 */

boolean_t
lck_rw_try_lock_shared_gen(
    lck_rw_t    *lck)
{
    lck_rw_ilk_lock(lck);

    if ((lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) &&
        ((lck->lck_rw_shared_cnt == 0) || (lck->lck_rw_priv_excl))) {
        lck_rw_ilk_unlock(lck);
        return(FALSE);
    }

    lck->lck_rw_shared_cnt++;

    lck_rw_ilk_unlock(lck);

    LOCKSTAT_RECORD(LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE, lck, 0);
    return(TRUE);
}
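
/*
 *	Illustrative try-lock pattern: the lock is held only when TRUE is
 *	returned, so the failure path must not touch protected state.
 *
 *		if (lck_rw_try_lock_exclusive(rw)) {
 *			... update protected state ...
 *			lck_rw_done(rw);
 *		} else {
 *			... back off, or fall back to lck_rw_lock_exclusive() ...
 *		}
 */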
/*
 *      Routine:        lck_rw_ext_backtrace
 */
void
lck_rw_ext_backtrace(
    lck_rw_ext_t    *lck)
{
    unsigned int *stackptr, *stackptr_prev;
    unsigned int frame;

    __asm__ volatile("mr %0,r1" : "=r" (stackptr));
    frame = 0;
    while (frame < LCK_FRAMES_MAX) {
        stackptr_prev = stackptr;
        stackptr = ( unsigned int *)*stackptr;
        if ( (((unsigned int)stackptr_prev) ^ ((unsigned int)stackptr)) > 8192)
            break;
        lck->lck_rw_deb.stack[frame] = *(stackptr+2);
        frame++;
    }
    while (frame < LCK_FRAMES_MAX) {
        lck->lck_rw_deb.stack[frame] = 0;
        frame++;
    }
}
/*
 *      Routine:        lck_rw_lock_exclusive_ext
 */
void
lck_rw_lock_exclusive_ext(
    lck_rw_ext_t    *lck,
    lck_rw_t        *rlck)
{
    int             i;
    wait_result_t   res;
    boolean_t       lock_miss = FALSE;
    boolean_t       lock_wait = FALSE;
    boolean_t       lock_stat;
    uint64_t        wait_interval = 0;
    int             slept = 0;
    int             readers_at_sleep;

    lck_rw_check_type(lck, rlck);

    if ( ((lck->lck_rw_attr & (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_MYLOCK)) == LCK_RW_ATTR_DEBUG)
         && (lck->lck_rw_deb.thread == current_thread()))
        panic("rw lock (%p) recursive lock attempt\n", rlck);

    lck_rw_ilk_lock(&lck->lck_rw);
    readers_at_sleep = lck->lck_rw.lck_rw_shared_cnt;

    lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;

    if (lock_stat)
        lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;

    /*
     *	Try to acquire the lck_rw.lck_rw_want_excl bit.
     */
    while (lck->lck_rw.lck_rw_want_excl) {
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_START, (int)rlck, 0, 0, 0, 0);

        if (lock_stat && !lock_miss) {
            lock_miss = TRUE;
            lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
        }

        if ((lockstat_probemap[LS_LCK_RW_LOCK_EXCL_SPIN] || lockstat_probemap[LS_LCK_RW_LOCK_EXCL_BLOCK]) && wait_interval == 0) {
            wait_interval = mach_absolute_time();
        } else {
            wait_interval = (unsigned) -1;
        }

        i = lock_wait_time[1];
        if (i != 0) {
            lck_rw_ilk_unlock(&lck->lck_rw);
            while (--i != 0 && lck->lck_rw.lck_rw_want_excl)
                continue;
            lck_rw_ilk_lock(&lck->lck_rw);
        }

        if (lck->lck_rw.lck_rw_want_excl) {
            lck->lck_rw.lck_rw_waiting = TRUE;
            res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
            if (res == THREAD_WAITING) {
                if (lock_stat && !lock_wait) {
                    lock_wait = TRUE;
                    lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
                }
                lck_rw_ilk_unlock(&lck->lck_rw);
                res = thread_block(THREAD_CONTINUE_NULL);
                slept = 1;
                lck_rw_ilk_lock(&lck->lck_rw);
            }
        }
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_END, (int)rlck, res, 0, 0, 0);
    }
    lck->lck_rw.lck_rw_want_excl = TRUE;

    /* Wait for readers (and upgrades) to finish */

    while ((lck->lck_rw.lck_rw_shared_cnt != 0) || lck->lck_rw.lck_rw_want_upgrade) {
        i = lock_wait_time[1];

        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_START,
                     (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, i, 0);

        if ((lockstat_probemap[LS_LCK_RW_LOCK_EXCL_SPIN] || lockstat_probemap[LS_LCK_RW_LOCK_EXCL_BLOCK]) && wait_interval == 0) {
            wait_interval = mach_absolute_time();
        } else {
            wait_interval = (unsigned) -1;
        }

        if (lock_stat && !lock_miss) {
            lock_miss = TRUE;
            lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
        }

        if (i != 0) {
            lck_rw_ilk_unlock(&lck->lck_rw);
            while (--i != 0 && (lck->lck_rw.lck_rw_shared_cnt != 0 ||
                                lck->lck_rw.lck_rw_want_upgrade))
                continue;
            lck_rw_ilk_lock(&lck->lck_rw);
        }

        if (lck->lck_rw.lck_rw_shared_cnt != 0 || lck->lck_rw.lck_rw_want_upgrade) {
            lck->lck_rw.lck_rw_waiting = TRUE;
            res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
            if (res == THREAD_WAITING) {
                if (lock_stat && !lock_wait) {
                    lock_wait = TRUE;
                    lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
                }
                lck_rw_ilk_unlock(&lck->lck_rw);
                res = thread_block(THREAD_CONTINUE_NULL);
                slept = 1;
                lck_rw_ilk_lock(&lck->lck_rw);
            }
        }
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_END,
                     (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, res, 0);
    }

    lck->lck_rw_deb.pc_excl = __builtin_return_address(0);
    if (LcksOpts & enaLkExtStck)
        lck_rw_ext_backtrace(lck);
    lck->lck_rw_deb.thread = current_thread();

    lck_rw_ilk_unlock(&lck->lck_rw);

    /*
     * Decide what latencies we suffered that are Dtrace events.
     * If we have set wait_interval, then we either spun or slept.
     * At least we get out from under the interlock before we record
     * which is the best we can do here to minimize the impact.
     */
    if (wait_interval != 0 && wait_interval != (unsigned) -1) {
        if (slept == 0) {
            LOCKSTAT_RECORD2(LS_LCK_RW_LOCK_EXCL_SPIN, lck,
                mach_absolute_time() - wait_interval, 1);
        } else {
            /*
             * For the blocking case, we also record if when we blocked
             * it was held for read or write, and how many readers.
             * Notice that above we recorded this before we dropped
             * the interlock so the count is accurate.
             */
            LOCKSTAT_RECORD4(LS_LCK_RW_LOCK_EXCL_BLOCK, lck,
                mach_absolute_time() - wait_interval, 1,
                (readers_at_sleep == 0 ? 1 : 0), readers_at_sleep);
        }
    }
    LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_ACQUIRE, lck, 1);
}
/*
 *      Routine:        lck_rw_done_ext
 */
lck_rw_type_t
lck_rw_done_ext(
    lck_rw_ext_t    *lck,
    lck_rw_t        *rlck)
{
    boolean_t       do_wakeup = FALSE;
    lck_rw_type_t   lck_rw_type;

    lck_rw_check_type(lck, rlck);

    lck_rw_ilk_lock(&lck->lck_rw);

    if (lck->lck_rw.lck_rw_shared_cnt != 0) {
        lck_rw_type = LCK_RW_TYPE_SHARED;
        lck->lck_rw.lck_rw_shared_cnt--;
    }
    else {
        lck_rw_type = LCK_RW_TYPE_EXCLUSIVE;
        if (lck->lck_rw.lck_rw_want_upgrade)
            lck->lck_rw.lck_rw_want_upgrade = FALSE;
        else if (lck->lck_rw.lck_rw_want_excl)
            lck->lck_rw.lck_rw_want_excl = FALSE;
        else
            panic("rw lock (%p) bad state (0x%08X) on attempt to release a shared or exclusive right\n",
                  rlck, lck->lck_rw.lck_rw_tag);
        if (lck->lck_rw_deb.thread == THREAD_NULL)
            panic("rw lock (%p) not held\n",
                  rlck);
        else if ( ((lck->lck_rw_attr & (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_THREAD)) == LCK_RW_ATTR_DEBUG)
                  && (lck->lck_rw_deb.thread != current_thread()))
            panic("rw lock (%p) unlocked by non-owner(%p), current owner(%p)\n",
                  rlck, current_thread(), lck->lck_rw_deb.thread);
        lck->lck_rw_deb.thread = THREAD_NULL;
    }

    if (lck->lck_rw_attr & LCK_RW_ATTR_DEBUG)
        lck->lck_rw_deb.pc_done = __builtin_return_address(0);

    /*
     *	There is no reason to wakeup a waiting thread
     *	if the read-count is non-zero.  Consider:
     *		we must be dropping a read lock
     *		threads are waiting only if one wants a write lock
     *		if there are still readers, they can't proceed
     */

    if (lck->lck_rw.lck_rw_waiting && (lck->lck_rw.lck_rw_shared_cnt == 0)) {
        lck->lck_rw.lck_rw_waiting = FALSE;
        do_wakeup = TRUE;
    }

    lck_rw_ilk_unlock(&lck->lck_rw);

    if (do_wakeup)
        thread_wakeup((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
    LOCKSTAT_RECORD(LS_LCK_RW_DONE_RELEASE, lck, lck_rw_type);
    return(lck_rw_type);
}
/*
 *      Routine:        lck_rw_lock_shared_ext
 */
void
lck_rw_lock_shared_ext(
    lck_rw_ext_t    *lck,
    lck_rw_t        *rlck)
{
    int             i;
    wait_result_t   res;
    boolean_t       lock_miss = FALSE;
    boolean_t       lock_wait = FALSE;
    boolean_t       lock_stat;
    uint64_t        wait_interval = 0;
    int             slept = 0;
    int             readers_at_sleep;

    lck_rw_check_type(lck, rlck);

    lck_rw_ilk_lock(&lck->lck_rw);
    readers_at_sleep = lck->lck_rw.lck_rw_shared_cnt;

    lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;

    if (lock_stat)
        lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;

    while ((lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade) &&
           ((lck->lck_rw.lck_rw_shared_cnt == 0) || (lck->lck_rw.lck_rw_priv_excl))) {
        i = lock_wait_time[1];

        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_START,
                     (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, i, 0);

        if ((lockstat_probemap[LS_LCK_RW_LOCK_SHARED_SPIN] || lockstat_probemap[LS_LCK_RW_LOCK_SHARED_BLOCK]) && wait_interval == 0) {
            wait_interval = mach_absolute_time();
        } else {
            wait_interval = (unsigned) -1;
        }

        if (lock_stat && !lock_miss) {
            lock_miss = TRUE;
            lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
        }

        if (i != 0) {
            lck_rw_ilk_unlock(&lck->lck_rw);
            while (--i != 0 &&
                   (lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade) &&
                   ((lck->lck_rw.lck_rw_shared_cnt == 0) || (lck->lck_rw.lck_rw_priv_excl)))
                continue;
            lck_rw_ilk_lock(&lck->lck_rw);
        }

        if ((lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade) &&
            ((lck->lck_rw.lck_rw_shared_cnt == 0) || (lck->lck_rw.lck_rw_priv_excl))) {
            lck->lck_rw.lck_rw_waiting = TRUE;
            res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
            if (res == THREAD_WAITING) {
                if (lock_stat && !lock_wait) {
                    lock_wait = TRUE;
                    lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
                }
                lck_rw_ilk_unlock(&lck->lck_rw);
                res = thread_block(THREAD_CONTINUE_NULL);
                slept = 1;
                lck_rw_ilk_lock(&lck->lck_rw);
            }
        }
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_END,
                     (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, res, 0);
    }

    lck->lck_rw.lck_rw_shared_cnt++;

    lck_rw_ilk_unlock(&lck->lck_rw);

    if (wait_interval != 0 && wait_interval != (unsigned) -1) {
        if (slept == 0) {
            LOCKSTAT_RECORD2(LS_LCK_RW_LOCK_SHARED_SPIN, lck, mach_absolute_time() - wait_interval, 0);
        } else {
            LOCKSTAT_RECORD4(LS_LCK_RW_LOCK_SHARED_BLOCK, lck,
                mach_absolute_time() - wait_interval, 0,
                (readers_at_sleep == 0 ? 1 : 0), readers_at_sleep);
        }
    }
    LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_ACQUIRE, lck, 0);
}
/*
 *      Routine:        lck_rw_lock_shared_to_exclusive_ext
 *      Function:
 *		Improves a read-only lock to one with
 *		write permission.  If another reader has
 *		already requested an upgrade to a write lock,
 *		no lock is held upon return.
 *
 *		Returns FALSE if the upgrade *failed*.
 */

boolean_t
lck_rw_lock_shared_to_exclusive_ext(
    lck_rw_ext_t    *lck,
    lck_rw_t        *rlck)
{
    int             i;
    boolean_t       do_wakeup = FALSE;
    wait_result_t   res;
    boolean_t       lock_miss = FALSE;
    boolean_t       lock_wait = FALSE;
    boolean_t       lock_stat;
    uint64_t        wait_interval = 0;
    int             slept = 0;

    lck_rw_check_type(lck, rlck);

    if (lck->lck_rw_deb.thread == current_thread())
        panic("rw lock (%p) recursive lock attempt\n", rlck);

    lck_rw_ilk_lock(&lck->lck_rw);

    lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;

    if (lock_stat)
        lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;

    lck->lck_rw.lck_rw_shared_cnt--;

    if (lck->lck_rw.lck_rw_want_upgrade) {
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_START,
                     (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, 0, 0);

        /*
         *	Someone else has requested upgrade.
         *	Since we've released a read lock, wake up the waiter.
         */
        if (lck->lck_rw.lck_rw_waiting && (lck->lck_rw.lck_rw_shared_cnt == 0)) {
            lck->lck_rw.lck_rw_waiting = FALSE;
            do_wakeup = TRUE;
        }

        lck_rw_ilk_unlock(&lck->lck_rw);

        if (do_wakeup)
            thread_wakeup((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));

        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_END,
                     (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, 0, 0);

        return (FALSE);
    }

    lck->lck_rw.lck_rw_want_upgrade = TRUE;

    while (lck->lck_rw.lck_rw_shared_cnt != 0) {
        i = lock_wait_time[1];

        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_START,
                     (int)rlck, lck->lck_rw.lck_rw_shared_cnt, i, 0, 0);

        if (lock_stat && !lock_miss) {
            lock_miss = TRUE;
            lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
        }

        if ((lockstat_probemap[LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN] || lockstat_probemap[LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK]) && wait_interval == 0) {
            wait_interval = mach_absolute_time();
        } else {
            wait_interval = (unsigned) -1;
        }

        if (i != 0) {
            lck_rw_ilk_unlock(&lck->lck_rw);
            while (--i != 0 && lck->lck_rw.lck_rw_shared_cnt != 0)
                continue;
            lck_rw_ilk_lock(&lck->lck_rw);
        }

        if (lck->lck_rw.lck_rw_shared_cnt != 0) {
            lck->lck_rw.lck_rw_waiting = TRUE;
            res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
            if (res == THREAD_WAITING) {
                if (lock_stat && !lock_wait) {
                    lock_wait = TRUE;
                    lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
                }
                lck_rw_ilk_unlock(&lck->lck_rw);
                res = thread_block(THREAD_CONTINUE_NULL);
                slept = 1;
                lck_rw_ilk_lock(&lck->lck_rw);
            }
        }
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_END,
                     (int)rlck, lck->lck_rw.lck_rw_shared_cnt, 0, 0, 0);
    }

    lck->lck_rw_deb.pc_excl = __builtin_return_address(0);
    if (LcksOpts & enaLkExtStck)
        lck_rw_ext_backtrace(lck);
    lck->lck_rw_deb.thread = current_thread();

    lck_rw_ilk_unlock(&lck->lck_rw);

    /*
     * If we've travelled a path with no spin or sleep, then wait_interval
     * will be zero.
     */
    if (wait_interval != 0 && wait_interval != (unsigned) -1) {
        if (slept == 0) {
            LOCKSTAT_RECORD2(LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN, lck, mach_absolute_time() - wait_interval, 0);
        } else {
            LOCKSTAT_RECORD2(LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK, lck, mach_absolute_time() - wait_interval, 0);
        }
    }

    LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE, lck, 1);

    return (TRUE);
}
/*
 *      Routine:        lck_rw_lock_exclusive_to_shared_ext
 */
void
lck_rw_lock_exclusive_to_shared_ext(
    lck_rw_ext_t    *lck,
    lck_rw_t        *rlck)
{
    boolean_t   do_wakeup = FALSE;

    lck_rw_check_type(lck, rlck);

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_START,
                 (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, 0, 0);

    lck_rw_ilk_lock(&lck->lck_rw);

    lck->lck_rw.lck_rw_shared_cnt++;
    if (lck->lck_rw.lck_rw_want_upgrade)
        lck->lck_rw.lck_rw_want_upgrade = FALSE;
    else if (lck->lck_rw.lck_rw_want_excl)
        lck->lck_rw.lck_rw_want_excl = FALSE;
    else
        panic("rw lock (%p) bad state (0x%08X) on attempt to release a shared or exclusive right\n",
              rlck, lck->lck_rw.lck_rw_tag);
    if (lck->lck_rw_deb.thread == THREAD_NULL)
        panic("rw lock (%p) not held\n",
              rlck);
    else if ( ((lck->lck_rw_attr & (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_THREAD)) == LCK_RW_ATTR_DEBUG)
              && (lck->lck_rw_deb.thread != current_thread()))
        panic("rw lock (%p) unlocked by non-owner(%p), current owner(%p)\n",
              rlck, current_thread(), lck->lck_rw_deb.thread);

    lck->lck_rw_deb.thread = THREAD_NULL;

    if (lck->lck_rw.lck_rw_waiting) {
        lck->lck_rw.lck_rw_waiting = FALSE;
        do_wakeup = TRUE;
    }

    lck_rw_ilk_unlock(&lck->lck_rw);

    if (do_wakeup)
        thread_wakeup((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_END,
                 (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, lck->lck_rw.lck_rw_shared_cnt, 0);

    LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE, lck, 0);
}
/*
 *      Routine:        lck_rw_try_lock_exclusive_ext
 *      Function:
 *		Tries to get a write lock.
 *
 *		Returns FALSE if the lock is not held on return.
 */

boolean_t
lck_rw_try_lock_exclusive_ext(
    lck_rw_ext_t    *lck,
    lck_rw_t        *rlck)
{
    boolean_t   lock_stat;

    lck_rw_check_type(lck, rlck);

    lck_rw_ilk_lock(&lck->lck_rw);

    lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;

    if (lock_stat)
        lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;

    if (lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade || lck->lck_rw.lck_rw_shared_cnt) {
        if (lock_stat)
            lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
        lck_rw_ilk_unlock(&lck->lck_rw);
        return(FALSE);
    }

    lck->lck_rw.lck_rw_want_excl = TRUE;
    lck->lck_rw_deb.pc_excl = __builtin_return_address(0);
    if (LcksOpts & enaLkExtStck)
        lck_rw_ext_backtrace(lck);
    lck->lck_rw_deb.thread = current_thread();

    lck_rw_ilk_unlock(&lck->lck_rw);

    LOCKSTAT_RECORD(LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE, lck, 1);

    return(TRUE);
}
/*
 *      Routine:        lck_rw_try_lock_shared_ext
 *      Function:
 *		Tries to get a read lock.
 *
 *		Returns FALSE if the lock is not held on return.
 */

boolean_t
lck_rw_try_lock_shared_ext(
    lck_rw_ext_t    *lck,
    lck_rw_t        *rlck)
{
    boolean_t   lock_stat;

    lck_rw_check_type(lck, rlck);

    lck_rw_ilk_lock(&lck->lck_rw);

    lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;

    if (lock_stat)
        lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;

    if ((lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade) &&
        ((lck->lck_rw.lck_rw_shared_cnt == 0) || (lck->lck_rw.lck_rw_priv_excl))) {
        if (lock_stat)
            lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
        lck_rw_ilk_unlock(&lck->lck_rw);
        return(FALSE);
    }

    lck->lck_rw.lck_rw_shared_cnt++;

    lck_rw_ilk_unlock(&lck->lck_rw);

    LOCKSTAT_RECORD(LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE, lck, 0);

    return(TRUE);
}
void
lck_rw_check_type(
    lck_rw_ext_t    *lck,
    lck_rw_t        *rlck)
{
    if (lck->lck_rw_deb.type != RW_TAG)
        panic("rw lock (%p) not a rw lock type (0x%08X)\n", rlck, lck->lck_rw_deb.type);
}
void
lck_rw_assert_ext(
    lck_rw_ext_t    *lck,
    lck_rw_t        *rlck,
    unsigned int    type)
{
    lck_rw_check_type(lck, rlck);

    switch (type) {
    case LCK_RW_ASSERT_SHARED:
        if (lck->lck_rw.lck_rw_shared_cnt != 0) {
            return;
        }
        break;
    case LCK_RW_ASSERT_EXCLUSIVE:
        if ((lck->lck_rw.lck_rw_want_excl ||
             lck->lck_rw.lck_rw_want_upgrade) &&
            lck->lck_rw.lck_rw_shared_cnt == 0) {
            return;
        }
        break;
    case LCK_RW_ASSERT_HELD:
        if (lck->lck_rw.lck_rw_want_excl ||
            lck->lck_rw.lck_rw_want_upgrade ||
            lck->lck_rw.lck_rw_shared_cnt != 0) {
            return;
        }
        break;
    default:
        break;
    }

    panic("rw lock (%p -> %p) not held (mode=%u)\n", rlck, lck, type);
}

void
lck_rw_assert(
    lck_rw_t        *lck,
    unsigned int    type)
{
    if (lck->lck_rw_tag != LCK_RW_TAG_INDIRECT) {
        switch (type) {
        case LCK_RW_ASSERT_SHARED:
            if (lck->lck_rw_shared_cnt != 0) {
                return;
            }
            break;
        case LCK_RW_ASSERT_EXCLUSIVE:
            if (lck->lck_rw_shared_cnt == 0 &&
                (lck->lck_rw_want_excl ||
                 lck->lck_rw_want_upgrade)) {
                return;
            }
            break;
        case LCK_RW_ASSERT_HELD:
            if (lck->lck_rw_shared_cnt != 0 ||
                lck->lck_rw_want_excl ||
                lck->lck_rw_want_upgrade) {
                return;
            }
            break;
        default:
            break;
        }
        panic("rw lock (%p) not held (mode=%u)\n", lck, type);
    } else {
        lck_rw_assert_ext((lck_rw_ext_t *)lck->lck_rw_ptr,
                          (lck_rw_t *)lck,
                          type);
    }
}
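
/*
 *	Illustrative use of the assertion entry points (they panic rather
 *	than return a status on failure, so they are debugging aids only):
 *
 *		lck_rw_lock_exclusive(rw);
 *		lck_rw_assert(rw, LCK_RW_ASSERT_EXCLUSIVE);
 *		...
 *		lck_rw_done(rw);
 */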
/*
 *	The C portion of the mutex package.  These routines are only invoked
 *	if the optimized assembler routines can't do the work.
 */

void lck_mtx_ext_init(
    lck_mtx_ext_t   *lck,
    lck_grp_t       *grp,
    lck_attr_t      *attr);

/*
 *	Routine:	mutex_alloc
 *	Function:
 *		Allocate a mutex for external users who cannot
 *		hard-code the structure definition into their
 *		objects.
 *		For now just use kalloc, but a zone is probably
 *		warranted.
 */
mutex_t *
mutex_alloc(
    unsigned short  tag)
{
    mutex_t *m;

    if ((m = (mutex_t *)kalloc(sizeof(mutex_t))) != 0)
        mutex_init(m, tag);
    return(m);
}

/*
 *	Routine:	mutex_free
 */
void
mutex_free(
    mutex_t *m)
{
    kfree((void *)m, sizeof(mutex_t));
}

/*
 *      Routine:        lck_mtx_alloc_init
 */
lck_mtx_t *
lck_mtx_alloc_init(
    lck_grp_t   *grp,
    lck_attr_t  *attr) {
    lck_mtx_t   *lck;

    if ((lck = (lck_mtx_t *)kalloc(sizeof(lck_mtx_t))) != 0)
        lck_mtx_init(lck, grp, attr);

    return(lck);
}

/*
 *      Routine:        lck_mtx_free
 */
void
lck_mtx_free(
    lck_mtx_t   *lck,
    lck_grp_t   *grp) {
    lck_mtx_destroy(lck, grp);
    kfree((void *)lck, sizeof(lck_mtx_t));
}

/*
 *      Routine:        lck_mtx_init
 */
void
lck_mtx_init(
    lck_mtx_t   *lck,
    lck_grp_t   *grp,
    lck_attr_t  *attr) {
    lck_mtx_ext_t   *lck_ext;
    lck_attr_t      *lck_attr;

    if (attr != LCK_ATTR_NULL)
        lck_attr = attr;
    else
        lck_attr = &LockDefaultLckAttr;

    if ((lck_attr->lck_attr_val) & LCK_ATTR_DEBUG) {
        if ((lck_ext = (lck_mtx_ext_t *)kalloc(sizeof(lck_mtx_ext_t))) != 0) {
            lck_mtx_ext_init(lck_ext, grp, lck_attr);
            lck->lck_mtx_tag = LCK_MTX_TAG_INDIRECT;
            lck->lck_mtx_ptr = lck_ext;
        }
    } else {
        lck->lck_mtx_data = 0;
        lck->lck_mtx_waiters = 0;
        lck->lck_mtx_pri = 0;
    }
    lck_grp_reference(grp);
    lck_grp_lckcnt_incr(grp, LCK_TYPE_MTX);
}
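
/*
 *	Illustrative mutex lifecycle (assumes a lock group as in the rw
 *	example above and the lck_mtx_lock()/lck_mtx_unlock() entry points
 *	provided elsewhere by the lock package):
 *
 *		lck_mtx_t *mtx = lck_mtx_alloc_init(grp, LCK_ATTR_NULL);
 *
 *		lck_mtx_lock(mtx);
 *		... holder may block here, unlike with a spin lock ...
 *		lck_mtx_unlock(mtx);
 *
 *		lck_mtx_free(mtx, grp);
 */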
/*
 *      Routine:        lck_mtx_init_ext
 */
void
lck_mtx_init_ext(
    lck_mtx_t       *lck,
    lck_mtx_ext_t   *lck_ext,
    lck_grp_t       *grp,
    lck_attr_t      *attr)
{
    lck_attr_t  *lck_attr;

    if (attr != LCK_ATTR_NULL)
        lck_attr = attr;
    else
        lck_attr = &LockDefaultLckAttr;

    if ((lck_attr->lck_attr_val) & LCK_ATTR_DEBUG) {
        lck_mtx_ext_init(lck_ext, grp, lck_attr);
        lck->lck_mtx_tag = LCK_MTX_TAG_INDIRECT;
        lck->lck_mtx_ptr = lck_ext;
    } else {
        lck->lck_mtx_data = 0;
        lck->lck_mtx_waiters = 0;
        lck->lck_mtx_pri = 0;
    }
    lck_grp_reference(grp);
    lck_grp_lckcnt_incr(grp, LCK_TYPE_MTX);
}

/*
 *      Routine:        lck_mtx_ext_init
 */
void
lck_mtx_ext_init(
    lck_mtx_ext_t   *lck,
    lck_grp_t       *grp,
    lck_attr_t      *attr)
{
    bzero((void *)lck, sizeof(lck_mtx_ext_t));

    if ((attr->lck_attr_val) & LCK_ATTR_DEBUG) {
        lck->lck_mtx_deb.type = MUTEX_TAG;
        lck->lck_mtx_attr |= LCK_MTX_ATTR_DEBUG;
    }

    lck->lck_mtx_grp = grp;

    if (grp->lck_grp_attr & LCK_GRP_ATTR_STAT)
        lck->lck_mtx_attr |= LCK_MTX_ATTR_STAT;
}

/*
 *      Routine:        lck_mtx_destroy
 */
void
lck_mtx_destroy(
    lck_mtx_t   *lck,
    lck_grp_t   *grp)
{
    boolean_t lck_is_indirect;

    if (lck->lck_mtx_tag == LCK_MTX_TAG_DESTROYED)
        return;
    lck_is_indirect = (lck->lck_mtx_tag == LCK_MTX_TAG_INDIRECT);
    lck->lck_mtx_tag = LCK_MTX_TAG_DESTROYED;
    if (lck_is_indirect)
        kfree((void *)lck->lck_mtx_ptr, sizeof(lck_mtx_ext_t));

    lck_grp_lckcnt_decr(grp, LCK_TYPE_MTX);
    lck_grp_deallocate(grp);
    return;
}
#if	MACH_KDB
/*
 * Routines to print out simple_locks and mutexes in a nicely-formatted
 * fashion.
 */

const char *simple_lock_labels = "ENTRY    ILK THREAD   DURATION CALLER";
const char *mutex_labels = "ENTRY    LOCKED WAITERS   THREAD CALLER";

void	db_print_simple_lock(
            simple_lock_t   addr);

void	db_print_mutex(
            mutex_t         *addr);

void
db_show_one_simple_lock (db_expr_t addr, boolean_t have_addr,
             __unused db_expr_t count,
             __unused char *modif)
{
    simple_lock_t   saddr = (simple_lock_t)(unsigned long)addr;

    if (saddr == (simple_lock_t)0 || !have_addr) {
        db_error ("No simple_lock\n");
    }
#if	USLOCK_DEBUG
    else if (saddr->lock_type != USLOCK_TAG)
        db_error ("Not a simple_lock\n");
#endif	/* USLOCK_DEBUG */

    db_printf ("%s\n", simple_lock_labels);
    db_print_simple_lock (saddr);
}

void
db_print_simple_lock (
    simple_lock_t   addr)
{
    db_printf ("%08x %3d", addr, *hw_lock_addr(addr->interlock));
#if	USLOCK_DEBUG
    db_printf (" %08x", addr->debug.lock_thread);
    db_printf (" %08x ", addr->debug.duration[1]);
    db_printsym ((int)addr->debug.lock_pc, DB_STGY_ANY);
#endif	/* USLOCK_DEBUG */
    db_printf ("\n");
}

void
db_show_one_mutex (db_expr_t addr, boolean_t have_addr,
           __unused db_expr_t count,
           __unused char *modif)
{
    mutex_t *maddr = (mutex_t *)(unsigned long)addr;

    if (maddr == (mutex_t *)0 || !have_addr)
        db_error ("No mutex\n");
#if	MACH_LDEBUG
    else if (maddr->lck_mtx_deb.type != MUTEX_TAG)
        db_error ("Not a mutex\n");
#endif	/* MACH_LDEBUG */

    db_printf ("%s\n", mutex_labels);
    db_print_mutex (maddr);
}

void
db_print_mutex (
    mutex_t *addr)
{
    db_printf ("%08x %6d %7d",
               addr, *addr, addr->lck_mtx.lck_mtx_waiters);
#if	MACH_LDEBUG
    db_printf (" %08x ", addr->lck_mtx_deb.thread);
    db_printsym (addr->lck_mtx_deb.stack[0], DB_STGY_ANY);
#endif	/* MACH_LDEBUG */
    db_printf ("\n");
}

void
db_show_one_lock(
    lock_t  *lock)
{
    db_printf("shared_count = 0x%x, %swant_upgrade, %swant_exclusive, ",
              lock->lck_rw.lck_rw_shared_cnt,
              lock->lck_rw.lck_rw_want_upgrade ? "" : "!",
              lock->lck_rw.lck_rw_want_excl ? "" : "!");
    db_printf("%swaiting\n",
              lock->lck_rw.lck_rw_waiting ? "" : "!");
    db_printf("%sInterlock\n",
              lock->lck_rw.lck_rw_interlock ? "" : "!");
}

#endif	/* MACH_KDB */