/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Locking primitives implementation
 */
#include <mach_ldebug.h>

#include <kern/kalloc.h>
#include <kern/lock.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>

#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_print.h>

#include <ppc/Firmware.h>

#include <sys/kdebug.h>
#define	LCK_RW_LCK_EXCLUSIVE_CODE	0x100
#define	LCK_RW_LCK_EXCLUSIVE1_CODE	0x101
#define	LCK_RW_LCK_SHARED_CODE		0x102
#define	LCK_RW_LCK_SH_TO_EX_CODE	0x103
#define	LCK_RW_LCK_SH_TO_EX1_CODE	0x104
#define	LCK_RW_LCK_EX_TO_SH_CODE	0x105

#define	ANY_LOCK_DEBUG	(USLOCK_DEBUG || LOCK_DEBUG || MUTEX_DEBUG)
unsigned int lock_wait_time[2] = { (unsigned int)-1, 0 };

#if	USLOCK_DEBUG
/*
 *	Perform simple lock checks.
 */
int	uslock_check = 1;
int	max_lock_loops	= 100000000;
decl_simple_lock_data(extern , printf_lock)
decl_simple_lock_data(extern , panic_lock)
#if	MACH_KDB
decl_simple_lock_data(extern , kdb_lock)
#endif	/* MACH_KDB */
#endif	/* USLOCK_DEBUG */
/*
 *	We often want to know the addresses of the callers
 *	of the various lock routines.  However, this information
 *	is only used for debugging and statistics.
 */
#define	INVALID_PC	((void *) VM_MAX_KERNEL_ADDRESS)
#define	INVALID_THREAD	((void *) VM_MAX_KERNEL_ADDRESS)
#if	ANY_LOCK_DEBUG
#define	OBTAIN_PC(pc,l)	((pc) = (void *) GET_RETURN_PC(&(l)))
#else	/* ANY_LOCK_DEBUG */
#ifdef	lint
/*
 *	Eliminate lint complaints about unused local pc variables.
 */
#define	OBTAIN_PC(pc,l)	++pc
#else	/* lint */
#define	OBTAIN_PC(pc,l)
#endif	/* lint */
#endif	/* ANY_LOCK_DEBUG */
/*
 *	Portable lock package implementation of usimple_locks.
 */

#if	USLOCK_DEBUG
#define	USLDBG(stmt)	stmt
void		usld_lock_init(usimple_lock_t, unsigned short);
void		usld_lock_pre(usimple_lock_t, pc_t);
void		usld_lock_post(usimple_lock_t, pc_t);
void		usld_unlock(usimple_lock_t, pc_t);
void		usld_lock_try_pre(usimple_lock_t, pc_t);
void		usld_lock_try_post(usimple_lock_t, pc_t);
int		usld_lock_common_checks(usimple_lock_t, char *);
#else	/* USLOCK_DEBUG */
#define	USLDBG(stmt)
#endif	/* USLOCK_DEBUG */
/*
 *      Routine:        lck_spin_alloc_init
 */
lck_spin_t *
lck_spin_alloc_init(
	lck_grp_t	*grp,
	lck_attr_t	*attr) {
	lck_spin_t	*lck;

	if ((lck = (lck_spin_t *)kalloc(sizeof(lck_spin_t))) != 0)
		lck_spin_init(lck, grp, attr);

	return(lck);
}

/*
 *      Routine:        lck_spin_free
 */
void
lck_spin_free(
	lck_spin_t	*lck,
	lck_grp_t	*grp) {
	lck_spin_destroy(lck, grp);
	kfree((void *)lck, sizeof(lck_spin_t));
}

/*
 *      Routine:        lck_spin_init
 */
void
lck_spin_init(
	lck_spin_t		*lck,
	lck_grp_t		*grp,
	__unused lck_attr_t	*attr) {

	lck->interlock = 0;
	lck_grp_reference(grp);
	lck_grp_lckcnt_incr(grp, LCK_TYPE_SPIN);
}

/*
 *      Routine:        lck_spin_destroy
 */
void
lck_spin_destroy(
	lck_spin_t	*lck,
	lck_grp_t	*grp) {
	if (lck->interlock == LCK_SPIN_TAG_DESTROYED)
		return;
	lck->interlock = LCK_SPIN_TAG_DESTROYED;
	lck_grp_lckcnt_decr(grp, LCK_TYPE_SPIN);
	lck_grp_deallocate(grp);
}
/*
 *	Initialize a usimple_lock.
 *
 *	No change in preemption state.
 */
void
usimple_lock_init(
	usimple_lock_t	l,
	unsigned short	tag)
{
#ifndef	MACHINE_SIMPLE_LOCK
	USLDBG(usld_lock_init(l, tag));
	hw_lock_init(&l->interlock);
#else
	simple_lock_init((simple_lock_t)l, tag);
#endif
}
/*
 *	Acquire a usimple_lock.
 *
 *	Returns with preemption disabled.  Note
 *	that the hw_lock routines are responsible for
 *	maintaining preemption state.
 */
void
usimple_lock(
	usimple_lock_t	l)
{
#ifndef	MACHINE_SIMPLE_LOCK
	pc_t		pc;
#if	USLOCK_DEBUG
	int		count = 0;
#endif	/* USLOCK_DEBUG */

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_pre(l, pc));

	if(!hw_lock_to(&l->interlock, LockTimeOut))	/* Try to get the lock with a timeout */
		panic("simple lock deadlock detection - l=0x%08X, cpu=%d, ret=0x%08X", l, cpu_number(), pc);

	USLDBG(usld_lock_post(l, pc));
#else
	simple_lock((simple_lock_t)l);
#endif
}
/*
 *	Release a usimple_lock.
 *
 *	Returns with preemption enabled.  Note
 *	that the hw_lock routines are responsible for
 *	maintaining preemption state.
 */
void
usimple_unlock(
	usimple_lock_t	l)
{
#ifndef	MACHINE_SIMPLE_LOCK
	pc_t	pc;

	OBTAIN_PC(pc, l);
	USLDBG(usld_unlock(l, pc));
	hw_lock_unlock(&l->interlock);
#else
	simple_unlock_rwmb((simple_lock_t)l);
#endif
}
/*
 *	Conditionally acquire a usimple_lock.
 *
 *	On success, returns with preemption disabled.
 *	On failure, returns with preemption in the same state
 *	as when first invoked.  Note that the hw_lock routines
 *	are responsible for maintaining preemption state.
 *
 *	XXX No stats are gathered on a miss; I preserved this
 *	behavior from the original assembly-language code, but
 *	doesn't it make sense to log misses?  XXX
 */
unsigned int
usimple_lock_try(
	usimple_lock_t	l)
{
#ifndef	MACHINE_SIMPLE_LOCK
	pc_t		pc;
	unsigned int	success;

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_try_pre(l, pc));
	if ((success = hw_lock_try(&l->interlock))) {
		USLDBG(usld_lock_try_post(l, pc));
	}
	return success;
#else
	return(simple_lock_try((simple_lock_t)l));
#endif
}
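/*
 * Illustrative usage sketch of the usimple_lock interface described above.
 * The storage type usimple_lock_data_t and the tag value 0 are assumptions
 * made only for this sketch.
 */
#if 0	/* example only -- not compiled */
static usimple_lock_data_t	example_usl;

static void
example_usimple_usage(void)
{
	usimple_lock_init(&example_usl, 0);

	usimple_lock(&example_usl);		/* returns with preemption disabled */
	/* ... short, non-blocking critical section ... */
	usimple_unlock(&example_usl);		/* preemption state handled by the hw_lock layer */

	if (usimple_lock_try(&example_usl)) {	/* non-blocking attempt */
		/* ... */
		usimple_unlock(&example_usl);
	}
}
#endif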
#if	USLOCK_DEBUG
/*
 *	States of a usimple_lock.  The default when initializing
 *	a usimple_lock is setting it up for debug checking.
 */
#define	USLOCK_CHECKED		0x0001		/* lock is being checked */
#define	USLOCK_TAKEN		0x0002		/* lock has been taken */
#define	USLOCK_INIT		0xBAA0		/* lock has been initialized */
#define	USLOCK_INITIALIZED	(USLOCK_INIT|USLOCK_CHECKED)
#define	USLOCK_CHECKING(l)	(uslock_check && \
				 ((l)->debug.state & USLOCK_CHECKED))

/*
 *	Trace activities of a particularly interesting lock.
 */
void	usl_trace(usimple_lock_t, int, pc_t, const char *);
/*
 *	Initialize the debugging information contained
 *	in a usimple_lock.
 */
void
usld_lock_init(
	usimple_lock_t		l,
	__unused unsigned short	tag)
{
	if (l == USIMPLE_LOCK_NULL)
		panic("lock initialization:  null lock pointer");
	l->lock_type = USLOCK_TAG;
	l->debug.state = uslock_check ? USLOCK_INITIALIZED : 0;
	l->debug.lock_cpu = l->debug.unlock_cpu = 0;
	l->debug.lock_pc = l->debug.unlock_pc = INVALID_PC;
	l->debug.lock_thread = l->debug.unlock_thread = INVALID_THREAD;
	l->debug.duration[0] = l->debug.duration[1] = 0;
	l->debug.unlock_cpu = l->debug.unlock_cpu = 0;
	l->debug.unlock_pc = l->debug.unlock_pc = INVALID_PC;
	l->debug.unlock_thread = l->debug.unlock_thread = INVALID_THREAD;
}
/*
 *	These checks apply to all usimple_locks, not just
 *	those with USLOCK_CHECKED turned on.
 */
int
usld_lock_common_checks(
	usimple_lock_t	l,
	char		*caller)
{
	if (l == USIMPLE_LOCK_NULL)
		panic("%s:  null lock pointer", caller);
	if (l->lock_type != USLOCK_TAG)
		panic("%s:  0x%x is not a usimple lock", caller, (integer_t) l);
	if (!(l->debug.state & USLOCK_INIT))
		panic("%s:  0x%x is not an initialized lock",
		      caller, (integer_t) l);
	return USLOCK_CHECKING(l);
}
/*
 *	Debug checks on a usimple_lock just before attempting
 *	to acquire it.
 */
void
usld_lock_pre(
	usimple_lock_t	l,
	pc_t		pc)
{
	char	*caller = "usimple_lock";

	if (!usld_lock_common_checks(l, caller))
		return;

/*
 *	Note that we have a weird case where we are getting a lock when we are
 *	in the process of putting the system to sleep.  We are running with no
 *	current threads, therefore we can't tell if we are trying to retake a lock
 *	we have or someone on the other processor has it.  Therefore we just
 *	ignore this test if the locking thread is 0.
 */

	if ((l->debug.state & USLOCK_TAKEN) && l->debug.lock_thread &&
	    l->debug.lock_thread == (void *) current_thread()) {
		printf("%s:  lock 0x%x already locked (at 0x%x) by",
		       caller, (integer_t) l, l->debug.lock_pc);
		printf(" current thread 0x%x (new attempt at pc 0x%x)\n",
		       l->debug.lock_thread, pc);
		panic(caller);
	}
	mp_disable_preemption();
	usl_trace(l, cpu_number(), pc, caller);
	mp_enable_preemption();
}
/*
 *	Debug checks on a usimple_lock just after acquiring it.
 *
 *	Pre-emption has been disabled at this point,
 *	so we are safe in using cpu_number.
 */
void
usld_lock_post(
	usimple_lock_t	l,
	pc_t		pc)
{
	register int	mycpu;
	char	*caller = "successful usimple_lock";

	if (!usld_lock_common_checks(l, caller))
		return;

	if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
		panic("%s:  lock 0x%x became uninitialized",
		      caller, (integer_t) l);
	if ((l->debug.state & USLOCK_TAKEN))
		panic("%s:  lock 0x%x became TAKEN by someone else",
		      caller, (integer_t) l);

	mycpu = cpu_number();
	l->debug.lock_thread = (void *)current_thread();
	l->debug.state |= USLOCK_TAKEN;
	l->debug.lock_pc = pc;
	l->debug.lock_cpu = mycpu;

	usl_trace(l, mycpu, pc, caller);
}
/*
 *	Debug checks on a usimple_lock just before
 *	releasing it.  Note that the caller has not
 *	yet released the hardware lock.
 *
 *	Preemption is still disabled, so there's
 *	no problem using cpu_number.
 */
void
usld_unlock(
	usimple_lock_t	l,
	pc_t		pc)
{
	register int	mycpu;
	char	*caller = "usimple_unlock";

	if (!usld_lock_common_checks(l, caller))
		return;

	mycpu = cpu_number();

	if (!(l->debug.state & USLOCK_TAKEN))
		panic("%s:  lock 0x%x hasn't been taken",
		      caller, (integer_t) l);
	if (l->debug.lock_thread != (void *) current_thread())
		panic("%s:  unlocking lock 0x%x, owned by thread 0x%x",
		      caller, (integer_t) l, l->debug.lock_thread);
	if (l->debug.lock_cpu != mycpu) {
		printf("%s:  unlocking lock 0x%x on cpu 0x%x",
		       caller, (integer_t) l, mycpu);
		printf(" (acquired on cpu 0x%x)\n", l->debug.lock_cpu);
		panic(caller);
	}
	usl_trace(l, mycpu, pc, caller);

	l->debug.unlock_thread = l->debug.lock_thread;
	l->debug.lock_thread = INVALID_THREAD;
	l->debug.state &= ~USLOCK_TAKEN;
	l->debug.unlock_pc = pc;
	l->debug.unlock_cpu = mycpu;
}
/*
 *	Debug checks on a usimple_lock just before
 *	attempting to acquire it.
 *
 *	Preemption isn't guaranteed to be disabled.
 */
void
usld_lock_try_pre(
	usimple_lock_t	l,
	pc_t		pc)
{
	char	*caller = "usimple_lock_try";

	if (!usld_lock_common_checks(l, caller))
		return;
	mp_disable_preemption();
	usl_trace(l, cpu_number(), pc, caller);
	mp_enable_preemption();
}
/*
 *	Debug checks on a usimple_lock just after
 *	successfully attempting to acquire it.
 *
 *	Preemption has been disabled by the
 *	lock acquisition attempt, so it's safe
 *	to use cpu_number.
 */
void
usld_lock_try_post(
	usimple_lock_t	l,
	pc_t		pc)
{
	register int	mycpu;
	char	*caller = "successful usimple_lock_try";

	if (!usld_lock_common_checks(l, caller))
		return;

	if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
		panic("%s:  lock 0x%x became uninitialized",
		      caller, (integer_t) l);
	if ((l->debug.state & USLOCK_TAKEN))
		panic("%s:  lock 0x%x became TAKEN by someone else",
		      caller, (integer_t) l);

	mycpu = cpu_number();
	l->debug.lock_thread = (void *) current_thread();
	l->debug.state |= USLOCK_TAKEN;
	l->debug.lock_pc = pc;
	l->debug.lock_cpu = mycpu;

	usl_trace(l, mycpu, pc, caller);
}
/*
 *	For very special cases, set traced_lock to point to a
 *	specific lock of interest.  The result is a series of
 *	XPRs showing lock operations on that lock.  The lock_seq
 *	value is used to show the order of those operations.
 */
usimple_lock_t		traced_lock;
unsigned int		lock_seq;

void
usl_trace(
	usimple_lock_t	l,
	int		mycpu,
	pc_t		pc,
	const char *	op_name)
{
	if (traced_lock == l) {
		XPR(XPR_SLOCK,
		    "seq %d, cpu %d, %s @ %x\n",
		    (integer_t) lock_seq, (integer_t) mycpu,
		    (integer_t) op_name, (integer_t) pc, 0);
		lock_seq++;
	}
}

#endif	/* USLOCK_DEBUG */
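/*
 * Illustrative sketch: to trace a single lock of interest, point traced_lock
 * at it (for example from kdb or early initialization); every subsequent
 * lock/unlock/try on that lock is then reported through usl_trace() with an
 * increasing lock_seq.  The lock name below is hypothetical.
 */
#if 0	/* example only -- not compiled */
extern usimple_lock_data_t	some_interesting_lock;

static void
example_trace_one_lock(void)
{
	lock_seq = 0;
	traced_lock = &some_interesting_lock;
}
#endif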
/*
 *	The C portion of the shared/exclusive locks package.
 */

/*
 *	Forward definition
 */

void		lck_rw_lock_exclusive_gen(lck_rw_t *lck);
lck_rw_type_t	lck_rw_done_gen(lck_rw_t *lck);
void		lck_rw_lock_shared_gen(lck_rw_t *lck);
boolean_t	lck_rw_lock_shared_to_exclusive_gen(lck_rw_t *lck);
void		lck_rw_lock_exclusive_to_shared_gen(lck_rw_t *lck);
boolean_t	lck_rw_try_lock_exclusive_gen(lck_rw_t *lck);
boolean_t	lck_rw_try_lock_shared_gen(lck_rw_t *lck);

void		lck_rw_ext_init(lck_rw_ext_t *lck, lck_grp_t *grp, lck_attr_t *attr);
void		lck_rw_ext_backtrace(lck_rw_ext_t *lck);
void		lck_rw_lock_exclusive_ext(lck_rw_ext_t *lck, lck_rw_t *rlck);
lck_rw_type_t	lck_rw_done_ext(lck_rw_ext_t *lck, lck_rw_t *rlck);
void		lck_rw_lock_shared_ext(lck_rw_ext_t *lck, lck_rw_t *rlck);
boolean_t	lck_rw_lock_shared_to_exclusive_ext(lck_rw_ext_t *lck, lck_rw_t *rlck);
void		lck_rw_lock_exclusive_to_shared_ext(lck_rw_ext_t *lck, lck_rw_t *rlck);
boolean_t	lck_rw_try_lock_exclusive_ext(lck_rw_ext_t *lck, lck_rw_t *rlck);
boolean_t	lck_rw_try_lock_shared_ext(lck_rw_ext_t *lck, lck_rw_t *rlck);
/*
 *      Routine:        lock_alloc
 *      Function:
 *		Allocate a lock for external users who cannot
 *		hard-code the structure definition into their
 *		objects.
 *		For now just use kalloc, but a zone is probably
 *		warranted.
 */
lock_t *
lock_alloc(
	boolean_t		can_sleep,
	__unused unsigned short	tag,
	__unused unsigned short	tag1)
{
	lock_t	*lck;

	if ((lck = (lock_t *)kalloc(sizeof(lock_t))) != 0)
		lock_init(lck, can_sleep, tag, tag1);
	return(lck);
}
/*
 *      Routine:        lock_init
 *      Function:
 *		Initialize a lock; required before use.
 *		Note that clients declare the "struct lock"
 *		variables and then initialize them, rather
 *		than getting a new one from this module.
 */
void
lock_init(
	lock_t			*lck,
	boolean_t		can_sleep,
	__unused unsigned short	tag,
	__unused unsigned short	tag1)
{
	if (!can_sleep)
		panic("lock_init: sleep mode must be set to TRUE\n");

	(void) memset((void *) lck, 0, sizeof(lock_t));

	lck->lck_rw_deb.type = RW_TAG;
	lck->lck_rw_attr |= (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_THREAD|LCK_RW_ATTR_DIS_MYLOCK);
}
/*
 *      Routine:        lock_free
 *      Function:
 *		Free a lock allocated for external users.
 *		For now just use kfree, but a zone is probably
 *		warranted.
 */
void
lock_free(
	lock_t	*lck)
{
	kfree((void *)lck, sizeof(lock_t));
}
void
lock_write(
	register lock_t	*lck)
{
	lck_rw_lock_exclusive_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
}

void
lock_done(
	register lock_t	*lck)
{
	(void)lck_rw_done_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
}

void
lock_read(
	register lock_t	*lck)
{
	lck_rw_lock_shared_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
}

boolean_t
lock_read_to_write(
	register lock_t	*lck)
{
	return(lck_rw_lock_shared_to_exclusive_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck));
}

void
lock_write_to_read(
	register lock_t	*lck)
{
	lck_rw_lock_exclusive_to_shared_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
}
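/*
 * Illustrative sketch of the external lock_t interface wrapped above.  The
 * upgrade handling follows the semantics documented for
 * lck_rw_lock_shared_to_exclusive_ext() below (TRUE means the upgrade failed
 * and the shared hold was dropped); the helper name is hypothetical.
 */
#if 0	/* example only -- not compiled */
static void
example_lock_t_usage(void)
{
	lock_t	*l;

	l = lock_alloc(TRUE, 0, 0);		/* sleep locks only; tags are unused */

	lock_write(l);				/* exclusive */
	lock_write_to_read(l);			/* downgrade to shared */
	lock_done(l);				/* release whatever is held */

	lock_read(l);				/* shared */
	if (lock_read_to_write(l))		/* TRUE: upgrade failed, nothing held */
		lock_write(l);			/* start over with a plain write lock */
	lock_done(l);

	lock_free(l);
}
#endif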
/*
 *      Routine:        lck_rw_alloc_init
 */
lck_rw_t *
lck_rw_alloc_init(
	lck_grp_t	*grp,
	lck_attr_t	*attr) {
	lck_rw_t	*lck;

	if ((lck = (lck_rw_t *)kalloc(sizeof(lck_rw_t))) != 0)
		lck_rw_init(lck, grp, attr);

	return(lck);
}

/*
 *      Routine:        lck_rw_free
 */
void
lck_rw_free(
	lck_rw_t	*lck,
	lck_grp_t	*grp) {
	lck_rw_destroy(lck, grp);
	kfree((void *)lck, sizeof(lck_rw_t));
}
/*
 *      Routine:        lck_rw_init
 */
void
lck_rw_init(
	lck_rw_t	*lck,
	lck_grp_t	*grp,
	lck_attr_t	*attr) {
	lck_rw_ext_t	*lck_ext;
	lck_attr_t	*lck_attr;

	if (attr != LCK_ATTR_NULL)
		lck_attr = attr;
	else
		lck_attr = &LockDefaultLckAttr;

	if ((lck_attr->lck_attr_val) & LCK_ATTR_DEBUG) {
		if ((lck_ext = (lck_rw_ext_t *)kalloc(sizeof(lck_rw_ext_t))) != 0) {
			lck_rw_ext_init(lck_ext, grp, lck_attr);
			lck->lck_rw_tag = LCK_RW_TAG_INDIRECT;
			lck->lck_rw_ptr = lck_ext;
		}
	} else
		(void) memset((void *) lck, 0, sizeof(lck_rw_t));

	lck_grp_reference(grp);
	lck_grp_lckcnt_incr(grp, LCK_TYPE_RW);
}
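/*
 * Illustrative sketch of setting up a group and an rw lock with the routines
 * above.  lck_grp_alloc_init() is assumed to be available from kern/locks.h;
 * the variable names are hypothetical.  With a debug attribute the lock is
 * switched to the indirect lck_rw_ext_t representation; otherwise it is
 * simply zeroed in place.
 */
#if 0	/* example only -- not compiled */
static lck_grp_t	*example_grp;
static lck_rw_t		*example_rw_lock;

static void
example_rw_setup(void)
{
	example_grp = lck_grp_alloc_init("example", LCK_GRP_ATTR_NULL);
	example_rw_lock = lck_rw_alloc_init(example_grp, LCK_ATTR_NULL);
}
#endif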
/*
 *      Routine:        lck_rw_ext_init
 */
void
lck_rw_ext_init(
	lck_rw_ext_t	*lck,
	lck_grp_t	*grp,
	lck_attr_t	*attr) {

	bzero((void *)lck, sizeof(lck_rw_ext_t));

	if ((attr->lck_attr_val) & LCK_ATTR_DEBUG) {
		lck->lck_rw_deb.type = RW_TAG;
		lck->lck_rw_attr |= LCK_RW_ATTR_DEBUG;
	}

	lck->lck_rw_grp = grp;

	if (grp->lck_grp_attr & LCK_GRP_ATTR_STAT)
		lck->lck_rw_attr |= LCK_RW_ATTR_STAT;
}
/*
 *      Routine:        lck_rw_destroy
 */
void
lck_rw_destroy(
	lck_rw_t	*lck,
	lck_grp_t	*grp) {
	boolean_t lck_is_indirect;

	if (lck->lck_rw_tag == LCK_RW_TAG_DESTROYED)
		return;
	lck_is_indirect = (lck->lck_rw_tag == LCK_RW_TAG_INDIRECT);
	lck->lck_rw_tag = LCK_RW_TAG_DESTROYED;
	if (lck_is_indirect)
		kfree((void *)lck->lck_rw_ptr, sizeof(lck_rw_ext_t));

	lck_grp_lckcnt_decr(grp, LCK_TYPE_RW);
	lck_grp_deallocate(grp);
}
/*
 *      Routine:        lck_rw_lock
 */
void
lck_rw_lock(
	lck_rw_t	*lck,
	lck_rw_type_t	lck_rw_type)
{
	if (lck_rw_type == LCK_RW_TYPE_SHARED)
		lck_rw_lock_shared(lck);
	else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
		lck_rw_lock_exclusive(lck);
	else
		panic("lck_rw_lock(): Invalid RW lock type: %d\n", lck_rw_type);
}

/*
 *      Routine:        lck_rw_unlock
 */
void
lck_rw_unlock(
	lck_rw_t	*lck,
	lck_rw_type_t	lck_rw_type)
{
	if (lck_rw_type == LCK_RW_TYPE_SHARED)
		lck_rw_unlock_shared(lck);
	else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
		lck_rw_unlock_exclusive(lck);
	else
		panic("lck_rw_unlock(): Invalid RW lock type: %d\n", lck_rw_type);
}
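/*
 * Illustrative sketch of the type-parameterized entry points above; the lock
 * variable name is hypothetical.
 */
#if 0	/* example only -- not compiled */
static void
example_rw_lock_unlock(lck_rw_t *example_rw_lock)
{
	lck_rw_lock(example_rw_lock, LCK_RW_TYPE_SHARED);
	/* ... multiple readers may hold the lock concurrently ... */
	lck_rw_unlock(example_rw_lock, LCK_RW_TYPE_SHARED);

	lck_rw_lock(example_rw_lock, LCK_RW_TYPE_EXCLUSIVE);
	/* ... single writer ... */
	lck_rw_unlock(example_rw_lock, LCK_RW_TYPE_EXCLUSIVE);
}
#endif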
/*
 *      Routine:        lck_rw_unlock_shared
 */
void
lck_rw_unlock_shared(
	lck_rw_t	*lck)
{
	lck_rw_type_t	ret;

	ret = lck_rw_done(lck);

	if (ret != LCK_RW_TYPE_SHARED)
		panic("lck_rw_unlock(): lock held in mode: %d\n", ret);
}

/*
 *      Routine:        lck_rw_unlock_exclusive
 */
void
lck_rw_unlock_exclusive(
	lck_rw_t	*lck)
{
	lck_rw_type_t	ret;

	ret = lck_rw_done(lck);

	if (ret != LCK_RW_TYPE_EXCLUSIVE)
		panic("lck_rw_unlock_exclusive(): lock held in mode: %d\n", ret);
}
/*
 *      Routine:        lck_rw_try_lock
 */
boolean_t
lck_rw_try_lock(
	lck_rw_t	*lck,
	lck_rw_type_t	lck_rw_type)
{
	if (lck_rw_type == LCK_RW_TYPE_SHARED)
		return(lck_rw_try_lock_shared(lck));
	else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
		return(lck_rw_try_lock_exclusive(lck));
	else
		panic("lck_rw_try_lock(): Invalid rw lock type: %x\n", lck_rw_type);
	return FALSE;
}
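/*
 * Illustrative sketch of the non-blocking path; the lock name is hypothetical.
 */
#if 0	/* example only -- not compiled */
static void
example_rw_try(lck_rw_t *example_rw_lock)
{
	if (lck_rw_try_lock(example_rw_lock, LCK_RW_TYPE_EXCLUSIVE)) {
		/* acquired without blocking */
		lck_rw_unlock(example_rw_lock, LCK_RW_TYPE_EXCLUSIVE);
	} else {
		/* lock busy: back off or fall back to a blocking acquire */
	}
}
#endif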
/*
 *      Routine:        lck_rw_lock_exclusive_gen
 */
void
lck_rw_lock_exclusive_gen(
	lck_rw_t	*lck)
{
	int		i;
	wait_result_t	res;
	boolean_t	lock_miss = FALSE;

	lck_rw_ilk_lock(lck);

	/*
	 *	Try to acquire the lck_rw_want_excl bit.
	 */
	while (lck->lck_rw_want_excl) {
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_START, (int)lck, 0, 0, 0, 0);

		i = lock_wait_time[1];
		if (i != 0) {
			lck_rw_ilk_unlock(lck);
			while (--i != 0 && lck->lck_rw_want_excl)
				continue;
			lck_rw_ilk_lock(lck);
		}

		if (lck->lck_rw_want_excl) {
			lck->lck_rw_waiting = TRUE;
			res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
			if (res == THREAD_WAITING) {
				lck_rw_ilk_unlock(lck);
				res = thread_block(THREAD_CONTINUE_NULL);
				lck_rw_ilk_lock(lck);
			}
		}
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_END, (int)lck, res, 0, 0, 0);
	}
	lck->lck_rw_want_excl = TRUE;

	/* Wait for readers (and upgrades) to finish */

	while ((lck->lck_rw_shared_cnt != 0) || lck->lck_rw_want_upgrade) {
		i = lock_wait_time[1];

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_START,
			     (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, i, 0);

		if (i != 0) {
			lck_rw_ilk_unlock(lck);
			while (--i != 0 && (lck->lck_rw_shared_cnt != 0 ||
					    lck->lck_rw_want_upgrade))
				continue;
			lck_rw_ilk_lock(lck);
		}

		if (lck->lck_rw_shared_cnt != 0 || lck->lck_rw_want_upgrade) {
			lck->lck_rw_waiting = TRUE;
			res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
			if (res == THREAD_WAITING) {
				lck_rw_ilk_unlock(lck);
				res = thread_block(THREAD_CONTINUE_NULL);
				lck_rw_ilk_lock(lck);
			}
		}
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_END,
			     (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, res, 0);
	}

	lck_rw_ilk_unlock(lck);
}
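/*
 * Note on the wait/wakeup event used above:
 * (event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int)))
 * is the address of the last 32-bit word of the lck_rw_t.  assert_wait() in
 * the acquire paths and thread_wakeup() in the release paths derive the event
 * the same way, so they agree on one event per lock without any extra storage
 * in the lock itself.
 */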
/*
 *      Routine:        lck_rw_done_gen
 */
lck_rw_type_t
lck_rw_done_gen(
	lck_rw_t	*lck)
{
	boolean_t	do_wakeup = FALSE;
	lck_rw_type_t	lck_rw_type;

	lck_rw_ilk_lock(lck);

	if (lck->lck_rw_shared_cnt != 0) {
		lck_rw_type = LCK_RW_TYPE_SHARED;
		lck->lck_rw_shared_cnt--;
	}
	else {
		lck_rw_type = LCK_RW_TYPE_EXCLUSIVE;
		if (lck->lck_rw_want_upgrade)
			lck->lck_rw_want_upgrade = FALSE;
		else
			lck->lck_rw_want_excl = FALSE;
	}

	/*
	 *	There is no reason to wakeup a lck_rw_waiting thread
	 *	if the read-count is non-zero.  Consider:
	 *		we must be dropping a read lock
	 *		threads are waiting only if one wants a write lock
	 *		if there are still readers, they can't proceed
	 */

	if (lck->lck_rw_waiting && (lck->lck_rw_shared_cnt == 0)) {
		lck->lck_rw_waiting = FALSE;
		do_wakeup = TRUE;
	}

	lck_rw_ilk_unlock(lck);

	if (do_wakeup)
		thread_wakeup((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
	return(lck_rw_type);
}
/*
 *      Routine:        lck_rw_lock_shared_gen
 */
void
lck_rw_lock_shared_gen(
	lck_rw_t	*lck)
{
	int		i;
	wait_result_t	res;

	lck_rw_ilk_lock(lck);

	while (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) {
		i = lock_wait_time[1];

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_START,
			     (int)lck, lck->lck_rw_want_excl, lck->lck_rw_want_upgrade, i, 0);

		if (i != 0) {
			lck_rw_ilk_unlock(lck);
			while (--i != 0 && (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade))
				continue;
			lck_rw_ilk_lock(lck);
		}

		if (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) {
			lck->lck_rw_waiting = TRUE;
			res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
			if (res == THREAD_WAITING) {
				lck_rw_ilk_unlock(lck);
				res = thread_block(THREAD_CONTINUE_NULL);
				lck_rw_ilk_lock(lck);
			}
		}
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_END,
			     (int)lck, lck->lck_rw_want_excl, lck->lck_rw_want_upgrade, res, 0);
	}

	lck->lck_rw_shared_cnt++;

	lck_rw_ilk_unlock(lck);
}
/*
 *      Routine:        lck_rw_lock_shared_to_exclusive_gen
 *      Function:
 *		Improves a read-only lock to one with
 *		write permission.  If another reader has
 *		already requested an upgrade to a write lock,
 *		no lock is held upon return.
 *
 *		Returns TRUE if the upgrade *failed*.
 */

boolean_t
lck_rw_lock_shared_to_exclusive_gen(
	lck_rw_t	*lck)
{
	int		i;
	wait_result_t	res;
	boolean_t	do_wakeup = FALSE;

	lck_rw_ilk_lock(lck);

	lck->lck_rw_shared_cnt--;

	if (lck->lck_rw_want_upgrade) {
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_START,
			     (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, 0, 0);

		/*
		 *	Someone else has requested upgrade.
		 *	Since we've released a read lock, wake
		 *	him up.
		 */
		if (lck->lck_rw_waiting && (lck->lck_rw_shared_cnt == 0)) {
			lck->lck_rw_waiting = FALSE;
			do_wakeup = TRUE;
		}

		lck_rw_ilk_unlock(lck);

		if (do_wakeup)
			thread_wakeup((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_END,
			     (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, 0, 0);

		return (TRUE);
	}

	lck->lck_rw_want_upgrade = TRUE;

	while (lck->lck_rw_shared_cnt != 0) {
		i = lock_wait_time[1];

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_START,
			     (int)lck, lck->lck_rw_shared_cnt, i, 0, 0);

		if (i != 0) {
			lck_rw_ilk_unlock(lck);
			while (--i != 0 && lck->lck_rw_shared_cnt != 0)
				continue;
			lck_rw_ilk_lock(lck);
		}

		if (lck->lck_rw_shared_cnt != 0) {
			lck->lck_rw_waiting = TRUE;
			res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
			if (res == THREAD_WAITING) {
				lck_rw_ilk_unlock(lck);
				res = thread_block(THREAD_CONTINUE_NULL);
				lck_rw_ilk_lock(lck);
			}
		}
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_END,
			     (int)lck, lck->lck_rw_shared_cnt, 0, 0, 0);
	}

	lck_rw_ilk_unlock(lck);

	return (FALSE);
}
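/*
 * Illustrative sketch of the upgrade protocol documented above, using the
 * public shared/exclusive entry points and assuming they report failure the
 * same way as lck_rw_lock_shared_to_exclusive_gen() does here (TRUE means the
 * upgrade lost the race and the shared hold was dropped).  The lock name is
 * hypothetical.
 */
#if 0	/* example only -- not compiled */
static void
example_rw_upgrade(lck_rw_t *example_rw_lock)
{
	lck_rw_lock_shared(example_rw_lock);
	/* ... discover that an update is required ... */
	if (lck_rw_lock_shared_to_exclusive(example_rw_lock)) {
		/* upgrade failed and nothing is held: reacquire exclusively */
		lck_rw_lock_exclusive(example_rw_lock);
	}
	/* ... exclusive access either way ... */
	lck_rw_done(example_rw_lock);
}
#endif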
/*
 *      Routine:        lck_rw_lock_exclusive_to_shared_gen
 */
void
lck_rw_lock_exclusive_to_shared_gen(
	lck_rw_t	*lck)
{
	boolean_t	do_wakeup = FALSE;

	lck_rw_ilk_lock(lck);

	lck->lck_rw_shared_cnt++;
	if (lck->lck_rw_want_upgrade)
		lck->lck_rw_want_upgrade = FALSE;
	else
		lck->lck_rw_want_excl = FALSE;

	if (lck->lck_rw_waiting) {
		lck->lck_rw_waiting = FALSE;
		do_wakeup = TRUE;
	}

	lck_rw_ilk_unlock(lck);

	if (do_wakeup)
		thread_wakeup((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
}
/*
 *      Routine:        lck_rw_try_lock_exclusive_gen
 *      Function:
 *		Tries to get a write lock.
 *
 *		Returns FALSE if the lock is not held on return.
 */

boolean_t
lck_rw_try_lock_exclusive_gen(
	lck_rw_t	*lck)
{
	lck_rw_ilk_lock(lck);

	if (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade || lck->lck_rw_shared_cnt) {
		lck_rw_ilk_unlock(lck);
		return(FALSE);
	}

	lck->lck_rw_want_excl = TRUE;

	lck_rw_ilk_unlock(lck);

	return(TRUE);
}
/*
 *      Routine:        lck_rw_try_lock_shared_gen
 *      Function:
 *		Tries to get a read lock.
 *
 *		Returns FALSE if the lock is not held on return.
 */

boolean_t
lck_rw_try_lock_shared_gen(
	lck_rw_t	*lck)
{
	lck_rw_ilk_lock(lck);

	if (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) {
		lck_rw_ilk_unlock(lck);
		return(FALSE);
	}

	lck->lck_rw_shared_cnt++;

	lck_rw_ilk_unlock(lck);

	return(TRUE);
}
/*
 *      Routine:        lck_rw_ext_backtrace
 */
void
lck_rw_ext_backtrace(
	lck_rw_ext_t	*lck)
{
	unsigned int	*stackptr, *stackptr_prev;
	unsigned int	frame;

	__asm__ volatile("mr %0,r1" : "=r" (stackptr));
	frame = 0;
	while (frame < LCK_FRAMES_MAX) {
		stackptr_prev = stackptr;
		stackptr = ( unsigned int *)*stackptr;
		if ( (((unsigned int)stackptr_prev) ^ ((unsigned int)stackptr)) > 8192)
			break;
		lck->lck_rw_deb.stack[frame] = *(stackptr+2);
		frame++;
	}
	while (frame < LCK_FRAMES_MAX) {
		lck->lck_rw_deb.stack[frame] = 0;
		frame++;
	}
}
/*
 *      Routine:        lck_rw_lock_exclusive_ext
 */
void
lck_rw_lock_exclusive_ext(
	lck_rw_ext_t	*lck,
	lck_rw_t	*rlck)
{
	int		i;
	wait_result_t	res;
	boolean_t	lock_miss = FALSE;
	boolean_t	lock_wait = FALSE;
	boolean_t	lock_stat;

	lck_rw_check_type(lck, rlck);

	if ( ((lck->lck_rw_attr & (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_MYLOCK)) == LCK_RW_ATTR_DEBUG)
	     && (lck->lck_rw_deb.thread == current_thread()))
		panic("rw lock (0x%08X) recursive lock attempt\n", rlck);

	lck_rw_ilk_lock(&lck->lck_rw);

	lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;

	if (lock_stat)
		lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;

	/*
	 *	Try to acquire the lck_rw.lck_rw_want_excl bit.
	 */
	while (lck->lck_rw.lck_rw_want_excl) {
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_START, (int)rlck, 0, 0, 0, 0);

		if (lock_stat && !lock_miss) {
			lock_miss = TRUE;
			lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
		}

		i = lock_wait_time[1];
		if (i != 0) {
			lck_rw_ilk_unlock(&lck->lck_rw);
			while (--i != 0 && lck->lck_rw.lck_rw_want_excl)
				continue;
			lck_rw_ilk_lock(&lck->lck_rw);
		}

		if (lck->lck_rw.lck_rw_want_excl) {
			lck->lck_rw.lck_rw_waiting = TRUE;
			res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
			if (res == THREAD_WAITING) {
				if (lock_stat && !lock_wait) {
					lock_wait = TRUE;
					lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
				}
				lck_rw_ilk_unlock(&lck->lck_rw);
				res = thread_block(THREAD_CONTINUE_NULL);
				lck_rw_ilk_lock(&lck->lck_rw);
			}
		}
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_END, (int)rlck, res, 0, 0, 0);
	}
	lck->lck_rw.lck_rw_want_excl = TRUE;

	/* Wait for readers (and upgrades) to finish */

	while ((lck->lck_rw.lck_rw_shared_cnt != 0) || lck->lck_rw.lck_rw_want_upgrade) {
		i = lock_wait_time[1];

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_START,
			     (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, i, 0);

		if (lock_stat && !lock_miss) {
			lock_miss = TRUE;
			lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
		}

		if (i != 0) {
			lck_rw_ilk_unlock(&lck->lck_rw);
			while (--i != 0 && (lck->lck_rw.lck_rw_shared_cnt != 0 ||
					    lck->lck_rw.lck_rw_want_upgrade))
				continue;
			lck_rw_ilk_lock(&lck->lck_rw);
		}

		if (lck->lck_rw.lck_rw_shared_cnt != 0 || lck->lck_rw.lck_rw_want_upgrade) {
			lck->lck_rw.lck_rw_waiting = TRUE;
			res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
			if (res == THREAD_WAITING) {
				if (lock_stat && !lock_wait) {
					lock_wait = TRUE;
					lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
				}
				lck_rw_ilk_unlock(&lck->lck_rw);
				res = thread_block(THREAD_CONTINUE_NULL);
				lck_rw_ilk_lock(&lck->lck_rw);
			}
		}
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_END,
			     (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, res, 0);
	}

	lck->lck_rw_deb.pc_excl = __builtin_return_address(0);
	if (LcksOpts & enaLkExtStck)
		lck_rw_ext_backtrace(lck);
	lck->lck_rw_deb.thread = current_thread();

	lck_rw_ilk_unlock(&lck->lck_rw);
}
/*
 *      Routine:        lck_rw_done_ext
 */
lck_rw_type_t
lck_rw_done_ext(
	lck_rw_ext_t	*lck,
	lck_rw_t	*rlck)
{
	boolean_t	do_wakeup = FALSE;
	lck_rw_type_t	lck_rw_type;

	lck_rw_check_type(lck, rlck);

	lck_rw_ilk_lock(&lck->lck_rw);

	if (lck->lck_rw.lck_rw_shared_cnt != 0) {
		lck_rw_type = LCK_RW_TYPE_SHARED;
		lck->lck_rw.lck_rw_shared_cnt--;
	}
	else {
		lck_rw_type = LCK_RW_TYPE_EXCLUSIVE;
		if (lck->lck_rw.lck_rw_want_upgrade)
			lck->lck_rw.lck_rw_want_upgrade = FALSE;
		else if (lck->lck_rw.lck_rw_want_excl)
			lck->lck_rw.lck_rw_want_excl = FALSE;
		else
			panic("rw lock (0x%08X) bad state (0x%08X) on attempt to release a shared or exlusive right\n",
			      rlck, lck->lck_rw);
		if (lck->lck_rw_deb.thread == THREAD_NULL)
			panic("rw lock (0x%08X) not held\n",
			      rlck);
		else if ( ((lck->lck_rw_attr & (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_THREAD)) == LCK_RW_ATTR_DEBUG)
			  && (lck->lck_rw_deb.thread != current_thread()))
			panic("rw lock (0x%08X) unlocked by non-owner(0x%08X), current owner(0x%08X)\n",
			      rlck, current_thread(), lck->lck_rw_deb.thread);
		lck->lck_rw_deb.thread = THREAD_NULL;
	}

	if (lck->lck_rw_attr & LCK_RW_ATTR_DEBUG)
		lck->lck_rw_deb.pc_done = __builtin_return_address(0);

	/*
	 *	There is no reason to wakeup a waiting thread
	 *	if the read-count is non-zero.  Consider:
	 *		we must be dropping a read lock
	 *		threads are waiting only if one wants a write lock
	 *		if there are still readers, they can't proceed
	 */

	if (lck->lck_rw.lck_rw_waiting && (lck->lck_rw.lck_rw_shared_cnt == 0)) {
		lck->lck_rw.lck_rw_waiting = FALSE;
		do_wakeup = TRUE;
	}

	lck_rw_ilk_unlock(&lck->lck_rw);

	if (do_wakeup)
		thread_wakeup((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
	return(lck_rw_type);
}
/*
 *      Routine:        lck_rw_lock_shared_ext
 */
void
lck_rw_lock_shared_ext(
	lck_rw_ext_t	*lck,
	lck_rw_t	*rlck)
{
	int		i;
	wait_result_t	res;
	boolean_t	lock_miss = FALSE;
	boolean_t	lock_wait = FALSE;
	boolean_t	lock_stat;

	lck_rw_check_type(lck, rlck);

	lck_rw_ilk_lock(&lck->lck_rw);

	lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;

	if (lock_stat)
		lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;

	while (lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade) {
		i = lock_wait_time[1];

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_START,
			     (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, i, 0);

		if (lock_stat && !lock_miss) {
			lock_miss = TRUE;
			lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
		}

		if (i != 0) {
			lck_rw_ilk_unlock(&lck->lck_rw);
			while (--i != 0 && (lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade))
				continue;
			lck_rw_ilk_lock(&lck->lck_rw);
		}

		if (lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade) {
			lck->lck_rw.lck_rw_waiting = TRUE;
			res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
			if (res == THREAD_WAITING) {
				if (lock_stat && !lock_wait) {
					lock_wait = TRUE;
					lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
				}
				lck_rw_ilk_unlock(&lck->lck_rw);
				res = thread_block(THREAD_CONTINUE_NULL);
				lck_rw_ilk_lock(&lck->lck_rw);
			}
		}
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_END,
			     (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, res, 0);
	}

	lck->lck_rw.lck_rw_shared_cnt++;

	lck_rw_ilk_unlock(&lck->lck_rw);
}
/*
 *      Routine:        lck_rw_lock_shared_to_exclusive_ext
 *      Function:
 *		Improves a read-only lock to one with
 *		write permission.  If another reader has
 *		already requested an upgrade to a write lock,
 *		no lock is held upon return.
 *
 *		Returns TRUE if the upgrade *failed*.
 */

boolean_t
lck_rw_lock_shared_to_exclusive_ext(
	lck_rw_ext_t	*lck,
	lck_rw_t	*rlck)
{
	int		i;
	wait_result_t	res;
	boolean_t	do_wakeup = FALSE;
	boolean_t	lock_miss = FALSE;
	boolean_t	lock_wait = FALSE;
	boolean_t	lock_stat;

	lck_rw_check_type(lck, rlck);

	if (lck->lck_rw_deb.thread == current_thread())
		panic("rw lock (0x%08X) recursive lock attempt\n", rlck);

	lck_rw_ilk_lock(&lck->lck_rw);

	lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;

	if (lock_stat)
		lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;

	lck->lck_rw.lck_rw_shared_cnt--;

	if (lck->lck_rw.lck_rw_want_upgrade) {
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_START,
			     (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, 0, 0);

		/*
		 *	Someone else has requested upgrade.
		 *	Since we've released a read lock, wake
		 *	him up.
		 */
		if (lck->lck_rw.lck_rw_waiting && (lck->lck_rw.lck_rw_shared_cnt == 0)) {
			lck->lck_rw.lck_rw_waiting = FALSE;
			do_wakeup = TRUE;
		}

		lck_rw_ilk_unlock(&lck->lck_rw);

		if (do_wakeup)
			thread_wakeup((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_END,
			     (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, 0, 0);

		return (TRUE);
	}

	lck->lck_rw.lck_rw_want_upgrade = TRUE;

	while (lck->lck_rw.lck_rw_shared_cnt != 0) {
		i = lock_wait_time[1];

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_START,
			     (int)rlck, lck->lck_rw.lck_rw_shared_cnt, i, 0, 0);

		if (lock_stat && !lock_miss) {
			lock_miss = TRUE;
			lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
		}

		if (i != 0) {
			lck_rw_ilk_unlock(&lck->lck_rw);
			while (--i != 0 && lck->lck_rw.lck_rw_shared_cnt != 0)
				continue;
			lck_rw_ilk_lock(&lck->lck_rw);
		}

		if (lck->lck_rw.lck_rw_shared_cnt != 0) {
			lck->lck_rw.lck_rw_waiting = TRUE;
			res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
			if (res == THREAD_WAITING) {
				if (lock_stat && !lock_wait) {
					lock_wait = TRUE;
					lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
				}
				lck_rw_ilk_unlock(&lck->lck_rw);
				res = thread_block(THREAD_CONTINUE_NULL);
				lck_rw_ilk_lock(&lck->lck_rw);
			}
		}
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_END,
			     (int)rlck, lck->lck_rw.lck_rw_shared_cnt, 0, 0, 0);
	}

	lck->lck_rw_deb.pc_excl = __builtin_return_address(0);
	if (LcksOpts & enaLkExtStck)
		lck_rw_ext_backtrace(lck);
	lck->lck_rw_deb.thread = current_thread();

	lck_rw_ilk_unlock(&lck->lck_rw);

	return (FALSE);
}
/*
 *      Routine:        lck_rw_lock_exclusive_to_shared_ext
 */
void
lck_rw_lock_exclusive_to_shared_ext(
	lck_rw_ext_t	*lck,
	lck_rw_t	*rlck)
{
	boolean_t	do_wakeup = FALSE;

	lck_rw_check_type(lck, rlck);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_START,
		     (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, 0, 0);

	lck_rw_ilk_lock(&lck->lck_rw);

	lck->lck_rw.lck_rw_shared_cnt++;
	if (lck->lck_rw.lck_rw_want_upgrade)
		lck->lck_rw.lck_rw_want_upgrade = FALSE;
	else if (lck->lck_rw.lck_rw_want_excl)
		lck->lck_rw.lck_rw_want_excl = FALSE;
	else
		panic("rw lock (0x%08X) bad state (0x%08X) on attempt to release a shared or exlusive right\n",
		      rlck, lck->lck_rw);

	if (lck->lck_rw_deb.thread == THREAD_NULL)
		panic("rw lock (0x%08X) not held\n",
		      rlck);
	else if ( ((lck->lck_rw_attr & (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_THREAD)) == LCK_RW_ATTR_DEBUG)
		  && (lck->lck_rw_deb.thread != current_thread()))
		panic("rw lock (0x%08X) unlocked by non-owner(0x%08X), current owner(0x%08X)\n",
		      rlck, current_thread(), lck->lck_rw_deb.thread);

	lck->lck_rw_deb.thread = THREAD_NULL;

	if (lck->lck_rw.lck_rw_waiting) {
		lck->lck_rw.lck_rw_waiting = FALSE;
		do_wakeup = TRUE;
	}

	lck_rw_ilk_unlock(&lck->lck_rw);

	if (do_wakeup)
		thread_wakeup((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_END,
		     (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, lck->lck_rw.lck_rw_shared_cnt, 0);
}
/*
 *      Routine:        lck_rw_try_lock_exclusive_ext
 *      Function:
 *		Tries to get a write lock.
 *
 *		Returns FALSE if the lock is not held on return.
 */

boolean_t
lck_rw_try_lock_exclusive_ext(
	lck_rw_ext_t	*lck,
	lck_rw_t	*rlck)
{
	boolean_t	lock_stat;

	lck_rw_check_type(lck, rlck);

	lck_rw_ilk_lock(&lck->lck_rw);

	lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;

	if (lock_stat)
		lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;

	if (lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade || lck->lck_rw.lck_rw_shared_cnt) {
		if (lock_stat)
			lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
		lck_rw_ilk_unlock(&lck->lck_rw);
		return(FALSE);
	}

	lck->lck_rw.lck_rw_want_excl = TRUE;
	lck->lck_rw_deb.pc_excl = __builtin_return_address(0);
	if (LcksOpts & enaLkExtStck)
		lck_rw_ext_backtrace(lck);
	lck->lck_rw_deb.thread = current_thread();

	lck_rw_ilk_unlock(&lck->lck_rw);

	return(TRUE);
}
/*
 *      Routine:        lck_rw_try_lock_shared_ext
 *      Function:
 *		Tries to get a read lock.
 *
 *		Returns FALSE if the lock is not held on return.
 */

boolean_t
lck_rw_try_lock_shared_ext(
	lck_rw_ext_t	*lck,
	lck_rw_t	*rlck)
{
	boolean_t	lock_stat;

	lck_rw_check_type(lck, rlck);

	lck_rw_ilk_lock(&lck->lck_rw);

	lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;

	if (lock_stat)
		lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;

	if (lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade) {
		if (lock_stat)
			lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
		lck_rw_ilk_unlock(&lck->lck_rw);
		return(FALSE);
	}

	lck->lck_rw.lck_rw_shared_cnt++;

	lck_rw_ilk_unlock(&lck->lck_rw);

	return(TRUE);
}
/*
 *      Routine:        lck_rw_check_type
 */
void
lck_rw_check_type(
	lck_rw_ext_t	*lck,
	lck_rw_t	*rlck)
{
	if (lck->lck_rw_deb.type != RW_TAG)
		panic("rw lock (0x%08X) not a rw lock type (0x%08X)\n", rlck, lck->lck_rw_deb.type);
}
/*
 * The C portion of the mutex package.  These routines are only invoked
 * if the optimized assembler routines can't do the work.
 */

/*
 *	Forward definition
 */

void lck_mtx_ext_init(
	lck_mtx_ext_t	*lck,
	lck_grp_t	*grp,
	lck_attr_t	*attr);
/*
 *      Routine:        mutex_alloc
 *      Function:
 *		Allocate a mutex for external users who cannot
 *		hard-code the structure definition into their
 *		objects.
 *		For now just use kalloc, but a zone is probably
 *		warranted.
 */
mutex_t *
mutex_alloc(
	unsigned short	tag)
{
	mutex_t	*m;

	if ((m = (mutex_t *)kalloc(sizeof(mutex_t))) != 0)
		mutex_init(m, tag);
	return(m);
}

/*
 *      Routine:        mutex_free
 */
void
mutex_free(
	mutex_t	*m)
{
	kfree((void *)m, sizeof(mutex_t));
}
/*
 *      Routine:        lck_mtx_alloc_init
 */
lck_mtx_t *
lck_mtx_alloc_init(
	lck_grp_t	*grp,
	lck_attr_t	*attr) {
	lck_mtx_t	*lck;

	if ((lck = (lck_mtx_t *)kalloc(sizeof(lck_mtx_t))) != 0)
		lck_mtx_init(lck, grp, attr);

	return(lck);
}

/*
 *      Routine:        lck_mtx_free
 */
void
lck_mtx_free(
	lck_mtx_t	*lck,
	lck_grp_t	*grp) {
	lck_mtx_destroy(lck, grp);
	kfree((void *)lck, sizeof(lck_mtx_t));
}
/*
 *      Routine:        lck_mtx_init
 */
void
lck_mtx_init(
	lck_mtx_t	*lck,
	lck_grp_t	*grp,
	lck_attr_t	*attr) {
	lck_mtx_ext_t	*lck_ext;
	lck_attr_t	*lck_attr;

	if (attr != LCK_ATTR_NULL)
		lck_attr = attr;
	else
		lck_attr = &LockDefaultLckAttr;

	if ((lck_attr->lck_attr_val) & LCK_ATTR_DEBUG) {
		if ((lck_ext = (lck_mtx_ext_t *)kalloc(sizeof(lck_mtx_ext_t))) != 0) {
			lck_mtx_ext_init(lck_ext, grp, lck_attr);
			lck->lck_mtx_tag = LCK_MTX_TAG_INDIRECT;
			lck->lck_mtx_ptr = lck_ext;
		}
	} else {
		lck->lck_mtx_data = 0;
		lck->lck_mtx_waiters = 0;
		lck->lck_mtx_pri = 0;
	}
	lck_grp_reference(grp);
	lck_grp_lckcnt_incr(grp, LCK_TYPE_MTX);
}
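/*
 * Illustrative sketch of allocating and using a mutex via the routines above.
 * lck_mtx_lock()/lck_mtx_unlock() are assumed to be the usual entry points
 * provided elsewhere (kern/locks.h); the names here are hypothetical.
 */
#if 0	/* example only -- not compiled */
static void
example_mutex_usage(lck_grp_t *grp)
{
	lck_mtx_t	*mtx;

	mtx = lck_mtx_alloc_init(grp, LCK_ATTR_NULL);

	lck_mtx_lock(mtx);
	/* ... critical section; may block ... */
	lck_mtx_unlock(mtx);

	lck_mtx_free(mtx, grp);
}
#endif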
/*
 *      Routine:        lck_mtx_ext_init
 */
void
lck_mtx_ext_init(
	lck_mtx_ext_t	*lck,
	lck_grp_t	*grp,
	lck_attr_t	*attr) {

	bzero((void *)lck, sizeof(lck_mtx_ext_t));

	if ((attr->lck_attr_val) & LCK_ATTR_DEBUG) {
		lck->lck_mtx_deb.type = MUTEX_TAG;
		lck->lck_mtx_attr |= LCK_MTX_ATTR_DEBUG;
	}

	lck->lck_mtx_grp = grp;

	if (grp->lck_grp_attr & LCK_GRP_ATTR_STAT)
		lck->lck_mtx_attr |= LCK_MTX_ATTR_STAT;
}
/*
 *      Routine:        lck_mtx_destroy
 */
void
lck_mtx_destroy(
	lck_mtx_t	*lck,
	lck_grp_t	*grp) {
	boolean_t lck_is_indirect;

	if (lck->lck_mtx_tag == LCK_MTX_TAG_DESTROYED)
		return;
	lck_is_indirect = (lck->lck_mtx_tag == LCK_MTX_TAG_INDIRECT);
	lck->lck_mtx_tag = LCK_MTX_TAG_DESTROYED;
	if (lck_is_indirect)
		kfree((void *)lck->lck_mtx_ptr, sizeof(lck_mtx_ext_t));

	lck_grp_lckcnt_decr(grp, LCK_TYPE_MTX);
	lck_grp_deallocate(grp);
}
#if	MACH_KDB
/*
 * Routines to print out simple_locks and mutexes in a nicely-formatted
 * fashion.
 */

char *simple_lock_labels =	"ENTRY ILK THREAD DURATION CALLER";
char *mutex_labels =		"ENTRY LOCKED WAITERS THREAD CALLER";

void	db_print_simple_lock(
			simple_lock_t	addr);

void	db_print_mutex(
			mutex_t		*addr);

void
db_show_one_simple_lock (
	db_expr_t	addr,
	boolean_t	have_addr,
	db_expr_t	count,
	char		*modif)
{
	simple_lock_t	saddr = (simple_lock_t)addr;

	if (saddr == (simple_lock_t)0 || !have_addr) {
		db_error ("No simple_lock\n");
	}
#if	USLOCK_DEBUG
	else if (saddr->lock_type != USLOCK_TAG)
		db_error ("Not a simple_lock\n");
#endif	/* USLOCK_DEBUG */

	db_printf ("%s\n", simple_lock_labels);
	db_print_simple_lock (saddr);
}

void
db_print_simple_lock (
	simple_lock_t	addr)
{
	db_printf ("%08x %3d", addr, *hw_lock_addr(addr->interlock));
#if	USLOCK_DEBUG
	db_printf (" %08x", addr->debug.lock_thread);
	db_printf (" %08x ", addr->debug.duration[1]);
	db_printsym ((int)addr->debug.lock_pc, DB_STGY_ANY);
#endif	/* USLOCK_DEBUG */
	db_printf ("\n");
}
void
db_show_one_mutex (
	db_expr_t	addr,
	boolean_t	have_addr,
	db_expr_t	count,
	char		*modif)
{
	mutex_t		*maddr = (mutex_t *)addr;

	if (maddr == (mutex_t *)0 || !have_addr)
		db_error ("No mutex\n");
#if	MACH_LDEBUG
	else if (maddr->lck_mtx_deb.type != MUTEX_TAG)
		db_error ("Not a mutex\n");
#endif	/* MACH_LDEBUG */

	db_printf ("%s\n", mutex_labels);
	db_print_mutex (maddr);
}

void
db_print_mutex (
	mutex_t		*addr)
{
	db_printf ("%08x %6d %7d",
		   addr, *addr, addr->lck_mtx.lck_mtx_waiters);
#if	MACH_LDEBUG
	db_printf (" %08x ", addr->lck_mtx_deb.thread);
	db_printsym (addr->lck_mtx_deb.stack[0], DB_STGY_ANY);
#endif	/* MACH_LDEBUG */
	db_printf ("\n");
}
2044 db_printf("shared_count = 0x%x, %swant_upgrade, %swant_exclusive, ",
2045 lock
->lck_rw
.lck_rw_shared_cnt
,
2046 lock
->lck_rw
.lck_rw_want_upgrade
? "" : "!",
2047 lock
->lck_rw
.lck_rw_want_excl
? "" : "!");
2048 db_printf("%swaiting\n",
2049 lock
->lck_rw
.lck_rw_waiting
? "" : "!");
2050 db_printf("%sInterlock\n",
2051 lock
->lck_rw
.lck_rw_interlock
? "" : "!");
2054 #endif /* MACH_KDB */