1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 * @OSF_COPYRIGHT@
25 */
26 /*
27 * Mach Operating System
28 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
29 * All Rights Reserved.
30 *
31 * Permission to use, copy, modify and distribute this software and its
32 * documentation is hereby granted, provided that both the copyright
33 * notice and this permission notice appear in all copies of the
34 * software, derivative works or modified versions, and any portions
35 * thereof, and that both notices appear in supporting documentation.
36 *
37 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
38 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
39 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
40 *
41 * Carnegie Mellon requests users of this software to return to
42 *
43 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
44 * School of Computer Science
45 * Carnegie Mellon University
46 * Pittsburgh PA 15213-3890
47 *
48 * any improvements or extensions that they make and grant Carnegie Mellon
49 * the rights to redistribute these changes.
50 */
51 /*
52 * File: kern/lock.c
53 * Author: Avadis Tevanian, Jr., Michael Wayne Young
54 * Date: 1985
55 *
56 * Locking primitives implementation
57 */
58
59 #include <mach_kdb.h>
60 #include <mach_ldebug.h>
61
62 #include <kern/kalloc.h>
63 #include <kern/lock.h>
64 #include <kern/locks.h>
65 #include <kern/misc_protos.h>
66 #include <kern/thread.h>
67 #include <kern/processor.h>
68 #include <kern/sched_prim.h>
69 #include <kern/xpr.h>
70 #include <kern/debug.h>
71 #include <string.h>
72
73 #if MACH_KDB
74 #include <ddb/db_command.h>
75 #include <ddb/db_output.h>
76 #include <ddb/db_sym.h>
77 #include <ddb/db_print.h>
78 #endif /* MACH_KDB */
79
80 #ifdef __ppc__
81 #include <ppc/Firmware.h>
82 #endif
83
84 #include <sys/kdebug.h>
85
86 #define LCK_RW_LCK_EXCLUSIVE_CODE 0x100
87 #define LCK_RW_LCK_EXCLUSIVE1_CODE 0x101
88 #define LCK_RW_LCK_SHARED_CODE 0x102
89 #define LCK_RW_LCK_SH_TO_EX_CODE 0x103
90 #define LCK_RW_LCK_SH_TO_EX1_CODE 0x104
91 #define LCK_RW_LCK_EX_TO_SH_CODE 0x105
92
93
94 #define ANY_LOCK_DEBUG (USLOCK_DEBUG || LOCK_DEBUG || MUTEX_DEBUG)
95
96 unsigned int lock_wait_time[2] = { (unsigned int)-1, 0 } ;
97
98 /* Forwards */
99
100
101 #if USLOCK_DEBUG
102 /*
103 * Perform simple lock checks.
104 */
105 int uslock_check = 1;
106 int max_lock_loops = 100000000;
107 decl_simple_lock_data(extern , printf_lock)
108 decl_simple_lock_data(extern , panic_lock)
109 #if MACH_KDB
110 decl_simple_lock_data(extern , kdb_lock)
111 #endif /* MACH_KDB */
112 #endif /* USLOCK_DEBUG */
113
114
115 /*
116 * We often want to know the addresses of the callers
117 * of the various lock routines. However, this information
118 * is only used for debugging and statistics.
119 */
120 typedef void *pc_t;
121 #define INVALID_PC ((void *) VM_MAX_KERNEL_ADDRESS)
122 #define INVALID_THREAD ((void *) VM_MAX_KERNEL_ADDRESS)
123 #if ANY_LOCK_DEBUG
124 #define OBTAIN_PC(pc,l) ((pc) = (void *) GET_RETURN_PC(&(l)))
125 #else /* ANY_LOCK_DEBUG */
126 #ifdef lint
127 /*
128 * Eliminate lint complaints about unused local pc variables.
129 */
130 #define OBTAIN_PC(pc,l) ++pc
131 #else /* lint */
132 #define OBTAIN_PC(pc,l)
133 #endif /* lint */
134 #endif /* ANY_LOCK_DEBUG */
135
136
137 /*
138 * Portable lock package implementation of usimple_locks.
139 */
140
141 #if USLOCK_DEBUG
142 #define USLDBG(stmt) stmt
143 void usld_lock_init(usimple_lock_t, unsigned short);
144 void usld_lock_pre(usimple_lock_t, pc_t);
145 void usld_lock_post(usimple_lock_t, pc_t);
146 void usld_unlock(usimple_lock_t, pc_t);
147 void usld_lock_try_pre(usimple_lock_t, pc_t);
148 void usld_lock_try_post(usimple_lock_t, pc_t);
149 int usld_lock_common_checks(usimple_lock_t, char *);
150 #else /* USLOCK_DEBUG */
151 #define USLDBG(stmt)
152 #endif /* USLOCK_DEBUG */
153
154 /*
155 * Routine: lck_spin_alloc_init
156 */
157 lck_spin_t *
158 lck_spin_alloc_init(
159 lck_grp_t *grp,
160 lck_attr_t *attr) {
161 lck_spin_t *lck;
162
163 if ((lck = (lck_spin_t *)kalloc(sizeof(lck_spin_t))) != 0)
164 lck_spin_init(lck, grp, attr);
165
166 return(lck);
167 }
168
169 /*
170 * Routine: lck_spin_free
171 */
172 void
173 lck_spin_free(
174 lck_spin_t *lck,
175 lck_grp_t *grp) {
176 lck_spin_destroy(lck, grp);
177 kfree((void *)lck, sizeof(lck_spin_t));
178 }
179
180 /*
181 * Routine: lck_spin_init
182 */
183 void
184 lck_spin_init(
185 lck_spin_t *lck,
186 lck_grp_t *grp,
187 __unused lck_attr_t *attr) {
188
189 lck->interlock = 0;
190 lck_grp_reference(grp);
191 lck_grp_lckcnt_incr(grp, LCK_TYPE_SPIN);
192 }
193
194 /*
195 * Routine: lck_spin_destroy
196 */
197 void
198 lck_spin_destroy(
199 lck_spin_t *lck,
200 lck_grp_t *grp) {
201 if (lck->interlock == LCK_SPIN_TAG_DESTROYED)
202 return;
203 lck->interlock = LCK_SPIN_TAG_DESTROYED;
204 lck_grp_lckcnt_decr(grp, LCK_TYPE_SPIN);
205 lck_grp_deallocate(grp);
206 }
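/*
 * Illustrative usage sketch for the lck_spin interface above (compiled out,
 * not part of this file).  lck_spin_alloc_init() and lck_spin_free() are
 * defined above; lck_spin_lock() and lck_spin_unlock() are the exported
 * spin-lock primitives declared in kern/locks.h; the lock group passed in
 * is assumed to have been created by the caller.
 */
#if 0
static void
example_spin_usage(lck_grp_t *grp)
{
	lck_spin_t *spin;

	spin = lck_spin_alloc_init(grp, LCK_ATTR_NULL);

	lck_spin_lock(spin);		/* busy-waits; keep the critical section short */
	/* ... short, non-blocking critical section ... */
	lck_spin_unlock(spin);

	lck_spin_free(spin, grp);	/* destroys the lock and frees its storage */
}
#endif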
207
208 /*
209 * Initialize a usimple_lock.
210 *
211 * No change in preemption state.
212 */
213 void
214 usimple_lock_init(
215 usimple_lock_t l,
216 unsigned short tag)
217 {
218 #ifndef MACHINE_SIMPLE_LOCK
219 USLDBG(usld_lock_init(l, tag));
220 hw_lock_init(&l->interlock);
221 #else
222 simple_lock_init((simple_lock_t)l,tag);
223 #endif
224 }
225
226
227 /*
228 * Acquire a usimple_lock.
229 *
230 * Returns with preemption disabled. Note
231 * that the hw_lock routines are responsible for
232 * maintaining preemption state.
233 */
234 void
235 usimple_lock(
236 usimple_lock_t l)
237 {
238 #ifndef MACHINE_SIMPLE_LOCK
239 int i;
240 pc_t pc;
241 #if USLOCK_DEBUG
242 int count = 0;
243 #endif /* USLOCK_DEBUG */
244
245 OBTAIN_PC(pc, l);
246 USLDBG(usld_lock_pre(l, pc));
247
248 if(!hw_lock_to(&l->interlock, LockTimeOut)) /* Try to get the lock with a timeout */
249 panic("simple lock deadlock detection - l=0x%08X, cpu=%d, ret=0x%08X", l, cpu_number(), pc);
250
251 USLDBG(usld_lock_post(l, pc));
252 #else
253 simple_lock((simple_lock_t)l);
254 #endif
255 }
256
257
258 /*
259 * Release a usimple_lock.
260 *
261 * Returns with preemption enabled. Note
262 * that the hw_lock routines are responsible for
263 * maintaining preemption state.
264 */
265 void
266 usimple_unlock(
267 usimple_lock_t l)
268 {
269 #ifndef MACHINE_SIMPLE_LOCK
270 pc_t pc;
271
272 OBTAIN_PC(pc, l);
273 USLDBG(usld_unlock(l, pc));
274 sync();
275 hw_lock_unlock(&l->interlock);
276 #else
277 simple_unlock_rwmb((simple_lock_t)l);
278 #endif
279 }
280
281
282 /*
283 * Conditionally acquire a usimple_lock.
284 *
285 * On success, returns with preemption disabled.
286 * On failure, returns with preemption in the same state
287 * as when first invoked. Note that the hw_lock routines
288 * are responsible for maintaining preemption state.
289 *
290 * XXX No stats are gathered on a miss; I preserved this
291 * behavior from the original assembly-language code, but
292 * doesn't it make sense to log misses? XXX
293 */
294 unsigned int
295 usimple_lock_try(
296 usimple_lock_t l)
297 {
298 #ifndef MACHINE_SIMPLE_LOCK
299 pc_t pc;
300 unsigned int success;
301
302 OBTAIN_PC(pc, l);
303 USLDBG(usld_lock_try_pre(l, pc));
304 	if ((success = hw_lock_try(&l->interlock))) {
305 USLDBG(usld_lock_try_post(l, pc));
306 }
307 return success;
308 #else
309 return(simple_lock_try((simple_lock_t)l));
310 #endif
311 }
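/*
 * Illustrative usage sketch for the usimple_lock interface above (compiled
 * out, not part of this file).  Only usimple_lock_init(), usimple_lock(),
 * usimple_lock_try() and usimple_unlock() are taken from this file; the
 * caller, the lock it passes in and the tag value are assumptions.
 */
#if 0
static void
example_usimple_usage(usimple_lock_t l)
{
	usimple_lock_init(l, 0);	/* example tag value */

	usimple_lock(l);		/* returns with preemption disabled */
	/* ... touch the data guarded by the lock ... */
	usimple_unlock(l);		/* preemption enabled again */

	if (usimple_lock_try(l)) {	/* non-blocking attempt */
		/* ... lock held, preemption disabled ... */
		usimple_unlock(l);
	}
}
#endif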
312
313 #if USLOCK_DEBUG
314 /*
315 * States of a usimple_lock. The default when initializing
316 * a usimple_lock is setting it up for debug checking.
317 */
318 #define USLOCK_CHECKED 0x0001 /* lock is being checked */
319 #define USLOCK_TAKEN 0x0002 /* lock has been taken */
320 #define USLOCK_INIT 0xBAA0 /* lock has been initialized */
321 #define USLOCK_INITIALIZED (USLOCK_INIT|USLOCK_CHECKED)
322 #define USLOCK_CHECKING(l) (uslock_check && \
323 ((l)->debug.state & USLOCK_CHECKED))
324
325 /*
326 * Trace activities of a particularly interesting lock.
327 */
328 void usl_trace(usimple_lock_t, int, pc_t, const char *);
329
330
331 /*
332 * Initialize the debugging information contained
333 * in a usimple_lock.
334 */
335 void
336 usld_lock_init(
337 usimple_lock_t l,
338 unsigned short tag)
339 {
340 if (l == USIMPLE_LOCK_NULL)
341 panic("lock initialization: null lock pointer");
342 l->lock_type = USLOCK_TAG;
343 l->debug.state = uslock_check ? USLOCK_INITIALIZED : 0;
344 l->debug.lock_cpu = l->debug.unlock_cpu = 0;
345 l->debug.lock_pc = l->debug.unlock_pc = INVALID_PC;
346 l->debug.lock_thread = l->debug.unlock_thread = INVALID_THREAD;
347 l->debug.duration[0] = l->debug.duration[1] = 0;
351 }
352
353
354 /*
355 * These checks apply to all usimple_locks, not just
356 * those with USLOCK_CHECKED turned on.
357 */
358 int
359 usld_lock_common_checks(
360 usimple_lock_t l,
361 char *caller)
362 {
363 if (l == USIMPLE_LOCK_NULL)
364 panic("%s: null lock pointer", caller);
365 if (l->lock_type != USLOCK_TAG)
366 panic("%s: 0x%x is not a usimple lock", caller, (integer_t) l);
367 if (!(l->debug.state & USLOCK_INIT))
368 panic("%s: 0x%x is not an initialized lock",
369 caller, (integer_t) l);
370 return USLOCK_CHECKING(l);
371 }
372
373
374 /*
375 * Debug checks on a usimple_lock just before attempting
376 * to acquire it.
377 */
378 /* ARGSUSED */
379 void
380 usld_lock_pre(
381 usimple_lock_t l,
382 pc_t pc)
383 {
384 char *caller = "usimple_lock";
385
386
387 if (!usld_lock_common_checks(l, caller))
388 return;
389
390 /*
391 	 * Note that we have a weird case where we are getting a lock while we are
392 	 * in the process of putting the system to sleep. We are running with no
393 	 * current thread, so we can't tell whether we are trying to retake a lock
394 	 * we already hold or whether another processor holds it. Therefore we just
395 	 * skip this check if the locking thread is 0.
396 */
397
398 if ((l->debug.state & USLOCK_TAKEN) && l->debug.lock_thread &&
399 l->debug.lock_thread == (void *) current_thread()) {
400 printf("%s: lock 0x%x already locked (at 0x%x) by",
401 caller, (integer_t) l, l->debug.lock_pc);
402 printf(" current thread 0x%x (new attempt at pc 0x%x)\n",
403 l->debug.lock_thread, pc);
404 panic(caller);
405 }
406 mp_disable_preemption();
407 usl_trace(l, cpu_number(), pc, caller);
408 mp_enable_preemption();
409 }
410
411
412 /*
413 * Debug checks on a usimple_lock just after acquiring it.
414 *
415 * Pre-emption has been disabled at this point,
416 * so we are safe in using cpu_number.
417 */
418 void
419 usld_lock_post(
420 usimple_lock_t l,
421 pc_t pc)
422 {
423 register int mycpu;
424 char *caller = "successful usimple_lock";
425
426
427 if (!usld_lock_common_checks(l, caller))
428 return;
429
430 if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
431 panic("%s: lock 0x%x became uninitialized",
432 caller, (integer_t) l);
433 if ((l->debug.state & USLOCK_TAKEN))
434 panic("%s: lock 0x%x became TAKEN by someone else",
435 caller, (integer_t) l);
436
437 mycpu = cpu_number();
438 l->debug.lock_thread = (void *)current_thread();
439 l->debug.state |= USLOCK_TAKEN;
440 l->debug.lock_pc = pc;
441 l->debug.lock_cpu = mycpu;
442
443 usl_trace(l, mycpu, pc, caller);
444 }
445
446
447 /*
448 * Debug checks on a usimple_lock just before
449 * releasing it. Note that the caller has not
450 * yet released the hardware lock.
451 *
452 * Preemption is still disabled, so there's
453 * no problem using cpu_number.
454 */
455 void
456 usld_unlock(
457 usimple_lock_t l,
458 pc_t pc)
459 {
460 register int mycpu;
461 char *caller = "usimple_unlock";
462
463
464 if (!usld_lock_common_checks(l, caller))
465 return;
466
467 mycpu = cpu_number();
468
469 if (!(l->debug.state & USLOCK_TAKEN))
470 panic("%s: lock 0x%x hasn't been taken",
471 caller, (integer_t) l);
472 if (l->debug.lock_thread != (void *) current_thread())
473 panic("%s: unlocking lock 0x%x, owned by thread 0x%x",
474 caller, (integer_t) l, l->debug.lock_thread);
475 if (l->debug.lock_cpu != mycpu) {
476 printf("%s: unlocking lock 0x%x on cpu 0x%x",
477 caller, (integer_t) l, mycpu);
478 printf(" (acquired on cpu 0x%x)\n", l->debug.lock_cpu);
479 panic(caller);
480 }
481 usl_trace(l, mycpu, pc, caller);
482
483 l->debug.unlock_thread = l->debug.lock_thread;
484 	l->debug.lock_thread = INVALID_THREAD;
485 l->debug.state &= ~USLOCK_TAKEN;
486 l->debug.unlock_pc = pc;
487 l->debug.unlock_cpu = mycpu;
488 }
489
490
491 /*
492 * Debug checks on a usimple_lock just before
493 * attempting to acquire it.
494 *
495 * Preemption isn't guaranteed to be disabled.
496 */
497 void
498 usld_lock_try_pre(
499 usimple_lock_t l,
500 pc_t pc)
501 {
502 char *caller = "usimple_lock_try";
503
504 if (!usld_lock_common_checks(l, caller))
505 return;
506 mp_disable_preemption();
507 usl_trace(l, cpu_number(), pc, caller);
508 mp_enable_preemption();
509 }
510
511
512 /*
513 * Debug checks on a usimple_lock just after
514 * successfully attempting to acquire it.
515 *
516 * Preemption has been disabled by the
517 * lock acquisition attempt, so it's safe
518 * to use cpu_number.
519 */
520 void
521 usld_lock_try_post(
522 usimple_lock_t l,
523 pc_t pc)
524 {
525 register int mycpu;
526 char *caller = "successful usimple_lock_try";
527
528 if (!usld_lock_common_checks(l, caller))
529 return;
530
531 if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
532 panic("%s: lock 0x%x became uninitialized",
533 caller, (integer_t) l);
534 if ((l->debug.state & USLOCK_TAKEN))
535 panic("%s: lock 0x%x became TAKEN by someone else",
536 caller, (integer_t) l);
537
538 mycpu = cpu_number();
539 l->debug.lock_thread = (void *) current_thread();
540 l->debug.state |= USLOCK_TAKEN;
541 l->debug.lock_pc = pc;
542 l->debug.lock_cpu = mycpu;
543
544 usl_trace(l, mycpu, pc, caller);
545 }
546
547
548 /*
549 * For very special cases, set traced_lock to point to a
550 * specific lock of interest. The result is a series of
551 * XPRs showing lock operations on that lock. The lock_seq
552 * value is used to show the order of those operations.
553 */
554 usimple_lock_t traced_lock;
555 unsigned int lock_seq;
556
557 void
558 usl_trace(
559 usimple_lock_t l,
560 int mycpu,
561 pc_t pc,
562 const char * op_name)
563 {
564 if (traced_lock == l) {
565 XPR(XPR_SLOCK,
566 "seq %d, cpu %d, %s @ %x\n",
567 (integer_t) lock_seq, (integer_t) mycpu,
568 (integer_t) op_name, (integer_t) pc, 0);
569 lock_seq++;
570 }
571 }
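/*
 * Illustrative sketch (compiled out): pointing traced_lock at a lock of
 * interest, e.g. from early initialization or the debugger, makes every
 * subsequent operation on that lock emit an XPR record via usl_trace()
 * above.  The helper and the lock it receives are hypothetical.
 */
#if 0
static void
example_trace_this_lock(usimple_lock_t l)
{
	traced_lock = l;	/* usl_trace() now logs operations on l */
	lock_seq = 0;		/* restart the operation sequence counter */
}
#endif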
572
573
574 #endif /* USLOCK_DEBUG */
575
576 /*
577 * The C portion of the shared/exclusive locks package.
578 */
579
580 /*
581 * Forward definition
582 */
583
584 void lck_rw_lock_exclusive_gen(
585 lck_rw_t *lck);
586
587 lck_rw_type_t lck_rw_done_gen(
588 lck_rw_t *lck);
589
590 void
591 lck_rw_lock_shared_gen(
592 lck_rw_t *lck);
593
594 boolean_t
595 lck_rw_lock_shared_to_exclusive_gen(
596 lck_rw_t *lck);
597
598 void
599 lck_rw_lock_exclusive_to_shared_gen(
600 lck_rw_t *lck);
601
602 boolean_t
603 lck_rw_try_lock_exclusive_gen(
604 lck_rw_t *lck);
605
606 boolean_t
607 lck_rw_try_lock_shared_gen(
608 lck_rw_t *lck);
609
610 void lck_rw_ext_init(
611 lck_rw_ext_t *lck,
612 lck_grp_t *grp,
613 lck_attr_t *attr);
614
615 void lck_rw_ext_backtrace(
616 lck_rw_ext_t *lck);
617
618 void lck_rw_lock_exclusive_ext(
619 lck_rw_ext_t *lck,
620 lck_rw_t *rlck);
621
622 lck_rw_type_t lck_rw_done_ext(
623 lck_rw_ext_t *lck,
624 lck_rw_t *rlck);
625
626 void
627 lck_rw_lock_shared_ext(
628 lck_rw_ext_t *lck,
629 lck_rw_t *rlck);
630
631 boolean_t
632 lck_rw_lock_shared_to_exclusive_ext(
633 lck_rw_ext_t *lck,
634 lck_rw_t *rlck);
635
636 void
637 lck_rw_lock_exclusive_to_shared_ext(
638 lck_rw_ext_t *lck,
639 lck_rw_t *rlck);
640
641 boolean_t
642 lck_rw_try_lock_exclusive_ext(
643 lck_rw_ext_t *lck,
644 lck_rw_t *rlck);
645
646 boolean_t
647 lck_rw_try_lock_shared_ext(
648 lck_rw_ext_t *lck,
649 lck_rw_t *rlck);
650
651 void
652 lck_rw_ilk_lock(
653 lck_rw_t *lck);
654
655 void
656 lck_rw_ilk_unlock(
657 lck_rw_t *lck);
658
659 void
660 lck_rw_check_type(
661 lck_rw_ext_t *lck,
662 lck_rw_t *rlck);
663
664 /*
665 * Routine: lock_alloc
666 * Function:
667 * Allocate a lock for external users who cannot
668 * hard-code the structure definition into their
669 * objects.
670 * For now just use kalloc, but a zone is probably
671 * warranted.
672 */
673 lock_t *
674 lock_alloc(
675 boolean_t can_sleep,
676 __unused unsigned short tag,
677 __unused unsigned short tag1)
678 {
679 lock_t *lck;
680
681 if ((lck = (lock_t *)kalloc(sizeof(lock_t))) != 0)
682 lock_init(lck, can_sleep, tag, tag1);
683 return(lck);
684 }
685
686 /*
687 * Routine: lock_init
688 * Function:
689 * Initialize a lock; required before use.
690 * Note that clients declare the "struct lock"
691 * variables and then initialize them, rather
692 * than getting a new one from this module.
693 */
694 void
695 lock_init(
696 lock_t *lck,
697 boolean_t can_sleep,
698 __unused unsigned short tag,
699 __unused unsigned short tag1)
700 {
701 if (!can_sleep)
702 panic("lock_init: sleep mode must be set to TRUE\n");
703
704 (void) memset((void *) lck, 0, sizeof(lock_t));
705 #if MACH_LDEBUG
706 lck->lck_rw_deb.type = RW_TAG;
707 lck->lck_rw_attr |= (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_THREAD|LCK_RW_ATTR_DIS_MYLOCK);
708 #endif
709
710 }
711
712
713 /*
714 * Routine: lock_free
715 * Function:
716 * Free a lock allocated for external users.
717 * For now just use kfree, but a zone is probably
718 * warranted.
719 */
720 void
721 lock_free(
722 lock_t *lck)
723 {
724 kfree((void *)lck, sizeof(lock_t));
725 }
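/*
 * Illustrative usage sketch for the external lock_t interface (compiled out,
 * not part of this file).  lock_alloc() and lock_free() are defined above;
 * lock_write(), lock_read() and lock_done() belong to the same exported
 * interface (see kern/lock.h and the MACH_LDEBUG wrappers below); the tag
 * values passed here are arbitrary examples.
 */
#if 0
static void
example_lock_t_usage(void)
{
	lock_t *l;

	l = lock_alloc(TRUE, 0, 0);	/* must be sleepable, see lock_init() */

	lock_write(l);			/* exclusive (write) hold */
	/* ... modify the shared object ... */
	lock_done(l);			/* drops whichever hold is held */

	lock_read(l);			/* shared (read) hold */
	/* ... inspect the shared object ... */
	lock_done(l);

	lock_free(l);
}
#endif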
726
727 #if MACH_LDEBUG
728 void
729 lock_write(
730 lock_t *lck)
731 {
732 lck_rw_lock_exclusive_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
733 }
734
735 void
736 lock_done(
737 lock_t *lck)
738 {
739 (void)lck_rw_done_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
740 }
741
742 void
743 lock_read(
744 lock_t *lck)
745 {
746 lck_rw_lock_shared_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
747 }
748
749 boolean_t
750 lock_read_to_write(
751 lock_t *lck)
752 {
753 return(lck_rw_lock_shared_to_exclusive_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck));
754 }
755
756 void
757 lock_write_to_read(
758 register lock_t *lck)
759 {
760 lck_rw_lock_exclusive_to_shared_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
761 }
762 #endif
763
764 /*
765 * Routine: lck_rw_alloc_init
766 */
767 lck_rw_t *
768 lck_rw_alloc_init(
769 lck_grp_t *grp,
770 lck_attr_t *attr) {
771 lck_rw_t *lck;
772
773 if ((lck = (lck_rw_t *)kalloc(sizeof(lck_rw_t))) != 0)
774 lck_rw_init(lck, grp, attr);
775
776 return(lck);
777 }
778
779 /*
780 * Routine: lck_rw_free
781 */
782 void
783 lck_rw_free(
784 lck_rw_t *lck,
785 lck_grp_t *grp) {
786 lck_rw_destroy(lck, grp);
787 kfree((void *)lck, sizeof(lck_rw_t));
788 }
789
790 /*
791 * Routine: lck_rw_init
792 */
793 void
794 lck_rw_init(
795 lck_rw_t *lck,
796 lck_grp_t *grp,
797 lck_attr_t *attr) {
798 lck_rw_ext_t *lck_ext;
799 lck_attr_t *lck_attr;
800
801 if (attr != LCK_ATTR_NULL)
802 lck_attr = attr;
803 else
804 lck_attr = &LockDefaultLckAttr;
805
806 if ((lck_attr->lck_attr_val) & LCK_ATTR_DEBUG) {
807 if ((lck_ext = (lck_rw_ext_t *)kalloc(sizeof(lck_rw_ext_t))) != 0) {
808 lck_rw_ext_init(lck_ext, grp, lck_attr);
809 lck->lck_rw_tag = LCK_RW_TAG_INDIRECT;
810 lck->lck_rw_ptr = lck_ext;
811 }
812 } else {
813 (void) memset((void *) lck, 0, sizeof(lck_rw_t));
814 }
815
816 lck_grp_reference(grp);
817 lck_grp_lckcnt_incr(grp, LCK_TYPE_RW);
818 }
819
820 /*
821 * Routine: lck_rw_ext_init
822 */
823 void
824 lck_rw_ext_init(
825 lck_rw_ext_t *lck,
826 lck_grp_t *grp,
827 lck_attr_t *attr) {
828
829 bzero((void *)lck, sizeof(lck_rw_ext_t));
830
831 if ((attr->lck_attr_val) & LCK_ATTR_DEBUG) {
832 lck->lck_rw_deb.type = RW_TAG;
833 lck->lck_rw_attr |= LCK_RW_ATTR_DEBUG;
834 }
835
836 lck->lck_rw_grp = grp;
837
838 if (grp->lck_grp_attr & LCK_GRP_ATTR_STAT)
839 lck->lck_rw_attr |= LCK_RW_ATTR_STAT;
840 }
841
842 /*
843 * Routine: lck_rw_destroy
844 */
845 void
846 lck_rw_destroy(
847 lck_rw_t *lck,
848 lck_grp_t *grp) {
849 boolean_t lck_is_indirect;
850
851 if (lck->lck_rw_tag == LCK_RW_TAG_DESTROYED)
852 return;
853 lck_is_indirect = (lck->lck_rw_tag == LCK_RW_TAG_INDIRECT);
854 lck->lck_rw_tag = LCK_RW_TAG_DESTROYED;
855 if (lck_is_indirect)
856 kfree((void *)lck->lck_rw_ptr, sizeof(lck_rw_ext_t));
857
858 lck_grp_lckcnt_decr(grp, LCK_TYPE_RW);
859 lck_grp_deallocate(grp);
860 return;
861 }
862
863 /*
864 * Routine: lck_rw_lock
865 */
866 void
867 lck_rw_lock(
868 lck_rw_t *lck,
869 lck_rw_type_t lck_rw_type)
870 {
871 if (lck_rw_type == LCK_RW_TYPE_SHARED)
872 lck_rw_lock_shared(lck);
873 else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
874 lck_rw_lock_exclusive(lck);
875 else
876 panic("lck_rw_lock(): Invalid RW lock type: %d\n", lck_rw_type);
877 }
878
879
880 /*
881 * Routine: lck_rw_unlock
882 */
883 void
884 lck_rw_unlock(
885 lck_rw_t *lck,
886 lck_rw_type_t lck_rw_type)
887 {
888 if (lck_rw_type == LCK_RW_TYPE_SHARED)
889 lck_rw_unlock_shared(lck);
890 else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
891 lck_rw_unlock_exclusive(lck);
892 else
893 panic("lck_rw_unlock(): Invalid RW lock type: %d\n", lck_rw_type);
894 }
895
896
897 /*
898 * Routine: lck_rw_unlock_shared
899 */
900 void
901 lck_rw_unlock_shared(
902 lck_rw_t *lck)
903 {
904 lck_rw_type_t ret;
905
906 ret = lck_rw_done(lck);
907
908 if (ret != LCK_RW_TYPE_SHARED)
909 		panic("lck_rw_unlock_shared(): lock held in mode: %d\n", ret);
910 }
911
912
913 /*
914 * Routine: lck_rw_unlock_exclusive
915 */
916 void
917 lck_rw_unlock_exclusive(
918 lck_rw_t *lck)
919 {
920 lck_rw_type_t ret;
921
922 ret = lck_rw_done(lck);
923
924 if (ret != LCK_RW_TYPE_EXCLUSIVE)
925 panic("lck_rw_unlock_exclusive(): lock held in mode: %d\n", ret);
926 }
927
928
929 /*
930 * Routine: lck_rw_try_lock
931 */
932 boolean_t
933 lck_rw_try_lock(
934 lck_rw_t *lck,
935 lck_rw_type_t lck_rw_type)
936 {
937 if (lck_rw_type == LCK_RW_TYPE_SHARED)
938 return(lck_rw_try_lock_shared(lck));
939 else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
940 return(lck_rw_try_lock_exclusive(lck));
941 else
942 panic("lck_rw_try_lock(): Invalid rw lock type: %x\n", lck_rw_type);
943 return(FALSE);
944 }
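/*
 * Illustrative usage sketch for the lck_rw interface (compiled out, not part
 * of this file).  lck_rw_alloc_init(), lck_rw_lock(), lck_rw_unlock() and
 * lck_rw_free() are defined in this file; LCK_ATTR_NULL and the
 * lck_rw_type_t constants come from kern/locks.h; the lock group and the
 * data being protected are assumed to exist elsewhere.
 */
#if 0
static void
example_rw_usage(lck_grp_t *grp)
{
	lck_rw_t *rwlock;

	rwlock = lck_rw_alloc_init(grp, LCK_ATTR_NULL);

	lck_rw_lock(rwlock, LCK_RW_TYPE_SHARED);	/* many readers may hold this */
	/* ... read the protected data ... */
	lck_rw_unlock(rwlock, LCK_RW_TYPE_SHARED);

	lck_rw_lock(rwlock, LCK_RW_TYPE_EXCLUSIVE);	/* single writer */
	/* ... modify the protected data ... */
	lck_rw_unlock(rwlock, LCK_RW_TYPE_EXCLUSIVE);

	lck_rw_free(rwlock, grp);
}
#endif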
945
946
947
948 /*
949 * Routine: lck_rw_lock_exclusive_gen
950 */
951 void
952 lck_rw_lock_exclusive_gen(
953 lck_rw_t *lck)
954 {
955 int i;
956 boolean_t lock_miss = FALSE;
957 wait_result_t res;
958
959 lck_rw_ilk_lock(lck);
960
961 /*
962 * Try to acquire the lck_rw_want_excl bit.
963 */
964 while (lck->lck_rw_want_excl) {
965 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_START, (int)lck, 0, 0, 0, 0);
966
967 if (!lock_miss) {
968 lock_miss = TRUE;
969 }
970
971 i = lock_wait_time[1];
972 if (i != 0) {
973 lck_rw_ilk_unlock(lck);
974 while (--i != 0 && lck->lck_rw_want_excl)
975 continue;
976 lck_rw_ilk_lock(lck);
977 }
978
979 if (lck->lck_rw_want_excl) {
980 lck->lck_rw_waiting = TRUE;
981 res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
982 if (res == THREAD_WAITING) {
983 lck_rw_ilk_unlock(lck);
984 res = thread_block(THREAD_CONTINUE_NULL);
985 lck_rw_ilk_lock(lck);
986 }
987 }
988 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_END, (int)lck, res, 0, 0, 0);
989 }
990 lck->lck_rw_want_excl = TRUE;
991
992 /* Wait for readers (and upgrades) to finish */
993
994 while ((lck->lck_rw_shared_cnt != 0) || lck->lck_rw_want_upgrade) {
995 if (!lock_miss) {
996 lock_miss = TRUE;
997 }
998
999 i = lock_wait_time[1];
1000
1001 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_START,
1002 (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, i, 0);
1003
1004 if (i != 0) {
1005 lck_rw_ilk_unlock(lck);
1006 while (--i != 0 && (lck->lck_rw_shared_cnt != 0 ||
1007 lck->lck_rw_want_upgrade))
1008 continue;
1009 lck_rw_ilk_lock(lck);
1010 }
1011
1012 if (lck->lck_rw_shared_cnt != 0 || lck->lck_rw_want_upgrade) {
1013 lck->lck_rw_waiting = TRUE;
1014 res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1015 if (res == THREAD_WAITING) {
1016 lck_rw_ilk_unlock(lck);
1017 res = thread_block(THREAD_CONTINUE_NULL);
1018 lck_rw_ilk_lock(lck);
1019 }
1020 }
1021 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_END,
1022 (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, res, 0);
1023 }
1024
1025 lck_rw_ilk_unlock(lck);
1026 }
1027
1028
1029 /*
1030 * Routine: lck_rw_done_gen
1031 */
1032 lck_rw_type_t
1033 lck_rw_done_gen(
1034 lck_rw_t *lck)
1035 {
1036 boolean_t do_wakeup = FALSE;
1037 lck_rw_type_t lck_rw_type;
1038
1039
1040 lck_rw_ilk_lock(lck);
1041
1042 if (lck->lck_rw_shared_cnt != 0) {
1043 lck_rw_type = LCK_RW_TYPE_SHARED;
1044 lck->lck_rw_shared_cnt--;
1045 }
1046 else {
1047 lck_rw_type = LCK_RW_TYPE_EXCLUSIVE;
1048 if (lck->lck_rw_want_upgrade)
1049 lck->lck_rw_want_upgrade = FALSE;
1050 else
1051 lck->lck_rw_want_excl = FALSE;
1052 }
1053
1054 /*
1055 	 * There is no reason to wake up a lck_rw_waiting thread
1056 * if the read-count is non-zero. Consider:
1057 * we must be dropping a read lock
1058 * threads are waiting only if one wants a write lock
1059 * if there are still readers, they can't proceed
1060 */
1061
1062 if (lck->lck_rw_waiting && (lck->lck_rw_shared_cnt == 0)) {
1063 lck->lck_rw_waiting = FALSE;
1064 do_wakeup = TRUE;
1065 }
1066
1067 lck_rw_ilk_unlock(lck);
1068
1069 if (do_wakeup)
1070 thread_wakeup((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
1071 return(lck_rw_type);
1072 }
1073
1074
1075 /*
1076 * Routine: lck_rw_lock_shared_gen
1077 */
1078 void
1079 lck_rw_lock_shared_gen(
1080 lck_rw_t *lck)
1081 {
1082 int i;
1083 wait_result_t res;
1084
1085 lck_rw_ilk_lock(lck);
1086
1087 while (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) {
1088 i = lock_wait_time[1];
1089
1090 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_START,
1091 (int)lck, lck->lck_rw_want_excl, lck->lck_rw_want_upgrade, i, 0);
1092
1093 if (i != 0) {
1094 lck_rw_ilk_unlock(lck);
1095 while (--i != 0 && (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade))
1096 continue;
1097 lck_rw_ilk_lock(lck);
1098 }
1099
1100 if (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) {
1101 lck->lck_rw_waiting = TRUE;
1102 res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1103 if (res == THREAD_WAITING) {
1104 lck_rw_ilk_unlock(lck);
1105 res = thread_block(THREAD_CONTINUE_NULL);
1106 lck_rw_ilk_lock(lck);
1107 }
1108 }
1109 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_END,
1110 (int)lck, lck->lck_rw_want_excl, lck->lck_rw_want_upgrade, res, 0);
1111 }
1112
1113 lck->lck_rw_shared_cnt++;
1114
1115 lck_rw_ilk_unlock(lck);
1116 }
1117
1118
1119 /*
1120 * Routine: lck_rw_lock_shared_to_exclusive_gen
1121 * Function:
1122 * Improves a read-only lock to one with
1123 * write permission. If another reader has
1124 * already requested an upgrade to a write lock,
1125 * no lock is held upon return.
1126 *
1127 * Returns TRUE if the upgrade *failed*.
1128 */
1129
1130 boolean_t
1131 lck_rw_lock_shared_to_exclusive_gen(
1132 lck_rw_t *lck)
1133 {
1134 int i;
1135 boolean_t do_wakeup = FALSE;
1136 wait_result_t res;
1137
1138 lck_rw_ilk_lock(lck);
1139
1140 lck->lck_rw_shared_cnt--;
1141
1142 if (lck->lck_rw_want_upgrade) {
1143 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_START,
1144 (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, 0, 0);
1145
1146 /*
1147 * Someone else has requested upgrade.
1148 * Since we've released a read lock, wake
1149 * him up.
1150 */
1151 if (lck->lck_rw_waiting && (lck->lck_rw_shared_cnt == 0)) {
1152 lck->lck_rw_waiting = FALSE;
1153 do_wakeup = TRUE;
1154 }
1155
1156 lck_rw_ilk_unlock(lck);
1157
1158 if (do_wakeup)
1159 thread_wakeup((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
1160
1161 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_END,
1162 (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, 0, 0);
1163
1164 return (TRUE);
1165 }
1166
1167 lck->lck_rw_want_upgrade = TRUE;
1168
1169 while (lck->lck_rw_shared_cnt != 0) {
1170 i = lock_wait_time[1];
1171
1172 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_START,
1173 (int)lck, lck->lck_rw_shared_cnt, i, 0, 0);
1174
1175 if (i != 0) {
1176 lck_rw_ilk_unlock(lck);
1177 while (--i != 0 && lck->lck_rw_shared_cnt != 0)
1178 continue;
1179 lck_rw_ilk_lock(lck);
1180 }
1181
1182 if (lck->lck_rw_shared_cnt != 0) {
1183 lck->lck_rw_waiting = TRUE;
1184 res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1185 if (res == THREAD_WAITING) {
1186 lck_rw_ilk_unlock(lck);
1187 res = thread_block(THREAD_CONTINUE_NULL);
1188 lck_rw_ilk_lock(lck);
1189 }
1190 }
1191 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_END,
1192 (int)lck, lck->lck_rw_shared_cnt, 0, 0, 0);
1193 }
1194
1195 lck_rw_ilk_unlock(lck);
1196
1197 return (FALSE);
1198 }
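/*
 * Illustrative sketch (compiled out) of how a caller is expected to handle
 * the inverted return convention above: TRUE means the upgrade FAILED and
 * the shared hold has already been dropped.  This assumes the exported
 * lck_rw_lock_shared_to_exclusive() wrapper follows the same convention as
 * the _gen routine above; the helper itself is hypothetical.
 */
#if 0
static void
example_rw_upgrade(lck_rw_t *rwlock)
{
	lck_rw_lock_shared(rwlock);
	/* ... discover that exclusive access is needed after all ... */
	if (lck_rw_lock_shared_to_exclusive(rwlock)) {
		/*
		 * Upgrade failed: another thread already requested the
		 * upgrade and our shared hold is gone, so start over.
		 */
		lck_rw_lock_exclusive(rwlock);
	}
	/* ... exclusive access in either case ... */
	lck_rw_unlock_exclusive(rwlock);
}
#endif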
1199
1200 /*
1201 * Routine: lck_rw_lock_exclusive_to_shared_gen
1202 */
1203 void
1204 lck_rw_lock_exclusive_to_shared_gen(
1205 lck_rw_t *lck)
1206 {
1207 boolean_t do_wakeup = FALSE;
1208
1209 lck_rw_ilk_lock(lck);
1210
1211 lck->lck_rw_shared_cnt++;
1212 if (lck->lck_rw_want_upgrade)
1213 lck->lck_rw_want_upgrade = FALSE;
1214 else
1215 lck->lck_rw_want_excl = FALSE;
1216
1217 if (lck->lck_rw_waiting) {
1218 lck->lck_rw_waiting = FALSE;
1219 do_wakeup = TRUE;
1220 }
1221
1222 lck_rw_ilk_unlock(lck);
1223
1224 if (do_wakeup)
1225 thread_wakeup((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
1226
1227 }
1228
1229
1230 /*
1231 * Routine: lck_rw_try_lock_exclusive_gen
1232 * Function:
1233 * Tries to get a write lock.
1234 *
1235 * Returns FALSE if the lock is not held on return.
1236 */
1237
1238 boolean_t
1239 lck_rw_try_lock_exclusive_gen(
1240 lck_rw_t *lck)
1241 {
1242 lck_rw_ilk_lock(lck);
1243
1244 if (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade || lck->lck_rw_shared_cnt) {
1245 /*
1246 * Can't get lock.
1247 */
1248 lck_rw_ilk_unlock(lck);
1249 return(FALSE);
1250 }
1251
1252 /*
1253 * Have lock.
1254 */
1255
1256 lck->lck_rw_want_excl = TRUE;
1257
1258 lck_rw_ilk_unlock(lck);
1259
1260 return(TRUE);
1261 }
1262
1263 /*
1264 * Routine: lck_rw_try_lock_shared_gen
1265 * Function:
1266 * Tries to get a read lock.
1267 *
1268 * Returns FALSE if the lock is not held on return.
1269 */
1270
1271 boolean_t
1272 lck_rw_try_lock_shared_gen(
1273 lck_rw_t *lck)
1274 {
1275 lck_rw_ilk_lock(lck);
1276
1277 if (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) {
1278 lck_rw_ilk_unlock(lck);
1279 return(FALSE);
1280 }
1281
1282 lck->lck_rw_shared_cnt++;
1283
1284 lck_rw_ilk_unlock(lck);
1285
1286 return(TRUE);
1287 }
1288
1289
1290 /*
1291 * Routine: lck_rw_ext_backtrace
1292 */
1293 void
1294 lck_rw_ext_backtrace(
1295 lck_rw_ext_t *lck)
1296 {
1297 unsigned int *stackptr, *stackptr_prev;
1298 unsigned int frame;
1299
1300 __asm__ volatile("mr %0,r1" : "=r" (stackptr));
1301 frame = 0;
1302 while (frame < LCK_FRAMES_MAX) {
1303 stackptr_prev = stackptr;
1304 stackptr = ( unsigned int *)*stackptr;
1305 if ( (((unsigned int)stackptr_prev) ^ ((unsigned int)stackptr)) > 8192)
1306 break;
1307 lck->lck_rw_deb.stack[frame] = *(stackptr+2);
1308 frame++;
1309 }
1310 while (frame < LCK_FRAMES_MAX) {
1311 lck->lck_rw_deb.stack[frame] = 0;
1312 frame++;
1313 }
1314 }
1315
1316
1317 /*
1318 * Routine: lck_rw_lock_exclusive_ext
1319 */
1320 void
1321 lck_rw_lock_exclusive_ext(
1322 lck_rw_ext_t *lck,
1323 lck_rw_t *rlck)
1324 {
1325 int i;
1326 wait_result_t res;
1327 boolean_t lock_miss = FALSE;
1328 boolean_t lock_wait = FALSE;
1329 boolean_t lock_stat;
1330
1331 lck_rw_check_type(lck, rlck);
1332
1333 if ( ((lck->lck_rw_attr & (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_MYLOCK)) == LCK_RW_ATTR_DEBUG)
1334 && (lck->lck_rw_deb.thread == current_thread()))
1335 panic("rw lock (0x%08X) recursive lock attempt\n", rlck);
1336
1337 lck_rw_ilk_lock(&lck->lck_rw);
1338
1339 lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;
1340
1341 if (lock_stat)
1342 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;
1343
1344 /*
1345 * Try to acquire the lck_rw.lck_rw_want_excl bit.
1346 */
1347 while (lck->lck_rw.lck_rw_want_excl) {
1348 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_START, (int)rlck, 0, 0, 0, 0);
1349
1350 if (lock_stat && !lock_miss) {
1351 lock_miss = TRUE;
1352 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
1353 }
1354
1355 i = lock_wait_time[1];
1356 if (i != 0) {
1357 lck_rw_ilk_unlock(&lck->lck_rw);
1358 while (--i != 0 && lck->lck_rw.lck_rw_want_excl)
1359 continue;
1360 lck_rw_ilk_lock(&lck->lck_rw);
1361 }
1362
1363 if (lck->lck_rw.lck_rw_want_excl) {
1364 lck->lck_rw.lck_rw_waiting = TRUE;
1365 res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1366 if (res == THREAD_WAITING) {
1367 if (lock_stat && !lock_wait) {
1368 lock_wait = TRUE;
1369 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
1370 }
1371 lck_rw_ilk_unlock(&lck->lck_rw);
1372 res = thread_block(THREAD_CONTINUE_NULL);
1373 lck_rw_ilk_lock(&lck->lck_rw);
1374 }
1375 }
1376 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_END, (int)rlck, res, 0, 0, 0);
1377 }
1378 lck->lck_rw.lck_rw_want_excl = TRUE;
1379
1380 /* Wait for readers (and upgrades) to finish */
1381
1382 while ((lck->lck_rw.lck_rw_shared_cnt != 0) || lck->lck_rw.lck_rw_want_upgrade) {
1383 i = lock_wait_time[1];
1384
1385 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_START,
1386 (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, i, 0);
1387
1388 if (lock_stat && !lock_miss) {
1389 lock_miss = TRUE;
1390 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
1391 }
1392
1393 if (i != 0) {
1394 lck_rw_ilk_unlock(&lck->lck_rw);
1395 while (--i != 0 && (lck->lck_rw.lck_rw_shared_cnt != 0 ||
1396 lck->lck_rw.lck_rw_want_upgrade))
1397 continue;
1398 lck_rw_ilk_lock(&lck->lck_rw);
1399 }
1400
1401 if (lck->lck_rw.lck_rw_shared_cnt != 0 || lck->lck_rw.lck_rw_want_upgrade) {
1402 lck->lck_rw.lck_rw_waiting = TRUE;
1403 res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1404 if (res == THREAD_WAITING) {
1405 if (lock_stat && !lock_wait) {
1406 lock_wait = TRUE;
1407 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
1408 }
1409 lck_rw_ilk_unlock(&lck->lck_rw);
1410 res = thread_block(THREAD_CONTINUE_NULL);
1411 lck_rw_ilk_lock(&lck->lck_rw);
1412 }
1413 }
1414 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_END,
1415 (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, res, 0);
1416 }
1417
1418 lck->lck_rw_deb.pc_excl = __builtin_return_address(0);
1419 if (LcksOpts & enaLkExtStck)
1420 lck_rw_ext_backtrace(lck);
1421 lck->lck_rw_deb.thread = current_thread();
1422
1423 lck_rw_ilk_unlock(&lck->lck_rw);
1424 }
1425
1426
1427 /*
1428 * Routine: lck_rw_done_ext
1429 */
1430 lck_rw_type_t
1431 lck_rw_done_ext(
1432 lck_rw_ext_t *lck,
1433 lck_rw_t *rlck)
1434 {
1435 boolean_t do_wakeup = FALSE;
1436 lck_rw_type_t lck_rw_type;
1437
1438
1439 lck_rw_check_type(lck, rlck);
1440
1441 lck_rw_ilk_lock(&lck->lck_rw);
1442
1443 if (lck->lck_rw.lck_rw_shared_cnt != 0) {
1444 lck_rw_type = LCK_RW_TYPE_SHARED;
1445 lck->lck_rw.lck_rw_shared_cnt--;
1446 }
1447 else {
1448 lck_rw_type = LCK_RW_TYPE_EXCLUSIVE;
1449 if (lck->lck_rw.lck_rw_want_upgrade)
1450 lck->lck_rw.lck_rw_want_upgrade = FALSE;
1451 else if (lck->lck_rw.lck_rw_want_excl)
1452 lck->lck_rw.lck_rw_want_excl = FALSE;
1453 else
1454 panic("rw lock (0x%08X) bad state (0x%08X) on attempt to release a shared or exlusive right\n",
1455 rlck, lck->lck_rw);
1456 if (lck->lck_rw_deb.thread == THREAD_NULL)
1457 panic("rw lock (0x%08X) not held\n",
1458 rlck);
1459 else if ( ((lck->lck_rw_attr & (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_THREAD)) == LCK_RW_ATTR_DEBUG)
1460 && (lck->lck_rw_deb.thread != current_thread()))
1461 panic("rw lock (0x%08X) unlocked by non-owner(0x%08X), current owner(0x%08X)\n",
1462 rlck, current_thread(), lck->lck_rw_deb.thread);
1463 lck->lck_rw_deb.thread = THREAD_NULL;
1464 }
1465
1466 if (lck->lck_rw_attr & LCK_RW_ATTR_DEBUG)
1467 lck->lck_rw_deb.pc_done = __builtin_return_address(0);
1468
1469 /*
1470 	 * There is no reason to wake up a waiting thread
1471 * if the read-count is non-zero. Consider:
1472 * we must be dropping a read lock
1473 * threads are waiting only if one wants a write lock
1474 * if there are still readers, they can't proceed
1475 */
1476
1477 if (lck->lck_rw.lck_rw_waiting && (lck->lck_rw.lck_rw_shared_cnt == 0)) {
1478 lck->lck_rw.lck_rw_waiting = FALSE;
1479 do_wakeup = TRUE;
1480 }
1481
1482 lck_rw_ilk_unlock(&lck->lck_rw);
1483
1484 if (do_wakeup)
1485 thread_wakeup((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
1486 return(lck_rw_type);
1487 }
1488
1489
1490 /*
1491 * Routine: lck_rw_lock_shared_ext
1492 */
1493 void
1494 lck_rw_lock_shared_ext(
1495 lck_rw_ext_t *lck,
1496 lck_rw_t *rlck)
1497 {
1498 int i;
1499 wait_result_t res;
1500 boolean_t lock_miss = FALSE;
1501 boolean_t lock_wait = FALSE;
1502 boolean_t lock_stat;
1503
1504 lck_rw_check_type(lck, rlck);
1505
1506 lck_rw_ilk_lock(&lck->lck_rw);
1507
1508 lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;
1509
1510 if (lock_stat)
1511 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;
1512
1513 while (lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade) {
1514 i = lock_wait_time[1];
1515
1516 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_START,
1517 (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, i, 0);
1518
1519 if (lock_stat && !lock_miss) {
1520 lock_miss = TRUE;
1521 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
1522 }
1523
1524 if (i != 0) {
1525 lck_rw_ilk_unlock(&lck->lck_rw);
1526 while (--i != 0 && (lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade))
1527 continue;
1528 lck_rw_ilk_lock(&lck->lck_rw);
1529 }
1530
1531 if (lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade) {
1532 lck->lck_rw.lck_rw_waiting = TRUE;
1533 res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1534 if (res == THREAD_WAITING) {
1535 if (lock_stat && !lock_wait) {
1536 lock_wait = TRUE;
1537 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
1538 }
1539 lck_rw_ilk_unlock(&lck->lck_rw);
1540 res = thread_block(THREAD_CONTINUE_NULL);
1541 lck_rw_ilk_lock(&lck->lck_rw);
1542 }
1543 }
1544 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_END,
1545 (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, res, 0);
1546 }
1547
1548 lck->lck_rw.lck_rw_shared_cnt++;
1549
1550 lck_rw_ilk_unlock(&lck->lck_rw);
1551 }
1552
1553
1554 /*
1555 * Routine: lck_rw_lock_shared_to_exclusive_ext
1556 * Function:
1557 * Improves a read-only lock to one with
1558 * write permission. If another reader has
1559 * already requested an upgrade to a write lock,
1560 * no lock is held upon return.
1561 *
1562 * Returns TRUE if the upgrade *failed*.
1563 */
1564
1565 boolean_t
1566 lck_rw_lock_shared_to_exclusive_ext(
1567 lck_rw_ext_t *lck,
1568 lck_rw_t *rlck)
1569 {
1570 int i;
1571 boolean_t do_wakeup = FALSE;
1572 wait_result_t res;
1573 boolean_t lock_miss = FALSE;
1574 boolean_t lock_wait = FALSE;
1575 boolean_t lock_stat;
1576
1577 lck_rw_check_type(lck, rlck);
1578
1579 if (lck->lck_rw_deb.thread == current_thread())
1580 panic("rw lock (0x%08X) recursive lock attempt\n", rlck);
1581
1582 lck_rw_ilk_lock(&lck->lck_rw);
1583
1584 lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;
1585
1586 if (lock_stat)
1587 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;
1588
1589 lck->lck_rw.lck_rw_shared_cnt--;
1590
1591 if (lck->lck_rw.lck_rw_want_upgrade) {
1592 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_START,
1593 (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, 0, 0);
1594
1595 /*
1596 * Someone else has requested upgrade.
1597 * Since we've released a read lock, wake
1598 * him up.
1599 */
1600 if (lck->lck_rw.lck_rw_waiting && (lck->lck_rw.lck_rw_shared_cnt == 0)) {
1601 lck->lck_rw.lck_rw_waiting = FALSE;
1602 do_wakeup = TRUE;
1603 }
1604
1605 lck_rw_ilk_unlock(&lck->lck_rw);
1606
1607 if (do_wakeup)
1608 thread_wakeup((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
1609
1610 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_END,
1611 (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, 0, 0);
1612
1613 return (TRUE);
1614 }
1615
1616 lck->lck_rw.lck_rw_want_upgrade = TRUE;
1617
1618 while (lck->lck_rw.lck_rw_shared_cnt != 0) {
1619 i = lock_wait_time[1];
1620
1621 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_START,
1622 (int)rlck, lck->lck_rw.lck_rw_shared_cnt, i, 0, 0);
1623
1624 if (lock_stat && !lock_miss) {
1625 lock_miss = TRUE;
1626 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
1627 }
1628
1629 if (i != 0) {
1630 lck_rw_ilk_unlock(&lck->lck_rw);
1631 while (--i != 0 && lck->lck_rw.lck_rw_shared_cnt != 0)
1632 continue;
1633 lck_rw_ilk_lock(&lck->lck_rw);
1634 }
1635
1636 if (lck->lck_rw.lck_rw_shared_cnt != 0) {
1637 lck->lck_rw.lck_rw_waiting = TRUE;
1638 res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1639 if (res == THREAD_WAITING) {
1640 if (lock_stat && !lock_wait) {
1641 lock_wait = TRUE;
1642 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
1643 }
1644 lck_rw_ilk_unlock(&lck->lck_rw);
1645 res = thread_block(THREAD_CONTINUE_NULL);
1646 lck_rw_ilk_lock(&lck->lck_rw);
1647 }
1648 }
1649 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_END,
1650 (int)rlck, lck->lck_rw.lck_rw_shared_cnt, 0, 0, 0);
1651 }
1652
1653 lck->lck_rw_deb.pc_excl = __builtin_return_address(0);
1654 if (LcksOpts & enaLkExtStck)
1655 lck_rw_ext_backtrace(lck);
1656 lck->lck_rw_deb.thread = current_thread();
1657
1658 lck_rw_ilk_unlock(&lck->lck_rw);
1659
1660 return (FALSE);
1661 }
1662
1663 /*
1664 * Routine: lck_rw_lock_exclusive_to_shared_ext
1665 */
1666 void
1667 lck_rw_lock_exclusive_to_shared_ext(
1668 lck_rw_ext_t *lck,
1669 lck_rw_t *rlck)
1670 {
1671 boolean_t do_wakeup = FALSE;
1672
1673 lck_rw_check_type(lck, rlck);
1674
1675 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_START,
1676 (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, 0, 0);
1677
1678 lck_rw_ilk_lock(&lck->lck_rw);
1679
1680 lck->lck_rw.lck_rw_shared_cnt++;
1681 if (lck->lck_rw.lck_rw_want_upgrade)
1682 lck->lck_rw.lck_rw_want_upgrade = FALSE;
1683 else if (lck->lck_rw.lck_rw_want_excl)
1684 lck->lck_rw.lck_rw_want_excl = FALSE;
1685 else
1686 panic("rw lock (0x%08X) bad state (0x%08X) on attempt to release a shared or exlusive right\n",
1687 rlck, lck->lck_rw);
1688 if (lck->lck_rw_deb.thread == THREAD_NULL)
1689 panic("rw lock (0x%08X) not held\n",
1690 rlck);
1691 else if ( ((lck->lck_rw_attr & (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_THREAD)) == LCK_RW_ATTR_DEBUG)
1692 && (lck->lck_rw_deb.thread != current_thread()))
1693 panic("rw lock (0x%08X) unlocked by non-owner(0x%08X), current owner(0x%08X)\n",
1694 rlck, current_thread(), lck->lck_rw_deb.thread);
1695
1696 lck->lck_rw_deb.thread = THREAD_NULL;
1697
1698 if (lck->lck_rw.lck_rw_waiting) {
1699 lck->lck_rw.lck_rw_waiting = FALSE;
1700 do_wakeup = TRUE;
1701 }
1702
1703 lck_rw_ilk_unlock(&lck->lck_rw);
1704
1705 if (do_wakeup)
1706 thread_wakeup((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
1707
1708 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_END,
1709 (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, lck->lck_rw.lck_rw_shared_cnt, 0);
1710
1711 }
1712
1713
1714 /*
1715 * Routine: lck_rw_try_lock_exclusive_ext
1716 * Function:
1717 * Tries to get a write lock.
1718 *
1719 * Returns FALSE if the lock is not held on return.
1720 */
1721
1722 boolean_t
1723 lck_rw_try_lock_exclusive_ext(
1724 lck_rw_ext_t *lck,
1725 lck_rw_t *rlck)
1726 {
1727 boolean_t lock_stat;
1728
1729 lck_rw_check_type(lck, rlck);
1730
1731 lck_rw_ilk_lock(&lck->lck_rw);
1732
1733 lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;
1734
1735 if (lock_stat)
1736 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;
1737
1738 if (lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade || lck->lck_rw.lck_rw_shared_cnt) {
1739 /*
1740 * Can't get lock.
1741 */
1742 if (lock_stat) {
1743 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
1744 }
1745 lck_rw_ilk_unlock(&lck->lck_rw);
1746 return(FALSE);
1747 }
1748
1749 /*
1750 * Have lock.
1751 */
1752
1753 lck->lck_rw.lck_rw_want_excl = TRUE;
1754 lck->lck_rw_deb.pc_excl = __builtin_return_address(0);
1755 if (LcksOpts & enaLkExtStck)
1756 lck_rw_ext_backtrace(lck);
1757 lck->lck_rw_deb.thread = current_thread();
1758
1759 lck_rw_ilk_unlock(&lck->lck_rw);
1760
1761 return(TRUE);
1762 }
1763
1764 /*
1765 * Routine: lck_rw_try_lock_shared_ext
1766 * Function:
1767 * Tries to get a read lock.
1768 *
1769 * Returns FALSE if the lock is not held on return.
1770 */
1771
1772 boolean_t
1773 lck_rw_try_lock_shared_ext(
1774 lck_rw_ext_t *lck,
1775 lck_rw_t *rlck)
1776 {
1777 boolean_t lock_stat;
1778
1779 lck_rw_check_type(lck, rlck);
1780
1781 lck_rw_ilk_lock(&lck->lck_rw);
1782
1783 lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;
1784
1785 if (lock_stat)
1786 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;
1787
1788 if (lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade) {
1789 if (lock_stat) {
1790 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
1791 }
1792 lck_rw_ilk_unlock(&lck->lck_rw);
1793 return(FALSE);
1794 }
1795
1796 lck->lck_rw.lck_rw_shared_cnt++;
1797
1798 lck_rw_ilk_unlock(&lck->lck_rw);
1799
1800 return(TRUE);
1801 }
1802
1803 void
1804 lck_rw_check_type(
1805 lck_rw_ext_t *lck,
1806 lck_rw_t *rlck)
1807 {
1808 if (lck->lck_rw_deb.type != RW_TAG)
1809 panic("rw lock (0x%08X) not a rw lock type (0x%08X)\n",rlck, lck->lck_rw_deb.type);
1810 }
1811
1812 /*
1813 * The C portion of the mutex package. These routines are only invoked
1814 * if the optimized assembler routines can't do the work.
1815 */
1816
1817 /*
1818 * Forward definition
1819 */
1820
1821 void lck_mtx_ext_init(
1822 lck_mtx_ext_t *lck,
1823 lck_grp_t *grp,
1824 lck_attr_t *attr);
1825
1826 /*
1827 * Routine: mutex_alloc
1828 * Function:
1829 * Allocate a mutex for external users who cannot
1830 * hard-code the structure definition into their
1831 * objects.
1832 * For now just use kalloc, but a zone is probably
1833 * warranted.
1834 */
1835 mutex_t *
1836 mutex_alloc(
1837 unsigned short tag)
1838 {
1839 mutex_t *m;
1840
1841 if ((m = (mutex_t *)kalloc(sizeof(mutex_t))) != 0)
1842 mutex_init(m, tag);
1843 return(m);
1844 }
1845
1846 /*
1847 * Routine: mutex_free
1848 */
1849 void
1850 mutex_free(
1851 mutex_t *m)
1852 {
1853 kfree((void *)m, sizeof(mutex_t));
1854 }
1855
1856 /*
1857 * Routine: lck_mtx_alloc_init
1858 */
1859 lck_mtx_t *
1860 lck_mtx_alloc_init(
1861 lck_grp_t *grp,
1862 lck_attr_t *attr) {
1863 lck_mtx_t *lck;
1864
1865 if ((lck = (lck_mtx_t *)kalloc(sizeof(lck_mtx_t))) != 0)
1866 lck_mtx_init(lck, grp, attr);
1867
1868 return(lck);
1869 }
1870
1871 /*
1872 * Routine: lck_mtx_free
1873 */
1874 void
1875 lck_mtx_free(
1876 lck_mtx_t *lck,
1877 lck_grp_t *grp) {
1878 lck_mtx_destroy(lck, grp);
1879 kfree((void *)lck, sizeof(lck_mtx_t));
1880 }
1881
1882 /*
1883 * Routine: lck_mtx_init
1884 */
1885 void
1886 lck_mtx_init(
1887 lck_mtx_t *lck,
1888 lck_grp_t *grp,
1889 lck_attr_t *attr) {
1890 lck_mtx_ext_t *lck_ext;
1891 lck_attr_t *lck_attr;
1892
1893 if (attr != LCK_ATTR_NULL)
1894 lck_attr = attr;
1895 else
1896 lck_attr = &LockDefaultLckAttr;
1897
1898 if ((lck_attr->lck_attr_val) & LCK_ATTR_DEBUG) {
1899 if ((lck_ext = (lck_mtx_ext_t *)kalloc(sizeof(lck_mtx_ext_t))) != 0) {
1900 lck_mtx_ext_init(lck_ext, grp, lck_attr);
1901 lck->lck_mtx_tag = LCK_MTX_TAG_INDIRECT;
1902 lck->lck_mtx_ptr = lck_ext;
1903 }
1904 } else {
1905 lck->lck_mtx_data = 0;
1906 lck->lck_mtx_waiters = 0;
1907 lck->lck_mtx_pri = 0;
1908 }
1909 lck_grp_reference(grp);
1910 lck_grp_lckcnt_incr(grp, LCK_TYPE_MTX);
1911 }
1912
1913 /*
1914 * Routine: lck_mtx_ext_init
1915 */
1916 void
1917 lck_mtx_ext_init(
1918 lck_mtx_ext_t *lck,
1919 lck_grp_t *grp,
1920 lck_attr_t *attr) {
1921
1922 bzero((void *)lck, sizeof(lck_mtx_ext_t));
1923
1924 if ((attr->lck_attr_val) & LCK_ATTR_DEBUG) {
1925 lck->lck_mtx_deb.type = MUTEX_TAG;
1926 lck->lck_mtx_attr |= LCK_MTX_ATTR_DEBUG;
1927 }
1928
1929 lck->lck_mtx_grp = grp;
1930
1931 if (grp->lck_grp_attr & LCK_GRP_ATTR_STAT)
1932 lck->lck_mtx_attr |= LCK_MTX_ATTR_STAT;
1933 }
1934
1935 /*
1936 * Routine: lck_mtx_destroy
1937 */
1938 void
1939 lck_mtx_destroy(
1940 lck_mtx_t *lck,
1941 lck_grp_t *grp) {
1942 boolean_t lck_is_indirect;
1943
1944 if (lck->lck_mtx_tag == LCK_MTX_TAG_DESTROYED)
1945 return;
1946 lck_is_indirect = (lck->lck_mtx_tag == LCK_MTX_TAG_INDIRECT);
1947 lck->lck_mtx_tag = LCK_MTX_TAG_DESTROYED;
1948 if (lck_is_indirect)
1949 kfree((void *)lck->lck_mtx_ptr, sizeof(lck_mtx_ext_t));
1950
1951 lck_grp_lckcnt_decr(grp, LCK_TYPE_MTX);
1952 lck_grp_deallocate(grp);
1953 return;
1954 }
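/*
 * Illustrative lck_mtx usage sketch (compiled out, not part of this file).
 * lck_mtx_alloc_init() and lck_mtx_free() are defined above;
 * lck_mtx_lock() and lck_mtx_unlock() are the exported mutex primitives
 * declared in kern/locks.h; the lock group is assumed to have been created
 * by the caller.
 */
#if 0
static void
example_mtx_usage(lck_grp_t *grp)
{
	lck_mtx_t *mtx;

	mtx = lck_mtx_alloc_init(grp, LCK_ATTR_NULL);

	lck_mtx_lock(mtx);		/* may block; not for interrupt context */
	/* ... critical section protected by the mutex ... */
	lck_mtx_unlock(mtx);

	lck_mtx_free(mtx, grp);		/* lck_mtx_destroy() plus kfree() */
}
#endif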
1955
1956
1957 #if MACH_KDB
1958 /*
1959 * Routines to print out simple_locks and mutexes in a nicely-formatted
1960 * fashion.
1961 */
1962
1963 char *simple_lock_labels = "ENTRY ILK THREAD DURATION CALLER";
1964 char *mutex_labels = "ENTRY LOCKED WAITERS THREAD CALLER";
1965
1966 void db_print_simple_lock(
1967 simple_lock_t addr);
1968
1969 void db_print_mutex(
1970 mutex_t * addr);
1971
1972 void
1973 db_show_one_simple_lock (
1974 db_expr_t addr,
1975 boolean_t have_addr,
1976 db_expr_t count,
1977 char * modif)
1978 {
1979 simple_lock_t saddr = (simple_lock_t)addr;
1980
1981 if (saddr == (simple_lock_t)0 || !have_addr) {
1982 db_error ("No simple_lock\n");
1983 }
1984 #if USLOCK_DEBUG
1985 else if (saddr->lock_type != USLOCK_TAG)
1986 db_error ("Not a simple_lock\n");
1987 #endif /* USLOCK_DEBUG */
1988
1989 db_printf ("%s\n", simple_lock_labels);
1990 db_print_simple_lock (saddr);
1991 }
1992
1993 void
1994 db_print_simple_lock (
1995 simple_lock_t addr)
1996 {
1997
1998 db_printf ("%08x %3d", addr, *hw_lock_addr(addr->interlock));
1999 #if USLOCK_DEBUG
2000 db_printf (" %08x", addr->debug.lock_thread);
2001 db_printf (" %08x ", addr->debug.duration[1]);
2002 db_printsym ((int)addr->debug.lock_pc, DB_STGY_ANY);
2003 #endif /* USLOCK_DEBUG */
2004 db_printf ("\n");
2005 }
2006
2007 void
2008 db_show_one_mutex (
2009 db_expr_t addr,
2010 boolean_t have_addr,
2011 db_expr_t count,
2012 char * modif)
2013 {
2014 mutex_t * maddr = (mutex_t *)addr;
2015
2016 if (maddr == (mutex_t *)0 || !have_addr)
2017 db_error ("No mutex\n");
2018 #if MACH_LDEBUG
2019 else if (maddr->lck_mtx_deb.type != MUTEX_TAG)
2020 db_error ("Not a mutex\n");
2021 #endif /* MACH_LDEBUG */
2022
2023 db_printf ("%s\n", mutex_labels);
2024 db_print_mutex (maddr);
2025 }
2026
2027 void
2028 db_print_mutex (
2029 mutex_t * addr)
2030 {
2031 db_printf ("%08x %6d %7d",
2032 addr, *addr, addr->lck_mtx.lck_mtx_waiters);
2033 #if MACH_LDEBUG
2034 db_printf (" %08x ", addr->lck_mtx_deb.thread);
2035 db_printsym (addr->lck_mtx_deb.stack[0], DB_STGY_ANY);
2036 #endif /* MACH_LDEBUG */
2037 db_printf ("\n");
2038 }
2039
2040 void
2041 db_show_one_lock(
2042 lock_t *lock)
2043 {
2044 db_printf("shared_count = 0x%x, %swant_upgrade, %swant_exclusive, ",
2045 lock->lck_rw.lck_rw_shared_cnt,
2046 lock->lck_rw.lck_rw_want_upgrade ? "" : "!",
2047 lock->lck_rw.lck_rw_want_excl ? "" : "!");
2048 db_printf("%swaiting\n",
2049 lock->lck_rw.lck_rw_waiting ? "" : "!");
2050 db_printf("%sInterlock\n",
2051 lock->lck_rw.lck_rw_interlock ? "" : "!");
2052 }
2053
2054 #endif /* MACH_KDB */
2055