[apple/xnu.git] / osfmk / ppc / locks_ppc.c
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * @OSF_COPYRIGHT@
32 */
33 /*
34 * Mach Operating System
35 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
36 * All Rights Reserved.
37 *
38 * Permission to use, copy, modify and distribute this software and its
39 * documentation is hereby granted, provided that both the copyright
40 * notice and this permission notice appear in all copies of the
41 * software, derivative works or modified versions, and any portions
42 * thereof, and that both notices appear in supporting documentation.
43 *
44 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
45 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
46 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
47 *
48 * Carnegie Mellon requests users of this software to return to
49 *
50 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
51 * School of Computer Science
52 * Carnegie Mellon University
53 * Pittsburgh PA 15213-3890
54 *
55 * any improvements or extensions that they make and grant Carnegie Mellon
56 * the rights to redistribute these changes.
57 */
58 /*
59 * File: kern/lock.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 * Date: 1985
62 *
63 * Locking primitives implementation
64 */
65
66 #include <mach_kdb.h>
67 #include <mach_ldebug.h>
68
69 #include <kern/kalloc.h>
70 #include <kern/lock.h>
71 #include <kern/locks.h>
72 #include <kern/misc_protos.h>
73 #include <kern/thread.h>
74 #include <kern/processor.h>
75 #include <kern/sched_prim.h>
76 #include <kern/xpr.h>
77 #include <kern/debug.h>
78 #include <string.h>
79
80 #if MACH_KDB
81 #include <ddb/db_command.h>
82 #include <ddb/db_output.h>
83 #include <ddb/db_sym.h>
84 #include <ddb/db_print.h>
85 #endif /* MACH_KDB */
86
87 #ifdef __ppc__
88 #include <ppc/Firmware.h>
89 #endif
90
91 #include <sys/kdebug.h>
92
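/*
 * kdebug trace subcodes for the read/write lock paths in this file; they
 * are combined with MACHDBG_CODE(DBG_MACH_LOCKS, ...) in the KERNEL_DEBUG
 * calls that bracket contended lock operations below.
 */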
93 #define LCK_RW_LCK_EXCLUSIVE_CODE 0x100
94 #define LCK_RW_LCK_EXCLUSIVE1_CODE 0x101
95 #define LCK_RW_LCK_SHARED_CODE 0x102
96 #define LCK_RW_LCK_SH_TO_EX_CODE 0x103
97 #define LCK_RW_LCK_SH_TO_EX1_CODE 0x104
98 #define LCK_RW_LCK_EX_TO_SH_CODE 0x105
99
100
101 #define ANY_LOCK_DEBUG (USLOCK_DEBUG || LOCK_DEBUG || MUTEX_DEBUG)
102
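/*
 * The spin-before-block count consulted by the routines below is
 * lock_wait_time[1]; it is initialized to 0 here, so the busy-wait loops
 * are skipped and contended read/write lock acquisitions block immediately.
 */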
103 unsigned int lock_wait_time[2] = { (unsigned int)-1, 0 } ;
104
105 /* Forwards */
106
107
108 #if USLOCK_DEBUG
109 /*
110 * Perform simple lock checks.
111 */
112 int uslock_check = 1;
113 int max_lock_loops = 100000000;
114 decl_simple_lock_data(extern , printf_lock)
115 decl_simple_lock_data(extern , panic_lock)
116 #if MACH_KDB
117 decl_simple_lock_data(extern , kdb_lock)
118 #endif /* MACH_KDB */
119 #endif /* USLOCK_DEBUG */
120
121
122 /*
123 * We often want to know the addresses of the callers
124 * of the various lock routines. However, this information
125 * is only used for debugging and statistics.
126 */
127 typedef void *pc_t;
128 #define INVALID_PC ((void *) VM_MAX_KERNEL_ADDRESS)
129 #define INVALID_THREAD ((void *) VM_MAX_KERNEL_ADDRESS)
130 #if ANY_LOCK_DEBUG
131 #define OBTAIN_PC(pc,l) ((pc) = (void *) GET_RETURN_PC(&(l)))
132 #else /* ANY_LOCK_DEBUG */
133 #ifdef lint
134 /*
135 * Eliminate lint complaints about unused local pc variables.
136 */
137 #define OBTAIN_PC(pc,l) ++pc
138 #else /* lint */
139 #define OBTAIN_PC(pc,l)
140 #endif /* lint */
141 #endif /* ANY_LOCK_DEBUG */
142
143
144 /*
145 * Portable lock package implementation of usimple_locks.
146 */
147
148 #if USLOCK_DEBUG
149 #define USLDBG(stmt) stmt
150 void usld_lock_init(usimple_lock_t, unsigned short);
151 void usld_lock_pre(usimple_lock_t, pc_t);
152 void usld_lock_post(usimple_lock_t, pc_t);
153 void usld_unlock(usimple_lock_t, pc_t);
154 void usld_lock_try_pre(usimple_lock_t, pc_t);
155 void usld_lock_try_post(usimple_lock_t, pc_t);
156 int usld_lock_common_checks(usimple_lock_t, char *);
157 #else /* USLOCK_DEBUG */
158 #define USLDBG(stmt)
159 #endif /* USLOCK_DEBUG */
160
161 /*
162 * Routine: lck_spin_alloc_init
163 */
164 lck_spin_t *
165 lck_spin_alloc_init(
166 lck_grp_t *grp,
167 lck_attr_t *attr) {
168 lck_spin_t *lck;
169
170 if ((lck = (lck_spin_t *)kalloc(sizeof(lck_spin_t))) != 0)
171 lck_spin_init(lck, grp, attr);
172
173 return(lck);
174 }
175
176 /*
177 * Routine: lck_spin_free
178 */
179 void
180 lck_spin_free(
181 lck_spin_t *lck,
182 lck_grp_t *grp) {
183 lck_spin_destroy(lck, grp);
184 kfree((void *)lck, sizeof(lck_spin_t));
185 }
186
187 /*
188 * Routine: lck_spin_init
189 */
190 void
191 lck_spin_init(
192 lck_spin_t *lck,
193 lck_grp_t *grp,
194 __unused lck_attr_t *attr) {
195
196 lck->interlock = 0;
197 lck_grp_reference(grp);
198 lck_grp_lckcnt_incr(grp, LCK_TYPE_SPIN);
199 }
200
201 /*
202 * Routine: lck_spin_destroy
203 */
204 void
205 lck_spin_destroy(
206 lck_spin_t *lck,
207 lck_grp_t *grp) {
208 if (lck->interlock == LCK_SPIN_TAG_DESTROYED)
209 return;
210 lck->interlock = LCK_SPIN_TAG_DESTROYED;
211 lck_grp_lckcnt_decr(grp, LCK_TYPE_SPIN);
212 lck_grp_deallocate(grp);
213 }
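/*
 * Illustrative sketch (not compiled here) of the lck_spin lifecycle using
 * the group/attribute interfaces from <kern/locks.h>; the group and lock
 * names are placeholders, and lck_spin_lock/lck_spin_unlock are the
 * optimized fast paths declared elsewhere:
 *
 *	lck_grp_t  *my_grp  = lck_grp_alloc_init("my-spin", LCK_GRP_ATTR_NULL);
 *	lck_spin_t *my_lock = lck_spin_alloc_init(my_grp, LCK_ATTR_NULL);
 *
 *	lck_spin_lock(my_lock);
 *	...	critical section, preemption disabled
 *	lck_spin_unlock(my_lock);
 *
 *	lck_spin_free(my_lock, my_grp);
 */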
214
215 /*
216 * Initialize a usimple_lock.
217 *
218 * No change in preemption state.
219 */
220 void
221 usimple_lock_init(
222 usimple_lock_t l,
223 unsigned short tag)
224 {
225 #ifndef MACHINE_SIMPLE_LOCK
226 USLDBG(usld_lock_init(l, tag));
227 hw_lock_init(&l->interlock);
228 #else
229 simple_lock_init((simple_lock_t)l,tag);
230 #endif
231 }
232
233
234 /*
235 * Acquire a usimple_lock.
236 *
237 * Returns with preemption disabled. Note
238 * that the hw_lock routines are responsible for
239 * maintaining preemption state.
240 */
241 void
242 usimple_lock(
243 usimple_lock_t l)
244 {
245 #ifndef MACHINE_SIMPLE_LOCK
246 int i;
247 pc_t pc;
248 #if USLOCK_DEBUG
249 int count = 0;
250 #endif /* USLOCK_DEBUG */
251
252 OBTAIN_PC(pc, l);
253 USLDBG(usld_lock_pre(l, pc));
254
255 if(!hw_lock_to(&l->interlock, LockTimeOut)) /* Try to get the lock with a timeout */
256 panic("simple lock deadlock detection - l=0x%08X, cpu=%d, ret=0x%08X", l, cpu_number(), pc);
257
258 USLDBG(usld_lock_post(l, pc));
259 #else
260 simple_lock((simple_lock_t)l);
261 #endif
262 }
263
264
265 /*
266 * Release a usimple_lock.
267 *
268 * Returns with preemption enabled. Note
269 * that the hw_lock routines are responsible for
270 * maintaining preemption state.
271 */
272 void
273 usimple_unlock(
274 usimple_lock_t l)
275 {
276 #ifndef MACHINE_SIMPLE_LOCK
277 pc_t pc;
278
279 OBTAIN_PC(pc, l);
280 USLDBG(usld_unlock(l, pc));
281 sync();
282 hw_lock_unlock(&l->interlock);
283 #else
284 simple_unlock_rwmb((simple_lock_t)l);
285 #endif
286 }
287
288
289 /*
290 * Conditionally acquire a usimple_lock.
291 *
292 * On success, returns with preemption disabled.
293 * On failure, returns with preemption in the same state
294 * as when first invoked. Note that the hw_lock routines
295 * are responsible for maintaining preemption state.
296 *
297 * XXX No stats are gathered on a miss; I preserved this
298 * behavior from the original assembly-language code, but
299 * doesn't it make sense to log misses? XXX
300 */
301 unsigned int
302 usimple_lock_try(
303 usimple_lock_t l)
304 {
305 #ifndef MACHINE_SIMPLE_LOCK
306 pc_t pc;
307 unsigned int success;
308
309 OBTAIN_PC(pc, l);
310 USLDBG(usld_lock_try_pre(l, pc));
311 	if ((success = hw_lock_try(&l->interlock))) {
312 USLDBG(usld_lock_try_post(l, pc));
313 }
314 return success;
315 #else
316 return(simple_lock_try((simple_lock_t)l));
317 #endif
318 }
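/*
 * Illustrative sketch (not compiled here) of the usimple_lock interface
 * implemented above; "my_usl" is a placeholder and the tag argument is
 * unused by this implementation:
 *
 *	decl_simple_lock_data(static, my_usl)
 *
 *	usimple_lock_init(&my_usl, 0);
 *	if (usimple_lock_try(&my_usl)) {
 *		...	critical section, preemption disabled
 *		usimple_unlock(&my_usl);
 *	} else {
 *		usimple_lock(&my_usl);		spins (with timeout) until acquired
 *		usimple_unlock(&my_usl);
 *	}
 */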
319
320 #if USLOCK_DEBUG
321 /*
322 * States of a usimple_lock. The default when initializing
323 * a usimple_lock is setting it up for debug checking.
324 */
325 #define USLOCK_CHECKED 0x0001 /* lock is being checked */
326 #define USLOCK_TAKEN 0x0002 /* lock has been taken */
327 #define USLOCK_INIT 0xBAA0 /* lock has been initialized */
328 #define USLOCK_INITIALIZED (USLOCK_INIT|USLOCK_CHECKED)
329 #define USLOCK_CHECKING(l) (uslock_check && \
330 ((l)->debug.state & USLOCK_CHECKED))
331
332 /*
333 * Trace activities of a particularly interesting lock.
334 */
335 void usl_trace(usimple_lock_t, int, pc_t, const char *);
336
337
338 /*
339 * Initialize the debugging information contained
340 * in a usimple_lock.
341 */
342 void
343 usld_lock_init(
344 usimple_lock_t l,
345 unsigned short tag)
346 {
347 if (l == USIMPLE_LOCK_NULL)
348 panic("lock initialization: null lock pointer");
349 l->lock_type = USLOCK_TAG;
350 l->debug.state = uslock_check ? USLOCK_INITIALIZED : 0;
351 l->debug.lock_cpu = l->debug.unlock_cpu = 0;
352 l->debug.lock_pc = l->debug.unlock_pc = INVALID_PC;
353 l->debug.lock_thread = l->debug.unlock_thread = INVALID_THREAD;
354 l->debug.duration[0] = l->debug.duration[1] = 0;
358 }
359
360
361 /*
362 * These checks apply to all usimple_locks, not just
363 * those with USLOCK_CHECKED turned on.
364 */
365 int
366 usld_lock_common_checks(
367 usimple_lock_t l,
368 char *caller)
369 {
370 if (l == USIMPLE_LOCK_NULL)
371 panic("%s: null lock pointer", caller);
372 if (l->lock_type != USLOCK_TAG)
373 panic("%s: 0x%x is not a usimple lock", caller, (integer_t) l);
374 if (!(l->debug.state & USLOCK_INIT))
375 panic("%s: 0x%x is not an initialized lock",
376 caller, (integer_t) l);
377 return USLOCK_CHECKING(l);
378 }
379
380
381 /*
382 * Debug checks on a usimple_lock just before attempting
383 * to acquire it.
384 */
385 /* ARGSUSED */
386 void
387 usld_lock_pre(
388 usimple_lock_t l,
389 pc_t pc)
390 {
391 char *caller = "usimple_lock";
392
393
394 if (!usld_lock_common_checks(l, caller))
395 return;
396
397 /*
398  * Note that we have a weird case where we are getting a lock while we are
399  * in the process of putting the system to sleep. We are running with no
400  * current thread, so we can't tell whether we are retaking a lock we
401  * already hold or whether another processor holds it. Therefore we just
402  * skip this test if the locking thread is 0.
403 */
404
405 if ((l->debug.state & USLOCK_TAKEN) && l->debug.lock_thread &&
406 l->debug.lock_thread == (void *) current_thread()) {
407 printf("%s: lock 0x%x already locked (at 0x%x) by",
408 caller, (integer_t) l, l->debug.lock_pc);
409 printf(" current thread 0x%x (new attempt at pc 0x%x)\n",
410 l->debug.lock_thread, pc);
411 panic(caller);
412 }
413 mp_disable_preemption();
414 usl_trace(l, cpu_number(), pc, caller);
415 mp_enable_preemption();
416 }
417
418
419 /*
420 * Debug checks on a usimple_lock just after acquiring it.
421 *
422 * Pre-emption has been disabled at this point,
423 * so we are safe in using cpu_number.
424 */
425 void
426 usld_lock_post(
427 usimple_lock_t l,
428 pc_t pc)
429 {
430 register int mycpu;
431 char *caller = "successful usimple_lock";
432
433
434 if (!usld_lock_common_checks(l, caller))
435 return;
436
437 if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
438 panic("%s: lock 0x%x became uninitialized",
439 caller, (integer_t) l);
440 if ((l->debug.state & USLOCK_TAKEN))
441 panic("%s: lock 0x%x became TAKEN by someone else",
442 caller, (integer_t) l);
443
444 mycpu = cpu_number();
445 l->debug.lock_thread = (void *)current_thread();
446 l->debug.state |= USLOCK_TAKEN;
447 l->debug.lock_pc = pc;
448 l->debug.lock_cpu = mycpu;
449
450 usl_trace(l, mycpu, pc, caller);
451 }
452
453
454 /*
455 * Debug checks on a usimple_lock just before
456 * releasing it. Note that the caller has not
457 * yet released the hardware lock.
458 *
459 * Preemption is still disabled, so there's
460 * no problem using cpu_number.
461 */
462 void
463 usld_unlock(
464 usimple_lock_t l,
465 pc_t pc)
466 {
467 register int mycpu;
468 char *caller = "usimple_unlock";
469
470
471 if (!usld_lock_common_checks(l, caller))
472 return;
473
474 mycpu = cpu_number();
475
476 if (!(l->debug.state & USLOCK_TAKEN))
477 panic("%s: lock 0x%x hasn't been taken",
478 caller, (integer_t) l);
479 if (l->debug.lock_thread != (void *) current_thread())
480 panic("%s: unlocking lock 0x%x, owned by thread 0x%x",
481 caller, (integer_t) l, l->debug.lock_thread);
482 if (l->debug.lock_cpu != mycpu) {
483 printf("%s: unlocking lock 0x%x on cpu 0x%x",
484 caller, (integer_t) l, mycpu);
485 printf(" (acquired on cpu 0x%x)\n", l->debug.lock_cpu);
486 panic(caller);
487 }
488 usl_trace(l, mycpu, pc, caller);
489
490 l->debug.unlock_thread = l->debug.lock_thread;
491 	l->debug.lock_thread = INVALID_THREAD;
492 l->debug.state &= ~USLOCK_TAKEN;
493 l->debug.unlock_pc = pc;
494 l->debug.unlock_cpu = mycpu;
495 }
496
497
498 /*
499 * Debug checks on a usimple_lock just before
500 * attempting to acquire it.
501 *
502 * Preemption isn't guaranteed to be disabled.
503 */
504 void
505 usld_lock_try_pre(
506 usimple_lock_t l,
507 pc_t pc)
508 {
509 char *caller = "usimple_lock_try";
510
511 if (!usld_lock_common_checks(l, caller))
512 return;
513 mp_disable_preemption();
514 usl_trace(l, cpu_number(), pc, caller);
515 mp_enable_preemption();
516 }
517
518
519 /*
520 * Debug checks on a usimple_lock just after
521 * successfully attempting to acquire it.
522 *
523 * Preemption has been disabled by the
524 * lock acquisition attempt, so it's safe
525 * to use cpu_number.
526 */
527 void
528 usld_lock_try_post(
529 usimple_lock_t l,
530 pc_t pc)
531 {
532 register int mycpu;
533 char *caller = "successful usimple_lock_try";
534
535 if (!usld_lock_common_checks(l, caller))
536 return;
537
538 if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
539 panic("%s: lock 0x%x became uninitialized",
540 caller, (integer_t) l);
541 if ((l->debug.state & USLOCK_TAKEN))
542 panic("%s: lock 0x%x became TAKEN by someone else",
543 caller, (integer_t) l);
544
545 mycpu = cpu_number();
546 l->debug.lock_thread = (void *) current_thread();
547 l->debug.state |= USLOCK_TAKEN;
548 l->debug.lock_pc = pc;
549 l->debug.lock_cpu = mycpu;
550
551 usl_trace(l, mycpu, pc, caller);
552 }
553
554
555 /*
556 * For very special cases, set traced_lock to point to a
557 * specific lock of interest. The result is a series of
558 * XPRs showing lock operations on that lock. The lock_seq
559 * value is used to show the order of those operations.
560 */
561 usimple_lock_t traced_lock;
562 unsigned int lock_seq;
563
564 void
565 usl_trace(
566 usimple_lock_t l,
567 int mycpu,
568 pc_t pc,
569 const char * op_name)
570 {
571 if (traced_lock == l) {
572 XPR(XPR_SLOCK,
573 "seq %d, cpu %d, %s @ %x\n",
574 (integer_t) lock_seq, (integer_t) mycpu,
575 (integer_t) op_name, (integer_t) pc, 0);
576 lock_seq++;
577 }
578 }
579
580
581 #endif /* USLOCK_DEBUG */
582
583 /*
584 * The C portion of the shared/exclusive locks package.
585 */
586
587 /*
588 * Forward definition
589 */
590
591 void lck_rw_lock_exclusive_gen(
592 lck_rw_t *lck);
593
594 lck_rw_type_t lck_rw_done_gen(
595 lck_rw_t *lck);
596
597 void
598 lck_rw_lock_shared_gen(
599 lck_rw_t *lck);
600
601 boolean_t
602 lck_rw_lock_shared_to_exclusive_gen(
603 lck_rw_t *lck);
604
605 void
606 lck_rw_lock_exclusive_to_shared_gen(
607 lck_rw_t *lck);
608
609 boolean_t
610 lck_rw_try_lock_exclusive_gen(
611 lck_rw_t *lck);
612
613 boolean_t
614 lck_rw_try_lock_shared_gen(
615 lck_rw_t *lck);
616
617 void lck_rw_ext_init(
618 lck_rw_ext_t *lck,
619 lck_grp_t *grp,
620 lck_attr_t *attr);
621
622 void lck_rw_ext_backtrace(
623 lck_rw_ext_t *lck);
624
625 void lck_rw_lock_exclusive_ext(
626 lck_rw_ext_t *lck,
627 lck_rw_t *rlck);
628
629 lck_rw_type_t lck_rw_done_ext(
630 lck_rw_ext_t *lck,
631 lck_rw_t *rlck);
632
633 void
634 lck_rw_lock_shared_ext(
635 lck_rw_ext_t *lck,
636 lck_rw_t *rlck);
637
638 boolean_t
639 lck_rw_lock_shared_to_exclusive_ext(
640 lck_rw_ext_t *lck,
641 lck_rw_t *rlck);
642
643 void
644 lck_rw_lock_exclusive_to_shared_ext(
645 lck_rw_ext_t *lck,
646 lck_rw_t *rlck);
647
648 boolean_t
649 lck_rw_try_lock_exclusive_ext(
650 lck_rw_ext_t *lck,
651 lck_rw_t *rlck);
652
653 boolean_t
654 lck_rw_try_lock_shared_ext(
655 lck_rw_ext_t *lck,
656 lck_rw_t *rlck);
657
658 void
659 lck_rw_ilk_lock(
660 lck_rw_t *lck);
661
662 void
663 lck_rw_ilk_unlock(
664 lck_rw_t *lck);
665
666 void
667 lck_rw_check_type(
668 lck_rw_ext_t *lck,
669 lck_rw_t *rlck);
670
671 /*
672 * Routine: lock_alloc
673 * Function:
674 * Allocate a lock for external users who cannot
675 * hard-code the structure definition into their
676 * objects.
677 * For now just use kalloc, but a zone is probably
678 * warranted.
679 */
680 lock_t *
681 lock_alloc(
682 boolean_t can_sleep,
683 __unused unsigned short tag,
684 __unused unsigned short tag1)
685 {
686 lock_t *lck;
687
688 if ((lck = (lock_t *)kalloc(sizeof(lock_t))) != 0)
689 lock_init(lck, can_sleep, tag, tag1);
690 return(lck);
691 }
692
693 /*
694 * Routine: lock_init
695 * Function:
696 * Initialize a lock; required before use.
697 * Note that clients declare the "struct lock"
698 * variables and then initialize them, rather
699 * than getting a new one from this module.
700 */
701 void
702 lock_init(
703 lock_t *lck,
704 boolean_t can_sleep,
705 __unused unsigned short tag,
706 __unused unsigned short tag1)
707 {
708 if (!can_sleep)
709 panic("lock_init: sleep mode must be set to TRUE\n");
710
711 (void) memset((void *) lck, 0, sizeof(lock_t));
712 #if MACH_LDEBUG
713 lck->lck_rw_deb.type = RW_TAG;
714 lck->lck_rw_attr |= (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_THREAD|LCK_RW_ATTR_DIS_MYLOCK);
715 #endif
716
717 }
718
719
720 /*
721 * Routine: lock_free
722 * Function:
723 * Free a lock allocated for external users.
724 * For now just use kfree, but a zone is probably
725 * warranted.
726 */
727 void
728 lock_free(
729 lock_t *lck)
730 {
731 kfree((void *)lck, sizeof(lock_t));
732 }
733
734 #if MACH_LDEBUG
735 void
736 lock_write(
737 lock_t *lck)
738 {
739 lck_rw_lock_exclusive_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
740 }
741
742 void
743 lock_done(
744 lock_t *lck)
745 {
746 (void)lck_rw_done_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
747 }
748
749 void
750 lock_read(
751 lock_t *lck)
752 {
753 lck_rw_lock_shared_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
754 }
755
756 boolean_t
757 lock_read_to_write(
758 lock_t *lck)
759 {
760 return(lck_rw_lock_shared_to_exclusive_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck));
761 }
762
763 void
764 lock_write_to_read(
765 register lock_t *lck)
766 {
767 lck_rw_lock_exclusive_to_shared_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
768 }
769 #endif
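/*
 * Illustrative sketch (not compiled here) of the legacy lock_t interface
 * above; "l" is a placeholder and the tag arguments are unused:
 *
 *	lock_t *l = lock_alloc(TRUE, 0, 0);
 *
 *	lock_read(l);				shared access
 *	if (lock_read_to_write(l)) {		TRUE means the upgrade failed
 *		lock_write(l);			and nothing is held, so reacquire
 *	}
 *	lock_write_to_read(l);			downgrade back to shared
 *	lock_done(l);				release whatever is held
 *
 *	lock_free(l);
 */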
770
771 /*
772 * Routine: lck_rw_alloc_init
773 */
774 lck_rw_t *
775 lck_rw_alloc_init(
776 lck_grp_t *grp,
777 lck_attr_t *attr) {
778 lck_rw_t *lck;
779
780 if ((lck = (lck_rw_t *)kalloc(sizeof(lck_rw_t))) != 0)
781 lck_rw_init(lck, grp, attr);
782
783 return(lck);
784 }
785
786 /*
787 * Routine: lck_rw_free
788 */
789 void
790 lck_rw_free(
791 lck_rw_t *lck,
792 lck_grp_t *grp) {
793 lck_rw_destroy(lck, grp);
794 kfree((void *)lck, sizeof(lck_rw_t));
795 }
796
797 /*
798 * Routine: lck_rw_init
799 */
800 void
801 lck_rw_init(
802 lck_rw_t *lck,
803 lck_grp_t *grp,
804 lck_attr_t *attr) {
805 lck_rw_ext_t *lck_ext;
806 lck_attr_t *lck_attr;
807
808 if (attr != LCK_ATTR_NULL)
809 lck_attr = attr;
810 else
811 lck_attr = &LockDefaultLckAttr;
812
813 if ((lck_attr->lck_attr_val) & LCK_ATTR_DEBUG) {
814 if ((lck_ext = (lck_rw_ext_t *)kalloc(sizeof(lck_rw_ext_t))) != 0) {
815 lck_rw_ext_init(lck_ext, grp, lck_attr);
816 lck->lck_rw_tag = LCK_RW_TAG_INDIRECT;
817 lck->lck_rw_ptr = lck_ext;
818 }
819 } else {
820 (void) memset((void *) lck, 0, sizeof(lck_rw_t));
821 }
822
823 lck_grp_reference(grp);
824 lck_grp_lckcnt_incr(grp, LCK_TYPE_RW);
825 }
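/*
 * A debug-attributed lock takes the indirect path above: the lck_rw_t is
 * tagged LCK_RW_TAG_INDIRECT and points at a kalloc'd lck_rw_ext_t, so the
 * *_ext routines later in this file are used. Illustrative setup (not
 * compiled here; the names are placeholders):
 *
 *	lck_grp_t  *grp  = lck_grp_alloc_init("my-rw", LCK_GRP_ATTR_NULL);
 *	lck_attr_t *attr = lck_attr_alloc_init();
 *	lck_attr_setdebug(attr);
 *	lck_rw_t   *lck  = lck_rw_alloc_init(grp, attr);
 */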
826
827 /*
828 * Routine: lck_rw_ext_init
829 */
830 void
831 lck_rw_ext_init(
832 lck_rw_ext_t *lck,
833 lck_grp_t *grp,
834 lck_attr_t *attr) {
835
836 bzero((void *)lck, sizeof(lck_rw_ext_t));
837
838 if ((attr->lck_attr_val) & LCK_ATTR_DEBUG) {
839 lck->lck_rw_deb.type = RW_TAG;
840 lck->lck_rw_attr |= LCK_RW_ATTR_DEBUG;
841 }
842
843 lck->lck_rw_grp = grp;
844
845 if (grp->lck_grp_attr & LCK_GRP_ATTR_STAT)
846 lck->lck_rw_attr |= LCK_RW_ATTR_STAT;
847 }
848
849 /*
850 * Routine: lck_rw_destroy
851 */
852 void
853 lck_rw_destroy(
854 lck_rw_t *lck,
855 lck_grp_t *grp) {
856 boolean_t lck_is_indirect;
857
858 if (lck->lck_rw_tag == LCK_RW_TAG_DESTROYED)
859 return;
860 lck_is_indirect = (lck->lck_rw_tag == LCK_RW_TAG_INDIRECT);
861 lck->lck_rw_tag = LCK_RW_TAG_DESTROYED;
862 if (lck_is_indirect)
863 kfree((void *)lck->lck_rw_ptr, sizeof(lck_rw_ext_t));
864
865 lck_grp_lckcnt_decr(grp, LCK_TYPE_RW);
866 lck_grp_deallocate(grp);
867 return;
868 }
869
870 /*
871 * Routine: lck_rw_lock
872 */
873 void
874 lck_rw_lock(
875 lck_rw_t *lck,
876 lck_rw_type_t lck_rw_type)
877 {
878 if (lck_rw_type == LCK_RW_TYPE_SHARED)
879 lck_rw_lock_shared(lck);
880 else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
881 lck_rw_lock_exclusive(lck);
882 else
883 panic("lck_rw_lock(): Invalid RW lock type: %d\n", lck_rw_type);
884 }
885
886
887 /*
888 * Routine: lck_rw_unlock
889 */
890 void
891 lck_rw_unlock(
892 lck_rw_t *lck,
893 lck_rw_type_t lck_rw_type)
894 {
895 if (lck_rw_type == LCK_RW_TYPE_SHARED)
896 lck_rw_unlock_shared(lck);
897 else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
898 lck_rw_unlock_exclusive(lck);
899 else
900 panic("lck_rw_unlock(): Invalid RW lock type: %d\n", lck_rw_type);
901 }
902
903
904 /*
905 * Routine: lck_rw_unlock_shared
906 */
907 void
908 lck_rw_unlock_shared(
909 lck_rw_t *lck)
910 {
911 lck_rw_type_t ret;
912
913 ret = lck_rw_done(lck);
914
915 if (ret != LCK_RW_TYPE_SHARED)
916 panic("lck_rw_unlock(): lock held in mode: %d\n", ret);
917 }
918
919
920 /*
921 * Routine: lck_rw_unlock_exclusive
922 */
923 void
924 lck_rw_unlock_exclusive(
925 lck_rw_t *lck)
926 {
927 lck_rw_type_t ret;
928
929 ret = lck_rw_done(lck);
930
931 if (ret != LCK_RW_TYPE_EXCLUSIVE)
932 panic("lck_rw_unlock_exclusive(): lock held in mode: %d\n", ret);
933 }
934
935
936 /*
937 * Routine: lck_rw_try_lock
938 */
939 boolean_t
940 lck_rw_try_lock(
941 lck_rw_t *lck,
942 lck_rw_type_t lck_rw_type)
943 {
944 if (lck_rw_type == LCK_RW_TYPE_SHARED)
945 return(lck_rw_try_lock_shared(lck));
946 else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
947 return(lck_rw_try_lock_exclusive(lck));
948 else
949 panic("lck_rw_try_lock(): Invalid rw lock type: %x\n", lck_rw_type);
950 return(FALSE);
951 }
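/*
 * Illustrative sketch (not compiled here) of the type-dispatched interface
 * above; "lck" is assumed to have come from lck_rw_alloc_init():
 *
 *	lck_rw_lock(lck, LCK_RW_TYPE_SHARED);
 *	...	readers may run concurrently
 *	lck_rw_unlock(lck, LCK_RW_TYPE_SHARED);
 *
 *	if (lck_rw_try_lock(lck, LCK_RW_TYPE_EXCLUSIVE)) {
 *		...	exclusive access, acquired without blocking
 *		(void) lck_rw_done(lck);	returns the type that was held
 *	}
 */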
952
953
954
955 /*
956 * Routine: lck_rw_lock_exclusive_gen
957 */
958 void
959 lck_rw_lock_exclusive_gen(
960 lck_rw_t *lck)
961 {
962 int i;
963 boolean_t lock_miss = FALSE;
964 wait_result_t res;
965
966 lck_rw_ilk_lock(lck);
967
968 /*
969 * Try to acquire the lck_rw_want_excl bit.
970 */
971 while (lck->lck_rw_want_excl) {
972 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_START, (int)lck, 0, 0, 0, 0);
973
974 if (!lock_miss) {
975 lock_miss = TRUE;
976 }
977
978 i = lock_wait_time[1];
979 if (i != 0) {
980 lck_rw_ilk_unlock(lck);
981 while (--i != 0 && lck->lck_rw_want_excl)
982 continue;
983 lck_rw_ilk_lock(lck);
984 }
985
986 if (lck->lck_rw_want_excl) {
987 lck->lck_rw_waiting = TRUE;
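			/*
			 * The wait event is the address of the last 32-bit word of
			 * the lck_rw_t; every waiter sleeps on this same address,
			 * and the thread_wakeup() calls in this file use it too.
			 */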
988 res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
989 if (res == THREAD_WAITING) {
990 lck_rw_ilk_unlock(lck);
991 res = thread_block(THREAD_CONTINUE_NULL);
992 lck_rw_ilk_lock(lck);
993 }
994 }
995 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_END, (int)lck, res, 0, 0, 0);
996 }
997 lck->lck_rw_want_excl = TRUE;
998
999 /* Wait for readers (and upgrades) to finish */
1000
1001 while ((lck->lck_rw_shared_cnt != 0) || lck->lck_rw_want_upgrade) {
1002 if (!lock_miss) {
1003 lock_miss = TRUE;
1004 }
1005
1006 i = lock_wait_time[1];
1007
1008 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_START,
1009 (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, i, 0);
1010
1011 if (i != 0) {
1012 lck_rw_ilk_unlock(lck);
1013 while (--i != 0 && (lck->lck_rw_shared_cnt != 0 ||
1014 lck->lck_rw_want_upgrade))
1015 continue;
1016 lck_rw_ilk_lock(lck);
1017 }
1018
1019 if (lck->lck_rw_shared_cnt != 0 || lck->lck_rw_want_upgrade) {
1020 lck->lck_rw_waiting = TRUE;
1021 res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1022 if (res == THREAD_WAITING) {
1023 lck_rw_ilk_unlock(lck);
1024 res = thread_block(THREAD_CONTINUE_NULL);
1025 lck_rw_ilk_lock(lck);
1026 }
1027 }
1028 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_END,
1029 (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, res, 0);
1030 }
1031
1032 lck_rw_ilk_unlock(lck);
1033 }
1034
1035
1036 /*
1037 * Routine: lck_rw_done_gen
1038 */
1039 lck_rw_type_t
1040 lck_rw_done_gen(
1041 lck_rw_t *lck)
1042 {
1043 boolean_t do_wakeup = FALSE;
1044 lck_rw_type_t lck_rw_type;
1045
1046
1047 lck_rw_ilk_lock(lck);
1048
1049 if (lck->lck_rw_shared_cnt != 0) {
1050 lck_rw_type = LCK_RW_TYPE_SHARED;
1051 lck->lck_rw_shared_cnt--;
1052 }
1053 else {
1054 lck_rw_type = LCK_RW_TYPE_EXCLUSIVE;
1055 if (lck->lck_rw_want_upgrade)
1056 lck->lck_rw_want_upgrade = FALSE;
1057 else
1058 lck->lck_rw_want_excl = FALSE;
1059 }
1060
1061 /*
1062 * There is no reason to wakeup a lck_rw_waiting thread
1063 * if the read-count is non-zero. Consider:
1064 * we must be dropping a read lock
1065 * threads are waiting only if one wants a write lock
1066 * if there are still readers, they can't proceed
1067 */
1068
1069 if (lck->lck_rw_waiting && (lck->lck_rw_shared_cnt == 0)) {
1070 lck->lck_rw_waiting = FALSE;
1071 do_wakeup = TRUE;
1072 }
1073
1074 lck_rw_ilk_unlock(lck);
1075
1076 if (do_wakeup)
1077 thread_wakeup((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
1078 return(lck_rw_type);
1079 }
1080
1081
1082 /*
1083 * Routine: lck_rw_lock_shared_gen
1084 */
1085 void
1086 lck_rw_lock_shared_gen(
1087 lck_rw_t *lck)
1088 {
1089 int i;
1090 wait_result_t res;
1091
1092 lck_rw_ilk_lock(lck);
1093
1094 while (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) {
1095 i = lock_wait_time[1];
1096
1097 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_START,
1098 (int)lck, lck->lck_rw_want_excl, lck->lck_rw_want_upgrade, i, 0);
1099
1100 if (i != 0) {
1101 lck_rw_ilk_unlock(lck);
1102 while (--i != 0 && (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade))
1103 continue;
1104 lck_rw_ilk_lock(lck);
1105 }
1106
1107 if (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) {
1108 lck->lck_rw_waiting = TRUE;
1109 res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1110 if (res == THREAD_WAITING) {
1111 lck_rw_ilk_unlock(lck);
1112 res = thread_block(THREAD_CONTINUE_NULL);
1113 lck_rw_ilk_lock(lck);
1114 }
1115 }
1116 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_END,
1117 (int)lck, lck->lck_rw_want_excl, lck->lck_rw_want_upgrade, res, 0);
1118 }
1119
1120 lck->lck_rw_shared_cnt++;
1121
1122 lck_rw_ilk_unlock(lck);
1123 }
1124
1125
1126 /*
1127 * Routine: lck_rw_lock_shared_to_exclusive_gen
1128 * Function:
1129 * Improves a read-only lock to one with
1130 * write permission. If another reader has
1131 * already requested an upgrade to a write lock,
1132 * no lock is held upon return.
1133 *
1134 * Returns TRUE if the upgrade *failed*.
1135 */
1136
1137 boolean_t
1138 lck_rw_lock_shared_to_exclusive_gen(
1139 lck_rw_t *lck)
1140 {
1141 int i;
1142 boolean_t do_wakeup = FALSE;
1143 wait_result_t res;
1144
1145 lck_rw_ilk_lock(lck);
1146
1147 lck->lck_rw_shared_cnt--;
1148
1149 if (lck->lck_rw_want_upgrade) {
1150 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_START,
1151 (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, 0, 0);
1152
1153 /*
1154 * Someone else has requested upgrade.
1155 * Since we've released a read lock, wake
1156 * him up.
1157 */
1158 if (lck->lck_rw_waiting && (lck->lck_rw_shared_cnt == 0)) {
1159 lck->lck_rw_waiting = FALSE;
1160 do_wakeup = TRUE;
1161 }
1162
1163 lck_rw_ilk_unlock(lck);
1164
1165 if (do_wakeup)
1166 thread_wakeup((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
1167
1168 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_END,
1169 (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, 0, 0);
1170
1171 return (TRUE);
1172 }
1173
1174 lck->lck_rw_want_upgrade = TRUE;
1175
1176 while (lck->lck_rw_shared_cnt != 0) {
1177 i = lock_wait_time[1];
1178
1179 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_START,
1180 (int)lck, lck->lck_rw_shared_cnt, i, 0, 0);
1181
1182 if (i != 0) {
1183 lck_rw_ilk_unlock(lck);
1184 while (--i != 0 && lck->lck_rw_shared_cnt != 0)
1185 continue;
1186 lck_rw_ilk_lock(lck);
1187 }
1188
1189 if (lck->lck_rw_shared_cnt != 0) {
1190 lck->lck_rw_waiting = TRUE;
1191 res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1192 if (res == THREAD_WAITING) {
1193 lck_rw_ilk_unlock(lck);
1194 res = thread_block(THREAD_CONTINUE_NULL);
1195 lck_rw_ilk_lock(lck);
1196 }
1197 }
1198 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_END,
1199 (int)lck, lck->lck_rw_shared_cnt, 0, 0, 0);
1200 }
1201
1202 lck_rw_ilk_unlock(lck);
1203
1204 return (FALSE);
1205 }
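/*
 * Because a failed upgrade drops the shared hold entirely, callers must be
 * prepared to re-acquire. Illustrative caller pattern (not compiled here),
 * assuming the exported lck_rw_lock_shared_to_exclusive() follows the same
 * TRUE-on-failure convention as the routine above:
 *
 *	lck_rw_lock_shared(lck);
 *	...	decide that exclusive access is needed
 *	if (lck_rw_lock_shared_to_exclusive(lck)) {
 *		...	upgrade failed: no lock is held at this point
 *		lck_rw_lock_exclusive(lck);
 *	}
 *	...	exclusive access either way
 *	lck_rw_unlock_exclusive(lck);
 */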
1206
1207 /*
1208 * Routine: lck_rw_lock_exclusive_to_shared_gen
1209 */
1210 void
1211 lck_rw_lock_exclusive_to_shared_gen(
1212 lck_rw_t *lck)
1213 {
1214 boolean_t do_wakeup = FALSE;
1215
1216 lck_rw_ilk_lock(lck);
1217
1218 lck->lck_rw_shared_cnt++;
1219 if (lck->lck_rw_want_upgrade)
1220 lck->lck_rw_want_upgrade = FALSE;
1221 else
1222 lck->lck_rw_want_excl = FALSE;
1223
1224 if (lck->lck_rw_waiting) {
1225 lck->lck_rw_waiting = FALSE;
1226 do_wakeup = TRUE;
1227 }
1228
1229 lck_rw_ilk_unlock(lck);
1230
1231 if (do_wakeup)
1232 thread_wakeup((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
1233
1234 }
1235
1236
1237 /*
1238 * Routine: lck_rw_try_lock_exclusive_gen
1239 * Function:
1240 * Tries to get a write lock.
1241 *
1242 * Returns FALSE if the lock is not held on return.
1243 */
1244
1245 boolean_t
1246 lck_rw_try_lock_exclusive_gen(
1247 lck_rw_t *lck)
1248 {
1249 lck_rw_ilk_lock(lck);
1250
1251 if (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade || lck->lck_rw_shared_cnt) {
1252 /*
1253 * Can't get lock.
1254 */
1255 lck_rw_ilk_unlock(lck);
1256 return(FALSE);
1257 }
1258
1259 /*
1260 * Have lock.
1261 */
1262
1263 lck->lck_rw_want_excl = TRUE;
1264
1265 lck_rw_ilk_unlock(lck);
1266
1267 return(TRUE);
1268 }
1269
1270 /*
1271 * Routine: lck_rw_try_lock_shared_gen
1272 * Function:
1273 * Tries to get a read lock.
1274 *
1275 * Returns FALSE if the lock is not held on return.
1276 */
1277
1278 boolean_t
1279 lck_rw_try_lock_shared_gen(
1280 lck_rw_t *lck)
1281 {
1282 lck_rw_ilk_lock(lck);
1283
1284 if (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) {
1285 lck_rw_ilk_unlock(lck);
1286 return(FALSE);
1287 }
1288
1289 lck->lck_rw_shared_cnt++;
1290
1291 lck_rw_ilk_unlock(lck);
1292
1293 return(TRUE);
1294 }
1295
1296
1297 /*
1298 * Routine: lck_rw_ext_backtrace
1299 */
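/*
 * Walks the saved-SP (back chain) list starting from r1 and records each
 * frame's saved LR, which sits at offset 8 in the 32-bit PowerPC linkage
 * area (hence *(stackptr+2)). The walk stops once the chain appears to
 * leave the current kernel stack, and any unused slots are zeroed.
 */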
1300 void
1301 lck_rw_ext_backtrace(
1302 lck_rw_ext_t *lck)
1303 {
1304 unsigned int *stackptr, *stackptr_prev;
1305 unsigned int frame;
1306
1307 __asm__ volatile("mr %0,r1" : "=r" (stackptr));
1308 frame = 0;
1309 while (frame < LCK_FRAMES_MAX) {
1310 stackptr_prev = stackptr;
1311 stackptr = ( unsigned int *)*stackptr;
1312 if ( (((unsigned int)stackptr_prev) ^ ((unsigned int)stackptr)) > 8192)
1313 break;
1314 lck->lck_rw_deb.stack[frame] = *(stackptr+2);
1315 frame++;
1316 }
1317 while (frame < LCK_FRAMES_MAX) {
1318 lck->lck_rw_deb.stack[frame] = 0;
1319 frame++;
1320 }
1321 }
1322
1323
1324 /*
1325 * Routine: lck_rw_lock_exclusive_ext
1326 */
1327 void
1328 lck_rw_lock_exclusive_ext(
1329 lck_rw_ext_t *lck,
1330 lck_rw_t *rlck)
1331 {
1332 int i;
1333 wait_result_t res;
1334 boolean_t lock_miss = FALSE;
1335 boolean_t lock_wait = FALSE;
1336 boolean_t lock_stat;
1337
1338 lck_rw_check_type(lck, rlck);
1339
1340 if ( ((lck->lck_rw_attr & (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_MYLOCK)) == LCK_RW_ATTR_DEBUG)
1341 && (lck->lck_rw_deb.thread == current_thread()))
1342 panic("rw lock (0x%08X) recursive lock attempt\n", rlck);
1343
1344 lck_rw_ilk_lock(&lck->lck_rw);
1345
1346 lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;
1347
1348 if (lock_stat)
1349 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;
1350
1351 /*
1352 * Try to acquire the lck_rw.lck_rw_want_excl bit.
1353 */
1354 while (lck->lck_rw.lck_rw_want_excl) {
1355 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_START, (int)rlck, 0, 0, 0, 0);
1356
1357 if (lock_stat && !lock_miss) {
1358 lock_miss = TRUE;
1359 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
1360 }
1361
1362 i = lock_wait_time[1];
1363 if (i != 0) {
1364 lck_rw_ilk_unlock(&lck->lck_rw);
1365 while (--i != 0 && lck->lck_rw.lck_rw_want_excl)
1366 continue;
1367 lck_rw_ilk_lock(&lck->lck_rw);
1368 }
1369
1370 if (lck->lck_rw.lck_rw_want_excl) {
1371 lck->lck_rw.lck_rw_waiting = TRUE;
1372 res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1373 if (res == THREAD_WAITING) {
1374 if (lock_stat && !lock_wait) {
1375 lock_wait = TRUE;
1376 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
1377 }
1378 lck_rw_ilk_unlock(&lck->lck_rw);
1379 res = thread_block(THREAD_CONTINUE_NULL);
1380 lck_rw_ilk_lock(&lck->lck_rw);
1381 }
1382 }
1383 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_END, (int)rlck, res, 0, 0, 0);
1384 }
1385 lck->lck_rw.lck_rw_want_excl = TRUE;
1386
1387 /* Wait for readers (and upgrades) to finish */
1388
1389 while ((lck->lck_rw.lck_rw_shared_cnt != 0) || lck->lck_rw.lck_rw_want_upgrade) {
1390 i = lock_wait_time[1];
1391
1392 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_START,
1393 (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, i, 0);
1394
1395 if (lock_stat && !lock_miss) {
1396 lock_miss = TRUE;
1397 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
1398 }
1399
1400 if (i != 0) {
1401 lck_rw_ilk_unlock(&lck->lck_rw);
1402 while (--i != 0 && (lck->lck_rw.lck_rw_shared_cnt != 0 ||
1403 lck->lck_rw.lck_rw_want_upgrade))
1404 continue;
1405 lck_rw_ilk_lock(&lck->lck_rw);
1406 }
1407
1408 if (lck->lck_rw.lck_rw_shared_cnt != 0 || lck->lck_rw.lck_rw_want_upgrade) {
1409 lck->lck_rw.lck_rw_waiting = TRUE;
1410 res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1411 if (res == THREAD_WAITING) {
1412 if (lock_stat && !lock_wait) {
1413 lock_wait = TRUE;
1414 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
1415 }
1416 lck_rw_ilk_unlock(&lck->lck_rw);
1417 res = thread_block(THREAD_CONTINUE_NULL);
1418 lck_rw_ilk_lock(&lck->lck_rw);
1419 }
1420 }
1421 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_END,
1422 (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, res, 0);
1423 }
1424
1425 lck->lck_rw_deb.pc_excl = __builtin_return_address(0);
1426 if (LcksOpts & enaLkExtStck)
1427 lck_rw_ext_backtrace(lck);
1428 lck->lck_rw_deb.thread = current_thread();
1429
1430 lck_rw_ilk_unlock(&lck->lck_rw);
1431 }
1432
1433
1434 /*
1435 * Routine: lck_rw_done_ext
1436 */
1437 lck_rw_type_t
1438 lck_rw_done_ext(
1439 lck_rw_ext_t *lck,
1440 lck_rw_t *rlck)
1441 {
1442 boolean_t do_wakeup = FALSE;
1443 lck_rw_type_t lck_rw_type;
1444
1445
1446 lck_rw_check_type(lck, rlck);
1447
1448 lck_rw_ilk_lock(&lck->lck_rw);
1449
1450 if (lck->lck_rw.lck_rw_shared_cnt != 0) {
1451 lck_rw_type = LCK_RW_TYPE_SHARED;
1452 lck->lck_rw.lck_rw_shared_cnt--;
1453 }
1454 else {
1455 lck_rw_type = LCK_RW_TYPE_EXCLUSIVE;
1456 if (lck->lck_rw.lck_rw_want_upgrade)
1457 lck->lck_rw.lck_rw_want_upgrade = FALSE;
1458 else if (lck->lck_rw.lck_rw_want_excl)
1459 lck->lck_rw.lck_rw_want_excl = FALSE;
1460 else
1461 panic("rw lock (0x%08X) bad state (0x%08X) on attempt to release a shared or exlusive right\n",
1462 rlck, lck->lck_rw);
1463 if (lck->lck_rw_deb.thread == THREAD_NULL)
1464 panic("rw lock (0x%08X) not held\n",
1465 rlck);
1466 else if ( ((lck->lck_rw_attr & (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_THREAD)) == LCK_RW_ATTR_DEBUG)
1467 && (lck->lck_rw_deb.thread != current_thread()))
1468 panic("rw lock (0x%08X) unlocked by non-owner(0x%08X), current owner(0x%08X)\n",
1469 rlck, current_thread(), lck->lck_rw_deb.thread);
1470 lck->lck_rw_deb.thread = THREAD_NULL;
1471 }
1472
1473 if (lck->lck_rw_attr & LCK_RW_ATTR_DEBUG)
1474 lck->lck_rw_deb.pc_done = __builtin_return_address(0);
1475
1476 /*
1477 * There is no reason to wakeup a waiting thread
1478 * if the read-count is non-zero. Consider:
1479 * we must be dropping a read lock
1480 * threads are waiting only if one wants a write lock
1481 * if there are still readers, they can't proceed
1482 */
1483
1484 if (lck->lck_rw.lck_rw_waiting && (lck->lck_rw.lck_rw_shared_cnt == 0)) {
1485 lck->lck_rw.lck_rw_waiting = FALSE;
1486 do_wakeup = TRUE;
1487 }
1488
1489 lck_rw_ilk_unlock(&lck->lck_rw);
1490
1491 if (do_wakeup)
1492 thread_wakeup((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
1493 return(lck_rw_type);
1494 }
1495
1496
1497 /*
1498 * Routine: lck_rw_lock_shared_ext
1499 */
1500 void
1501 lck_rw_lock_shared_ext(
1502 lck_rw_ext_t *lck,
1503 lck_rw_t *rlck)
1504 {
1505 int i;
1506 wait_result_t res;
1507 boolean_t lock_miss = FALSE;
1508 boolean_t lock_wait = FALSE;
1509 boolean_t lock_stat;
1510
1511 lck_rw_check_type(lck, rlck);
1512
1513 lck_rw_ilk_lock(&lck->lck_rw);
1514
1515 lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;
1516
1517 if (lock_stat)
1518 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;
1519
1520 while (lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade) {
1521 i = lock_wait_time[1];
1522
1523 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_START,
1524 (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, i, 0);
1525
1526 if (lock_stat && !lock_miss) {
1527 lock_miss = TRUE;
1528 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
1529 }
1530
1531 if (i != 0) {
1532 lck_rw_ilk_unlock(&lck->lck_rw);
1533 while (--i != 0 && (lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade))
1534 continue;
1535 lck_rw_ilk_lock(&lck->lck_rw);
1536 }
1537
1538 if (lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade) {
1539 lck->lck_rw.lck_rw_waiting = TRUE;
1540 res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1541 if (res == THREAD_WAITING) {
1542 if (lock_stat && !lock_wait) {
1543 lock_wait = TRUE;
1544 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
1545 }
1546 lck_rw_ilk_unlock(&lck->lck_rw);
1547 res = thread_block(THREAD_CONTINUE_NULL);
1548 lck_rw_ilk_lock(&lck->lck_rw);
1549 }
1550 }
1551 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_END,
1552 (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, res, 0);
1553 }
1554
1555 lck->lck_rw.lck_rw_shared_cnt++;
1556
1557 lck_rw_ilk_unlock(&lck->lck_rw);
1558 }
1559
1560
1561 /*
1562 * Routine: lck_rw_lock_shared_to_exclusive_ext
1563 * Function:
1564 * Improves a read-only lock to one with
1565 * write permission. If another reader has
1566 * already requested an upgrade to a write lock,
1567 * no lock is held upon return.
1568 *
1569 * Returns TRUE if the upgrade *failed*.
1570 */
1571
1572 boolean_t
1573 lck_rw_lock_shared_to_exclusive_ext(
1574 lck_rw_ext_t *lck,
1575 lck_rw_t *rlck)
1576 {
1577 int i;
1578 boolean_t do_wakeup = FALSE;
1579 wait_result_t res;
1580 boolean_t lock_miss = FALSE;
1581 boolean_t lock_wait = FALSE;
1582 boolean_t lock_stat;
1583
1584 lck_rw_check_type(lck, rlck);
1585
1586 if (lck->lck_rw_deb.thread == current_thread())
1587 panic("rw lock (0x%08X) recursive lock attempt\n", rlck);
1588
1589 lck_rw_ilk_lock(&lck->lck_rw);
1590
1591 lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;
1592
1593 if (lock_stat)
1594 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;
1595
1596 lck->lck_rw.lck_rw_shared_cnt--;
1597
1598 if (lck->lck_rw.lck_rw_want_upgrade) {
1599 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_START,
1600 (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, 0, 0);
1601
1602 /*
1603 * Someone else has requested upgrade.
1604 * Since we've released a read lock, wake
1605 * him up.
1606 */
1607 if (lck->lck_rw.lck_rw_waiting && (lck->lck_rw.lck_rw_shared_cnt == 0)) {
1608 lck->lck_rw.lck_rw_waiting = FALSE;
1609 do_wakeup = TRUE;
1610 }
1611
1612 lck_rw_ilk_unlock(&lck->lck_rw);
1613
1614 if (do_wakeup)
1615 thread_wakeup((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
1616
1617 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_END,
1618 (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, 0, 0);
1619
1620 return (TRUE);
1621 }
1622
1623 lck->lck_rw.lck_rw_want_upgrade = TRUE;
1624
1625 while (lck->lck_rw.lck_rw_shared_cnt != 0) {
1626 i = lock_wait_time[1];
1627
1628 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_START,
1629 (int)rlck, lck->lck_rw.lck_rw_shared_cnt, i, 0, 0);
1630
1631 if (lock_stat && !lock_miss) {
1632 lock_miss = TRUE;
1633 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
1634 }
1635
1636 if (i != 0) {
1637 lck_rw_ilk_unlock(&lck->lck_rw);
1638 while (--i != 0 && lck->lck_rw.lck_rw_shared_cnt != 0)
1639 continue;
1640 lck_rw_ilk_lock(&lck->lck_rw);
1641 }
1642
1643 if (lck->lck_rw.lck_rw_shared_cnt != 0) {
1644 lck->lck_rw.lck_rw_waiting = TRUE;
1645 res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1646 if (res == THREAD_WAITING) {
1647 if (lock_stat && !lock_wait) {
1648 lock_wait = TRUE;
1649 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
1650 }
1651 lck_rw_ilk_unlock(&lck->lck_rw);
1652 res = thread_block(THREAD_CONTINUE_NULL);
1653 lck_rw_ilk_lock(&lck->lck_rw);
1654 }
1655 }
1656 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_END,
1657 (int)rlck, lck->lck_rw.lck_rw_shared_cnt, 0, 0, 0);
1658 }
1659
1660 lck->lck_rw_deb.pc_excl = __builtin_return_address(0);
1661 if (LcksOpts & enaLkExtStck)
1662 lck_rw_ext_backtrace(lck);
1663 lck->lck_rw_deb.thread = current_thread();
1664
1665 lck_rw_ilk_unlock(&lck->lck_rw);
1666
1667 return (FALSE);
1668 }
1669
1670 /*
1671 * Routine: lck_rw_lock_exclusive_to_shared_ext
1672 */
1673 void
1674 lck_rw_lock_exclusive_to_shared_ext(
1675 lck_rw_ext_t *lck,
1676 lck_rw_t *rlck)
1677 {
1678 boolean_t do_wakeup = FALSE;
1679
1680 lck_rw_check_type(lck, rlck);
1681
1682 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_START,
1683 (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, 0, 0);
1684
1685 lck_rw_ilk_lock(&lck->lck_rw);
1686
1687 lck->lck_rw.lck_rw_shared_cnt++;
1688 if (lck->lck_rw.lck_rw_want_upgrade)
1689 lck->lck_rw.lck_rw_want_upgrade = FALSE;
1690 else if (lck->lck_rw.lck_rw_want_excl)
1691 lck->lck_rw.lck_rw_want_excl = FALSE;
1692 else
1693 panic("rw lock (0x%08X) bad state (0x%08X) on attempt to release a shared or exlusive right\n",
1694 rlck, lck->lck_rw);
1695 if (lck->lck_rw_deb.thread == THREAD_NULL)
1696 panic("rw lock (0x%08X) not held\n",
1697 rlck);
1698 else if ( ((lck->lck_rw_attr & (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_THREAD)) == LCK_RW_ATTR_DEBUG)
1699 && (lck->lck_rw_deb.thread != current_thread()))
1700 panic("rw lock (0x%08X) unlocked by non-owner(0x%08X), current owner(0x%08X)\n",
1701 rlck, current_thread(), lck->lck_rw_deb.thread);
1702
1703 lck->lck_rw_deb.thread = THREAD_NULL;
1704
1705 if (lck->lck_rw.lck_rw_waiting) {
1706 lck->lck_rw.lck_rw_waiting = FALSE;
1707 do_wakeup = TRUE;
1708 }
1709
1710 lck_rw_ilk_unlock(&lck->lck_rw);
1711
1712 if (do_wakeup)
1713 thread_wakeup((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
1714
1715 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_END,
1716 (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, lck->lck_rw.lck_rw_shared_cnt, 0);
1717
1718 }
1719
1720
1721 /*
1722 * Routine: lck_rw_try_lock_exclusive_ext
1723 * Function:
1724 * Tries to get a write lock.
1725 *
1726 * Returns FALSE if the lock is not held on return.
1727 */
1728
1729 boolean_t
1730 lck_rw_try_lock_exclusive_ext(
1731 lck_rw_ext_t *lck,
1732 lck_rw_t *rlck)
1733 {
1734 boolean_t lock_stat;
1735
1736 lck_rw_check_type(lck, rlck);
1737
1738 lck_rw_ilk_lock(&lck->lck_rw);
1739
1740 lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;
1741
1742 if (lock_stat)
1743 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;
1744
1745 if (lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade || lck->lck_rw.lck_rw_shared_cnt) {
1746 /*
1747 * Can't get lock.
1748 */
1749 if (lock_stat) {
1750 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
1751 }
1752 lck_rw_ilk_unlock(&lck->lck_rw);
1753 return(FALSE);
1754 }
1755
1756 /*
1757 * Have lock.
1758 */
1759
1760 lck->lck_rw.lck_rw_want_excl = TRUE;
1761 lck->lck_rw_deb.pc_excl = __builtin_return_address(0);
1762 if (LcksOpts & enaLkExtStck)
1763 lck_rw_ext_backtrace(lck);
1764 lck->lck_rw_deb.thread = current_thread();
1765
1766 lck_rw_ilk_unlock(&lck->lck_rw);
1767
1768 return(TRUE);
1769 }
1770
1771 /*
1772 * Routine: lck_rw_try_lock_shared_ext
1773 * Function:
1774 * Tries to get a read lock.
1775 *
1776 * Returns FALSE if the lock is not held on return.
1777 */
1778
1779 boolean_t
1780 lck_rw_try_lock_shared_ext(
1781 lck_rw_ext_t *lck,
1782 lck_rw_t *rlck)
1783 {
1784 boolean_t lock_stat;
1785
1786 lck_rw_check_type(lck, rlck);
1787
1788 lck_rw_ilk_lock(&lck->lck_rw);
1789
1790 lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;
1791
1792 if (lock_stat)
1793 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;
1794
1795 if (lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade) {
1796 if (lock_stat) {
1797 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
1798 }
1799 lck_rw_ilk_unlock(&lck->lck_rw);
1800 return(FALSE);
1801 }
1802
1803 lck->lck_rw.lck_rw_shared_cnt++;
1804
1805 lck_rw_ilk_unlock(&lck->lck_rw);
1806
1807 return(TRUE);
1808 }
1809
1810 void
1811 lck_rw_check_type(
1812 lck_rw_ext_t *lck,
1813 lck_rw_t *rlck)
1814 {
1815 if (lck->lck_rw_deb.type != RW_TAG)
1816 panic("rw lock (0x%08X) not a rw lock type (0x%08X)\n",rlck, lck->lck_rw_deb.type);
1817 }
1818
1819 /*
1820 * The C portion of the mutex package. These routines are only invoked
1821 * if the optimized assembler routines can't do the work.
1822 */
1823
1824 /*
1825 * Forward definition
1826 */
1827
1828 void lck_mtx_ext_init(
1829 lck_mtx_ext_t *lck,
1830 lck_grp_t *grp,
1831 lck_attr_t *attr);
1832
1833 /*
1834 * Routine: mutex_alloc
1835 * Function:
1836 * Allocate a mutex for external users who cannot
1837 * hard-code the structure definition into their
1838 * objects.
1839 * For now just use kalloc, but a zone is probably
1840 * warranted.
1841 */
1842 mutex_t *
1843 mutex_alloc(
1844 unsigned short tag)
1845 {
1846 mutex_t *m;
1847
1848 if ((m = (mutex_t *)kalloc(sizeof(mutex_t))) != 0)
1849 mutex_init(m, tag);
1850 return(m);
1851 }
1852
1853 /*
1854 * Routine: mutex_free
1855 */
1856 void
1857 mutex_free(
1858 mutex_t *m)
1859 {
1860 kfree((void *)m, sizeof(mutex_t));
1861 }
1862
1863 /*
1864 * Routine: lck_mtx_alloc_init
1865 */
1866 lck_mtx_t *
1867 lck_mtx_alloc_init(
1868 lck_grp_t *grp,
1869 lck_attr_t *attr) {
1870 lck_mtx_t *lck;
1871
1872 if ((lck = (lck_mtx_t *)kalloc(sizeof(lck_mtx_t))) != 0)
1873 lck_mtx_init(lck, grp, attr);
1874
1875 return(lck);
1876 }
1877
1878 /*
1879 * Routine: lck_mtx_free
1880 */
1881 void
1882 lck_mtx_free(
1883 lck_mtx_t *lck,
1884 lck_grp_t *grp) {
1885 lck_mtx_destroy(lck, grp);
1886 kfree((void *)lck, sizeof(lck_mtx_t));
1887 }
1888
1889 /*
1890 * Routine: lck_mtx_init
1891 */
1892 void
1893 lck_mtx_init(
1894 lck_mtx_t *lck,
1895 lck_grp_t *grp,
1896 lck_attr_t *attr) {
1897 lck_mtx_ext_t *lck_ext;
1898 lck_attr_t *lck_attr;
1899
1900 if (attr != LCK_ATTR_NULL)
1901 lck_attr = attr;
1902 else
1903 lck_attr = &LockDefaultLckAttr;
1904
1905 if ((lck_attr->lck_attr_val) & LCK_ATTR_DEBUG) {
1906 if ((lck_ext = (lck_mtx_ext_t *)kalloc(sizeof(lck_mtx_ext_t))) != 0) {
1907 lck_mtx_ext_init(lck_ext, grp, lck_attr);
1908 lck->lck_mtx_tag = LCK_MTX_TAG_INDIRECT;
1909 lck->lck_mtx_ptr = lck_ext;
1910 }
1911 } else {
1912 lck->lck_mtx_data = 0;
1913 lck->lck_mtx_waiters = 0;
1914 lck->lck_mtx_pri = 0;
1915 }
1916 lck_grp_reference(grp);
1917 lck_grp_lckcnt_incr(grp, LCK_TYPE_MTX);
1918 }
1919
1920 /*
1921 * Routine: lck_mtx_ext_init
1922 */
1923 void
1924 lck_mtx_ext_init(
1925 lck_mtx_ext_t *lck,
1926 lck_grp_t *grp,
1927 lck_attr_t *attr) {
1928
1929 bzero((void *)lck, sizeof(lck_mtx_ext_t));
1930
1931 if ((attr->lck_attr_val) & LCK_ATTR_DEBUG) {
1932 lck->lck_mtx_deb.type = MUTEX_TAG;
1933 lck->lck_mtx_attr |= LCK_MTX_ATTR_DEBUG;
1934 }
1935
1936 lck->lck_mtx_grp = grp;
1937
1938 if (grp->lck_grp_attr & LCK_GRP_ATTR_STAT)
1939 lck->lck_mtx_attr |= LCK_MTX_ATTR_STAT;
1940 }
1941
1942 /*
1943 * Routine: lck_mtx_destroy
1944 */
1945 void
1946 lck_mtx_destroy(
1947 lck_mtx_t *lck,
1948 lck_grp_t *grp) {
1949 boolean_t lck_is_indirect;
1950
1951 if (lck->lck_mtx_tag == LCK_MTX_TAG_DESTROYED)
1952 return;
1953 lck_is_indirect = (lck->lck_mtx_tag == LCK_MTX_TAG_INDIRECT);
1954 lck->lck_mtx_tag = LCK_MTX_TAG_DESTROYED;
1955 if (lck_is_indirect)
1956 kfree((void *)lck->lck_mtx_ptr, sizeof(lck_mtx_ext_t));
1957
1958 lck_grp_lckcnt_decr(grp, LCK_TYPE_MTX);
1959 lck_grp_deallocate(grp);
1960 return;
1961 }
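/*
 * Illustrative sketch (not compiled here) of the lck_mtx lifecycle; the
 * lock/unlock fast paths (lck_mtx_lock/lck_mtx_unlock) live in the
 * optimized assembly mentioned above, and the names are placeholders:
 *
 *	lck_grp_t *my_grp = lck_grp_alloc_init("my-mutex", LCK_GRP_ATTR_NULL);
 *	lck_mtx_t *my_mtx = lck_mtx_alloc_init(my_grp, LCK_ATTR_NULL);
 *
 *	lck_mtx_lock(my_mtx);
 *	...	may block; critical section
 *	lck_mtx_unlock(my_mtx);
 *
 *	lck_mtx_free(my_mtx, my_grp);
 */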
1962
1963
1964 #if MACH_KDB
1965 /*
1966 * Routines to print out simple_locks and mutexes in a nicely-formatted
1967 * fashion.
1968 */
1969
1970 char *simple_lock_labels = "ENTRY ILK THREAD DURATION CALLER";
1971 char *mutex_labels = "ENTRY LOCKED WAITERS THREAD CALLER";
1972
1973 void db_print_simple_lock(
1974 simple_lock_t addr);
1975
1976 void db_print_mutex(
1977 mutex_t * addr);
1978
1979 void
1980 db_show_one_simple_lock (
1981 db_expr_t addr,
1982 boolean_t have_addr,
1983 db_expr_t count,
1984 char * modif)
1985 {
1986 simple_lock_t saddr = (simple_lock_t)addr;
1987
1988 if (saddr == (simple_lock_t)0 || !have_addr) {
1989 db_error ("No simple_lock\n");
1990 }
1991 #if USLOCK_DEBUG
1992 else if (saddr->lock_type != USLOCK_TAG)
1993 db_error ("Not a simple_lock\n");
1994 #endif /* USLOCK_DEBUG */
1995
1996 db_printf ("%s\n", simple_lock_labels);
1997 db_print_simple_lock (saddr);
1998 }
1999
2000 void
2001 db_print_simple_lock (
2002 simple_lock_t addr)
2003 {
2004
2005 db_printf ("%08x %3d", addr, *hw_lock_addr(addr->interlock));
2006 #if USLOCK_DEBUG
2007 db_printf (" %08x", addr->debug.lock_thread);
2008 db_printf (" %08x ", addr->debug.duration[1]);
2009 db_printsym ((int)addr->debug.lock_pc, DB_STGY_ANY);
2010 #endif /* USLOCK_DEBUG */
2011 db_printf ("\n");
2012 }
2013
2014 void
2015 db_show_one_mutex (
2016 db_expr_t addr,
2017 boolean_t have_addr,
2018 db_expr_t count,
2019 char * modif)
2020 {
2021 mutex_t * maddr = (mutex_t *)addr;
2022
2023 if (maddr == (mutex_t *)0 || !have_addr)
2024 db_error ("No mutex\n");
2025 #if MACH_LDEBUG
2026 else if (maddr->lck_mtx_deb.type != MUTEX_TAG)
2027 db_error ("Not a mutex\n");
2028 #endif /* MACH_LDEBUG */
2029
2030 db_printf ("%s\n", mutex_labels);
2031 db_print_mutex (maddr);
2032 }
2033
2034 void
2035 db_print_mutex (
2036 mutex_t * addr)
2037 {
2038 db_printf ("%08x %6d %7d",
2039 addr, *addr, addr->lck_mtx.lck_mtx_waiters);
2040 #if MACH_LDEBUG
2041 db_printf (" %08x ", addr->lck_mtx_deb.thread);
2042 db_printsym (addr->lck_mtx_deb.stack[0], DB_STGY_ANY);
2043 #endif /* MACH_LDEBUG */
2044 db_printf ("\n");
2045 }
2046
2047 void
2048 db_show_one_lock(
2049 lock_t *lock)
2050 {
2051 db_printf("shared_count = 0x%x, %swant_upgrade, %swant_exclusive, ",
2052 lock->lck_rw.lck_rw_shared_cnt,
2053 lock->lck_rw.lck_rw_want_upgrade ? "" : "!",
2054 lock->lck_rw.lck_rw_want_excl ? "" : "!");
2055 db_printf("%swaiting\n",
2056 lock->lck_rw.lck_rw_waiting ? "" : "!");
2057 db_printf("%sInterlock\n",
2058 lock->lck_rw.lck_rw_interlock ? "" : "!");
2059 }
2060
2061 #endif /* MACH_KDB */
2062