[apple/xnu.git] osfmk/ppc/locks_ppc.c (xnu-792.10.96)
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * @OSF_COPYRIGHT@
24 */
25/*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50/*
51 * File: kern/lock.c
52 * Author: Avadis Tevanian, Jr., Michael Wayne Young
53 * Date: 1985
54 *
55 * Locking primitives implementation
56 */
57
58#include <mach_kdb.h>
59#include <mach_ldebug.h>
60
61#include <kern/kalloc.h>
62#include <kern/lock.h>
63#include <kern/locks.h>
64#include <kern/misc_protos.h>
65#include <kern/thread.h>
66#include <kern/processor.h>
67#include <kern/sched_prim.h>
68#include <kern/xpr.h>
69#include <kern/debug.h>
70#include <string.h>
71
72#if MACH_KDB
73#include <ddb/db_command.h>
74#include <ddb/db_output.h>
75#include <ddb/db_sym.h>
76#include <ddb/db_print.h>
77#endif /* MACH_KDB */
78
79#ifdef __ppc__
80#include <ppc/Firmware.h>
81#endif
82
83#include <sys/kdebug.h>
84
85#define LCK_RW_LCK_EXCLUSIVE_CODE 0x100
86#define LCK_RW_LCK_EXCLUSIVE1_CODE 0x101
87#define LCK_RW_LCK_SHARED_CODE 0x102
88#define LCK_RW_LCK_SH_TO_EX_CODE 0x103
89#define LCK_RW_LCK_SH_TO_EX1_CODE 0x104
90#define LCK_RW_LCK_EX_TO_SH_CODE 0x105
91
92
93#define ANY_LOCK_DEBUG (USLOCK_DEBUG || LOCK_DEBUG || MUTEX_DEBUG)
94
95unsigned int lock_wait_time[2] = { (unsigned int)-1, 0 } ;
96
97/* Forwards */
98
99
100#if USLOCK_DEBUG
101/*
102 * Perform simple lock checks.
103 */
104int uslock_check = 1;
105int max_lock_loops = 100000000;
106decl_simple_lock_data(extern , printf_lock)
107decl_simple_lock_data(extern , panic_lock)
108#if MACH_KDB
109decl_simple_lock_data(extern , kdb_lock)
110#endif /* MACH_KDB */
111#endif /* USLOCK_DEBUG */
112
113
114/*
115 * We often want to know the addresses of the callers
116 * of the various lock routines. However, this information
117 * is only used for debugging and statistics.
118 */
119typedef void *pc_t;
120#define INVALID_PC ((void *) VM_MAX_KERNEL_ADDRESS)
121#define INVALID_THREAD ((void *) VM_MAX_KERNEL_ADDRESS)
122#if ANY_LOCK_DEBUG
123#define OBTAIN_PC(pc,l) ((pc) = (void *) GET_RETURN_PC(&(l)))
124#else /* ANY_LOCK_DEBUG */
125#ifdef lint
126/*
127 * Eliminate lint complaints about unused local pc variables.
128 */
129#define OBTAIN_PC(pc,l) ++pc
130#else /* lint */
131#define OBTAIN_PC(pc,l)
132#endif /* lint */
133#endif /* ANY_LOCK_DEBUG */
134
135
136/*
137 * Portable lock package implementation of usimple_locks.
138 */
139
140#if USLOCK_DEBUG
141#define USLDBG(stmt) stmt
142void usld_lock_init(usimple_lock_t, unsigned short);
143void usld_lock_pre(usimple_lock_t, pc_t);
144void usld_lock_post(usimple_lock_t, pc_t);
145void usld_unlock(usimple_lock_t, pc_t);
146void usld_lock_try_pre(usimple_lock_t, pc_t);
147void usld_lock_try_post(usimple_lock_t, pc_t);
148int usld_lock_common_checks(usimple_lock_t, char *);
149#else /* USLOCK_DEBUG */
150#define USLDBG(stmt)
151#endif /* USLOCK_DEBUG */
152
153/*
154 * Routine: lck_spin_alloc_init
155 */
156lck_spin_t *
157lck_spin_alloc_init(
158 lck_grp_t *grp,
159 lck_attr_t *attr) {
160 lck_spin_t *lck;
161
162 if ((lck = (lck_spin_t *)kalloc(sizeof(lck_spin_t))) != 0)
163 lck_spin_init(lck, grp, attr);
164
165 return(lck);
166}
167
168/*
169 * Routine: lck_spin_free
170 */
171void
172lck_spin_free(
173 lck_spin_t *lck,
174 lck_grp_t *grp) {
175 lck_spin_destroy(lck, grp);
176 kfree((void *)lck, sizeof(lck_spin_t));
177}
178
179/*
180 * Routine: lck_spin_init
181 */
182void
183lck_spin_init(
184 lck_spin_t *lck,
185 lck_grp_t *grp,
186 __unused lck_attr_t *attr) {
187
188 lck->interlock = 0;
189 lck_grp_reference(grp);
190 lck_grp_lckcnt_incr(grp, LCK_TYPE_SPIN);
191}
192
193/*
194 * Routine: lck_spin_destroy
195 */
196void
197lck_spin_destroy(
198 lck_spin_t *lck,
199 lck_grp_t *grp) {
200 if (lck->interlock == LCK_SPIN_TAG_DESTROYED)
201 return;
202 lck->interlock = LCK_SPIN_TAG_DESTROYED;
203 lck_grp_lckcnt_decr(grp, LCK_TYPE_SPIN);
204 lck_grp_deallocate(grp);
205}
206
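/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * how a kernel client might use the lck_spin interface above.  It assumes the
 * standard group/attribute helpers from <kern/locks.h> (lck_grp_alloc_init,
 * lck_grp_free, LCK_GRP_ATTR_NULL, LCK_ATTR_NULL) and the machine-level
 * lck_spin_lock()/lck_spin_unlock() entry points; guarded out so it is never
 * compiled.
 */
#if 0
static void
lck_spin_usage_sketch(void)
{
	lck_grp_t	*grp;
	lck_spin_t	*lock;

	grp  = lck_grp_alloc_init("example", LCK_GRP_ATTR_NULL);	/* hypothetical group name */
	lock = lck_spin_alloc_init(grp, LCK_ATTR_NULL);			/* kalloc + lck_spin_init */

	lck_spin_lock(lock);		/* spins; returns with preemption disabled */
	/* ... touch the data this lock protects ... */
	lck_spin_unlock(lock);

	lck_spin_free(lock, grp);	/* lck_spin_destroy + kfree */
	lck_grp_free(grp);
}
#endif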
207/*
208 * Initialize a usimple_lock.
209 *
210 * No change in preemption state.
211 */
212void
213usimple_lock_init(
214 usimple_lock_t l,
215 unsigned short tag)
216{
217#ifndef MACHINE_SIMPLE_LOCK
218 USLDBG(usld_lock_init(l, tag));
219 hw_lock_init(&l->interlock);
220#else
221 simple_lock_init((simple_lock_t)l,tag);
222#endif
223}
224
225
226/*
227 * Acquire a usimple_lock.
228 *
229 * Returns with preemption disabled. Note
230 * that the hw_lock routines are responsible for
231 * maintaining preemption state.
232 */
233void
234usimple_lock(
235 usimple_lock_t l)
236{
237#ifndef MACHINE_SIMPLE_LOCK
238 int i;
239 pc_t pc;
240#if USLOCK_DEBUG
241 int count = 0;
242#endif /* USLOCK_DEBUG */
243
244 OBTAIN_PC(pc, l);
245 USLDBG(usld_lock_pre(l, pc));
246
247 if(!hw_lock_to(&l->interlock, LockTimeOut)) /* Try to get the lock with a timeout */
248 panic("simple lock deadlock detection - l=0x%08X, cpu=%d, ret=0x%08X", l, cpu_number(), pc);
249
250 USLDBG(usld_lock_post(l, pc));
251#else
252 simple_lock((simple_lock_t)l);
253#endif
254}
255
256
257/*
258 * Release a usimple_lock.
259 *
260 * Returns with preemption enabled. Note
261 * that the hw_lock routines are responsible for
262 * maintaining preemption state.
263 */
264void
265usimple_unlock(
266 usimple_lock_t l)
267{
268#ifndef MACHINE_SIMPLE_LOCK
269 pc_t pc;
270
271 OBTAIN_PC(pc, l);
272 USLDBG(usld_unlock(l, pc));
273 sync();
274 hw_lock_unlock(&l->interlock);
275#else
276 simple_unlock_rwmb((simple_lock_t)l);
277#endif
278}
279
280
281/*
282 * Conditionally acquire a usimple_lock.
283 *
284 * On success, returns with preemption disabled.
285 * On failure, returns with preemption in the same state
286 * as when first invoked. Note that the hw_lock routines
287 * are responsible for maintaining preemption state.
288 *
289 * XXX No stats are gathered on a miss; I preserved this
290 * behavior from the original assembly-language code, but
291 * doesn't it make sense to log misses? XXX
292 */
293unsigned int
294usimple_lock_try(
295 usimple_lock_t l)
296{
297#ifndef MACHINE_SIMPLE_LOCK
298 pc_t pc;
299 unsigned int success;
300
301 OBTAIN_PC(pc, l);
302 USLDBG(usld_lock_try_pre(l, pc));
303 if (success = hw_lock_try(&l->interlock)) {
304 USLDBG(usld_lock_try_post(l, pc));
305 }
306 return success;
307#else
308 return(simple_lock_try((simple_lock_t)l));
309#endif
310}
311
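/*
 * Illustrative sketch (editor's addition): the usimple_lock calling pattern
 * implemented above.  It assumes usimple_lock_data_t is the backing storage
 * type declared by <kern/simple_lock.h>; a tag of 0 is passed purely for
 * illustration.  Guarded out so it is never compiled.
 */
#if 0
static usimple_lock_data_t example_uslock;

static void
usimple_lock_usage_sketch(void)
{
	usimple_lock_init(&example_uslock, 0);

	usimple_lock(&example_uslock);		/* panics if LockTimeOut expires */
	/* ... critical section, preemption disabled ... */
	usimple_unlock(&example_uslock);

	if (usimple_lock_try(&example_uslock)) {	/* non-blocking attempt */
		/* ... got it ... */
		usimple_unlock(&example_uslock);
	}
}
#endif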
312#if USLOCK_DEBUG
313/*
314 * States of a usimple_lock. The default when initializing
315 * a usimple_lock is setting it up for debug checking.
316 */
317#define USLOCK_CHECKED 0x0001 /* lock is being checked */
318#define USLOCK_TAKEN 0x0002 /* lock has been taken */
319#define USLOCK_INIT 0xBAA0 /* lock has been initialized */
320#define USLOCK_INITIALIZED (USLOCK_INIT|USLOCK_CHECKED)
321#define USLOCK_CHECKING(l) (uslock_check && \
322 ((l)->debug.state & USLOCK_CHECKED))
323
324/*
325 * Trace activities of a particularly interesting lock.
326 */
327void usl_trace(usimple_lock_t, int, pc_t, const char *);
328
329
330/*
331 * Initialize the debugging information contained
332 * in a usimple_lock.
333 */
334void
335usld_lock_init(
336 usimple_lock_t l,
337 unsigned short tag)
338{
339 if (l == USIMPLE_LOCK_NULL)
340 panic("lock initialization: null lock pointer");
341 l->lock_type = USLOCK_TAG;
342 l->debug.state = uslock_check ? USLOCK_INITIALIZED : 0;
343 l->debug.lock_cpu = l->debug.unlock_cpu = 0;
344 l->debug.lock_pc = l->debug.unlock_pc = INVALID_PC;
345 l->debug.lock_thread = l->debug.unlock_thread = INVALID_THREAD;
346 l->debug.duration[0] = l->debug.duration[1] = 0;
347 l->debug.unlock_cpu = l->debug.unlock_cpu = 0;
348 l->debug.unlock_pc = l->debug.unlock_pc = INVALID_PC;
349 l->debug.unlock_thread = l->debug.unlock_thread = INVALID_THREAD;
350}
351
352
353/*
354 * These checks apply to all usimple_locks, not just
355 * those with USLOCK_CHECKED turned on.
356 */
357int
358usld_lock_common_checks(
359 usimple_lock_t l,
360 char *caller)
361{
362 if (l == USIMPLE_LOCK_NULL)
363 panic("%s: null lock pointer", caller);
364 if (l->lock_type != USLOCK_TAG)
365 panic("%s: 0x%x is not a usimple lock", caller, (integer_t) l);
366 if (!(l->debug.state & USLOCK_INIT))
367 panic("%s: 0x%x is not an initialized lock",
368 caller, (integer_t) l);
369 return USLOCK_CHECKING(l);
370}
371
372
373/*
374 * Debug checks on a usimple_lock just before attempting
375 * to acquire it.
376 */
377/* ARGSUSED */
378void
379usld_lock_pre(
380 usimple_lock_t l,
381 pc_t pc)
382{
383 char *caller = "usimple_lock";
384
385
386 if (!usld_lock_common_checks(l, caller))
387 return;
388
389/*
390 * Note that we have a weird case where we are getting a lock when we are
391 * in the process of putting the system to sleep. We are running with no
392 * current threads, therefore we can't tell if we are trying to retake a lock
393 * we have or someone on the other processor has it. Therefore we just
394 * ignore this test if the locking thread is 0.
395 */
396
397 if ((l->debug.state & USLOCK_TAKEN) && l->debug.lock_thread &&
398 l->debug.lock_thread == (void *) current_thread()) {
399 printf("%s: lock 0x%x already locked (at 0x%x) by",
400 caller, (integer_t) l, l->debug.lock_pc);
401 printf(" current thread 0x%x (new attempt at pc 0x%x)\n",
402 l->debug.lock_thread, pc);
403 panic(caller);
404 }
405 mp_disable_preemption();
406 usl_trace(l, cpu_number(), pc, caller);
407 mp_enable_preemption();
408}
409
410
411/*
412 * Debug checks on a usimple_lock just after acquiring it.
413 *
414 * Pre-emption has been disabled at this point,
415 * so we are safe in using cpu_number.
416 */
417void
418usld_lock_post(
419 usimple_lock_t l,
420 pc_t pc)
421{
422 register int mycpu;
423 char *caller = "successful usimple_lock";
424
425
426 if (!usld_lock_common_checks(l, caller))
427 return;
428
429 if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
430 panic("%s: lock 0x%x became uninitialized",
431 caller, (integer_t) l);
432 if ((l->debug.state & USLOCK_TAKEN))
433 panic("%s: lock 0x%x became TAKEN by someone else",
434 caller, (integer_t) l);
435
436 mycpu = cpu_number();
437 l->debug.lock_thread = (void *)current_thread();
438 l->debug.state |= USLOCK_TAKEN;
439 l->debug.lock_pc = pc;
440 l->debug.lock_cpu = mycpu;
441
442 usl_trace(l, mycpu, pc, caller);
443}
444
445
446/*
447 * Debug checks on a usimple_lock just before
448 * releasing it. Note that the caller has not
449 * yet released the hardware lock.
450 *
451 * Preemption is still disabled, so there's
452 * no problem using cpu_number.
453 */
454void
455usld_unlock(
456 usimple_lock_t l,
457 pc_t pc)
458{
459 register int mycpu;
460 char *caller = "usimple_unlock";
461
462
463 if (!usld_lock_common_checks(l, caller))
464 return;
465
466 mycpu = cpu_number();
467
468 if (!(l->debug.state & USLOCK_TAKEN))
469 panic("%s: lock 0x%x hasn't been taken",
470 caller, (integer_t) l);
471 if (l->debug.lock_thread != (void *) current_thread())
472 panic("%s: unlocking lock 0x%x, owned by thread 0x%x",
473 caller, (integer_t) l, l->debug.lock_thread);
474 if (l->debug.lock_cpu != mycpu) {
475 printf("%s: unlocking lock 0x%x on cpu 0x%x",
476 caller, (integer_t) l, mycpu);
477 printf(" (acquired on cpu 0x%x)\n", l->debug.lock_cpu);
478 panic(caller);
479 }
480 usl_trace(l, mycpu, pc, caller);
481
482 l->debug.unlock_thread = l->debug.lock_thread;
483 l->debug.lock_thread = INVALID_PC;
484 l->debug.state &= ~USLOCK_TAKEN;
485 l->debug.unlock_pc = pc;
486 l->debug.unlock_cpu = mycpu;
487}
488
489
490/*
491 * Debug checks on a usimple_lock just before
492 * attempting to acquire it.
493 *
494 * Preemption isn't guaranteed to be disabled.
495 */
496void
497usld_lock_try_pre(
498 usimple_lock_t l,
499 pc_t pc)
500{
501 char *caller = "usimple_lock_try";
502
503 if (!usld_lock_common_checks(l, caller))
504 return;
505 mp_disable_preemption();
506 usl_trace(l, cpu_number(), pc, caller);
507 mp_enable_preemption();
508}
509
510
511/*
512 * Debug checks on a usimple_lock just after
513 * successfully attempting to acquire it.
514 *
515 * Preemption has been disabled by the
516 * lock acquisition attempt, so it's safe
517 * to use cpu_number.
518 */
519void
520usld_lock_try_post(
521 usimple_lock_t l,
522 pc_t pc)
523{
524 register int mycpu;
525 char *caller = "successful usimple_lock_try";
526
527 if (!usld_lock_common_checks(l, caller))
528 return;
529
530 if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
531 panic("%s: lock 0x%x became uninitialized",
532 caller, (integer_t) l);
533 if ((l->debug.state & USLOCK_TAKEN))
534 panic("%s: lock 0x%x became TAKEN by someone else",
535 caller, (integer_t) l);
536
537 mycpu = cpu_number();
538 l->debug.lock_thread = (void *) current_thread();
539 l->debug.state |= USLOCK_TAKEN;
540 l->debug.lock_pc = pc;
541 l->debug.lock_cpu = mycpu;
542
543 usl_trace(l, mycpu, pc, caller);
544}
545
546
547/*
548 * For very special cases, set traced_lock to point to a
549 * specific lock of interest. The result is a series of
550 * XPRs showing lock operations on that lock. The lock_seq
551 * value is used to show the order of those operations.
552 */
553usimple_lock_t traced_lock;
554unsigned int lock_seq;
555
556void
557usl_trace(
558 usimple_lock_t l,
559 int mycpu,
560 pc_t pc,
561 const char * op_name)
562{
563 if (traced_lock == l) {
564 XPR(XPR_SLOCK,
565 "seq %d, cpu %d, %s @ %x\n",
566 (integer_t) lock_seq, (integer_t) mycpu,
567 (integer_t) op_name, (integer_t) pc, 0);
568 lock_seq++;
569 }
570}
571
572
573#endif /* USLOCK_DEBUG */
574
575/*
576 * The C portion of the shared/exclusive locks package.
577 */
578
579/*
580 * Forward definition
581 */
582
583void lck_rw_lock_exclusive_gen(
584 lck_rw_t *lck);
585
586lck_rw_type_t lck_rw_done_gen(
587 lck_rw_t *lck);
588
589void
590lck_rw_lock_shared_gen(
591 lck_rw_t *lck);
592
593boolean_t
594lck_rw_lock_shared_to_exclusive_gen(
595 lck_rw_t *lck);
596
597void
598lck_rw_lock_exclusive_to_shared_gen(
599 lck_rw_t *lck);
600
601boolean_t
602lck_rw_try_lock_exclusive_gen(
603 lck_rw_t *lck);
604
605boolean_t
606lck_rw_try_lock_shared_gen(
607 lck_rw_t *lck);
608
609void lck_rw_ext_init(
610 lck_rw_ext_t *lck,
611 lck_grp_t *grp,
612 lck_attr_t *attr);
613
614void lck_rw_ext_backtrace(
615 lck_rw_ext_t *lck);
616
617void lck_rw_lock_exclusive_ext(
618 lck_rw_ext_t *lck,
619 lck_rw_t *rlck);
620
621lck_rw_type_t lck_rw_done_ext(
622 lck_rw_ext_t *lck,
623 lck_rw_t *rlck);
624
625void
626lck_rw_lock_shared_ext(
627 lck_rw_ext_t *lck,
628 lck_rw_t *rlck);
629
630boolean_t
631lck_rw_lock_shared_to_exclusive_ext(
632 lck_rw_ext_t *lck,
633 lck_rw_t *rlck);
634
635void
636lck_rw_lock_exclusive_to_shared_ext(
637 lck_rw_ext_t *lck,
638 lck_rw_t *rlck);
639
640boolean_t
641lck_rw_try_lock_exclusive_ext(
642 lck_rw_ext_t *lck,
643 lck_rw_t *rlck);
644
645boolean_t
646lck_rw_try_lock_shared_ext(
647 lck_rw_ext_t *lck,
648 lck_rw_t *rlck);
649
650void
651lck_rw_ilk_lock(
652 lck_rw_t *lck);
653
654void
655lck_rw_ilk_unlock(
656 lck_rw_t *lck);
657
658void
659lck_rw_check_type(
660 lck_rw_ext_t *lck,
661 lck_rw_t *rlck);
662
663/*
664 * Routine: lock_alloc
665 * Function:
666 * Allocate a lock for external users who cannot
667 * hard-code the structure definition into their
668 * objects.
669 * For now just use kalloc, but a zone is probably
670 * warranted.
671 */
672lock_t *
673lock_alloc(
674 boolean_t can_sleep,
675 __unused unsigned short tag,
676 __unused unsigned short tag1)
677{
678 lock_t *lck;
679
680 if ((lck = (lock_t *)kalloc(sizeof(lock_t))) != 0)
681 lock_init(lck, can_sleep, tag, tag1);
682 return(lck);
683}
684
685/*
686 * Routine: lock_init
687 * Function:
688 * Initialize a lock; required before use.
689 * Note that clients declare the "struct lock"
690 * variables and then initialize them, rather
691 * than getting a new one from this module.
692 */
693void
694lock_init(
695 lock_t *lck,
696 boolean_t can_sleep,
697 __unused unsigned short tag,
698 __unused unsigned short tag1)
699{
700 if (!can_sleep)
701 panic("lock_init: sleep mode must be set to TRUE\n");
702
703 (void) memset((void *) lck, 0, sizeof(lock_t));
704#if MACH_LDEBUG
705 lck->lck_rw_deb.type = RW_TAG;
706 lck->lck_rw_attr |= (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_THREAD|LCK_RW_ATTR_DIS_MYLOCK);
707 lck->lck_rw.lck_rw_priv_excl = TRUE;
708#else
709 lck->lck_rw_priv_excl = TRUE;
710#endif
711
712}
713
714
715/*
716 * Routine: lock_free
717 * Function:
718 * Free a lock allocated for external users.
719 * For now just use kfree, but a zone is probably
720 * warranted.
721 */
722void
723lock_free(
724 lock_t *lck)
725{
726 kfree((void *)lck, sizeof(lock_t));
727}
728
729#if MACH_LDEBUG
730void
731lock_write(
732 lock_t *lck)
733{
734 lck_rw_lock_exclusive_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
735}
736
737void
738lock_done(
739 lock_t *lck)
740{
741 (void)lck_rw_done_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
742}
743
744void
745lock_read(
746 lock_t *lck)
747{
748 lck_rw_lock_shared_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
749}
750
751boolean_t
752lock_read_to_write(
753 lock_t *lck)
754{
755 return(lck_rw_lock_shared_to_exclusive_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck));
756}
757
758void
759lock_write_to_read(
760 register lock_t *lck)
761{
762 lck_rw_lock_exclusive_to_shared_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
763}
764#endif
765
766/*
767 * Routine: lck_rw_alloc_init
768 */
769lck_rw_t *
770lck_rw_alloc_init(
771 lck_grp_t *grp,
772 lck_attr_t *attr) {
773 lck_rw_t *lck;
774
775 if ((lck = (lck_rw_t *)kalloc(sizeof(lck_rw_t))) != 0)
776 lck_rw_init(lck, grp, attr);
777
778 return(lck);
779}
780
781/*
782 * Routine: lck_rw_free
783 */
784void
785lck_rw_free(
786 lck_rw_t *lck,
787 lck_grp_t *grp) {
788 lck_rw_destroy(lck, grp);
789 kfree((void *)lck, sizeof(lck_rw_t));
790}
791
792/*
793 * Routine: lck_rw_init
794 */
795void
796lck_rw_init(
797 lck_rw_t *lck,
798 lck_grp_t *grp,
799 lck_attr_t *attr) {
800 lck_rw_ext_t *lck_ext;
801 lck_attr_t *lck_attr;
802
803 if (attr != LCK_ATTR_NULL)
804 lck_attr = attr;
805 else
806 lck_attr = &LockDefaultLckAttr;
807
808 if ((lck_attr->lck_attr_val) & LCK_ATTR_DEBUG) {
809 if ((lck_ext = (lck_rw_ext_t *)kalloc(sizeof(lck_rw_ext_t))) != 0) {
810 lck_rw_ext_init(lck_ext, grp, lck_attr);
811 lck->lck_rw_tag = LCK_RW_TAG_INDIRECT;
812 lck->lck_rw_ptr = lck_ext;
813 }
814 } else {
815 (void) memset((void *) lck, 0, sizeof(lck_rw_t));
816 if ((lck_attr->lck_attr_val) & LCK_ATTR_RW_SHARED_PRIORITY)
817 lck->lck_rw_priv_excl = FALSE;
818 else
819 lck->lck_rw_priv_excl = TRUE;
820 }
821
822 lck_grp_reference(grp);
823 lck_grp_lckcnt_incr(grp, LCK_TYPE_RW);
824}
825
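/*
 * Illustrative sketch (editor's addition): how the attribute flags tested in
 * lck_rw_init() are typically set by a caller.  It assumes the generic
 * attribute helpers lck_attr_alloc_init(), lck_attr_setdebug() and
 * lck_attr_free() from <kern/locks.h>; with LCK_ATTR_DEBUG set, lck_rw_init()
 * allocates the indirect lck_rw_ext_t form shown above.  Guarded out so it is
 * never compiled.
 */
#if 0
static void
lck_rw_init_sketch(lck_grp_t *grp)
{
	lck_attr_t	*attr;
	lck_rw_t	*rwl;

	attr = lck_attr_alloc_init();
	lck_attr_setdebug(attr);		/* request the LCK_ATTR_DEBUG (indirect) form */

	rwl = lck_rw_alloc_init(grp, attr);	/* lck_rw_tag == LCK_RW_TAG_INDIRECT */

	lck_rw_free(rwl, grp);
	lck_attr_free(attr);
}
#endif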
826/*
827 * Routine: lck_rw_ext_init
828 */
829void
830lck_rw_ext_init(
831 lck_rw_ext_t *lck,
832 lck_grp_t *grp,
833 lck_attr_t *attr) {
834
835 bzero((void *)lck, sizeof(lck_rw_ext_t));
836 if ((attr->lck_attr_val) & LCK_ATTR_RW_SHARED_PRIORITY)
837 lck->lck_rw.lck_rw_priv_excl = FALSE;
838 else
839 lck->lck_rw.lck_rw_priv_excl = TRUE;
840
841 if ((attr->lck_attr_val) & LCK_ATTR_DEBUG) {
842 lck->lck_rw_deb.type = RW_TAG;
843 lck->lck_rw_attr |= LCK_RW_ATTR_DEBUG;
844 }
845
846 lck->lck_rw_grp = grp;
847
848 if (grp->lck_grp_attr & LCK_GRP_ATTR_STAT)
849 lck->lck_rw_attr |= LCK_RW_ATTR_STAT;
850}
851
852/*
853 * Routine: lck_rw_destroy
854 */
855void
856lck_rw_destroy(
857 lck_rw_t *lck,
858 lck_grp_t *grp) {
859 boolean_t lck_is_indirect;
860
861 if (lck->lck_rw_tag == LCK_RW_TAG_DESTROYED)
862 return;
863 lck_is_indirect = (lck->lck_rw_tag == LCK_RW_TAG_INDIRECT);
864 lck->lck_rw_tag = LCK_RW_TAG_DESTROYED;
865 if (lck_is_indirect)
866 kfree((void *)lck->lck_rw_ptr, sizeof(lck_rw_ext_t));
867
868 lck_grp_lckcnt_decr(grp, LCK_TYPE_RW);
869 lck_grp_deallocate(grp);
870 return;
871}
872
873/*
874 * Routine: lck_rw_lock
875 */
876void
877lck_rw_lock(
878 lck_rw_t *lck,
879 lck_rw_type_t lck_rw_type)
880{
881 if (lck_rw_type == LCK_RW_TYPE_SHARED)
882 lck_rw_lock_shared(lck);
883 else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
884 lck_rw_lock_exclusive(lck);
885 else
886 panic("lck_rw_lock(): Invalid RW lock type: %d\n", lck_rw_type);
887}
888
889
890/*
891 * Routine: lck_rw_unlock
892 */
893void
894lck_rw_unlock(
895 lck_rw_t *lck,
896 lck_rw_type_t lck_rw_type)
897{
898 if (lck_rw_type == LCK_RW_TYPE_SHARED)
899 lck_rw_unlock_shared(lck);
900 else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
901 lck_rw_unlock_exclusive(lck);
902 else
903 panic("lck_rw_unlock(): Invalid RW lock type: %d\n", lck_rw_type);
904}
905
906
907/*
908 * Routine: lck_rw_unlock_shared
909 */
910void
911lck_rw_unlock_shared(
912 lck_rw_t *lck)
913{
914 lck_rw_type_t ret;
915
916 ret = lck_rw_done(lck);
917
918 if (ret != LCK_RW_TYPE_SHARED)
919 panic("lck_rw_unlock_shared(): lock held in mode: %d\n", ret);
920}
921
922
923/*
924 * Routine: lck_rw_unlock_exclusive
925 */
926void
927lck_rw_unlock_exclusive(
928 lck_rw_t *lck)
929{
930 lck_rw_type_t ret;
931
932 ret = lck_rw_done(lck);
933
934 if (ret != LCK_RW_TYPE_EXCLUSIVE)
935 panic("lck_rw_unlock_exclusive(): lock held in mode: %d\n", ret);
936}
937
938
939/*
940 * Routine: lck_rw_try_lock
941 */
942boolean_t
943lck_rw_try_lock(
944 lck_rw_t *lck,
945 lck_rw_type_t lck_rw_type)
946{
947 if (lck_rw_type == LCK_RW_TYPE_SHARED)
948 return(lck_rw_try_lock_shared(lck));
949 else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
950 return(lck_rw_try_lock_exclusive(lck));
951 else
952 panic("lck_rw_try_lock(): Invalid rw lock type: %x\n", lck_rw_type);
953 return(FALSE);
954}
955
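/*
 * Illustrative sketch (editor's addition): typical shared/exclusive use of
 * the lck_rw interface whose C slow paths follow.  lck_rw_lock_shared(),
 * lck_rw_lock_exclusive() and lck_rw_done() are assumed to be the fast-path
 * entry points (implemented in assembly on this architecture); the *_gen
 * routines below are only reached when those cannot complete the operation.
 * Guarded out so it is never compiled.
 */
#if 0
static void
lck_rw_usage_sketch(lck_grp_t *grp)
{
	lck_rw_t	*rwl = lck_rw_alloc_init(grp, LCK_ATTR_NULL);

	lck_rw_lock(rwl, LCK_RW_TYPE_SHARED);		/* many readers may hold this at once */
	/* ... read the protected data ... */
	lck_rw_unlock(rwl, LCK_RW_TYPE_SHARED);		/* panics if not held shared */

	lck_rw_lock_exclusive(rwl);			/* single writer */
	/* ... modify the protected data ... */
	(void) lck_rw_done(rwl);			/* returns the mode that was dropped */

	lck_rw_free(rwl, grp);
}
#endif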
956
957
958/*
959 * Routine: lck_rw_lock_exclusive_gen
960 */
961void
962lck_rw_lock_exclusive_gen(
963 lck_rw_t *lck)
964{
965 int i;
966 boolean_t lock_miss = FALSE;
967 wait_result_t res;
968
969 lck_rw_ilk_lock(lck);
970
971 /*
972 * Try to acquire the lck_rw_want_excl bit.
973 */
974 while (lck->lck_rw_want_excl) {
975 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_START, (int)lck, 0, 0, 0, 0);
976
977 if (!lock_miss) {
978 lock_miss = TRUE;
979 }
980
981 i = lock_wait_time[1];
982 if (i != 0) {
983 lck_rw_ilk_unlock(lck);
984 while (--i != 0 && lck->lck_rw_want_excl)
985 continue;
986 lck_rw_ilk_lock(lck);
987 }
988
989 if (lck->lck_rw_want_excl) {
990 lck->lck_rw_waiting = TRUE;
991 res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
992 if (res == THREAD_WAITING) {
993 lck_rw_ilk_unlock(lck);
994 res = thread_block(THREAD_CONTINUE_NULL);
995 lck_rw_ilk_lock(lck);
996 }
997 }
998 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_END, (int)lck, res, 0, 0, 0);
999 }
1000 lck->lck_rw_want_excl = TRUE;
1001
1002 /* Wait for readers (and upgrades) to finish */
1003
1004 while ((lck->lck_rw_shared_cnt != 0) || lck->lck_rw_want_upgrade) {
1005 if (!lock_miss) {
1006 lock_miss = TRUE;
1007 }
1008
1009 i = lock_wait_time[1];
1010
1011 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_START,
1012 (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, i, 0);
1013
1014 if (i != 0) {
1015 lck_rw_ilk_unlock(lck);
1016 while (--i != 0 && (lck->lck_rw_shared_cnt != 0 ||
1017 lck->lck_rw_want_upgrade))
1018 continue;
1019 lck_rw_ilk_lock(lck);
1020 }
1021
1022 if (lck->lck_rw_shared_cnt != 0 || lck->lck_rw_want_upgrade) {
1023 lck->lck_rw_waiting = TRUE;
1024 res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1025 if (res == THREAD_WAITING) {
1026 lck_rw_ilk_unlock(lck);
1027 res = thread_block(THREAD_CONTINUE_NULL);
1028 lck_rw_ilk_lock(lck);
1029 }
1030 }
1031 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_END,
1032 (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, res, 0);
1033 }
1034
1035 lck_rw_ilk_unlock(lck);
1036}
1037
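/*
 * Editor's note: the loop above is the standard Mach wait idiom -- mark the
 * lock as having waiters, assert_wait() on an event derived from the lock
 * address, drop the interlock, and thread_block(); lck_rw_done_gen() clears
 * lck_rw_waiting and thread_wakeup()s the same event.  A minimal sketch of
 * that idiom with a hypothetical condition (not part of the original source),
 * guarded out so it is never compiled:
 */
#if 0
static void
wait_idiom_sketch(lck_rw_t *lck, volatile boolean_t *condition)
{
	wait_result_t	res;

	lck_rw_ilk_lock(lck);
	while (!*condition) {
		lck->lck_rw_waiting = TRUE;
		res = assert_wait((event_t)(((unsigned int *)lck) +
				((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
		if (res == THREAD_WAITING) {
			lck_rw_ilk_unlock(lck);		/* never block while holding the interlock */
			res = thread_block(THREAD_CONTINUE_NULL);
			lck_rw_ilk_lock(lck);		/* re-check the condition after waking */
		}
	}
	lck_rw_ilk_unlock(lck);
}
#endif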
1038
1039/*
1040 * Routine: lck_rw_done_gen
1041 */
1042lck_rw_type_t
1043lck_rw_done_gen(
1044 lck_rw_t *lck)
1045{
1046 boolean_t do_wakeup = FALSE;
1047 lck_rw_type_t lck_rw_type;
1048
1049
1050 lck_rw_ilk_lock(lck);
1051
1052 if (lck->lck_rw_shared_cnt != 0) {
1053 lck_rw_type = LCK_RW_TYPE_SHARED;
1054 lck->lck_rw_shared_cnt--;
1055 }
1056 else {
1057 lck_rw_type = LCK_RW_TYPE_EXCLUSIVE;
1058 if (lck->lck_rw_want_upgrade)
1059 lck->lck_rw_want_upgrade = FALSE;
1060 else
1061 lck->lck_rw_want_excl = FALSE;
1062 }
1063
1064 /*
1065 * There is no reason to wakeup a lck_rw_waiting thread
1066 * if the read-count is non-zero. Consider:
1067 * we must be dropping a read lock
1068 * threads are waiting only if one wants a write lock
1069 * if there are still readers, they can't proceed
1070 */
1071
1072 if (lck->lck_rw_waiting && (lck->lck_rw_shared_cnt == 0)) {
1073 lck->lck_rw_waiting = FALSE;
1074 do_wakeup = TRUE;
1075 }
1076
1077 lck_rw_ilk_unlock(lck);
1078
1079 if (do_wakeup)
1080 thread_wakeup((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
1081 return(lck_rw_type);
1082}
1083
1084
1085/*
1086 * Routine: lck_rw_lock_shared_gen
1087 */
1088void
1089lck_rw_lock_shared_gen(
1090 lck_rw_t *lck)
1091{
1092 int i;
1093 wait_result_t res;
1094
1095 lck_rw_ilk_lock(lck);
1096
1097 while ((lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) &&
1098 ((lck->lck_rw_shared_cnt == 0) || (lck->lck_rw_priv_excl))) {
1099 i = lock_wait_time[1];
1100
1101 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_START,
1102 (int)lck, lck->lck_rw_want_excl, lck->lck_rw_want_upgrade, i, 0);
1103
1104 if (i != 0) {
1105 lck_rw_ilk_unlock(lck);
1106 while (--i != 0 &&
1107 (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) &&
1108 ((lck->lck_rw_shared_cnt == 0) || (lck->lck_rw_priv_excl)))
1109 continue;
1110 lck_rw_ilk_lock(lck);
1111 }
1112
1113 if ((lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) &&
1114 ((lck->lck_rw_shared_cnt == 0) || (lck->lck_rw_priv_excl))) {
1115 lck->lck_rw_waiting = TRUE;
1116 res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1117 if (res == THREAD_WAITING) {
1118 lck_rw_ilk_unlock(lck);
1119 res = thread_block(THREAD_CONTINUE_NULL);
1120 lck_rw_ilk_lock(lck);
1121 }
1122 }
1123 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_END,
1124 (int)lck, lck->lck_rw_want_excl, lck->lck_rw_want_upgrade, res, 0);
1125 }
1126
1127 lck->lck_rw_shared_cnt++;
1128
1129 lck_rw_ilk_unlock(lck);
1130}
1131
1132
1133/*
1134 * Routine: lck_rw_lock_shared_to_exclusive_gen
1135 * Function:
1136 * Improves a read-only lock to one with
1137 * write permission. If another reader has
1138 * already requested an upgrade to a write lock,
1139 * no lock is held upon return.
1140 *
1141 * Returns TRUE if the upgrade *failed*.
1142 */
1143
1144boolean_t
1145lck_rw_lock_shared_to_exclusive_gen(
1146 lck_rw_t *lck)
1147{
1148 int i;
1149 boolean_t do_wakeup = FALSE;
1150 wait_result_t res;
1151
1152 lck_rw_ilk_lock(lck);
1153
1154 lck->lck_rw_shared_cnt--;
1155
1156 if (lck->lck_rw_want_upgrade) {
1157 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_START,
1158 (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, 0, 0);
1159
1160 /*
1161 * Someone else has requested upgrade.
1162 * Since we've released a read lock, wake
1163 * him up.
1164 */
1165 if (lck->lck_rw_waiting && (lck->lck_rw_shared_cnt == 0)) {
1166 lck->lck_rw_waiting = FALSE;
1167 do_wakeup = TRUE;
1168 }
1169
1170 lck_rw_ilk_unlock(lck);
1171
1172 if (do_wakeup)
1173 thread_wakeup((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
1174
1175 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_END,
1176 (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, 0, 0);
1177
1178 return (TRUE);
1179 }
1180
1181 lck->lck_rw_want_upgrade = TRUE;
1182
1183 while (lck->lck_rw_shared_cnt != 0) {
1184 i = lock_wait_time[1];
1185
1186 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_START,
1187 (int)lck, lck->lck_rw_shared_cnt, i, 0, 0);
1188
1189 if (i != 0) {
1190 lck_rw_ilk_unlock(lck);
1191 while (--i != 0 && lck->lck_rw_shared_cnt != 0)
1192 continue;
1193 lck_rw_ilk_lock(lck);
1194 }
1195
1196 if (lck->lck_rw_shared_cnt != 0) {
1197 lck->lck_rw_waiting = TRUE;
1198 res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1199 if (res == THREAD_WAITING) {
1200 lck_rw_ilk_unlock(lck);
1201 res = thread_block(THREAD_CONTINUE_NULL);
1202 lck_rw_ilk_lock(lck);
1203 }
1204 }
1205 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_END,
1206 (int)lck, lck->lck_rw_shared_cnt, 0, 0, 0);
1207 }
1208
1209 lck_rw_ilk_unlock(lck);
1210
1211 return (FALSE);
1212}
1213
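/*
 * Illustrative sketch (editor's addition): the caller-side pattern for the
 * upgrade semantics documented above -- TRUE means the upgrade failed and no
 * lock is held on return, so the caller must retake the write lock and
 * revalidate.  Shown with the MACH_LDEBUG lock_read()/lock_read_to_write()
 * wrappers defined earlier in this file; guarded out so it is never compiled.
 */
#if 0
static void
rw_upgrade_sketch(lock_t *l)
{
	lock_read(l);
	/* ... decide, while holding the read lock, that the data must change ... */
	if (lock_read_to_write(l)) {
		/*
		 * Upgrade failed: another reader already requested it and our
		 * lock was dropped entirely.  Take the write lock from scratch
		 * and revalidate whatever was observed under the read lock.
		 */
		lock_write(l);
	}
	/* ... exclusive access here ... */
	lock_done(l);
}
#endif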
1214/*
1215 * Routine: lck_rw_lock_exclusive_to_shared_gen
1216 */
1217void
1218lck_rw_lock_exclusive_to_shared_gen(
1219 lck_rw_t *lck)
1220{
1221 boolean_t do_wakeup = FALSE;
1222
1223 lck_rw_ilk_lock(lck);
1224
1225 lck->lck_rw_shared_cnt++;
1226 if (lck->lck_rw_want_upgrade)
1227 lck->lck_rw_want_upgrade = FALSE;
1228 else
1229 lck->lck_rw_want_excl = FALSE;
1230
1231 if (lck->lck_rw_waiting) {
1232 lck->lck_rw_waiting = FALSE;
1233 do_wakeup = TRUE;
1234 }
1235
1236 lck_rw_ilk_unlock(lck);
1237
1238 if (do_wakeup)
1239 thread_wakeup((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
1240
1241}
1242
1243
1244/*
1245 * Routine: lck_rw_try_lock_exclusive_gen
1246 * Function:
1247 * Tries to get a write lock.
1248 *
1249 * Returns FALSE if the lock is not held on return.
1250 */
1251
1252boolean_t
1253lck_rw_try_lock_exclusive_gen(
1254 lck_rw_t *lck)
1255{
1256 lck_rw_ilk_lock(lck);
1257
1258 if (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade || lck->lck_rw_shared_cnt) {
1259 /*
1260 * Can't get lock.
1261 */
1262 lck_rw_ilk_unlock(lck);
1263 return(FALSE);
1264 }
1265
1266 /*
1267 * Have lock.
1268 */
1269
1270 lck->lck_rw_want_excl = TRUE;
1271
1272 lck_rw_ilk_unlock(lck);
1273
1274 return(TRUE);
1275}
1276
1277/*
1278 * Routine: lck_rw_try_lock_shared_gen
1279 * Function:
1280 * Tries to get a read lock.
1281 *
1282 * Returns FALSE if the lock is not held on return.
1283 */
1284
1285boolean_t
1286lck_rw_try_lock_shared_gen(
1287 lck_rw_t *lck)
1288{
1289 lck_rw_ilk_lock(lck);
1290
1291 if ((lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) &&
1292 ((lck->lck_rw_shared_cnt == 0) || (lck->lck_rw_priv_excl))) {
1293 lck_rw_ilk_unlock(lck);
1294 return(FALSE);
1295 }
1296
1297 lck->lck_rw_shared_cnt++;
1298
1299 lck_rw_ilk_unlock(lck);
1300
1301 return(TRUE);
1302}
1303
1304
1305/*
1306 * Routine: lck_rw_ext_backtrace
1307 */
1308void
1309lck_rw_ext_backtrace(
1310 lck_rw_ext_t *lck)
1311{
1312 unsigned int *stackptr, *stackptr_prev;
1313 unsigned int frame;
1314
1315 __asm__ volatile("mr %0,r1" : "=r" (stackptr));
1316 frame = 0;
1317 while (frame < LCK_FRAMES_MAX) {
1318 stackptr_prev = stackptr;
1319 stackptr = ( unsigned int *)*stackptr;
1320 if ( (((unsigned int)stackptr_prev) ^ ((unsigned int)stackptr)) > 8192)
1321 break;
1322 lck->lck_rw_deb.stack[frame] = *(stackptr+2);
1323 frame++;
1324 }
1325 while (frame < LCK_FRAMES_MAX) {
1326 lck->lck_rw_deb.stack[frame] = 0;
1327 frame++;
1328 }
1329}
1330
1331
1332/*
1333 * Routine: lck_rw_lock_exclusive_ext
1334 */
1335void
1336lck_rw_lock_exclusive_ext(
1337 lck_rw_ext_t *lck,
1338 lck_rw_t *rlck)
1339{
1340 int i;
1341 wait_result_t res;
1342 boolean_t lock_miss = FALSE;
1343 boolean_t lock_wait = FALSE;
1344 boolean_t lock_stat;
1345
1346 lck_rw_check_type(lck, rlck);
1347
1348 if ( ((lck->lck_rw_attr & (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_MYLOCK)) == LCK_RW_ATTR_DEBUG)
1349 && (lck->lck_rw_deb.thread == current_thread()))
1350 panic("rw lock (0x%08X) recursive lock attempt\n", rlck);
1351
1352 lck_rw_ilk_lock(&lck->lck_rw);
1353
1354 lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;
1355
1356 if (lock_stat)
1357 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;
1358
1359 /*
1360 * Try to acquire the lck_rw.lck_rw_want_excl bit.
1361 */
1362 while (lck->lck_rw.lck_rw_want_excl) {
1363 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_START, (int)rlck, 0, 0, 0, 0);
1364
1365 if (lock_stat && !lock_miss) {
1366 lock_miss = TRUE;
1367 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
1368 }
1369
1370 i = lock_wait_time[1];
1371 if (i != 0) {
1372 lck_rw_ilk_unlock(&lck->lck_rw);
1373 while (--i != 0 && lck->lck_rw.lck_rw_want_excl)
1374 continue;
1375 lck_rw_ilk_lock(&lck->lck_rw);
1376 }
1377
1378 if (lck->lck_rw.lck_rw_want_excl) {
1379 lck->lck_rw.lck_rw_waiting = TRUE;
1380 res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1381 if (res == THREAD_WAITING) {
1382 if (lock_stat && !lock_wait) {
1383 lock_wait = TRUE;
1384 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
1385 }
1386 lck_rw_ilk_unlock(&lck->lck_rw);
1387 res = thread_block(THREAD_CONTINUE_NULL);
1388 lck_rw_ilk_lock(&lck->lck_rw);
1389 }
1390 }
1391 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_END, (int)rlck, res, 0, 0, 0);
1392 }
1393 lck->lck_rw.lck_rw_want_excl = TRUE;
1394
1395 /* Wait for readers (and upgrades) to finish */
1396
1397 while ((lck->lck_rw.lck_rw_shared_cnt != 0) || lck->lck_rw.lck_rw_want_upgrade) {
1398 i = lock_wait_time[1];
1399
1400 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_START,
1401 (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, i, 0);
1402
1403 if (lock_stat && !lock_miss) {
1404 lock_miss = TRUE;
1405 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
1406 }
1407
1408 if (i != 0) {
1409 lck_rw_ilk_unlock(&lck->lck_rw);
1410 while (--i != 0 && (lck->lck_rw.lck_rw_shared_cnt != 0 ||
1411 lck->lck_rw.lck_rw_want_upgrade))
1412 continue;
1413 lck_rw_ilk_lock(&lck->lck_rw);
1414 }
1415
1416 if (lck->lck_rw.lck_rw_shared_cnt != 0 || lck->lck_rw.lck_rw_want_upgrade) {
1417 lck->lck_rw.lck_rw_waiting = TRUE;
1418 res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1419 if (res == THREAD_WAITING) {
1420 if (lock_stat && !lock_wait) {
1421 lock_wait = TRUE;
1422 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
1423 }
1424 lck_rw_ilk_unlock(&lck->lck_rw);
1425 res = thread_block(THREAD_CONTINUE_NULL);
1426 lck_rw_ilk_lock(&lck->lck_rw);
1427 }
1428 }
1429 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_END,
1430 (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, res, 0);
1431 }
1432
1433 lck->lck_rw_deb.pc_excl = __builtin_return_address(0);
1434 if (LcksOpts & enaLkExtStck)
1435 lck_rw_ext_backtrace(lck);
1436 lck->lck_rw_deb.thread = current_thread();
1437
1438 lck_rw_ilk_unlock(&lck->lck_rw);
1439}
1440
1441
1442/*
1443 * Routine: lck_rw_done_ext
1444 */
1445lck_rw_type_t
1446lck_rw_done_ext(
1447 lck_rw_ext_t *lck,
1448 lck_rw_t *rlck)
1449{
1450 boolean_t do_wakeup = FALSE;
1451 lck_rw_type_t lck_rw_type;
1452
1453
1454 lck_rw_check_type(lck, rlck);
1455
1456 lck_rw_ilk_lock(&lck->lck_rw);
1457
1458 if (lck->lck_rw.lck_rw_shared_cnt != 0) {
1459 lck_rw_type = LCK_RW_TYPE_SHARED;
1460 lck->lck_rw.lck_rw_shared_cnt--;
1461 }
1462 else {
1463 lck_rw_type = LCK_RW_TYPE_EXCLUSIVE;
1464 if (lck->lck_rw.lck_rw_want_upgrade)
1465 lck->lck_rw.lck_rw_want_upgrade = FALSE;
1466 else if (lck->lck_rw.lck_rw_want_excl)
1467 lck->lck_rw.lck_rw_want_excl = FALSE;
1468 else
1469 panic("rw lock (0x%08X) bad state (0x%08X) on attempt to release a shared or exclusive right\n",
1470 rlck, lck->lck_rw);
1471 if (lck->lck_rw_deb.thread == THREAD_NULL)
1472 panic("rw lock (0x%08X) not held\n",
1473 rlck);
1474 else if ( ((lck->lck_rw_attr & (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_THREAD)) == LCK_RW_ATTR_DEBUG)
1475 && (lck->lck_rw_deb.thread != current_thread()))
1476 panic("rw lock (0x%08X) unlocked by non-owner(0x%08X), current owner(0x%08X)\n",
1477 rlck, current_thread(), lck->lck_rw_deb.thread);
1478 lck->lck_rw_deb.thread = THREAD_NULL;
1479 }
1480
1481 if (lck->lck_rw_attr & LCK_RW_ATTR_DEBUG)
1482 lck->lck_rw_deb.pc_done = __builtin_return_address(0);
1483
1484 /*
1485 * There is no reason to wakeup a waiting thread
1486 * if the read-count is non-zero. Consider:
1487 * we must be dropping a read lock
1488 * threads are waiting only if one wants a write lock
1489 * if there are still readers, they can't proceed
1490 */
1491
1492 if (lck->lck_rw.lck_rw_waiting && (lck->lck_rw.lck_rw_shared_cnt == 0)) {
1493 lck->lck_rw.lck_rw_waiting = FALSE;
1494 do_wakeup = TRUE;
1495 }
1496
1497 lck_rw_ilk_unlock(&lck->lck_rw);
1498
1499 if (do_wakeup)
1500 thread_wakeup((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
1501 return(lck_rw_type);
1502}
1503
1504
1505/*
1506 * Routine: lck_rw_lock_shared_ext
1507 */
1508void
1509lck_rw_lock_shared_ext(
1510 lck_rw_ext_t *lck,
1511 lck_rw_t *rlck)
1512{
1513 int i;
1514 wait_result_t res;
1515 boolean_t lock_miss = FALSE;
1516 boolean_t lock_wait = FALSE;
1517 boolean_t lock_stat;
1518
1519 lck_rw_check_type(lck, rlck);
1520
1521 lck_rw_ilk_lock(&lck->lck_rw);
1522
1523 lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;
1524
1525 if (lock_stat)
1526 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;
1527
1528 while ((lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade) &&
1529 ((lck->lck_rw.lck_rw_shared_cnt == 0) || (lck->lck_rw.lck_rw_priv_excl))) {
1530 i = lock_wait_time[1];
1531
1532 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_START,
1533 (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, i, 0);
1534
1535 if (lock_stat && !lock_miss) {
1536 lock_miss = TRUE;
1537 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
1538 }
1539
1540 if (i != 0) {
1541 lck_rw_ilk_unlock(&lck->lck_rw);
1542 while (--i != 0 &&
1543 (lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade) &&
1544 ((lck->lck_rw.lck_rw_shared_cnt == 0) || (lck->lck_rw.lck_rw_priv_excl)))
1545 continue;
1546 lck_rw_ilk_lock(&lck->lck_rw);
1547 }
1548
1549 if ((lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade) &&
1550 ((lck->lck_rw.lck_rw_shared_cnt == 0) || (lck->lck_rw.lck_rw_priv_excl))) {
1551 lck->lck_rw.lck_rw_waiting = TRUE;
1552 res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1553 if (res == THREAD_WAITING) {
1554 if (lock_stat && !lock_wait) {
1555 lock_wait = TRUE;
1556 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
1557 }
1558 lck_rw_ilk_unlock(&lck->lck_rw);
1559 res = thread_block(THREAD_CONTINUE_NULL);
1560 lck_rw_ilk_lock(&lck->lck_rw);
1561 }
1562 }
1563 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_END,
1564 (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, res, 0);
1565 }
1566
1567 lck->lck_rw.lck_rw_shared_cnt++;
1568
1569 lck_rw_ilk_unlock(&lck->lck_rw);
1570}
1571
1572
1573/*
1574 * Routine: lck_rw_lock_shared_to_exclusive_ext
1575 * Function:
1576 * Improves a read-only lock to one with
1577 * write permission. If another reader has
1578 * already requested an upgrade to a write lock,
1579 * no lock is held upon return.
1580 *
1581 * Returns TRUE if the upgrade *failed*.
1582 */
1583
1584boolean_t
1585lck_rw_lock_shared_to_exclusive_ext(
1586 lck_rw_ext_t *lck,
1587 lck_rw_t *rlck)
1588{
1589 int i;
1590 boolean_t do_wakeup = FALSE;
1591 wait_result_t res;
1592 boolean_t lock_miss = FALSE;
1593 boolean_t lock_wait = FALSE;
1594 boolean_t lock_stat;
1595
1596 lck_rw_check_type(lck, rlck);
1597
1598 if (lck->lck_rw_deb.thread == current_thread())
1599 panic("rw lock (0x%08X) recursive lock attempt\n", rlck);
1600
1601 lck_rw_ilk_lock(&lck->lck_rw);
1602
1603 lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;
1604
1605 if (lock_stat)
1606 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;
1607
1608 lck->lck_rw.lck_rw_shared_cnt--;
1609
1610 if (lck->lck_rw.lck_rw_want_upgrade) {
1611 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_START,
1612 (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, 0, 0);
1613
1614 /*
1615 * Someone else has requested upgrade.
1616 * Since we've released a read lock, wake
1617 * him up.
1618 */
1619 if (lck->lck_rw.lck_rw_waiting && (lck->lck_rw.lck_rw_shared_cnt == 0)) {
1620 lck->lck_rw.lck_rw_waiting = FALSE;
1621 do_wakeup = TRUE;
1622 }
1623
1624 lck_rw_ilk_unlock(&lck->lck_rw);
1625
1626 if (do_wakeup)
1627 thread_wakeup((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
1628
1629 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_END,
1630 (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, 0, 0);
1631
1632 return (TRUE);
1633 }
1634
1635 lck->lck_rw.lck_rw_want_upgrade = TRUE;
1636
1637 while (lck->lck_rw.lck_rw_shared_cnt != 0) {
1638 i = lock_wait_time[1];
1639
1640 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_START,
1641 (int)rlck, lck->lck_rw.lck_rw_shared_cnt, i, 0, 0);
1642
1643 if (lock_stat && !lock_miss) {
1644 lock_miss = TRUE;
1645 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
1646 }
1647
1648 if (i != 0) {
1649 lck_rw_ilk_unlock(&lck->lck_rw);
1650 while (--i != 0 && lck->lck_rw.lck_rw_shared_cnt != 0)
1651 continue;
1652 lck_rw_ilk_lock(&lck->lck_rw);
1653 }
1654
1655 if (lck->lck_rw.lck_rw_shared_cnt != 0) {
1656 lck->lck_rw.lck_rw_waiting = TRUE;
1657 res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1658 if (res == THREAD_WAITING) {
1659 if (lock_stat && !lock_wait) {
1660 lock_wait = TRUE;
1661 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
1662 }
1663 lck_rw_ilk_unlock(&lck->lck_rw);
1664 res = thread_block(THREAD_CONTINUE_NULL);
1665 lck_rw_ilk_lock(&lck->lck_rw);
1666 }
1667 }
1668 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_END,
1669 (int)rlck, lck->lck_rw.lck_rw_shared_cnt, 0, 0, 0);
1670 }
1671
1672 lck->lck_rw_deb.pc_excl = __builtin_return_address(0);
1673 if (LcksOpts & enaLkExtStck)
1674 lck_rw_ext_backtrace(lck);
1675 lck->lck_rw_deb.thread = current_thread();
1676
1677 lck_rw_ilk_unlock(&lck->lck_rw);
1678
1679 return (FALSE);
1680}
1681
1682/*
1683 * Routine: lck_rw_lock_exclusive_to_shared_ext
1684 */
1685void
1686lck_rw_lock_exclusive_to_shared_ext(
1687 lck_rw_ext_t *lck,
1688 lck_rw_t *rlck)
1689{
1690 boolean_t do_wakeup = FALSE;
1691
1692 lck_rw_check_type(lck, rlck);
1693
1694 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_START,
1695 (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, 0, 0);
1696
1697 lck_rw_ilk_lock(&lck->lck_rw);
1698
1699 lck->lck_rw.lck_rw_shared_cnt++;
1700 if (lck->lck_rw.lck_rw_want_upgrade)
1701 lck->lck_rw.lck_rw_want_upgrade = FALSE;
1702 else if (lck->lck_rw.lck_rw_want_excl)
1703 lck->lck_rw.lck_rw_want_excl = FALSE;
1704 else
1705 panic("rw lock (0x%08X) bad state (0x%08X) on attempt to release a shared or exclusive right\n",
1706 rlck, lck->lck_rw);
1707 if (lck->lck_rw_deb.thread == THREAD_NULL)
1708 panic("rw lock (0x%08X) not held\n",
1709 rlck);
1710 else if ( ((lck->lck_rw_attr & (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_THREAD)) == LCK_RW_ATTR_DEBUG)
1711 && (lck->lck_rw_deb.thread != current_thread()))
1712 panic("rw lock (0x%08X) unlocked by non-owner(0x%08X), current owner(0x%08X)\n",
1713 rlck, current_thread(), lck->lck_rw_deb.thread);
1714
1715 lck->lck_rw_deb.thread = THREAD_NULL;
1716
1717 if (lck->lck_rw.lck_rw_waiting) {
1718 lck->lck_rw.lck_rw_waiting = FALSE;
1719 do_wakeup = TRUE;
1720 }
1721
1722 lck_rw_ilk_unlock(&lck->lck_rw);
1723
1724 if (do_wakeup)
1725 thread_wakeup((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
1726
1727 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_END,
1728 (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, lck->lck_rw.lck_rw_shared_cnt, 0);
1729
1730}
1731
1732
1733/*
1734 * Routine: lck_rw_try_lock_exclusive_ext
1735 * Function:
1736 * Tries to get a write lock.
1737 *
1738 * Returns FALSE if the lock is not held on return.
1739 */
1740
1741boolean_t
1742lck_rw_try_lock_exclusive_ext(
1743 lck_rw_ext_t *lck,
1744 lck_rw_t *rlck)
1745{
1746 boolean_t lock_stat;
1747
1748 lck_rw_check_type(lck, rlck);
1749
1750 lck_rw_ilk_lock(&lck->lck_rw);
1751
1752 lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;
1753
1754 if (lock_stat)
1755 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;
1756
1757 if (lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade || lck->lck_rw.lck_rw_shared_cnt) {
1758 /*
1759 * Can't get lock.
1760 */
1761 if (lock_stat) {
1762 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
1763 }
1764 lck_rw_ilk_unlock(&lck->lck_rw);
1765 return(FALSE);
1766 }
1767
1768 /*
1769 * Have lock.
1770 */
1771
1772 lck->lck_rw.lck_rw_want_excl = TRUE;
1773 lck->lck_rw_deb.pc_excl = __builtin_return_address(0);
1774 if (LcksOpts & enaLkExtStck)
1775 lck_rw_ext_backtrace(lck);
1776 lck->lck_rw_deb.thread = current_thread();
1777
1778 lck_rw_ilk_unlock(&lck->lck_rw);
1779
1780 return(TRUE);
1781}
1782
1783/*
1784 * Routine: lck_rw_try_lock_shared_ext
1785 * Function:
1786 * Tries to get a read lock.
1787 *
1788 * Returns FALSE if the lock is not held on return.
1789 */
1790
1791boolean_t
1792lck_rw_try_lock_shared_ext(
1793 lck_rw_ext_t *lck,
1794 lck_rw_t *rlck)
1795{
1796 boolean_t lock_stat;
1797
1798 lck_rw_check_type(lck, rlck);
1799
1800 lck_rw_ilk_lock(&lck->lck_rw);
1801
1802 lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;
1803
1804 if (lock_stat)
1805 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;
1806
1807 if ((lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade) &&
1808 ((lck->lck_rw.lck_rw_shared_cnt == 0) || (lck->lck_rw.lck_rw_priv_excl))) {
1809 if (lock_stat) {
1810 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
1811 }
1812 lck_rw_ilk_unlock(&lck->lck_rw);
1813 return(FALSE);
1814 }
1815
1816 lck->lck_rw.lck_rw_shared_cnt++;
1817
1818 lck_rw_ilk_unlock(&lck->lck_rw);
1819
1820 return(TRUE);
1821}
1822
1823void
1824lck_rw_check_type(
1825 lck_rw_ext_t *lck,
1826 lck_rw_t *rlck)
1827{
1828 if (lck->lck_rw_deb.type != RW_TAG)
1829 panic("rw lock (0x%08X) not a rw lock type (0x%08X)\n",rlck, lck->lck_rw_deb.type);
1830}
1831
1832/*
1833 * The C portion of the mutex package. These routines are only invoked
1834 * if the optimized assembler routines can't do the work.
1835 */
1836
1837/*
1838 * Forward definition
1839 */
1840
1841void lck_mtx_ext_init(
1842 lck_mtx_ext_t *lck,
1843 lck_grp_t *grp,
1844 lck_attr_t *attr);
1845
1846/*
1847 * Routine: mutex_alloc
1848 * Function:
1849 * Allocate a mutex for external users who cannot
1850 * hard-code the structure definition into their
1851 * objects.
1852 * For now just use kalloc, but a zone is probably
1853 * warranted.
1854 */
1855mutex_t *
1856mutex_alloc(
1857 unsigned short tag)
1858{
1859 mutex_t *m;
1860
1861 if ((m = (mutex_t *)kalloc(sizeof(mutex_t))) != 0)
1862 mutex_init(m, tag);
1863 return(m);
1864}
1865
1866/*
1867 * Routine: mutex_free
1868 */
1869void
1870mutex_free(
1871 mutex_t *m)
1872{
1873 kfree((void *)m, sizeof(mutex_t));
1874}
1875
1876/*
1877 * Routine: lck_mtx_alloc_init
1878 */
1879lck_mtx_t *
1880lck_mtx_alloc_init(
1881 lck_grp_t *grp,
1882 lck_attr_t *attr) {
1883 lck_mtx_t *lck;
1884
1885 if ((lck = (lck_mtx_t *)kalloc(sizeof(lck_mtx_t))) != 0)
1886 lck_mtx_init(lck, grp, attr);
1887
1888 return(lck);
1889}
1890
1891/*
1892 * Routine: lck_mtx_free
1893 */
1894void
1895lck_mtx_free(
1896 lck_mtx_t *lck,
1897 lck_grp_t *grp) {
1898 lck_mtx_destroy(lck, grp);
1899 kfree((void *)lck, sizeof(lck_mtx_t));
1900}
1901
1902/*
1903 * Routine: lck_mtx_init
1904 */
1905void
1906lck_mtx_init(
1907 lck_mtx_t *lck,
1908 lck_grp_t *grp,
1909 lck_attr_t *attr) {
1910 lck_mtx_ext_t *lck_ext;
1911 lck_attr_t *lck_attr;
1912
1913 if (attr != LCK_ATTR_NULL)
1914 lck_attr = attr;
1915 else
1916 lck_attr = &LockDefaultLckAttr;
1917
1918 if ((lck_attr->lck_attr_val) & LCK_ATTR_DEBUG) {
1919 if ((lck_ext = (lck_mtx_ext_t *)kalloc(sizeof(lck_mtx_ext_t))) != 0) {
1920 lck_mtx_ext_init(lck_ext, grp, lck_attr);
1921 lck->lck_mtx_tag = LCK_MTX_TAG_INDIRECT;
1922 lck->lck_mtx_ptr = lck_ext;
1923 }
1924 } else {
1925 lck->lck_mtx_data = 0;
1926 lck->lck_mtx_waiters = 0;
1927 lck->lck_mtx_pri = 0;
1928 }
1929 lck_grp_reference(grp);
1930 lck_grp_lckcnt_incr(grp, LCK_TYPE_MTX);
1931}
1932
1933/*
1934 * Routine: lck_mtx_ext_init
1935 */
1936void
1937lck_mtx_ext_init(
1938 lck_mtx_ext_t *lck,
1939 lck_grp_t *grp,
1940 lck_attr_t *attr) {
1941
1942 bzero((void *)lck, sizeof(lck_mtx_ext_t));
1943
1944 if ((attr->lck_attr_val) & LCK_ATTR_DEBUG) {
1945 lck->lck_mtx_deb.type = MUTEX_TAG;
1946 lck->lck_mtx_attr |= LCK_MTX_ATTR_DEBUG;
1947 }
1948
1949 lck->lck_mtx_grp = grp;
1950
1951 if (grp->lck_grp_attr & LCK_GRP_ATTR_STAT)
1952 lck->lck_mtx_attr |= LCK_MTX_ATTR_STAT;
1953}
1954
1955/*
1956 * Routine: lck_mtx_destroy
1957 */
1958void
1959lck_mtx_destroy(
1960 lck_mtx_t *lck,
1961 lck_grp_t *grp) {
1962 boolean_t lck_is_indirect;
1963
1964 if (lck->lck_mtx_tag == LCK_MTX_TAG_DESTROYED)
1965 return;
1966 lck_is_indirect = (lck->lck_mtx_tag == LCK_MTX_TAG_INDIRECT);
1967 lck->lck_mtx_tag = LCK_MTX_TAG_DESTROYED;
1968 if (lck_is_indirect)
1969 kfree((void *)lck->lck_mtx_ptr, sizeof(lck_mtx_ext_t));
1970
1971 lck_grp_lckcnt_decr(grp, LCK_TYPE_MTX);
1972 lck_grp_deallocate(grp);
1973 return;
1974}
1975
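/*
 * Illustrative sketch (editor's addition): typical use of the lck_mtx
 * interface whose allocation and initialization routines appear above.  It
 * assumes the machine-level lck_mtx_lock()/lck_mtx_unlock() entry points
 * declared in <kern/locks.h>; guarded out so it is never compiled.
 */
#if 0
static void
lck_mtx_usage_sketch(lck_grp_t *grp)
{
	lck_mtx_t	*mtx = lck_mtx_alloc_init(grp, LCK_ATTR_NULL);

	lck_mtx_lock(mtx);		/* may block; the holder is allowed to sleep */
	/* ... critical section ... */
	lck_mtx_unlock(mtx);

	lck_mtx_free(mtx, grp);		/* lck_mtx_destroy + kfree */
}
#endif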
1976
1977#if MACH_KDB
1978/*
1979 * Routines to print out simple_locks and mutexes in a nicely-formatted
1980 * fashion.
1981 */
1982
1983char *simple_lock_labels = "ENTRY ILK THREAD DURATION CALLER";
1984char *mutex_labels = "ENTRY LOCKED WAITERS THREAD CALLER";
1985
1986void db_print_simple_lock(
1987 simple_lock_t addr);
1988
1989void db_print_mutex(
1990 mutex_t * addr);
1991
1992void
1993db_show_one_simple_lock (
1994 db_expr_t addr,
1995 boolean_t have_addr,
1996 db_expr_t count,
1997 char * modif)
1998{
1999 simple_lock_t saddr = (simple_lock_t)addr;
2000
2001 if (saddr == (simple_lock_t)0 || !have_addr) {
2002 db_error ("No simple_lock\n");
2003 }
2004#if USLOCK_DEBUG
2005 else if (saddr->lock_type != USLOCK_TAG)
2006 db_error ("Not a simple_lock\n");
2007#endif /* USLOCK_DEBUG */
2008
2009 db_printf ("%s\n", simple_lock_labels);
2010 db_print_simple_lock (saddr);
2011}
2012
2013void
2014db_print_simple_lock (
2015 simple_lock_t addr)
2016{
2017
2018 db_printf ("%08x %3d", addr, *hw_lock_addr(addr->interlock));
2019#if USLOCK_DEBUG
2020 db_printf (" %08x", addr->debug.lock_thread);
2021 db_printf (" %08x ", addr->debug.duration[1]);
2022 db_printsym ((int)addr->debug.lock_pc, DB_STGY_ANY);
2023#endif /* USLOCK_DEBUG */
2024 db_printf ("\n");
2025}
2026
2027void
2028db_show_one_mutex (
2029 db_expr_t addr,
2030 boolean_t have_addr,
2031 db_expr_t count,
2032 char * modif)
2033{
2034 mutex_t * maddr = (mutex_t *)addr;
2035
2036 if (maddr == (mutex_t *)0 || !have_addr)
2037 db_error ("No mutex\n");
2038#if MACH_LDEBUG
2039 else if (maddr->lck_mtx_deb.type != MUTEX_TAG)
2040 db_error ("Not a mutex\n");
2041#endif /* MACH_LDEBUG */
2042
2043 db_printf ("%s\n", mutex_labels);
2044 db_print_mutex (maddr);
2045}
2046
2047void
2048db_print_mutex (
2049 mutex_t * addr)
2050{
2051 db_printf ("%08x %6d %7d",
2052 addr, *addr, addr->lck_mtx.lck_mtx_waiters);
2053#if MACH_LDEBUG
2054 db_printf (" %08x ", addr->lck_mtx_deb.thread);
2055 db_printsym (addr->lck_mtx_deb.stack[0], DB_STGY_ANY);
2056#endif /* MACH_LDEBUG */
2057 db_printf ("\n");
2058}
2059
2060void
2061db_show_one_lock(
2062 lock_t *lock)
2063{
2064 db_printf("shared_count = 0x%x, %swant_upgrade, %swant_exclusive, ",
2065 lock->lck_rw.lck_rw_shared_cnt,
2066 lock->lck_rw.lck_rw_want_upgrade ? "" : "!",
2067 lock->lck_rw.lck_rw_want_excl ? "" : "!");
2068 db_printf("%swaiting\n",
2069 lock->lck_rw.lck_rw_waiting ? "" : "!");
2070 db_printf("%sInterlock\n",
2071 lock->lck_rw.lck_rw_interlock ? "" : "!");
2072}
2073
2074#endif /* MACH_KDB */
2075