[apple/xnu.git] osfmk/ppc/locks_ppc.c (xnu-792.18.15)
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 * File: kern/lock.c
58 * Author: Avadis Tevanian, Jr., Michael Wayne Young
59 * Date: 1985
60 *
61 * Locking primitives implementation
62 */
63
64#include <mach_kdb.h>
65#include <mach_ldebug.h>
66
67#include <kern/kalloc.h>
68#include <kern/lock.h>
69#include <kern/locks.h>
70#include <kern/misc_protos.h>
71#include <kern/thread.h>
72#include <kern/processor.h>
73#include <kern/sched_prim.h>
74#include <kern/xpr.h>
75#include <kern/debug.h>
76#include <string.h>
77
78#if MACH_KDB
79#include <ddb/db_command.h>
80#include <ddb/db_output.h>
81#include <ddb/db_sym.h>
82#include <ddb/db_print.h>
83#endif /* MACH_KDB */
84
85#ifdef __ppc__
86#include <ppc/Firmware.h>
87#endif
88
89#include <sys/kdebug.h>
90
91#define LCK_RW_LCK_EXCLUSIVE_CODE 0x100
92#define LCK_RW_LCK_EXCLUSIVE1_CODE 0x101
93#define LCK_RW_LCK_SHARED_CODE 0x102
94#define LCK_RW_LCK_SH_TO_EX_CODE 0x103
95#define LCK_RW_LCK_SH_TO_EX1_CODE 0x104
96#define LCK_RW_LCK_EX_TO_SH_CODE 0x105
97
98
99#define ANY_LOCK_DEBUG (USLOCK_DEBUG || LOCK_DEBUG || MUTEX_DEBUG)
100
101unsigned int lock_wait_time[2] = { (unsigned int)-1, 0 } ;
102
103/* Forwards */
104
105
106#if USLOCK_DEBUG
107/*
108 * Perform simple lock checks.
109 */
110int uslock_check = 1;
111int max_lock_loops = 100000000;
112decl_simple_lock_data(extern , printf_lock)
113decl_simple_lock_data(extern , panic_lock)
114#if MACH_KDB
115decl_simple_lock_data(extern , kdb_lock)
116#endif /* MACH_KDB */
117#endif /* USLOCK_DEBUG */
118
119
120/*
121 * We often want to know the addresses of the callers
122 * of the various lock routines. However, this information
123 * is only used for debugging and statistics.
124 */
125typedef void *pc_t;
126#define INVALID_PC ((void *) VM_MAX_KERNEL_ADDRESS)
127#define INVALID_THREAD ((void *) VM_MAX_KERNEL_ADDRESS)
128#if ANY_LOCK_DEBUG
129#define OBTAIN_PC(pc,l) ((pc) = (void *) GET_RETURN_PC(&(l)))
130#else /* ANY_LOCK_DEBUG */
131#ifdef lint
132/*
133 * Eliminate lint complaints about unused local pc variables.
134 */
135#define OBTAIN_PC(pc,l) ++pc
136#else /* lint */
137#define OBTAIN_PC(pc,l)
138#endif /* lint */
139#endif /* ANY_LOCK_DEBUG */
140
141
142/*
143 * Portable lock package implementation of usimple_locks.
144 */
145
146#if USLOCK_DEBUG
147#define USLDBG(stmt) stmt
148void usld_lock_init(usimple_lock_t, unsigned short);
149void usld_lock_pre(usimple_lock_t, pc_t);
150void usld_lock_post(usimple_lock_t, pc_t);
151void usld_unlock(usimple_lock_t, pc_t);
152void usld_lock_try_pre(usimple_lock_t, pc_t);
153void usld_lock_try_post(usimple_lock_t, pc_t);
154int usld_lock_common_checks(usimple_lock_t, char *);
155#else /* USLOCK_DEBUG */
156#define USLDBG(stmt)
157#endif /* USLOCK_DEBUG */
158
159/*
160 * Routine: lck_spin_alloc_init
161 */
162lck_spin_t *
163lck_spin_alloc_init(
164 lck_grp_t *grp,
165 lck_attr_t *attr) {
166 lck_spin_t *lck;
167
168 if ((lck = (lck_spin_t *)kalloc(sizeof(lck_spin_t))) != 0)
169 lck_spin_init(lck, grp, attr);
170
171 return(lck);
172}
173
174/*
175 * Routine: lck_spin_free
176 */
177void
178lck_spin_free(
179 lck_spin_t *lck,
180 lck_grp_t *grp) {
181 lck_spin_destroy(lck, grp);
182 kfree((void *)lck, sizeof(lck_spin_t));
183}
184
185/*
186 * Routine: lck_spin_init
187 */
188void
189lck_spin_init(
190 lck_spin_t *lck,
191 lck_grp_t *grp,
192 __unused lck_attr_t *attr) {
193
194 lck->interlock = 0;
195 lck_grp_reference(grp);
196 lck_grp_lckcnt_incr(grp, LCK_TYPE_SPIN);
197}
198
199/*
200 * Routine: lck_spin_destroy
201 */
202void
203lck_spin_destroy(
204 lck_spin_t *lck,
205 lck_grp_t *grp) {
206 if (lck->interlock == LCK_SPIN_TAG_DESTROYED)
207 return;
208 lck->interlock = LCK_SPIN_TAG_DESTROYED;
209 lck_grp_lckcnt_decr(grp, LCK_TYPE_SPIN);
210 lck_grp_deallocate(grp);
211}
212
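/*
 * Editor's illustrative sketch (not part of the original file): a minimal
 * spin lock life cycle using the lck_spin_* routines defined above,
 * assuming the caller already has a lock group and attribute, and assuming
 * the lck_spin_lock()/lck_spin_unlock() entry points implemented outside
 * this file.
 */
#if 0	/* illustrative only */
static void
example_spin_usage(lck_grp_t *grp, lck_attr_t *attr)
{
	lck_spin_t *sl = lck_spin_alloc_init(grp, attr);	/* allocate + init */

	lck_spin_lock(sl);		/* acquire; spins until available */
	/* ... short critical section ... */
	lck_spin_unlock(sl);

	lck_spin_free(sl, grp);		/* destroy + free */
}
#endif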
213/*
214 * Initialize a usimple_lock.
215 *
216 * No change in preemption state.
217 */
218void
219usimple_lock_init(
220 usimple_lock_t l,
221 unsigned short tag)
222{
223#ifndef MACHINE_SIMPLE_LOCK
224 USLDBG(usld_lock_init(l, tag));
225 hw_lock_init(&l->interlock);
226#else
227 simple_lock_init((simple_lock_t)l,tag);
228#endif
229}
230
231
232/*
233 * Acquire a usimple_lock.
234 *
235 * Returns with preemption disabled. Note
236 * that the hw_lock routines are responsible for
237 * maintaining preemption state.
238 */
239void
240usimple_lock(
241 usimple_lock_t l)
242{
243#ifndef MACHINE_SIMPLE_LOCK
244 int i;
245 pc_t pc;
246#if USLOCK_DEBUG
247 int count = 0;
248#endif /* USLOCK_DEBUG */
249
250 OBTAIN_PC(pc, l);
251 USLDBG(usld_lock_pre(l, pc));
252
253 if(!hw_lock_to(&l->interlock, LockTimeOut)) /* Try to get the lock with a timeout */
254 panic("simple lock deadlock detection - l=0x%08X, cpu=%d, ret=0x%08X", l, cpu_number(), pc);
255
256 USLDBG(usld_lock_post(l, pc));
257#else
258 simple_lock((simple_lock_t)l);
259#endif
260}
261
262
263/*
264 * Release a usimple_lock.
265 *
266 * Returns with preemption enabled. Note
267 * that the hw_lock routines are responsible for
268 * maintaining preemption state.
269 */
270void
271usimple_unlock(
272 usimple_lock_t l)
273{
274#ifndef MACHINE_SIMPLE_LOCK
275 pc_t pc;
276
277 OBTAIN_PC(pc, l);
278 USLDBG(usld_unlock(l, pc));
279 sync();
280 hw_lock_unlock(&l->interlock);
281#else
282 simple_unlock_rwmb((simple_lock_t)l);
283#endif
284}
285
286
287/*
288 * Conditionally acquire a usimple_lock.
289 *
290 * On success, returns with preemption disabled.
291 * On failure, returns with preemption in the same state
292 * as when first invoked. Note that the hw_lock routines
293 * are responsible for maintaining preemption state.
294 *
295 * XXX No stats are gathered on a miss; I preserved this
296 * behavior from the original assembly-language code, but
297 * doesn't it make sense to log misses? XXX
298 */
299unsigned int
300usimple_lock_try(
301 usimple_lock_t l)
302{
303#ifndef MACHINE_SIMPLE_LOCK
304 pc_t pc;
305 unsigned int success;
306
307 OBTAIN_PC(pc, l);
308 USLDBG(usld_lock_try_pre(l, pc));
309 if (success = hw_lock_try(&l->interlock)) {
310 USLDBG(usld_lock_try_post(l, pc));
311 }
312 return success;
313#else
314 return(simple_lock_try((simple_lock_t)l));
315#endif
316}
317
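/*
 * Editor's illustrative sketch (not part of the original file): how the
 * usimple_lock routines above are typically exercised.  The lock storage is
 * assumed to be provided by the caller; a 0 tag is passed purely for
 * illustration.
 */
#if 0	/* illustrative only */
static void
example_usimple_usage(usimple_lock_t l)
{
	usimple_lock_init(l, 0);	/* no change in preemption state */

	usimple_lock(l);		/* returns with preemption disabled */
	/* ... critical section ... */
	usimple_unlock(l);		/* preemption re-enabled */

	if (usimple_lock_try(l)) {	/* non-blocking attempt */
		usimple_unlock(l);
	}
}
#endif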
318#if USLOCK_DEBUG
319/*
320 * States of a usimple_lock. The default when initializing
321 * a usimple_lock is setting it up for debug checking.
322 */
323#define USLOCK_CHECKED 0x0001 /* lock is being checked */
324#define USLOCK_TAKEN 0x0002 /* lock has been taken */
325#define USLOCK_INIT 0xBAA0 /* lock has been initialized */
326#define USLOCK_INITIALIZED (USLOCK_INIT|USLOCK_CHECKED)
327#define USLOCK_CHECKING(l) (uslock_check && \
328 ((l)->debug.state & USLOCK_CHECKED))
329
330/*
331 * Trace activities of a particularly interesting lock.
332 */
333void usl_trace(usimple_lock_t, int, pc_t, const char *);
334
335
336/*
337 * Initialize the debugging information contained
338 * in a usimple_lock.
339 */
340void
341usld_lock_init(
342 usimple_lock_t l,
343 unsigned short tag)
344{
345 if (l == USIMPLE_LOCK_NULL)
346 panic("lock initialization: null lock pointer");
347 l->lock_type = USLOCK_TAG;
348 l->debug.state = uslock_check ? USLOCK_INITIALIZED : 0;
349 l->debug.lock_cpu = l->debug.unlock_cpu = 0;
350 l->debug.lock_pc = l->debug.unlock_pc = INVALID_PC;
351 l->debug.lock_thread = l->debug.unlock_thread = INVALID_THREAD;
352 l->debug.duration[0] = l->debug.duration[1] = 0;
353 l->debug.unlock_cpu = l->debug.unlock_cpu = 0;
354 l->debug.unlock_pc = l->debug.unlock_pc = INVALID_PC;
355 l->debug.unlock_thread = l->debug.unlock_thread = INVALID_THREAD;
356}
357
358
359/*
360 * These checks apply to all usimple_locks, not just
361 * those with USLOCK_CHECKED turned on.
362 */
363int
364usld_lock_common_checks(
365 usimple_lock_t l,
366 char *caller)
367{
368 if (l == USIMPLE_LOCK_NULL)
369 panic("%s: null lock pointer", caller);
370 if (l->lock_type != USLOCK_TAG)
371 panic("%s: 0x%x is not a usimple lock", caller, (integer_t) l);
372 if (!(l->debug.state & USLOCK_INIT))
373 panic("%s: 0x%x is not an initialized lock",
374 caller, (integer_t) l);
375 return USLOCK_CHECKING(l);
376}
377
378
379/*
380 * Debug checks on a usimple_lock just before attempting
381 * to acquire it.
382 */
383/* ARGSUSED */
384void
385usld_lock_pre(
386 usimple_lock_t l,
387 pc_t pc)
388{
389 char *caller = "usimple_lock";
390
391
392 if (!usld_lock_common_checks(l, caller))
393 return;
394
395/*
396 * Note that we have a weird case where we are getting a lock when we are
397 * in the process of putting the system to sleep. We are running with no
398 * current threads, therefore we can't tell if we are trying to retake a lock
399 * we have or someone on the other processor has it. Therefore we just
400 * ignore this test if the locking thread is 0.
401 */
402
403 if ((l->debug.state & USLOCK_TAKEN) && l->debug.lock_thread &&
404 l->debug.lock_thread == (void *) current_thread()) {
405 printf("%s: lock 0x%x already locked (at 0x%x) by",
406 caller, (integer_t) l, l->debug.lock_pc);
407 printf(" current thread 0x%x (new attempt at pc 0x%x)\n",
408 l->debug.lock_thread, pc);
409 panic(caller);
410 }
411 mp_disable_preemption();
412 usl_trace(l, cpu_number(), pc, caller);
413 mp_enable_preemption();
414}
415
416
417/*
418 * Debug checks on a usimple_lock just after acquiring it.
419 *
420 * Pre-emption has been disabled at this point,
421 * so we are safe in using cpu_number.
422 */
423void
424usld_lock_post(
425 usimple_lock_t l,
426 pc_t pc)
427{
428 register int mycpu;
429 char *caller = "successful usimple_lock";
430
431
432 if (!usld_lock_common_checks(l, caller))
433 return;
434
435 if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
436 panic("%s: lock 0x%x became uninitialized",
437 caller, (integer_t) l);
438 if ((l->debug.state & USLOCK_TAKEN))
439 panic("%s: lock 0x%x became TAKEN by someone else",
440 caller, (integer_t) l);
441
442 mycpu = cpu_number();
443 l->debug.lock_thread = (void *)current_thread();
444 l->debug.state |= USLOCK_TAKEN;
445 l->debug.lock_pc = pc;
446 l->debug.lock_cpu = mycpu;
447
448 usl_trace(l, mycpu, pc, caller);
449}
450
451
452/*
453 * Debug checks on a usimple_lock just before
454 * releasing it. Note that the caller has not
455 * yet released the hardware lock.
456 *
457 * Preemption is still disabled, so there's
458 * no problem using cpu_number.
459 */
460void
461usld_unlock(
462 usimple_lock_t l,
463 pc_t pc)
464{
465 register int mycpu;
466 char *caller = "usimple_unlock";
467
468
469 if (!usld_lock_common_checks(l, caller))
470 return;
471
472 mycpu = cpu_number();
473
474 if (!(l->debug.state & USLOCK_TAKEN))
475 panic("%s: lock 0x%x hasn't been taken",
476 caller, (integer_t) l);
477 if (l->debug.lock_thread != (void *) current_thread())
478 panic("%s: unlocking lock 0x%x, owned by thread 0x%x",
479 caller, (integer_t) l, l->debug.lock_thread);
480 if (l->debug.lock_cpu != mycpu) {
481 printf("%s: unlocking lock 0x%x on cpu 0x%x",
482 caller, (integer_t) l, mycpu);
483 printf(" (acquired on cpu 0x%x)\n", l->debug.lock_cpu);
484 panic(caller);
485 }
486 usl_trace(l, mycpu, pc, caller);
487
488 l->debug.unlock_thread = l->debug.lock_thread;
489 l->debug.lock_thread = INVALID_THREAD;
490 l->debug.state &= ~USLOCK_TAKEN;
491 l->debug.unlock_pc = pc;
492 l->debug.unlock_cpu = mycpu;
493}
494
495
496/*
497 * Debug checks on a usimple_lock just before
498 * attempting to acquire it.
499 *
500 * Preemption isn't guaranteed to be disabled.
501 */
502void
503usld_lock_try_pre(
504 usimple_lock_t l,
505 pc_t pc)
506{
507 char *caller = "usimple_lock_try";
508
509 if (!usld_lock_common_checks(l, caller))
510 return;
511 mp_disable_preemption();
512 usl_trace(l, cpu_number(), pc, caller);
513 mp_enable_preemption();
514}
515
516
517/*
518 * Debug checks on a usimple_lock just after
519 * successfully attempting to acquire it.
520 *
521 * Preemption has been disabled by the
522 * lock acquisition attempt, so it's safe
523 * to use cpu_number.
524 */
525void
526usld_lock_try_post(
527 usimple_lock_t l,
528 pc_t pc)
529{
530 register int mycpu;
531 char *caller = "successful usimple_lock_try";
532
533 if (!usld_lock_common_checks(l, caller))
534 return;
535
536 if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
537 panic("%s: lock 0x%x became uninitialized",
538 caller, (integer_t) l);
539 if ((l->debug.state & USLOCK_TAKEN))
540 panic("%s: lock 0x%x became TAKEN by someone else",
541 caller, (integer_t) l);
542
543 mycpu = cpu_number();
544 l->debug.lock_thread = (void *) current_thread();
545 l->debug.state |= USLOCK_TAKEN;
546 l->debug.lock_pc = pc;
547 l->debug.lock_cpu = mycpu;
548
549 usl_trace(l, mycpu, pc, caller);
550}
551
552
553/*
554 * For very special cases, set traced_lock to point to a
555 * specific lock of interest. The result is a series of
556 * XPRs showing lock operations on that lock. The lock_seq
557 * value is used to show the order of those operations.
558 */
559usimple_lock_t traced_lock;
560unsigned int lock_seq;
561
562void
563usl_trace(
564 usimple_lock_t l,
565 int mycpu,
566 pc_t pc,
567 const char * op_name)
568{
569 if (traced_lock == l) {
570 XPR(XPR_SLOCK,
571 "seq %d, cpu %d, %s @ %x\n",
572 (integer_t) lock_seq, (integer_t) mycpu,
573 (integer_t) op_name, (integer_t) pc, 0);
574 lock_seq++;
575 }
576}
577
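/*
 * Editor's illustrative sketch (not part of the original file): pointing
 * traced_lock at a lock of interest (from the debugger, or from temporary
 * instrumentation) makes every subsequent operation on it emit an XPR
 * record, ordered by lock_seq.
 */
#if 0	/* illustrative only */
static void
example_trace_this_lock(usimple_lock_t interesting)
{
	lock_seq = 0;			/* restart the sequence numbering */
	traced_lock = interesting;	/* usl_trace() now logs this lock */
}
#endif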
578
579#endif /* USLOCK_DEBUG */
580
581/*
582 * The C portion of the shared/exclusive locks package.
583 */
584
585/*
586 * Forward definition
587 */
588
589void lck_rw_lock_exclusive_gen(
590 lck_rw_t *lck);
591
592lck_rw_type_t lck_rw_done_gen(
593 lck_rw_t *lck);
594
595void
596lck_rw_lock_shared_gen(
597 lck_rw_t *lck);
598
599boolean_t
600lck_rw_lock_shared_to_exclusive_gen(
601 lck_rw_t *lck);
602
603void
604lck_rw_lock_exclusive_to_shared_gen(
605 lck_rw_t *lck);
606
607boolean_t
608lck_rw_try_lock_exclusive_gen(
609 lck_rw_t *lck);
610
611boolean_t
612lck_rw_try_lock_shared_gen(
613 lck_rw_t *lck);
614
615void lck_rw_ext_init(
616 lck_rw_ext_t *lck,
617 lck_grp_t *grp,
618 lck_attr_t *attr);
619
620void lck_rw_ext_backtrace(
621 lck_rw_ext_t *lck);
622
623void lck_rw_lock_exclusive_ext(
624 lck_rw_ext_t *lck,
625 lck_rw_t *rlck);
626
627lck_rw_type_t lck_rw_done_ext(
628 lck_rw_ext_t *lck,
629 lck_rw_t *rlck);
630
631void
632lck_rw_lock_shared_ext(
633 lck_rw_ext_t *lck,
634 lck_rw_t *rlck);
635
636boolean_t
637lck_rw_lock_shared_to_exclusive_ext(
638 lck_rw_ext_t *lck,
639 lck_rw_t *rlck);
640
641void
642lck_rw_lock_exclusive_to_shared_ext(
643 lck_rw_ext_t *lck,
644 lck_rw_t *rlck);
645
646boolean_t
647lck_rw_try_lock_exclusive_ext(
648 lck_rw_ext_t *lck,
649 lck_rw_t *rlck);
650
651boolean_t
652lck_rw_try_lock_shared_ext(
653 lck_rw_ext_t *lck,
654 lck_rw_t *rlck);
655
656void
657lck_rw_ilk_lock(
658 lck_rw_t *lck);
659
660void
661lck_rw_ilk_unlock(
662 lck_rw_t *lck);
663
664void
665lck_rw_check_type(
666 lck_rw_ext_t *lck,
667 lck_rw_t *rlck);
668
669/*
670 * Routine: lock_alloc
671 * Function:
672 * Allocate a lock for external users who cannot
673 * hard-code the structure definition into their
674 * objects.
675 * For now just use kalloc, but a zone is probably
676 * warranted.
677 */
678lock_t *
679lock_alloc(
680 boolean_t can_sleep,
681 __unused unsigned short tag,
682 __unused unsigned short tag1)
683{
684 lock_t *lck;
685
686 if ((lck = (lock_t *)kalloc(sizeof(lock_t))) != 0)
687 lock_init(lck, can_sleep, tag, tag1);
688 return(lck);
689}
690
691/*
692 * Routine: lock_init
693 * Function:
694 * Initialize a lock; required before use.
695 * Note that clients declare the "struct lock"
696 * variables and then initialize them, rather
697 * than getting a new one from this module.
698 */
699void
700lock_init(
701 lock_t *lck,
702 boolean_t can_sleep,
703 __unused unsigned short tag,
704 __unused unsigned short tag1)
705{
706 if (!can_sleep)
707 panic("lock_init: sleep mode must be set to TRUE\n");
708
709 (void) memset((void *) lck, 0, sizeof(lock_t));
710#if MACH_LDEBUG
711 lck->lck_rw_deb.type = RW_TAG;
712 lck->lck_rw_attr |= (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_THREAD|LCK_RW_ATTR_DIS_MYLOCK);
713 lck->lck_rw.lck_rw_priv_excl = TRUE;
714#else
715 lck->lck_rw_priv_excl = TRUE;
716#endif
717
718}
719
720
721/*
722 * Routine: lock_free
723 * Function:
724 * Free a lock allocated for external users.
725 * For now just use kfree, but a zone is probably
726 * warranted.
727 */
728void
729lock_free(
730 lock_t *lck)
731{
732 kfree((void *)lck, sizeof(lock_t));
733}
734
735#if MACH_LDEBUG
736void
737lock_write(
738 lock_t *lck)
739{
740 lck_rw_lock_exclusive_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
741}
742
743void
744lock_done(
745 lock_t *lck)
746{
747 (void)lck_rw_done_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
748}
749
750void
751lock_read(
752 lock_t *lck)
753{
754 lck_rw_lock_shared_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
755}
756
757boolean_t
758lock_read_to_write(
759 lock_t *lck)
760{
761 return(lck_rw_lock_shared_to_exclusive_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck));
762}
763
764void
765lock_write_to_read(
766 register lock_t *lck)
767{
768 lck_rw_lock_exclusive_to_shared_ext((lck_rw_ext_t *)lck, (lck_rw_t *)lck);
769}
770#endif
771
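/*
 * Editor's illustrative sketch (not part of the original file): usage of
 * the external lock_t interface wrapped above.  The TRUE-means-failed
 * return convention for lock_read_to_write() follows the comment on
 * lck_rw_lock_shared_to_exclusive_gen() later in this file.
 */
#if 0	/* illustrative only */
static void
example_lock_usage(void)
{
	lock_t *l = lock_alloc(TRUE, 0, 0);	/* must be sleepable */

	lock_read(l);				/* shared access */
	if (lock_read_to_write(l)) {
		/* upgrade failed: no lock is held here, re-acquire it */
		lock_write(l);
	}
	/* ... exclusive access ... */
	lock_done(l);				/* drop whatever is held */

	lock_free(l);
}
#endif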
772/*
773 * Routine: lck_rw_alloc_init
774 */
775lck_rw_t *
776lck_rw_alloc_init(
777 lck_grp_t *grp,
778 lck_attr_t *attr) {
779 lck_rw_t *lck;
780
781 if ((lck = (lck_rw_t *)kalloc(sizeof(lck_rw_t))) != 0)
782 lck_rw_init(lck, grp, attr);
783
784 return(lck);
785}
786
787/*
788 * Routine: lck_rw_free
789 */
790void
791lck_rw_free(
792 lck_rw_t *lck,
793 lck_grp_t *grp) {
794 lck_rw_destroy(lck, grp);
795 kfree((void *)lck, sizeof(lck_rw_t));
796}
797
798/*
799 * Routine: lck_rw_init
800 */
801void
802lck_rw_init(
803 lck_rw_t *lck,
804 lck_grp_t *grp,
805 lck_attr_t *attr) {
806 lck_rw_ext_t *lck_ext;
807 lck_attr_t *lck_attr;
808
809 if (attr != LCK_ATTR_NULL)
810 lck_attr = attr;
811 else
812 lck_attr = &LockDefaultLckAttr;
813
814 if ((lck_attr->lck_attr_val) & LCK_ATTR_DEBUG) {
815 if ((lck_ext = (lck_rw_ext_t *)kalloc(sizeof(lck_rw_ext_t))) != 0) {
816 lck_rw_ext_init(lck_ext, grp, lck_attr);
817 lck->lck_rw_tag = LCK_RW_TAG_INDIRECT;
818 lck->lck_rw_ptr = lck_ext;
819 }
820 } else {
821 (void) memset((void *) lck, 0, sizeof(lck_rw_t));
822 if ((lck_attr->lck_attr_val) & LCK_ATTR_RW_SHARED_PRIORITY)
823 lck->lck_rw_priv_excl = FALSE;
824 else
825 lck->lck_rw_priv_excl = TRUE;
826 }
827
828 lck_grp_reference(grp);
829 lck_grp_lckcnt_incr(grp, LCK_TYPE_RW);
830}
831
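/*
 * Editor's illustrative sketch (not part of the original file): creating a
 * lock group/attribute pair and a read/write lock with them, assuming the
 * generic lck_grp_*/lck_attr_* allocators from the machine-independent
 * locks code.  Passing LCK_ATTR_NULL instead selects LockDefaultLckAttr,
 * and the LCK_ATTR_DEBUG path allocates the indirect lck_rw_ext_t, as
 * handled above.
 */
#if 0	/* illustrative only */
static void
example_rw_setup(void)
{
	lck_grp_attr_t	*gattr = lck_grp_attr_alloc_init();
	lck_attr_t	*lattr = lck_attr_alloc_init();
	lck_grp_t	*grp = lck_grp_alloc_init("example", gattr);
	lck_rw_t	*rw = lck_rw_alloc_init(grp, lattr);

	lck_rw_lock(rw, LCK_RW_TYPE_SHARED);	/* dispatcher defined below */
	lck_rw_unlock(rw, LCK_RW_TYPE_SHARED);

	lck_rw_free(rw, grp);
}
#endif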
832/*
833 * Routine: lck_rw_ext_init
834 */
835void
836lck_rw_ext_init(
837 lck_rw_ext_t *lck,
838 lck_grp_t *grp,
839 lck_attr_t *attr) {
840
841 bzero((void *)lck, sizeof(lck_rw_ext_t));
842 if ((attr->lck_attr_val) & LCK_ATTR_RW_SHARED_PRIORITY)
843 lck->lck_rw.lck_rw_priv_excl = FALSE;
844 else
845 lck->lck_rw.lck_rw_priv_excl = TRUE;
846
847 if ((attr->lck_attr_val) & LCK_ATTR_DEBUG) {
848 lck->lck_rw_deb.type = RW_TAG;
849 lck->lck_rw_attr |= LCK_RW_ATTR_DEBUG;
850 }
851
852 lck->lck_rw_grp = grp;
853
854 if (grp->lck_grp_attr & LCK_GRP_ATTR_STAT)
855 lck->lck_rw_attr |= LCK_RW_ATTR_STAT;
856}
857
858/*
859 * Routine: lck_rw_destroy
860 */
861void
862lck_rw_destroy(
863 lck_rw_t *lck,
864 lck_grp_t *grp) {
865 boolean_t lck_is_indirect;
866
867 if (lck->lck_rw_tag == LCK_RW_TAG_DESTROYED)
868 return;
869 lck_is_indirect = (lck->lck_rw_tag == LCK_RW_TAG_INDIRECT);
870 lck->lck_rw_tag = LCK_RW_TAG_DESTROYED;
871 if (lck_is_indirect)
872 kfree((void *)lck->lck_rw_ptr, sizeof(lck_rw_ext_t));
873
874 lck_grp_lckcnt_decr(grp, LCK_TYPE_RW);
875 lck_grp_deallocate(grp);
876 return;
877}
878
879/*
880 * Routine: lck_rw_lock
881 */
882void
883lck_rw_lock(
884 lck_rw_t *lck,
885 lck_rw_type_t lck_rw_type)
886{
887 if (lck_rw_type == LCK_RW_TYPE_SHARED)
888 lck_rw_lock_shared(lck);
889 else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
890 lck_rw_lock_exclusive(lck);
891 else
892 panic("lck_rw_lock(): Invalid RW lock type: %d\n", lck_rw_type);
893}
894
895
896/*
897 * Routine: lck_rw_unlock
898 */
899void
900lck_rw_unlock(
901 lck_rw_t *lck,
902 lck_rw_type_t lck_rw_type)
903{
904 if (lck_rw_type == LCK_RW_TYPE_SHARED)
905 lck_rw_unlock_shared(lck);
906 else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
907 lck_rw_unlock_exclusive(lck);
908 else
909 panic("lck_rw_unlock(): Invalid RW lock type: %d\n", lck_rw_type);
910}
911
912
913/*
914 * Routine: lck_rw_unlock_shared
915 */
916void
917lck_rw_unlock_shared(
918 lck_rw_t *lck)
919{
920 lck_rw_type_t ret;
921
922 ret = lck_rw_done(lck);
923
924 if (ret != LCK_RW_TYPE_SHARED)
925 panic("lck_rw_unlock_shared(): lock held in mode: %d\n", ret);
926}
927
928
929/*
930 * Routine: lck_rw_unlock_exclusive
931 */
932void
933lck_rw_unlock_exclusive(
934 lck_rw_t *lck)
935{
936 lck_rw_type_t ret;
937
938 ret = lck_rw_done(lck);
939
940 if (ret != LCK_RW_TYPE_EXCLUSIVE)
941 panic("lck_rw_unlock_exclusive(): lock held in mode: %d\n", ret);
942}
943
944
945/*
946 * Routine: lck_rw_try_lock
947 */
948boolean_t
949lck_rw_try_lock(
950 lck_rw_t *lck,
951 lck_rw_type_t lck_rw_type)
952{
953 if (lck_rw_type == LCK_RW_TYPE_SHARED)
954 return(lck_rw_try_lock_shared(lck));
955 else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
956 return(lck_rw_try_lock_exclusive(lck));
957 else
958 panic("lck_rw_try_lock(): Invalid rw lock type: %x\n", lck_rw_type);
959 return(FALSE);
960}
961
962
963
964/*
965 * Routine: lck_rw_lock_exclusive_gen
966 */
967void
968lck_rw_lock_exclusive_gen(
969 lck_rw_t *lck)
970{
971 int i;
972 boolean_t lock_miss = FALSE;
973 wait_result_t res;
974
975 lck_rw_ilk_lock(lck);
976
977 /*
978 * Try to acquire the lck_rw_want_excl bit.
979 */
980 while (lck->lck_rw_want_excl) {
981 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_START, (int)lck, 0, 0, 0, 0);
982
983 if (!lock_miss) {
984 lock_miss = TRUE;
985 }
986
987 i = lock_wait_time[1];
988 if (i != 0) {
989 lck_rw_ilk_unlock(lck);
990 while (--i != 0 && lck->lck_rw_want_excl)
991 continue;
992 lck_rw_ilk_lock(lck);
993 }
994
995 if (lck->lck_rw_want_excl) {
996 lck->lck_rw_waiting = TRUE;
997 res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
998 if (res == THREAD_WAITING) {
999 lck_rw_ilk_unlock(lck);
1000 res = thread_block(THREAD_CONTINUE_NULL);
1001 lck_rw_ilk_lock(lck);
1002 }
1003 }
1004 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_END, (int)lck, res, 0, 0, 0);
1005 }
1006 lck->lck_rw_want_excl = TRUE;
1007
1008 /* Wait for readers (and upgrades) to finish */
1009
1010 while ((lck->lck_rw_shared_cnt != 0) || lck->lck_rw_want_upgrade) {
1011 if (!lock_miss) {
1012 lock_miss = TRUE;
1013 }
1014
1015 i = lock_wait_time[1];
1016
1017 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_START,
1018 (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, i, 0);
1019
1020 if (i != 0) {
1021 lck_rw_ilk_unlock(lck);
1022 while (--i != 0 && (lck->lck_rw_shared_cnt != 0 ||
1023 lck->lck_rw_want_upgrade))
1024 continue;
1025 lck_rw_ilk_lock(lck);
1026 }
1027
1028 if (lck->lck_rw_shared_cnt != 0 || lck->lck_rw_want_upgrade) {
1029 lck->lck_rw_waiting = TRUE;
1030 res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1031 if (res == THREAD_WAITING) {
1032 lck_rw_ilk_unlock(lck);
1033 res = thread_block(THREAD_CONTINUE_NULL);
1034 lck_rw_ilk_lock(lck);
1035 }
1036 }
1037 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_END,
1038 (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, res, 0);
1039 }
1040
1041 lck_rw_ilk_unlock(lck);
1042}
1043
1044
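/*
 * Editor's note (illustrative, not part of the original file): every
 * assert_wait()/thread_wakeup() pair in the lck_rw_* code synchronizes on
 * the same event -- the address of the last 32-bit word inside the
 * lck_rw_t.  A sketch of that computation:
 */
#if 0	/* illustrative only */
static event_t
lck_rw_sleep_event(lck_rw_t *lck)
{
	/* point at the last unsigned int that fits inside the structure */
	return (event_t)(((unsigned int *)lck) +
	    ((sizeof(lck_rw_t) - 1) / sizeof(unsigned int)));
}
#endif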
1045/*
1046 * Routine: lck_rw_done_gen
1047 */
1048lck_rw_type_t
1049lck_rw_done_gen(
1050 lck_rw_t *lck)
1051{
1052 boolean_t do_wakeup = FALSE;
1053 lck_rw_type_t lck_rw_type;
1054
1055
1056 lck_rw_ilk_lock(lck);
1057
1058 if (lck->lck_rw_shared_cnt != 0) {
1059 lck_rw_type = LCK_RW_TYPE_SHARED;
1060 lck->lck_rw_shared_cnt--;
1061 }
1062 else {
1063 lck_rw_type = LCK_RW_TYPE_EXCLUSIVE;
1064 if (lck->lck_rw_want_upgrade)
1065 lck->lck_rw_want_upgrade = FALSE;
1066 else
1067 lck->lck_rw_want_excl = FALSE;
1068 }
1069
1070 /*
1071 * There is no reason to wakeup a lck_rw_waiting thread
1072 * if the read-count is non-zero. Consider:
1073 * we must be dropping a read lock
1074 * threads are waiting only if one wants a write lock
1075 * if there are still readers, they can't proceed
1076 */
1077
1078 if (lck->lck_rw_waiting && (lck->lck_rw_shared_cnt == 0)) {
1079 lck->lck_rw_waiting = FALSE;
1080 do_wakeup = TRUE;
1081 }
1082
1083 lck_rw_ilk_unlock(lck);
1084
1085 if (do_wakeup)
1086 thread_wakeup((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
1087 return(lck_rw_type);
1088}
1089
1090
1091/*
1092 * Routine: lck_rw_lock_shared_gen
1093 */
1094void
1095lck_rw_lock_shared_gen(
1096 lck_rw_t *lck)
1097{
1098 int i;
1099 wait_result_t res;
1100
1101 lck_rw_ilk_lock(lck);
1102
1103 while ((lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) &&
1104 ((lck->lck_rw_shared_cnt == 0) || (lck->lck_rw_priv_excl))) {
1105 i = lock_wait_time[1];
1106
1107 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_START,
1108 (int)lck, lck->lck_rw_want_excl, lck->lck_rw_want_upgrade, i, 0);
1109
1110 if (i != 0) {
1111 lck_rw_ilk_unlock(lck);
1112 while (--i != 0 &&
1113 (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) &&
1114 ((lck->lck_rw_shared_cnt == 0) || (lck->lck_rw_priv_excl)))
1115 continue;
1116 lck_rw_ilk_lock(lck);
1117 }
1118
1119 if ((lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) &&
1120 ((lck->lck_rw_shared_cnt == 0) || (lck->lck_rw_priv_excl))) {
1121 lck->lck_rw_waiting = TRUE;
1122 res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1123 if (res == THREAD_WAITING) {
1124 lck_rw_ilk_unlock(lck);
1125 res = thread_block(THREAD_CONTINUE_NULL);
1126 lck_rw_ilk_lock(lck);
1127 }
1128 }
1129 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_END,
1130 (int)lck, lck->lck_rw_want_excl, lck->lck_rw_want_upgrade, res, 0);
1131 }
1132
1133 lck->lck_rw_shared_cnt++;
1134
1135 lck_rw_ilk_unlock(lck);
1136}
1137
1138
1139/*
1140 * Routine: lck_rw_lock_shared_to_exclusive_gen
1141 * Function:
1142 * Improves a read-only lock to one with
1143 * write permission. If another reader has
1144 * already requested an upgrade to a write lock,
1145 * no lock is held upon return.
1146 *
1147 * Returns TRUE if the upgrade *failed*.
1148 */
1149
1150boolean_t
1151lck_rw_lock_shared_to_exclusive_gen(
1152 lck_rw_t *lck)
1153{
1154 int i;
1155 boolean_t do_wakeup = FALSE;
1156 wait_result_t res;
1157
1158 lck_rw_ilk_lock(lck);
1159
1160 lck->lck_rw_shared_cnt--;
1161
1162 if (lck->lck_rw_want_upgrade) {
1163 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_START,
1164 (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, 0, 0);
1165
1166 /*
1167 * Someone else has requested upgrade.
1168 * Since we've released a read lock, wake
1169 * him up.
1170 */
1171 if (lck->lck_rw_waiting && (lck->lck_rw_shared_cnt == 0)) {
1172 lck->lck_rw_waiting = FALSE;
1173 do_wakeup = TRUE;
1174 }
1175
1176 lck_rw_ilk_unlock(lck);
1177
1178 if (do_wakeup)
1179 thread_wakeup((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
1180
1181 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_END,
1182 (int)lck, lck->lck_rw_shared_cnt, lck->lck_rw_want_upgrade, 0, 0);
1183
1184 return (TRUE);
1185 }
1186
1187 lck->lck_rw_want_upgrade = TRUE;
1188
1189 while (lck->lck_rw_shared_cnt != 0) {
1190 i = lock_wait_time[1];
1191
1192 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_START,
1193 (int)lck, lck->lck_rw_shared_cnt, i, 0, 0);
1194
1195 if (i != 0) {
1196 lck_rw_ilk_unlock(lck);
1197 while (--i != 0 && lck->lck_rw_shared_cnt != 0)
1198 continue;
1199 lck_rw_ilk_lock(lck);
1200 }
1201
1202 if (lck->lck_rw_shared_cnt != 0) {
1203 lck->lck_rw_waiting = TRUE;
1204 res = assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1205 if (res == THREAD_WAITING) {
1206 lck_rw_ilk_unlock(lck);
1207 res = thread_block(THREAD_CONTINUE_NULL);
1208 lck_rw_ilk_lock(lck);
1209 }
1210 }
1211 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_END,
1212 (int)lck, lck->lck_rw_shared_cnt, 0, 0, 0);
1213 }
1214
1215 lck_rw_ilk_unlock(lck);
1216
1217 return (FALSE);
1218}
1219
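/*
 * Editor's illustrative sketch (not part of the original file): caller-side
 * handling of a shared-to-exclusive upgrade, using the TRUE-means-failed
 * convention documented above.  When the upgrade fails the lock is no
 * longer held at all, so the caller must re-acquire it and revalidate any
 * state it observed while holding the shared lock.
 */
#if 0	/* illustrative only */
static void
example_upgrade(lck_rw_t *lck)
{
	lck_rw_lock_shared(lck);
	/* ... inspect state under the shared hold ... */

	if (lck_rw_lock_shared_to_exclusive(lck)) {
		/* upgrade failed: nothing is held here */
		lck_rw_lock_exclusive(lck);
		/* ... revalidate the state observed above ... */
	}
	/* ... modify state under the exclusive hold ... */
	lck_rw_unlock_exclusive(lck);
}
#endif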
1220/*
1221 * Routine: lck_rw_lock_exclusive_to_shared_gen
1222 */
1223void
1224lck_rw_lock_exclusive_to_shared_gen(
1225 lck_rw_t *lck)
1226{
1227 boolean_t do_wakeup = FALSE;
1228
1229 lck_rw_ilk_lock(lck);
1230
1231 lck->lck_rw_shared_cnt++;
1232 if (lck->lck_rw_want_upgrade)
1233 lck->lck_rw_want_upgrade = FALSE;
1234 else
1235 lck->lck_rw_want_excl = FALSE;
1236
1237 if (lck->lck_rw_waiting) {
1238 lck->lck_rw_waiting = FALSE;
1239 do_wakeup = TRUE;
1240 }
1241
1242 lck_rw_ilk_unlock(lck);
1243
1244 if (do_wakeup)
1245 thread_wakeup((event_t)(((unsigned int*)lck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
1246
1247}
1248
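/*
 * Editor's illustrative sketch (not part of the original file): the
 * downgrade pattern served by the routine above -- do the setup that needs
 * exclusive access, then convert the hold to shared so other readers can
 * proceed while this thread keeps reading.
 */
#if 0	/* illustrative only */
static void
example_downgrade(lck_rw_t *lck)
{
	lck_rw_lock_exclusive(lck);
	/* ... mutate state while exclusive ... */
	lck_rw_lock_exclusive_to_shared(lck);
	/* ... continue reading alongside other readers ... */
	lck_rw_unlock_shared(lck);
}
#endif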
1249
1250/*
1251 * Routine: lck_rw_try_lock_exclusive_gen
1252 * Function:
1253 * Tries to get a write lock.
1254 *
1255 * Returns FALSE if the lock is not held on return.
1256 */
1257
1258boolean_t
1259lck_rw_try_lock_exclusive_gen(
1260 lck_rw_t *lck)
1261{
1262 lck_rw_ilk_lock(lck);
1263
1264 if (lck->lck_rw_want_excl || lck->lck_rw_want_upgrade || lck->lck_rw_shared_cnt) {
1265 /*
1266 * Can't get lock.
1267 */
1268 lck_rw_ilk_unlock(lck);
1269 return(FALSE);
1270 }
1271
1272 /*
1273 * Have lock.
1274 */
1275
1276 lck->lck_rw_want_excl = TRUE;
1277
1278 lck_rw_ilk_unlock(lck);
1279
1280 return(TRUE);
1281}
1282
1283/*
1284 * Routine: lck_rw_try_lock_shared_gen
1285 * Function:
1286 * Tries to get a read lock.
1287 *
1288 * Returns FALSE if the lock is not held on return.
1289 */
1290
1291boolean_t
1292lck_rw_try_lock_shared_gen(
1293 lck_rw_t *lck)
1294{
1295 lck_rw_ilk_lock(lck);
1296
1297 if ((lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) &&
1298 ((lck->lck_rw_shared_cnt == 0) || (lck->lck_rw_priv_excl))) {
1299 lck_rw_ilk_unlock(lck);
1300 return(FALSE);
1301 }
1302
1303 lck->lck_rw_shared_cnt++;
1304
1305 lck_rw_ilk_unlock(lck);
1306
1307 return(TRUE);
1308}
1309
1310
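/*
 * Editor's illustrative sketch (not part of the original file): the
 * non-blocking try-lock pattern.  lck_rw_try_lock() dispatches to the
 * shared/exclusive try routines above and never sleeps, so the caller must
 * cope with a FALSE return.
 */
#if 0	/* illustrative only */
static boolean_t
example_try_shared(lck_rw_t *lck)
{
	if (!lck_rw_try_lock(lck, LCK_RW_TYPE_SHARED))
		return FALSE;		/* contended: caller falls back or retries */

	/* ... read-only work ... */
	lck_rw_unlock_shared(lck);
	return TRUE;
}
#endif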
1311/*
1312 * Routine: lck_rw_ext_backtrace
1313 */
1314void
1315lck_rw_ext_backtrace(
1316 lck_rw_ext_t *lck)
1317{
1318 unsigned int *stackptr, *stackptr_prev;
1319 unsigned int frame;
1320
1321 __asm__ volatile("mr %0,r1" : "=r" (stackptr));
1322 frame = 0;
1323 while (frame < LCK_FRAMES_MAX) {
1324 stackptr_prev = stackptr;
1325 stackptr = ( unsigned int *)*stackptr;
1326 if ( (((unsigned int)stackptr_prev) ^ ((unsigned int)stackptr)) > 8192)
1327 break;
1328 lck->lck_rw_deb.stack[frame] = *(stackptr+2);
1329 frame++;
1330 }
1331 while (frame < LCK_FRAMES_MAX) {
1332 lck->lck_rw_deb.stack[frame] = 0;
1333 frame++;
1334 }
1335}
1336
1337
1338/*
1339 * Routine: lck_rw_lock_exclusive_ext
1340 */
1341void
1342lck_rw_lock_exclusive_ext(
1343 lck_rw_ext_t *lck,
1344 lck_rw_t *rlck)
1345{
1346 int i;
1347 wait_result_t res;
1348 boolean_t lock_miss = FALSE;
1349 boolean_t lock_wait = FALSE;
1350 boolean_t lock_stat;
1351
1352 lck_rw_check_type(lck, rlck);
1353
1354 if ( ((lck->lck_rw_attr & (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_MYLOCK)) == LCK_RW_ATTR_DEBUG)
1355 && (lck->lck_rw_deb.thread == current_thread()))
1356 panic("rw lock (0x%08X) recursive lock attempt\n", rlck);
1357
1358 lck_rw_ilk_lock(&lck->lck_rw);
1359
1360 lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;
1361
1362 if (lock_stat)
1363 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;
1364
1365 /*
1366 * Try to acquire the lck_rw.lck_rw_want_excl bit.
1367 */
1368 while (lck->lck_rw.lck_rw_want_excl) {
1369 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_START, (int)rlck, 0, 0, 0, 0);
1370
1371 if (lock_stat && !lock_miss) {
1372 lock_miss = TRUE;
1373 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
1374 }
1375
1376 i = lock_wait_time[1];
1377 if (i != 0) {
1378 lck_rw_ilk_unlock(&lck->lck_rw);
1379 while (--i != 0 && lck->lck_rw.lck_rw_want_excl)
1380 continue;
1381 lck_rw_ilk_lock(&lck->lck_rw);
1382 }
1383
1384 if (lck->lck_rw.lck_rw_want_excl) {
1385 lck->lck_rw.lck_rw_waiting = TRUE;
1386 res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1387 if (res == THREAD_WAITING) {
1388 if (lock_stat && !lock_wait) {
1389 lock_wait = TRUE;
1390 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
1391 }
1392 lck_rw_ilk_unlock(&lck->lck_rw);
1393 res = thread_block(THREAD_CONTINUE_NULL);
1394 lck_rw_ilk_lock(&lck->lck_rw);
1395 }
1396 }
1397 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_END, (int)rlck, res, 0, 0, 0);
1398 }
1399 lck->lck_rw.lck_rw_want_excl = TRUE;
1400
1401 /* Wait for readers (and upgrades) to finish */
1402
1403 while ((lck->lck_rw.lck_rw_shared_cnt != 0) || lck->lck_rw.lck_rw_want_upgrade) {
1404 i = lock_wait_time[1];
1405
1406 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_START,
1407 (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, i, 0);
1408
1409 if (lock_stat && !lock_miss) {
1410 lock_miss = TRUE;
1411 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
1412 }
1413
1414 if (i != 0) {
1415 lck_rw_ilk_unlock(&lck->lck_rw);
1416 while (--i != 0 && (lck->lck_rw.lck_rw_shared_cnt != 0 ||
1417 lck->lck_rw.lck_rw_want_upgrade))
1418 continue;
1419 lck_rw_ilk_lock(&lck->lck_rw);
1420 }
1421
1422 if (lck->lck_rw.lck_rw_shared_cnt != 0 || lck->lck_rw.lck_rw_want_upgrade) {
1423 lck->lck_rw.lck_rw_waiting = TRUE;
1424 res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1425 if (res == THREAD_WAITING) {
1426 if (lock_stat && !lock_wait) {
1427 lock_wait = TRUE;
1428 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
1429 }
1430 lck_rw_ilk_unlock(&lck->lck_rw);
1431 res = thread_block(THREAD_CONTINUE_NULL);
1432 lck_rw_ilk_lock(&lck->lck_rw);
1433 }
1434 }
1435 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_END,
1436 (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, res, 0);
1437 }
1438
1439 lck->lck_rw_deb.pc_excl = __builtin_return_address(0);
1440 if (LcksOpts & enaLkExtStck)
1441 lck_rw_ext_backtrace(lck);
1442 lck->lck_rw_deb.thread = current_thread();
1443
1444 lck_rw_ilk_unlock(&lck->lck_rw);
1445}
1446
1447
1448/*
1449 * Routine: lck_rw_done_ext
1450 */
1451lck_rw_type_t
1452lck_rw_done_ext(
1453 lck_rw_ext_t *lck,
1454 lck_rw_t *rlck)
1455{
1456 boolean_t do_wakeup = FALSE;
1457 lck_rw_type_t lck_rw_type;
1458
1459
1460 lck_rw_check_type(lck, rlck);
1461
1462 lck_rw_ilk_lock(&lck->lck_rw);
1463
1464 if (lck->lck_rw.lck_rw_shared_cnt != 0) {
1465 lck_rw_type = LCK_RW_TYPE_SHARED;
1466 lck->lck_rw.lck_rw_shared_cnt--;
1467 }
1468 else {
1469 lck_rw_type = LCK_RW_TYPE_EXCLUSIVE;
1470 if (lck->lck_rw.lck_rw_want_upgrade)
1471 lck->lck_rw.lck_rw_want_upgrade = FALSE;
1472 else if (lck->lck_rw.lck_rw_want_excl)
1473 lck->lck_rw.lck_rw_want_excl = FALSE;
1474 else
1475 panic("rw lock (0x%08X) bad state (0x%08X) on attempt to release a shared or exclusive right\n",
1476 rlck, lck->lck_rw);
1477 if (lck->lck_rw_deb.thread == THREAD_NULL)
1478 panic("rw lock (0x%08X) not held\n",
1479 rlck);
1480 else if ( ((lck->lck_rw_attr & (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_THREAD)) == LCK_RW_ATTR_DEBUG)
1481 && (lck->lck_rw_deb.thread != current_thread()))
1482 panic("rw lock (0x%08X) unlocked by non-owner(0x%08X), current owner(0x%08X)\n",
1483 rlck, current_thread(), lck->lck_rw_deb.thread);
1484 lck->lck_rw_deb.thread = THREAD_NULL;
1485 }
1486
1487 if (lck->lck_rw_attr & LCK_RW_ATTR_DEBUG)
1488 lck->lck_rw_deb.pc_done = __builtin_return_address(0);
1489
1490 /*
1491 * There is no reason to wakeup a waiting thread
1492 * if the read-count is non-zero. Consider:
1493 * we must be dropping a read lock
1494 * threads are waiting only if one wants a write lock
1495 * if there are still readers, they can't proceed
1496 */
1497
1498 if (lck->lck_rw.lck_rw_waiting && (lck->lck_rw.lck_rw_shared_cnt == 0)) {
1499 lck->lck_rw.lck_rw_waiting = FALSE;
1500 do_wakeup = TRUE;
1501 }
1502
1503 lck_rw_ilk_unlock(&lck->lck_rw);
1504
1505 if (do_wakeup)
1506 thread_wakeup((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
1507 return(lck_rw_type);
1508}
1509
1510
1511/*
1512 * Routine: lck_rw_lock_shared_ext
1513 */
1514void
1515lck_rw_lock_shared_ext(
1516 lck_rw_ext_t *lck,
1517 lck_rw_t *rlck)
1518{
1519 int i;
1520 wait_result_t res;
1521 boolean_t lock_miss = FALSE;
1522 boolean_t lock_wait = FALSE;
1523 boolean_t lock_stat;
1524
1525 lck_rw_check_type(lck, rlck);
1526
1527 lck_rw_ilk_lock(&lck->lck_rw);
1528
1529 lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;
1530
1531 if (lock_stat)
1532 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;
1533
1534 while ((lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade) &&
1535 ((lck->lck_rw.lck_rw_shared_cnt == 0) || (lck->lck_rw.lck_rw_priv_excl))) {
1536 i = lock_wait_time[1];
1537
1538 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_START,
1539 (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, i, 0);
1540
1541 if (lock_stat && !lock_miss) {
1542 lock_miss = TRUE;
1543 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
1544 }
1545
1546 if (i != 0) {
1547 lck_rw_ilk_unlock(&lck->lck_rw);
1548 while (--i != 0 &&
1549 (lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade) &&
1550 ((lck->lck_rw.lck_rw_shared_cnt == 0) || (lck->lck_rw.lck_rw_priv_excl)))
1551 continue;
1552 lck_rw_ilk_lock(&lck->lck_rw);
1553 }
1554
1555 if ((lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade) &&
1556 ((lck->lck_rw.lck_rw_shared_cnt == 0) || (lck->lck_rw.lck_rw_priv_excl))) {
1557 lck->lck_rw.lck_rw_waiting = TRUE;
1558 res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1559 if (res == THREAD_WAITING) {
1560 if (lock_stat && !lock_wait) {
1561 lock_wait = TRUE;
1562 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
1563 }
1564 lck_rw_ilk_unlock(&lck->lck_rw);
1565 res = thread_block(THREAD_CONTINUE_NULL);
1566 lck_rw_ilk_lock(&lck->lck_rw);
1567 }
1568 }
1569 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_END,
1570 (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, res, 0);
1571 }
1572
1573 lck->lck_rw.lck_rw_shared_cnt++;
1574
1575 lck_rw_ilk_unlock(&lck->lck_rw);
1576}
1577
1578
1579/*
1580 * Routine: lck_rw_lock_shared_to_exclusive_ext
1581 * Function:
1582 * Improves a read-only lock to one with
1583 * write permission. If another reader has
1584 * already requested an upgrade to a write lock,
1585 * no lock is held upon return.
1586 *
1587 * Returns TRUE if the upgrade *failed*.
1588 */
1589
1590boolean_t
1591lck_rw_lock_shared_to_exclusive_ext(
1592 lck_rw_ext_t *lck,
1593 lck_rw_t *rlck)
1594{
1595 int i;
1596 boolean_t do_wakeup = FALSE;
1597 wait_result_t res;
1598 boolean_t lock_miss = FALSE;
1599 boolean_t lock_wait = FALSE;
1600 boolean_t lock_stat;
1601
1602 lck_rw_check_type(lck, rlck);
1603
1604 if (lck->lck_rw_deb.thread == current_thread())
1605 panic("rw lock (0x%08X) recursive lock attempt\n", rlck);
1606
1607 lck_rw_ilk_lock(&lck->lck_rw);
1608
1609 lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;
1610
1611 if (lock_stat)
1612 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;
1613
1614 lck->lck_rw.lck_rw_shared_cnt--;
1615
1616 if (lck->lck_rw.lck_rw_want_upgrade) {
1617 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_START,
1618 (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, 0, 0);
1619
1620 /*
1621 * Someone else has requested upgrade.
1622 * Since we've released a read lock, wake
1623 * him up.
1624 */
1625 if (lck->lck_rw.lck_rw_waiting && (lck->lck_rw.lck_rw_shared_cnt == 0)) {
1626 lck->lck_rw.lck_rw_waiting = FALSE;
1627 do_wakeup = TRUE;
1628 }
1629
1630 lck_rw_ilk_unlock(&lck->lck_rw);
1631
1632 if (do_wakeup)
1633 thread_wakeup((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
1634
1635 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_END,
1636 (int)rlck, lck->lck_rw.lck_rw_shared_cnt, lck->lck_rw.lck_rw_want_upgrade, 0, 0);
1637
1638 return (TRUE);
1639 }
1640
1641 lck->lck_rw.lck_rw_want_upgrade = TRUE;
1642
1643 while (lck->lck_rw.lck_rw_shared_cnt != 0) {
1644 i = lock_wait_time[1];
1645
1646 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_START,
1647 (int)rlck, lck->lck_rw.lck_rw_shared_cnt, i, 0, 0);
1648
1649 if (lock_stat && !lock_miss) {
1650 lock_miss = TRUE;
1651 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
1652 }
1653
1654 if (i != 0) {
1655 lck_rw_ilk_unlock(&lck->lck_rw);
1656 while (--i != 0 && lck->lck_rw.lck_rw_shared_cnt != 0)
1657 continue;
1658 lck_rw_ilk_lock(&lck->lck_rw);
1659 }
1660
1661 if (lck->lck_rw.lck_rw_shared_cnt != 0) {
1662 lck->lck_rw.lck_rw_waiting = TRUE;
1663 res = assert_wait((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
1664 if (res == THREAD_WAITING) {
1665 if (lock_stat && !lock_wait) {
1666 lock_wait = TRUE;
1667 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt++;
1668 }
1669 lck_rw_ilk_unlock(&lck->lck_rw);
1670 res = thread_block(THREAD_CONTINUE_NULL);
1671 lck_rw_ilk_lock(&lck->lck_rw);
1672 }
1673 }
1674 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_END,
1675 (int)rlck, lck->lck_rw.lck_rw_shared_cnt, 0, 0, 0);
1676 }
1677
1678 lck->lck_rw_deb.pc_excl = __builtin_return_address(0);
1679 if (LcksOpts & enaLkExtStck)
1680 lck_rw_ext_backtrace(lck);
1681 lck->lck_rw_deb.thread = current_thread();
1682
1683 lck_rw_ilk_unlock(&lck->lck_rw);
1684
1685 return (FALSE);
1686}
1687
1688/*
1689 * Routine: lck_rw_lock_exclusive_to_shared_ext
1690 */
1691void
1692lck_rw_lock_exclusive_to_shared_ext(
1693 lck_rw_ext_t *lck,
1694 lck_rw_t *rlck)
1695{
1696 boolean_t do_wakeup = FALSE;
1697
1698 lck_rw_check_type(lck, rlck);
1699
1700 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_START,
1701 (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, 0, 0);
1702
1703 lck_rw_ilk_lock(&lck->lck_rw);
1704
1705 lck->lck_rw.lck_rw_shared_cnt++;
1706 if (lck->lck_rw.lck_rw_want_upgrade)
1707 lck->lck_rw.lck_rw_want_upgrade = FALSE;
1708 else if (lck->lck_rw.lck_rw_want_excl)
1709 lck->lck_rw.lck_rw_want_excl = FALSE;
1710 else
1711 panic("rw lock (0x%08X) bad state (0x%08X) on attempt to release a shared or exclusive right\n",
1712 rlck, lck->lck_rw);
1713 if (lck->lck_rw_deb.thread == THREAD_NULL)
1714 panic("rw lock (0x%08X) not held\n",
1715 rlck);
1716 else if ( ((lck->lck_rw_attr & (LCK_RW_ATTR_DEBUG|LCK_RW_ATTR_DIS_THREAD)) == LCK_RW_ATTR_DEBUG)
1717 && (lck->lck_rw_deb.thread != current_thread()))
1718 panic("rw lock (0x%08X) unlocked by non-owner(0x%08X), current owner(0x%08X)\n",
1719 rlck, current_thread(), lck->lck_rw_deb.thread);
1720
1721 lck->lck_rw_deb.thread = THREAD_NULL;
1722
1723 if (lck->lck_rw.lck_rw_waiting) {
1724 lck->lck_rw.lck_rw_waiting = FALSE;
1725 do_wakeup = TRUE;
1726 }
1727
1728 lck_rw_ilk_unlock(&lck->lck_rw);
1729
1730 if (do_wakeup)
1731 thread_wakeup((event_t)(((unsigned int*)rlck)+((sizeof(lck_rw_t)-1)/sizeof(unsigned int))));
1732
1733 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_END,
1734 (int)rlck, lck->lck_rw.lck_rw_want_excl, lck->lck_rw.lck_rw_want_upgrade, lck->lck_rw.lck_rw_shared_cnt, 0);
1735
1736}
1737
1738
1739/*
1740 * Routine: lck_rw_try_lock_exclusive_ext
1741 * Function:
1742 * Tries to get a write lock.
1743 *
1744 * Returns FALSE if the lock is not held on return.
1745 */
1746
1747boolean_t
1748lck_rw_try_lock_exclusive_ext(
1749 lck_rw_ext_t *lck,
1750 lck_rw_t *rlck)
1751{
1752 boolean_t lock_stat;
1753
1754 lck_rw_check_type(lck, rlck);
1755
1756 lck_rw_ilk_lock(&lck->lck_rw);
1757
1758 lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;
1759
1760 if (lock_stat)
1761 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;
1762
1763 if (lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade || lck->lck_rw.lck_rw_shared_cnt) {
1764 /*
1765 * Can't get lock.
1766 */
1767 if (lock_stat) {
1768 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
1769 }
1770 lck_rw_ilk_unlock(&lck->lck_rw);
1771 return(FALSE);
1772 }
1773
1774 /*
1775 * Have lock.
1776 */
1777
1778 lck->lck_rw.lck_rw_want_excl = TRUE;
1779 lck->lck_rw_deb.pc_excl = __builtin_return_address(0);
1780 if (LcksOpts & enaLkExtStck)
1781 lck_rw_ext_backtrace(lck);
1782 lck->lck_rw_deb.thread = current_thread();
1783
1784 lck_rw_ilk_unlock(&lck->lck_rw);
1785
1786 return(TRUE);
1787}
1788
1789/*
1790 * Routine: lck_rw_try_lock_shared_ext
1791 * Function:
1792 * Tries to get a read lock.
1793 *
1794 * Returns FALSE if the lock is not held on return.
1795 */
1796
1797boolean_t
1798lck_rw_try_lock_shared_ext(
1799 lck_rw_ext_t *lck,
1800 lck_rw_t *rlck)
1801{
1802 boolean_t lock_stat;
1803
1804 lck_rw_check_type(lck, rlck);
1805
1806 lck_rw_ilk_lock(&lck->lck_rw);
1807
1808 lock_stat = (lck->lck_rw_attr & LCK_RW_ATTR_STAT) ? TRUE : FALSE;
1809
1810 if (lock_stat)
1811 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt++;
1812
1813 if ((lck->lck_rw.lck_rw_want_excl || lck->lck_rw.lck_rw_want_upgrade) &&
1814 ((lck->lck_rw.lck_rw_shared_cnt == 0) || (lck->lck_rw.lck_rw_priv_excl))) {
1815 if (lock_stat) {
1816 lck->lck_rw_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt++;
1817 }
1818 lck_rw_ilk_unlock(&lck->lck_rw);
1819 return(FALSE);
1820 }
1821
1822 lck->lck_rw.lck_rw_shared_cnt++;
1823
1824 lck_rw_ilk_unlock(&lck->lck_rw);
1825
1826 return(TRUE);
1827}
1828
1829void
1830lck_rw_check_type(
1831 lck_rw_ext_t *lck,
1832 lck_rw_t *rlck)
1833{
1834 if (lck->lck_rw_deb.type != RW_TAG)
1835 panic("rw lock (0x%08X) not a rw lock type (0x%08X)\n",rlck, lck->lck_rw_deb.type);
1836}
1837
1838/*
1839 * The C portion of the mutex package. These routines are only invoked
1840 * if the optimized assembler routines can't do the work.
1841 */
1842
1843/*
1844 * Forward definition
1845 */
1846
1847void lck_mtx_ext_init(
1848 lck_mtx_ext_t *lck,
1849 lck_grp_t *grp,
1850 lck_attr_t *attr);
1851
1852/*
1853 * Routine: mutex_alloc
1854 * Function:
1855 * Allocate a mutex for external users who cannot
1856 * hard-code the structure definition into their
1857 * objects.
1858 * For now just use kalloc, but a zone is probably
1859 * warranted.
1860 */
1861mutex_t *
1862mutex_alloc(
1863 unsigned short tag)
1864{
1865 mutex_t *m;
1866
1867 if ((m = (mutex_t *)kalloc(sizeof(mutex_t))) != 0)
1868 mutex_init(m, tag);
1869 return(m);
1870}
1871
1872/*
1873 * Routine: mutex_free
1874 */
1875void
1876mutex_free(
1877 mutex_t *m)
1878{
1879 kfree((void *)m, sizeof(mutex_t));
1880}
1881
1882/*
1883 * Routine: lck_mtx_alloc_init
1884 */
1885lck_mtx_t *
1886lck_mtx_alloc_init(
1887 lck_grp_t *grp,
1888 lck_attr_t *attr) {
1889 lck_mtx_t *lck;
1890
1891 if ((lck = (lck_mtx_t *)kalloc(sizeof(lck_mtx_t))) != 0)
1892 lck_mtx_init(lck, grp, attr);
1893
1894 return(lck);
1895}
1896
1897/*
1898 * Routine: lck_mtx_free
1899 */
1900void
1901lck_mtx_free(
1902 lck_mtx_t *lck,
1903 lck_grp_t *grp) {
1904 lck_mtx_destroy(lck, grp);
1905 kfree((void *)lck, sizeof(lck_mtx_t));
1906}
1907
1908/*
1909 * Routine: lck_mtx_init
1910 */
1911void
1912lck_mtx_init(
1913 lck_mtx_t *lck,
1914 lck_grp_t *grp,
1915 lck_attr_t *attr) {
1916 lck_mtx_ext_t *lck_ext;
1917 lck_attr_t *lck_attr;
1918
1919 if (attr != LCK_ATTR_NULL)
1920 lck_attr = attr;
1921 else
1922 lck_attr = &LockDefaultLckAttr;
1923
1924 if ((lck_attr->lck_attr_val) & LCK_ATTR_DEBUG) {
1925 if ((lck_ext = (lck_mtx_ext_t *)kalloc(sizeof(lck_mtx_ext_t))) != 0) {
1926 lck_mtx_ext_init(lck_ext, grp, lck_attr);
1927 lck->lck_mtx_tag = LCK_MTX_TAG_INDIRECT;
1928 lck->lck_mtx_ptr = lck_ext;
1929 }
1930 } else {
1931 lck->lck_mtx_data = 0;
1932 lck->lck_mtx_waiters = 0;
1933 lck->lck_mtx_pri = 0;
1934 }
1935 lck_grp_reference(grp);
1936 lck_grp_lckcnt_incr(grp, LCK_TYPE_MTX);
1937}
1938
1939/*
1940 * Routine: lck_mtx_ext_init
1941 */
1942void
1943lck_mtx_ext_init(
1944 lck_mtx_ext_t *lck,
1945 lck_grp_t *grp,
1946 lck_attr_t *attr) {
1947
1948 bzero((void *)lck, sizeof(lck_mtx_ext_t));
1949
1950 if ((attr->lck_attr_val) & LCK_ATTR_DEBUG) {
1951 lck->lck_mtx_deb.type = MUTEX_TAG;
1952 lck->lck_mtx_attr |= LCK_MTX_ATTR_DEBUG;
1953 }
1954
1955 lck->lck_mtx_grp = grp;
1956
1957 if (grp->lck_grp_attr & LCK_GRP_ATTR_STAT)
1958 lck->lck_mtx_attr |= LCK_MTX_ATTR_STAT;
1959}
1960
1961/*
1962 * Routine: lck_mtx_destroy
1963 */
1964void
1965lck_mtx_destroy(
1966 lck_mtx_t *lck,
1967 lck_grp_t *grp) {
1968 boolean_t lck_is_indirect;
1969
1970 if (lck->lck_mtx_tag == LCK_MTX_TAG_DESTROYED)
1971 return;
1972 lck_is_indirect = (lck->lck_mtx_tag == LCK_MTX_TAG_INDIRECT);
1973 lck->lck_mtx_tag = LCK_MTX_TAG_DESTROYED;
1974 if (lck_is_indirect)
1975 kfree((void *)lck->lck_mtx_ptr, sizeof(lck_mtx_ext_t));
1976
1977 lck_grp_lckcnt_decr(grp, LCK_TYPE_MTX);
1978 lck_grp_deallocate(grp);
1979 return;
1980}
1981
1982
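/*
 * Editor's illustrative sketch (not part of the original file): a minimal
 * mutex life cycle with the lck_mtx_* routines initialized above, assuming
 * the usual lck_mtx_lock()/lck_mtx_unlock() entry points, which per the
 * comment above are implemented in the optimized assembler code rather
 * than in this file.
 */
#if 0	/* illustrative only */
static void
example_mtx_usage(lck_grp_t *grp, lck_attr_t *attr)
{
	lck_mtx_t *mtx = lck_mtx_alloc_init(grp, attr);

	lck_mtx_lock(mtx);		/* may block while contended */
	/* ... critical section ... */
	lck_mtx_unlock(mtx);

	lck_mtx_free(mtx, grp);		/* destroy + free */
}
#endif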
1983#if MACH_KDB
1984/*
1985 * Routines to print out simple_locks and mutexes in a nicely-formatted
1986 * fashion.
1987 */
1988
1989char *simple_lock_labels = "ENTRY ILK THREAD DURATION CALLER";
1990char *mutex_labels = "ENTRY LOCKED WAITERS THREAD CALLER";
1991
1992void db_print_simple_lock(
1993 simple_lock_t addr);
1994
1995void db_print_mutex(
1996 mutex_t * addr);
1997
1998void
1999db_show_one_simple_lock (
2000 db_expr_t addr,
2001 boolean_t have_addr,
2002 db_expr_t count,
2003 char * modif)
2004{
2005 simple_lock_t saddr = (simple_lock_t)addr;
2006
2007 if (saddr == (simple_lock_t)0 || !have_addr) {
2008 db_error ("No simple_lock\n");
2009 }
2010#if USLOCK_DEBUG
2011 else if (saddr->lock_type != USLOCK_TAG)
2012 db_error ("Not a simple_lock\n");
2013#endif /* USLOCK_DEBUG */
2014
2015 db_printf ("%s\n", simple_lock_labels);
2016 db_print_simple_lock (saddr);
2017}
2018
2019void
2020db_print_simple_lock (
2021 simple_lock_t addr)
2022{
2023
2024 db_printf ("%08x %3d", addr, *hw_lock_addr(addr->interlock));
2025#if USLOCK_DEBUG
2026 db_printf (" %08x", addr->debug.lock_thread);
2027 db_printf (" %08x ", addr->debug.duration[1]);
2028 db_printsym ((int)addr->debug.lock_pc, DB_STGY_ANY);
2029#endif /* USLOCK_DEBUG */
2030 db_printf ("\n");
2031}
2032
2033void
2034db_show_one_mutex (
2035 db_expr_t addr,
2036 boolean_t have_addr,
2037 db_expr_t count,
2038 char * modif)
2039{
2040 mutex_t * maddr = (mutex_t *)addr;
2041
2042 if (maddr == (mutex_t *)0 || !have_addr)
2043 db_error ("No mutex\n");
2044#if MACH_LDEBUG
2045 else if (maddr->lck_mtx_deb.type != MUTEX_TAG)
2046 db_error ("Not a mutex\n");
2047#endif /* MACH_LDEBUG */
2048
2049 db_printf ("%s\n", mutex_labels);
2050 db_print_mutex (maddr);
2051}
2052
2053void
2054db_print_mutex (
2055 mutex_t * addr)
2056{
2057 db_printf ("%08x %6d %7d",
2058 addr, *addr, addr->lck_mtx.lck_mtx_waiters);
2059#if MACH_LDEBUG
2060 db_printf (" %08x ", addr->lck_mtx_deb.thread);
2061 db_printsym (addr->lck_mtx_deb.stack[0], DB_STGY_ANY);
2062#endif /* MACH_LDEBUG */
2063 db_printf ("\n");
2064}
2065
2066void
2067db_show_one_lock(
2068 lock_t *lock)
2069{
2070 db_printf("shared_count = 0x%x, %swant_upgrade, %swant_exclusive, ",
2071 lock->lck_rw.lck_rw_shared_cnt,
2072 lock->lck_rw.lck_rw_want_upgrade ? "" : "!",
2073 lock->lck_rw.lck_rw_want_excl ? "" : "!");
2074 db_printf("%swaiting\n",
2075 lock->lck_rw.lck_rw_waiting ? "" : "!");
2076 db_printf("%sInterlock\n",
2077 lock->lck_rw.lck_rw_interlock ? "" : "!");
2078}
2079
2080#endif /* MACH_KDB */
2081