/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	kern/lock.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Locking primitives implementation
 */

#include <mach_kdb.h>
#include <mach_ldebug.h>

#include <kern/lock.h>
#include <kern/locks.h>
#include <kern/kalloc.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/sched_prim.h>
#include <kern/xpr.h>
#include <kern/debug.h>
#include <string.h>

#if	MACH_KDB
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_print.h>
#endif	/* MACH_KDB */

#ifdef	__ppc__
#include <ppc/Firmware.h>
#endif

#include <sys/kdebug.h>

#define	LCK_RW_LCK_EXCLUSIVE_CODE	0x100
#define	LCK_RW_LCK_EXCLUSIVE1_CODE	0x101
#define	LCK_RW_LCK_SHARED_CODE		0x102
#define	LCK_RW_LCK_SH_TO_EX_CODE	0x103
#define	LCK_RW_LCK_SH_TO_EX1_CODE	0x104
#define	LCK_RW_LCK_EX_TO_SH_CODE	0x105


#define	ANY_LOCK_DEBUG	(USLOCK_DEBUG || LOCK_DEBUG || MUTEX_DEBUG)

unsigned int LcksOpts=0;
unsigned int lock_wait_time[2] = { (unsigned int)-1, 100 };

/* Forwards */

#if	MACH_KDB
void	db_print_simple_lock(
			simple_lock_t	addr);

void	db_print_mutex(
			mutex_t		*addr);
#endif	/* MACH_KDB */


#if	USLOCK_DEBUG
/*
 *	Perform simple lock checks.
 */
int	uslock_check = 1;
int	max_lock_loops	= 100000000;
decl_simple_lock_data(extern , printf_lock)
decl_simple_lock_data(extern , panic_lock)
#if	MACH_KDB
decl_simple_lock_data(extern , kdb_lock)
#endif	/* MACH_KDB */
#endif	/* USLOCK_DEBUG */


/*
 *	We often want to know the addresses of the callers
 *	of the various lock routines.  However, this information
 *	is only used for debugging and statistics.
 */
typedef void	*pc_t;
#define	INVALID_PC	((void *) VM_MAX_KERNEL_ADDRESS)
#define	INVALID_THREAD	((void *) VM_MAX_KERNEL_ADDRESS)
#if	ANY_LOCK_DEBUG
#define	OBTAIN_PC(pc,l)	((pc) = (void *) GET_RETURN_PC(&(l)))
#define DECL_PC(pc)	pc_t pc;
#else	/* ANY_LOCK_DEBUG */
#define DECL_PC(pc)
#ifdef	lint
/*
 *	Eliminate lint complaints about unused local pc variables.
 */
#define	OBTAIN_PC(pc,l)	++pc
#else	/* lint */
#define	OBTAIN_PC(pc,l)
#endif	/* lint */
#endif	/* ANY_LOCK_DEBUG */


/*
 *	Portable lock package implementation of usimple_locks.
 */

#if	USLOCK_DEBUG
#define	USLDBG(stmt)	stmt
void		usld_lock_init(usimple_lock_t, unsigned short);
void		usld_lock_pre(usimple_lock_t, pc_t);
void		usld_lock_post(usimple_lock_t, pc_t);
void		usld_unlock(usimple_lock_t, pc_t);
void		usld_lock_try_pre(usimple_lock_t, pc_t);
void		usld_lock_try_post(usimple_lock_t, pc_t);
int		usld_lock_common_checks(usimple_lock_t, char *);
#else	/* USLOCK_DEBUG */
#define	USLDBG(stmt)
#endif	/* USLOCK_DEBUG */

/*
 *	Routine:	lck_spin_alloc_init
 */
lck_spin_t *
lck_spin_alloc_init(
	lck_grp_t	*grp,
	lck_attr_t	*attr)
{
	lck_spin_t	*lck;

	if ((lck = (lck_spin_t *)kalloc(sizeof(lck_spin_t))) != 0)
		lck_spin_init(lck, grp, attr);

	return(lck);
}

/*
 *	Routine:	lck_spin_free
 */
void
lck_spin_free(
	lck_spin_t	*lck,
	lck_grp_t	*grp)
{
	lck_spin_destroy(lck, grp);
	kfree(lck, sizeof(lck_spin_t));
}

/*
 *	Routine:	lck_spin_init
 */
void
lck_spin_init(
	lck_spin_t	*lck,
	lck_grp_t	*grp,
	__unused lck_attr_t	*attr)
{
	usimple_lock_init((usimple_lock_t) lck, 0);
	lck_grp_reference(grp);
	lck_grp_lckcnt_incr(grp, LCK_TYPE_SPIN);
}

/*
 *	Routine:	lck_spin_destroy
 */
void
lck_spin_destroy(
	lck_spin_t	*lck,
	lck_grp_t	*grp)
{
	if (lck->lck_spin_data[0] == LCK_SPIN_TAG_DESTROYED)
		return;
	lck->lck_spin_data[0] = LCK_SPIN_TAG_DESTROYED;
	lck_grp_lckcnt_decr(grp, LCK_TYPE_SPIN);
	lck_grp_deallocate(grp);
	return;
}

/*
 *	Routine:	lck_spin_lock
 */
void
lck_spin_lock(
	lck_spin_t	*lck)
{
	usimple_lock((usimple_lock_t) lck);
}

/*
 *	Routine:	lck_spin_unlock
 */
void
lck_spin_unlock(
	lck_spin_t	*lck)
{
	usimple_unlock((usimple_lock_t) lck);
}


/*
 *	Routine:	lck_spin_try_lock
 */
boolean_t
lck_spin_try_lock(
	lck_spin_t	*lck)
{
	return(usimple_lock_try((usimple_lock_t) lck));
}

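/*
 * Illustrative usage sketch (not part of the original file): a minimal
 * client of the lck_spin KPI above, assuming the standard group/attribute
 * routines from kern/locks.h (lck_grp_alloc_init, lck_grp_free); the group
 * name is hypothetical.  Note that the try-lock return value must always
 * be checked, since the lock may be busy.
 */
#if 0	/* usage sketch only; not compiled */
static void
spin_usage_sketch(void)
{
	lck_grp_t	*grp;
	lck_spin_t	*slck;

	grp = lck_grp_alloc_init("sketch", LCK_GRP_ATTR_NULL);
	slck = lck_spin_alloc_init(grp, LCK_ATTR_NULL);

	lck_spin_lock(slck);			/* spins; returns with preemption disabled */
	/* ... short, non-blocking critical section ... */
	lck_spin_unlock(slck);

	if (lck_spin_try_lock(slck)) {		/* TRUE only if the lock was acquired */
		lck_spin_unlock(slck);
	}

	lck_spin_free(slck, grp);
	lck_grp_free(grp);
}
#endif
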
/*
 *	Initialize a usimple_lock.
 *
 *	No change in preemption state.
 */
void
usimple_lock_init(
	usimple_lock_t	l,
	__unused unsigned short	tag)
{
#ifndef	MACHINE_SIMPLE_LOCK
	USLDBG(usld_lock_init(l, tag));
	hw_lock_init(&l->interlock);
#else
	simple_lock_init((simple_lock_t)l, tag);
#endif
}


/*
 *	Acquire a usimple_lock.
 *
 *	Returns with preemption disabled.  Note
 *	that the hw_lock routines are responsible for
 *	maintaining preemption state.
 */
void
usimple_lock(
	usimple_lock_t	l)
{
#ifndef	MACHINE_SIMPLE_LOCK
	pc_t		pc = NULL;

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_pre(l, pc));

	if (!hw_lock_to(&l->interlock, LockTimeOut))	/* Try to get the lock with a timeout */
		panic("simple lock deadlock detection - l=%08X, cpu=%d, ret=%08X", l, cpu_number(), pc);

	USLDBG(usld_lock_post(l, pc));
#else
	simple_lock((simple_lock_t)l);
#endif
}


/*
 *	Release a usimple_lock.
 *
 *	Returns with preemption enabled.  Note
 *	that the hw_lock routines are responsible for
 *	maintaining preemption state.
 */
void
usimple_unlock(
	usimple_lock_t	l)
{
#ifndef	MACHINE_SIMPLE_LOCK
	DECL_PC(pc);

	OBTAIN_PC(pc, l);
	USLDBG(usld_unlock(l, pc));
	hw_lock_unlock(&l->interlock);
#else
	simple_unlock_rwmb((simple_lock_t)l);
#endif
}


/*
 *	Conditionally acquire a usimple_lock.
 *
 *	On success, returns with preemption disabled.
 *	On failure, returns with preemption in the same state
 *	as when first invoked.  Note that the hw_lock routines
 *	are responsible for maintaining preemption state.
 *
 *	XXX No stats are gathered on a miss; I preserved this
 *	behavior from the original assembly-language code, but
 *	doesn't it make sense to log misses?  XXX
 */
unsigned int
usimple_lock_try(
	usimple_lock_t	l)
{
#ifndef	MACHINE_SIMPLE_LOCK
	DECL_PC(pc);
	unsigned int	success;

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_try_pre(l, pc));
	if ((success = hw_lock_try(&l->interlock))) {
		USLDBG(usld_lock_try_post(l, pc));
	}
	return success;
#else
	return(simple_lock_try((simple_lock_t)l));
#endif
}

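/*
 * Illustrative sketch (not part of the original file): the usual
 * try/fall-back pattern for usimple_lock_try().  On failure the caller's
 * preemption state is unchanged, so it is safe to do other work and retry,
 * or to fall back to the blocking usimple_lock().
 */
#if 0	/* usage sketch only; not compiled */
static void
usimple_try_sketch(usimple_lock_t l)
{
	if (usimple_lock_try(l)) {
		/* got it: preemption stays disabled until usimple_unlock() */
		usimple_unlock(l);
	} else {
		usimple_lock(l);	/* blocking acquire (spins, with timeout panic) */
		usimple_unlock(l);
	}
}
#endif
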
#if	USLOCK_DEBUG
/*
 *	States of a usimple_lock.  The default when initializing
 *	a usimple_lock is setting it up for debug checking.
 */
#define	USLOCK_CHECKED		0x0001		/* lock is being checked */
#define	USLOCK_TAKEN		0x0002		/* lock has been taken */
#define	USLOCK_INIT		0xBAA0		/* lock has been initialized */
#define	USLOCK_INITIALIZED	(USLOCK_INIT|USLOCK_CHECKED)
#define	USLOCK_CHECKING(l)	(uslock_check &&	\
				 ((l)->debug.state & USLOCK_CHECKED))

/*
 *	Trace activities of a particularly interesting lock.
 */
void	usl_trace(usimple_lock_t, int, pc_t, const char *);


/*
 *	Initialize the debugging information contained
 *	in a usimple_lock.
 */
void
usld_lock_init(
	usimple_lock_t	l,
	__unused unsigned short	tag)
{
	if (l == USIMPLE_LOCK_NULL)
		panic("lock initialization: null lock pointer");
	l->lock_type = USLOCK_TAG;
	l->debug.state = uslock_check ? USLOCK_INITIALIZED : 0;
	l->debug.lock_cpu = l->debug.unlock_cpu = 0;
	l->debug.lock_pc = l->debug.unlock_pc = INVALID_PC;
	l->debug.lock_thread = l->debug.unlock_thread = INVALID_THREAD;
	l->debug.duration[0] = l->debug.duration[1] = 0;
	l->debug.unlock_cpu = l->debug.unlock_cpu = 0;
	l->debug.unlock_pc = l->debug.unlock_pc = INVALID_PC;
	l->debug.unlock_thread = l->debug.unlock_thread = INVALID_THREAD;
}


/*
 *	These checks apply to all usimple_locks, not just
 *	those with USLOCK_CHECKED turned on.
 */
int
usld_lock_common_checks(
	usimple_lock_t	l,
	char		*caller)
{
	if (l == USIMPLE_LOCK_NULL)
		panic("%s: null lock pointer", caller);
	if (l->lock_type != USLOCK_TAG)
		panic("%s: 0x%x is not a usimple lock", caller, (integer_t) l);
	if (!(l->debug.state & USLOCK_INIT))
		panic("%s: 0x%x is not an initialized lock",
		      caller, (integer_t) l);
	return USLOCK_CHECKING(l);
}


/*
 *	Debug checks on a usimple_lock just before attempting
 *	to acquire it.
 */
/* ARGSUSED */
void
usld_lock_pre(
	usimple_lock_t	l,
	pc_t		pc)
{
	char	caller[] = "usimple_lock";


	if (!usld_lock_common_checks(l, caller))
		return;

/*
 *	Note that we have a weird case where we are getting a lock when we are
 *	in the process of putting the system to sleep.  We are running with no
 *	current threads, therefore we can't tell if we are trying to retake a
 *	lock we already hold or if some other processor holds it.  Therefore we
 *	just ignore this test if the locking thread is 0.
 */

	if ((l->debug.state & USLOCK_TAKEN) && l->debug.lock_thread &&
	    l->debug.lock_thread == (void *) current_thread()) {
		printf("%s: lock 0x%x already locked (at 0x%x) by",
		       caller, (integer_t) l, l->debug.lock_pc);
		printf(" current thread 0x%x (new attempt at pc 0x%x)\n",
		       l->debug.lock_thread, pc);
		panic(caller);
	}
	mp_disable_preemption();
	usl_trace(l, cpu_number(), pc, caller);
	mp_enable_preemption();
}


/*
 *	Debug checks on a usimple_lock just after acquiring it.
 *
 *	Pre-emption has been disabled at this point,
 *	so we are safe in using cpu_number.
 */
void
usld_lock_post(
	usimple_lock_t	l,
	pc_t		pc)
{
	register int	mycpu;
	char	caller[] = "successful usimple_lock";


	if (!usld_lock_common_checks(l, caller))
		return;

	if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
		panic("%s: lock 0x%x became uninitialized",
		      caller, (integer_t) l);
	if ((l->debug.state & USLOCK_TAKEN))
		panic("%s: lock 0x%x became TAKEN by someone else",
		      caller, (integer_t) l);

	mycpu = cpu_number();
	l->debug.lock_thread = (void *)current_thread();
	l->debug.state |= USLOCK_TAKEN;
	l->debug.lock_pc = pc;
	l->debug.lock_cpu = mycpu;

	usl_trace(l, mycpu, pc, caller);
}


/*
 *	Debug checks on a usimple_lock just before
 *	releasing it.  Note that the caller has not
 *	yet released the hardware lock.
 *
 *	Preemption is still disabled, so there's
 *	no problem using cpu_number.
 */
void
usld_unlock(
	usimple_lock_t	l,
	pc_t		pc)
{
	register int	mycpu;
	char	caller[] = "usimple_unlock";


	if (!usld_lock_common_checks(l, caller))
		return;

	mycpu = cpu_number();

	if (!(l->debug.state & USLOCK_TAKEN))
		panic("%s: lock 0x%x hasn't been taken",
		      caller, (integer_t) l);
	if (l->debug.lock_thread != (void *) current_thread())
		panic("%s: unlocking lock 0x%x, owned by thread 0x%x",
		      caller, (integer_t) l, l->debug.lock_thread);
	if (l->debug.lock_cpu != mycpu) {
		printf("%s: unlocking lock 0x%x on cpu 0x%x",
		       caller, (integer_t) l, mycpu);
		printf(" (acquired on cpu 0x%x)\n", l->debug.lock_cpu);
		panic(caller);
	}
	usl_trace(l, mycpu, pc, caller);

	l->debug.unlock_thread = l->debug.lock_thread;
	l->debug.lock_thread = INVALID_THREAD;
	l->debug.state &= ~USLOCK_TAKEN;
	l->debug.unlock_pc = pc;
	l->debug.unlock_cpu = mycpu;
}


/*
 *	Debug checks on a usimple_lock just before
 *	attempting to acquire it.
 *
 *	Preemption isn't guaranteed to be disabled.
 */
void
usld_lock_try_pre(
	usimple_lock_t	l,
	pc_t		pc)
{
	char	caller[] = "usimple_lock_try";

	if (!usld_lock_common_checks(l, caller))
		return;
	mp_disable_preemption();
	usl_trace(l, cpu_number(), pc, caller);
	mp_enable_preemption();
}


/*
 *	Debug checks on a usimple_lock just after
 *	successfully attempting to acquire it.
 *
 *	Preemption has been disabled by the
 *	lock acquisition attempt, so it's safe
 *	to use cpu_number.
 */
void
usld_lock_try_post(
	usimple_lock_t	l,
	pc_t		pc)
{
	register int	mycpu;
	char	caller[] = "successful usimple_lock_try";

	if (!usld_lock_common_checks(l, caller))
		return;

	if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
		panic("%s: lock 0x%x became uninitialized",
		      caller, (integer_t) l);
	if ((l->debug.state & USLOCK_TAKEN))
		panic("%s: lock 0x%x became TAKEN by someone else",
		      caller, (integer_t) l);

	mycpu = cpu_number();
	l->debug.lock_thread = (void *) current_thread();
	l->debug.state |= USLOCK_TAKEN;
	l->debug.lock_pc = pc;
	l->debug.lock_cpu = mycpu;

	usl_trace(l, mycpu, pc, caller);
}


/*
 *	For very special cases, set traced_lock to point to a
 *	specific lock of interest.  The result is a series of
 *	XPRs showing lock operations on that lock.  The lock_seq
 *	value is used to show the order of those operations.
 */
usimple_lock_t	traced_lock;
unsigned int	lock_seq;

void
usl_trace(
	usimple_lock_t	l,
	int		mycpu,
	pc_t		pc,
	const char *	op_name)
{
	if (traced_lock == l) {
		XPR(XPR_SLOCK,
		    "seq %d, cpu %d, %s @ %x\n",
		    (integer_t) lock_seq, (integer_t) mycpu,
		    (integer_t) op_name, (integer_t) pc, 0);
		lock_seq++;
	}
}

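/*
 * Illustrative sketch (not part of the original file): to trace a single
 * lock, point traced_lock at it (for example from a debugger or early
 * boot code) and every subsequent operation on it emits an XPR_SLOCK
 * record tagged with lock_seq.  Clearing the pointer stops the tracing.
 */
#if 0	/* usage sketch only; not compiled */
static void
trace_this_lock(usimple_lock_t l)
{
	traced_lock = l;			/* start emitting XPR records for l */
	/* ... exercise the lock ... */
	traced_lock = USIMPLE_LOCK_NULL;	/* stop tracing */
}
#endif
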

#endif	/* USLOCK_DEBUG */

/*
 *	Routine:	lock_alloc
 *	Function:
 *		Allocate a lock for external users who cannot
 *		hard-code the structure definition into their
 *		objects.
 *		For now just use kalloc, but a zone is probably
 *		warranted.
 */
lock_t *
lock_alloc(
	boolean_t	can_sleep,
	unsigned short	tag,
	unsigned short	tag1)
{
	lock_t		*l;

	if ((l = (lock_t *)kalloc(sizeof(lock_t))) != 0)
		lock_init(l, can_sleep, tag, tag1);
	return(l);
}

/*
 *	Routine:	lock_free
 *	Function:
 *		Free a lock allocated for external users.
 *		For now just use kfree, but a zone is probably
 *		warranted.
 */
void
lock_free(
	lock_t		*l)
{
	kfree(l, sizeof(lock_t));
}


/*
 *	Routine:	lock_init
 *	Function:
 *		Initialize a lock; required before use.
 *		Note that clients declare the "struct lock"
 *		variables and then initialize them, rather
 *		than getting a new one from this module.
 */
void
lock_init(
	lock_t		*l,
	boolean_t	can_sleep,
	__unused unsigned short	tag,
	unsigned short	tag1)
{
	(void) memset((void *) l, 0, sizeof(lock_t));

	simple_lock_init(&l->interlock, tag1);
	l->want_write = FALSE;
	l->want_upgrade = FALSE;
	l->read_count = 0;
	l->can_sleep = can_sleep;
}

/*
 *	Sleep locks.  These use the same data structure and algorithm
 *	as the spin locks, but the process sleeps while it is waiting
 *	for the lock.  These work on uniprocessor systems.
 */

#define DECREMENTER_TIMEOUT 1000000

void
lock_write(
	register lock_t	* l)
{
	register int	i;
	boolean_t	lock_miss = FALSE;
#if	MACH_LDEBUG
	int		decrementer;
#endif	/* MACH_LDEBUG */

	simple_lock(&l->interlock);

#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */

	/*
	 *	Try to acquire the want_write bit.
	 */
	while (l->want_write) {
		if (!lock_miss) {
			lock_miss = TRUE;
		}

		i = lock_wait_time[l->can_sleep ? 1 : 0];
		if (i != 0) {
			simple_unlock(&l->interlock);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - want_write");
#endif	/* MACH_LDEBUG */
			while (--i != 0 && l->want_write)
				continue;
			simple_lock(&l->interlock);
		}

		if (l->can_sleep && l->want_write) {
			l->waiting = TRUE;
			thread_sleep_simple_lock((event_t) l,
				simple_lock_addr(l->interlock),
				THREAD_UNINT);
			/* interlock relocked */
		}
	}
	l->want_write = TRUE;

	/* Wait for readers (and upgrades) to finish */

#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */
	while ((l->read_count != 0) || l->want_upgrade) {
		if (!lock_miss) {
			lock_miss = TRUE;
		}

		i = lock_wait_time[l->can_sleep ? 1 : 0];
		if (i != 0) {
			simple_unlock(&l->interlock);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - wait for readers");
#endif	/* MACH_LDEBUG */
			while (--i != 0 && (l->read_count != 0 ||
					    l->want_upgrade))
				continue;
			simple_lock(&l->interlock);
		}

		if (l->can_sleep && (l->read_count != 0 || l->want_upgrade)) {
			l->waiting = TRUE;
			thread_sleep_simple_lock((event_t) l,
				simple_lock_addr(l->interlock),
				THREAD_UNINT);
			/* interlock relocked */
		}
	}

	simple_unlock(&l->interlock);
}

void
lock_done(
	register lock_t	* l)
{
	boolean_t	do_wakeup = FALSE;


	simple_lock(&l->interlock);

	if (l->read_count != 0) {
		l->read_count--;
	}
	else
	if (l->want_upgrade) {
		l->want_upgrade = FALSE;
	}
	else {
		l->want_write = FALSE;
	}

	/*
	 *	There is no reason to wakeup a waiting thread
	 *	if the read-count is non-zero.  Consider:
	 *		we must be dropping a read lock
	 *		threads are waiting only if one wants a write lock
	 *		if there are still readers, they can't proceed
	 */

	if (l->waiting && (l->read_count == 0)) {
		l->waiting = FALSE;
		do_wakeup = TRUE;
	}

	simple_unlock(&l->interlock);

	if (do_wakeup)
		thread_wakeup((event_t) l);
}

void
lock_read(
	register lock_t	* l)
{
	register int	i;
#if	MACH_LDEBUG
	int		decrementer;
#endif	/* MACH_LDEBUG */

	simple_lock(&l->interlock);

#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */
	while (l->want_write || l->want_upgrade) {
		i = lock_wait_time[l->can_sleep ? 1 : 0];

		if (i != 0) {
			simple_unlock(&l->interlock);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - wait no writers");
#endif	/* MACH_LDEBUG */
			while (--i != 0 && (l->want_write || l->want_upgrade))
				continue;
			simple_lock(&l->interlock);
		}

		if (l->can_sleep && (l->want_write || l->want_upgrade)) {
			l->waiting = TRUE;
			thread_sleep_simple_lock((event_t) l,
				simple_lock_addr(l->interlock),
				THREAD_UNINT);
			/* interlock relocked */
		}
	}

	l->read_count++;

	simple_unlock(&l->interlock);
}


/*
 *	Routine:	lock_read_to_write
 *	Function:
 *		Improves a read-only lock to one with
 *		write permission.  If another reader has
 *		already requested an upgrade to a write lock,
 *		no lock is held upon return.
 *
 *		Returns TRUE if the upgrade *failed*.
 */

boolean_t
lock_read_to_write(
	register lock_t	* l)
{
	register int	i;
	boolean_t	do_wakeup = FALSE;
#if	MACH_LDEBUG
	int		decrementer;
#endif	/* MACH_LDEBUG */

	simple_lock(&l->interlock);

	l->read_count--;

	if (l->want_upgrade) {
		/*
		 *	Someone else has requested upgrade.
		 *	Since we've released a read lock, wake
		 *	him up.
		 */
		if (l->waiting && (l->read_count == 0)) {
			l->waiting = FALSE;
			do_wakeup = TRUE;
		}

		simple_unlock(&l->interlock);

		if (do_wakeup)
			thread_wakeup((event_t) l);
		return (TRUE);
	}

	l->want_upgrade = TRUE;

#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */
	while (l->read_count != 0) {
		i = lock_wait_time[l->can_sleep ? 1 : 0];

		if (i != 0) {
			simple_unlock(&l->interlock);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - read_count");
#endif	/* MACH_LDEBUG */
			while (--i != 0 && l->read_count != 0)
				continue;
			simple_lock(&l->interlock);
		}

		if (l->can_sleep && l->read_count != 0) {
			l->waiting = TRUE;
			thread_sleep_simple_lock((event_t) l,
				simple_lock_addr(l->interlock),
				THREAD_UNINT);
			/* interlock relocked */
		}
	}

	simple_unlock(&l->interlock);

	return (FALSE);
}

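/*
 * Illustrative sketch (not part of the original file): because
 * lock_read_to_write() returns TRUE when the upgrade *fails*, and in that
 * case drops the lock entirely, a caller that must end up with write
 * access has to re-acquire from scratch and revalidate its state.
 */
#if 0	/* usage sketch only; not compiled */
static void
upgrade_sketch(lock_t *l)
{
	lock_read(l);
	/* ... decide the protected data needs modification ... */
	if (lock_read_to_write(l)) {
		/* upgrade failed: no lock is held here */
		lock_write(l);
		/* ... revalidate, since other threads may have run ... */
	}
	/* write access is held either way at this point */
	lock_done(l);
}
#endif
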
void
lock_write_to_read(
	register lock_t	* l)
{
	boolean_t	do_wakeup = FALSE;

	simple_lock(&l->interlock);

	l->read_count++;
	if (l->want_upgrade)
		l->want_upgrade = FALSE;
	else
		l->want_write = FALSE;

	if (l->waiting) {
		l->waiting = FALSE;
		do_wakeup = TRUE;
	}

	simple_unlock(&l->interlock);

	if (do_wakeup)
		thread_wakeup((event_t) l);
}

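/*
 * Illustrative sketch (not part of the original file): the basic lock_t
 * protocol.  lock_done() releases whichever mode is currently held, so
 * readers and writers both release through it.
 */
#if 0	/* usage sketch only; not compiled */
static void
lock_t_sketch(void)
{
	lock_t *l = lock_alloc(TRUE, 0, 0);	/* can_sleep lock; tags unused here */

	lock_read(l);			/* shared access */
	/* ... read shared data ... */
	lock_done(l);

	lock_write(l);			/* exclusive access */
	/* ... modify shared data ... */
	lock_write_to_read(l);		/* downgrade without ever dropping the lock */
	lock_done(l);

	lock_free(l);
}
#endif
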

#if	0	/* Unused */
/*
 *	Routine:	lock_try_write
 *	Function:
 *		Tries to get a write lock.
 *
 *		Returns FALSE if the lock is not held on return.
 */

boolean_t
lock_try_write(
	register lock_t	* l)
{
	pc_t		pc;

	simple_lock(&l->interlock);

	if (l->want_write || l->want_upgrade || l->read_count) {
		/*
		 *	Can't get lock.
		 */
		simple_unlock(&l->interlock);
		return(FALSE);
	}

	/*
	 *	Have lock.
	 */

	l->want_write = TRUE;

	simple_unlock(&l->interlock);

	return(TRUE);
}

/*
 *	Routine:	lock_try_read
 *	Function:
 *		Tries to get a read lock.
 *
 *		Returns FALSE if the lock is not held on return.
 */

boolean_t
lock_try_read(
	register lock_t	* l)
{
	pc_t		pc;

	simple_lock(&l->interlock);

	if (l->want_write || l->want_upgrade) {
		simple_unlock(&l->interlock);
		return(FALSE);
	}

	l->read_count++;

	simple_unlock(&l->interlock);

	return(TRUE);
}
#endif	/* Unused */


/*
 *	Routine:	lck_rw_alloc_init
 */
lck_rw_t *
lck_rw_alloc_init(
	lck_grp_t	*grp,
	lck_attr_t	*attr) {
	lck_rw_t	*lck;

	if ((lck = (lck_rw_t *)kalloc(sizeof(lck_rw_t))) != 0)
		lck_rw_init(lck, grp, attr);

	return(lck);
}

/*
 *	Routine:	lck_rw_free
 */
void
lck_rw_free(
	lck_rw_t	*lck,
	lck_grp_t	*grp) {
	lck_rw_destroy(lck, grp);
	kfree(lck, sizeof(lck_rw_t));
}

/*
 *	Routine:	lck_rw_init
 */
void
lck_rw_init(
	lck_rw_t	*lck,
	lck_grp_t	*grp,
	__unused lck_attr_t	*attr) {

	hw_lock_init(&lck->interlock);
	lck->want_write = FALSE;
	lck->want_upgrade = FALSE;
	lck->read_count = 0;
	lck->can_sleep = TRUE;
	lck->lck_rw_tag = 0;

	lck_grp_reference(grp);
	lck_grp_lckcnt_incr(grp, LCK_TYPE_RW);
}

/*
 *	Routine:	lck_rw_destroy
 */
void
lck_rw_destroy(
	lck_rw_t	*lck,
	lck_grp_t	*grp) {
	if (lck->lck_rw_tag == LCK_RW_TAG_DESTROYED)
		return;
	lck->lck_rw_tag = LCK_RW_TAG_DESTROYED;
	lck_grp_lckcnt_decr(grp, LCK_TYPE_RW);
	lck_grp_deallocate(grp);
	return;
}

/*
 *	Sleep locks.  These use the same data structure and algorithm
 *	as the spin locks, but the process sleeps while it is waiting
 *	for the lock.  These work on uniprocessor systems.
 */

#define DECREMENTER_TIMEOUT 1000000


/*
 * We need to disable interrupts while holding the mutex interlock
 * to prevent an IPI intervening.
 * Hence, local helper functions lck_interlock_lock()/lck_interlock_unlock().
 */
static boolean_t
lck_interlock_lock(lck_rw_t *lck)
{
	boolean_t	istate;

	istate = ml_set_interrupts_enabled(FALSE);
	hw_lock_lock(&lck->interlock);

	return istate;
}

static void
lck_interlock_unlock(lck_rw_t *lck, boolean_t istate)
{
	hw_lock_unlock(&lck->interlock);
	ml_set_interrupts_enabled(istate);
}

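/*
 * Illustrative sketch (not part of the original file): the save/restore
 * pattern the routines below all follow.  The previous interrupt state is
 * captured on entry and restored on exit, so the pair nests correctly
 * whether or not the caller already had interrupts disabled.
 */
#if 0	/* usage sketch only; not compiled */
static void
interlock_pattern_sketch(lck_rw_t *lck)
{
	boolean_t istate;

	istate = lck_interlock_lock(lck);	/* interrupts off, interlock held */
	/* ... examine or update lck's fields ... */
	lck_interlock_unlock(lck, istate);	/* interlock dropped, state restored */
}
#endif
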
/*
 *	Routine:	lck_rw_lock_exclusive
 */
void
lck_rw_lock_exclusive(
	lck_rw_t	*lck)
{
	int		i;
	boolean_t	lock_miss = FALSE;
	wait_result_t	res;
#if	MACH_LDEBUG
	int		decrementer;
#endif	/* MACH_LDEBUG */
	boolean_t	istate;

	istate = lck_interlock_lock(lck);

#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */

	/*
	 *	Try to acquire the want_write bit.
	 */
	while (lck->want_write) {
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_START, (int)lck, 0, 0, 0, 0);

		if (!lock_miss) {
			lock_miss = TRUE;
		}

		i = lock_wait_time[lck->can_sleep ? 1 : 0];
		if (i != 0) {
			lck_interlock_unlock(lck, istate);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - want_write");
#endif	/* MACH_LDEBUG */
			while (--i != 0 && lck->want_write)
				continue;
			istate = lck_interlock_lock(lck);
		}

		if (lck->can_sleep && lck->want_write) {
			lck->waiting = TRUE;
			res = assert_wait((event_t) lck, THREAD_UNINT);
			if (res == THREAD_WAITING) {
				lck_interlock_unlock(lck, istate);
				res = thread_block(THREAD_CONTINUE_NULL);
				istate = lck_interlock_lock(lck);
			}
		}
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_END, (int)lck, res, 0, 0, 0);
	}
	lck->want_write = TRUE;

	/* Wait for readers (and upgrades) to finish */

#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */
	while ((lck->read_count != 0) || lck->want_upgrade) {
		if (!lock_miss) {
			lock_miss = TRUE;
		}

		i = lock_wait_time[lck->can_sleep ? 1 : 0];

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_START,
			     (int)lck, lck->read_count, lck->want_upgrade, i, 0);

		if (i != 0) {
			lck_interlock_unlock(lck, istate);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - wait for readers");
#endif	/* MACH_LDEBUG */
			while (--i != 0 && (lck->read_count != 0 ||
					    lck->want_upgrade))
				continue;
			istate = lck_interlock_lock(lck);
		}

		if (lck->can_sleep && (lck->read_count != 0 || lck->want_upgrade)) {
			lck->waiting = TRUE;
			res = assert_wait((event_t) lck, THREAD_UNINT);
			if (res == THREAD_WAITING) {
				lck_interlock_unlock(lck, istate);
				res = thread_block(THREAD_CONTINUE_NULL);
				istate = lck_interlock_lock(lck);
			}
		}
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_END,
			     (int)lck, lck->read_count, lck->want_upgrade, res, 0);
	}

	lck_interlock_unlock(lck, istate);
}


/*
 *	Routine:	lck_rw_done
 */
lck_rw_type_t
lck_rw_done(
	lck_rw_t	*lck)
{
	boolean_t	do_wakeup = FALSE;
	lck_rw_type_t	lck_rw_type;
	boolean_t	istate;


	istate = lck_interlock_lock(lck);

	if (lck->read_count != 0) {
		lck_rw_type = LCK_RW_TYPE_SHARED;
		lck->read_count--;
	}
	else {
		lck_rw_type = LCK_RW_TYPE_EXCLUSIVE;
		if (lck->want_upgrade)
			lck->want_upgrade = FALSE;
		else
			lck->want_write = FALSE;
	}

	/*
	 *	There is no reason to wakeup a waiting thread
	 *	if the read-count is non-zero.  Consider:
	 *		we must be dropping a read lock
	 *		threads are waiting only if one wants a write lock
	 *		if there are still readers, they can't proceed
	 */

	if (lck->waiting && (lck->read_count == 0)) {
		lck->waiting = FALSE;
		do_wakeup = TRUE;
	}

	lck_interlock_unlock(lck, istate);

	if (do_wakeup)
		thread_wakeup((event_t) lck);
	return(lck_rw_type);
}




/*
 *	Routine:	lck_rw_unlock
 */
void
lck_rw_unlock(
	lck_rw_t	*lck,
	lck_rw_type_t	lck_rw_type)
{
	if (lck_rw_type == LCK_RW_TYPE_SHARED)
		lck_rw_unlock_shared(lck);
	else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
		lck_rw_unlock_exclusive(lck);
	else
		panic("lck_rw_unlock(): Invalid RW lock type: %d\n", lck_rw_type);
}


/*
 *	Routine:	lck_rw_unlock_shared
 */
void
lck_rw_unlock_shared(
	lck_rw_t	*lck)
{
	lck_rw_type_t	ret;

	ret = lck_rw_done(lck);

	if (ret != LCK_RW_TYPE_SHARED)
		panic("lck_rw_unlock_shared(): lock held in mode: %d\n", ret);
}


/*
 *	Routine:	lck_rw_unlock_exclusive
 */
void
lck_rw_unlock_exclusive(
	lck_rw_t	*lck)
{
	lck_rw_type_t	ret;

	ret = lck_rw_done(lck);

	if (ret != LCK_RW_TYPE_EXCLUSIVE)
		panic("lck_rw_unlock_exclusive(): lock held in mode: %d\n", ret);
}


/*
 *	Routine:	lck_rw_lock
 */
void
lck_rw_lock(
	lck_rw_t	*lck,
	lck_rw_type_t	lck_rw_type)
{
	if (lck_rw_type == LCK_RW_TYPE_SHARED)
		lck_rw_lock_shared(lck);
	else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
		lck_rw_lock_exclusive(lck);
	else
		panic("lck_rw_lock(): Invalid RW lock type: %x\n", lck_rw_type);
}


/*
 *	Routine:	lck_rw_lock_shared
 */
void
lck_rw_lock_shared(
	lck_rw_t	*lck)
{
	int		i;
	wait_result_t	res;
#if	MACH_LDEBUG
	int		decrementer;
#endif	/* MACH_LDEBUG */
	boolean_t	istate;

	istate = lck_interlock_lock(lck);

#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */
	while (lck->want_write || lck->want_upgrade) {
		i = lock_wait_time[lck->can_sleep ? 1 : 0];

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_START,
			     (int)lck, lck->want_write, lck->want_upgrade, i, 0);

		if (i != 0) {
			lck_interlock_unlock(lck, istate);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - wait no writers");
#endif	/* MACH_LDEBUG */
			while (--i != 0 && (lck->want_write || lck->want_upgrade))
				continue;
			istate = lck_interlock_lock(lck);
		}

		if (lck->can_sleep && (lck->want_write || lck->want_upgrade)) {
			lck->waiting = TRUE;
			res = assert_wait((event_t) lck, THREAD_UNINT);
			if (res == THREAD_WAITING) {
				lck_interlock_unlock(lck, istate);
				res = thread_block(THREAD_CONTINUE_NULL);
				istate = lck_interlock_lock(lck);
			}
		}
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_END,
			     (int)lck, lck->want_write, lck->want_upgrade, res, 0);
	}

	lck->read_count++;

	lck_interlock_unlock(lck, istate);
}

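/*
 * Illustrative sketch (not part of the original file): typical lck_rw
 * usage with the group/attribute KPI from kern/locks.h (group name is
 * hypothetical).  Shared holders may overlap one another; an exclusive
 * holder excludes everyone.
 */
#if 0	/* usage sketch only; not compiled */
static void
rw_usage_sketch(void)
{
	lck_grp_t	*grp = lck_grp_alloc_init("sketch", LCK_GRP_ATTR_NULL);
	lck_rw_t	*rw = lck_rw_alloc_init(grp, LCK_ATTR_NULL);

	lck_rw_lock_shared(rw);
	/* ... read shared state ... */
	lck_rw_unlock_shared(rw);

	lck_rw_lock_exclusive(rw);
	/* ... modify shared state ... */
	lck_rw_unlock_exclusive(rw);

	lck_rw_free(rw, grp);
	lck_grp_free(grp);
}
#endif
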

/*
 *	Routine:	lck_rw_lock_shared_to_exclusive
 *	Function:
 *		Improves a read-only lock to one with
 *		write permission.  If another reader has
 *		already requested an upgrade to a write lock,
 *		no lock is held upon return.
 *
 *		Returns TRUE if the upgrade *failed*.
 */

boolean_t
lck_rw_lock_shared_to_exclusive(
	lck_rw_t	*lck)
{
	int		i;
	boolean_t	do_wakeup = FALSE;
	wait_result_t	res;
#if	MACH_LDEBUG
	int		decrementer;
#endif	/* MACH_LDEBUG */
	boolean_t	istate;

	istate = lck_interlock_lock(lck);

	lck->read_count--;

	if (lck->want_upgrade) {
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_START,
			     (int)lck, lck->read_count, lck->want_upgrade, 0, 0);

		/*
		 *	Someone else has requested upgrade.
		 *	Since we've released a read lock, wake
		 *	him up.
		 */
		if (lck->waiting && (lck->read_count == 0)) {
			lck->waiting = FALSE;
			do_wakeup = TRUE;
		}

		lck_interlock_unlock(lck, istate);

		if (do_wakeup)
			thread_wakeup((event_t) lck);

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_END,
			     (int)lck, lck->read_count, lck->want_upgrade, 0, 0);

		return (TRUE);
	}

	lck->want_upgrade = TRUE;

#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */
	while (lck->read_count != 0) {
		i = lock_wait_time[lck->can_sleep ? 1 : 0];

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_START,
			     (int)lck, lck->read_count, i, 0, 0);

		if (i != 0) {
			lck_interlock_unlock(lck, istate);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - read_count");
#endif	/* MACH_LDEBUG */
			while (--i != 0 && lck->read_count != 0)
				continue;
			istate = lck_interlock_lock(lck);
		}

		if (lck->can_sleep && lck->read_count != 0) {
			lck->waiting = TRUE;
			res = assert_wait((event_t) lck, THREAD_UNINT);
			if (res == THREAD_WAITING) {
				lck_interlock_unlock(lck, istate);
				res = thread_block(THREAD_CONTINUE_NULL);
				istate = lck_interlock_lock(lck);
			}
		}
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_END,
			     (int)lck, lck->read_count, 0, 0, 0);
	}

	lck_interlock_unlock(lck, istate);

	return (FALSE);
}

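/*
 * Illustrative sketch (not part of the original file): as with
 * lock_read_to_write() above, TRUE means the upgrade failed and the lock
 * was dropped, so the caller must fall back to a fresh exclusive acquire
 * and revalidate anything it learned while holding the shared lock.
 */
#if 0	/* usage sketch only; not compiled */
static void
rw_upgrade_sketch(lck_rw_t *rw)
{
	lck_rw_lock_shared(rw);
	/* ... decide an update is needed ... */
	if (lck_rw_lock_shared_to_exclusive(rw)) {
		/* upgrade failed: no lock is held here */
		lck_rw_lock_exclusive(rw);
		/* ... revalidate before modifying ... */
	}
	lck_rw_unlock_exclusive(rw);
}
#endif
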
/*
 *	Routine:	lck_rw_lock_exclusive_to_shared
 */
void
lck_rw_lock_exclusive_to_shared(
	lck_rw_t	*lck)
{
	boolean_t	do_wakeup = FALSE;
	boolean_t	istate;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_START,
		     (int)lck, lck->want_write, lck->want_upgrade, 0, 0);

	istate = lck_interlock_lock(lck);

	lck->read_count++;
	if (lck->want_upgrade)
		lck->want_upgrade = FALSE;
	else
		lck->want_write = FALSE;

	if (lck->waiting) {
		lck->waiting = FALSE;
		do_wakeup = TRUE;
	}

	lck_interlock_unlock(lck, istate);

	if (do_wakeup)
		thread_wakeup((event_t) lck);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_END,
		     (int)lck, lck->want_write, lck->want_upgrade, lck->read_count, 0);

}


/*
 *	Routine:	lck_rw_try_lock
 */
boolean_t
lck_rw_try_lock(
	lck_rw_t	*lck,
	lck_rw_type_t	lck_rw_type)
{
	if (lck_rw_type == LCK_RW_TYPE_SHARED)
		return(lck_rw_try_lock_shared(lck));
	else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
		return(lck_rw_try_lock_exclusive(lck));
	else
		panic("lck_rw_try_lock(): Invalid rw lock type: %x\n", lck_rw_type);
	return(FALSE);
}

/*
 *	Routine:	lck_rw_try_lock_exclusive
 *	Function:
 *		Tries to get a write lock.
 *
 *		Returns FALSE if the lock is not held on return.
 */

boolean_t
lck_rw_try_lock_exclusive(
	lck_rw_t	*lck)
{
	boolean_t	istate;

	istate = lck_interlock_lock(lck);

	if (lck->want_write || lck->want_upgrade || lck->read_count) {
		/*
		 *	Can't get lock.
		 */
		lck_interlock_unlock(lck, istate);
		return(FALSE);
	}

	/*
	 *	Have lock.
	 */

	lck->want_write = TRUE;

	lck_interlock_unlock(lck, istate);

	return(TRUE);
}

/*
 *	Routine:	lck_rw_try_lock_shared
 *	Function:
 *		Tries to get a read lock.
 *
 *		Returns FALSE if the lock is not held on return.
 */

boolean_t
lck_rw_try_lock_shared(
	lck_rw_t	*lck)
{
	boolean_t	istate;

	istate = lck_interlock_lock(lck);

	if (lck->want_write || lck->want_upgrade) {
		lck_interlock_unlock(lck, istate);
		return(FALSE);
	}

	lck->read_count++;

	lck_interlock_unlock(lck, istate);

	return(TRUE);
}

/*
 *	Routine:	lck_mtx_alloc_init
 */
lck_mtx_t *
lck_mtx_alloc_init(
	lck_grp_t	*grp,
	lck_attr_t	*attr)
{
	lck_mtx_t	*lck;

	if ((lck = (lck_mtx_t *)kalloc(sizeof(lck_mtx_t))) != 0)
		lck_mtx_init(lck, grp, attr);

	return(lck);
}

/*
 *	Routine:	lck_mtx_free
 */
void
lck_mtx_free(
	lck_mtx_t	*lck,
	lck_grp_t	*grp)
{
	lck_mtx_destroy(lck, grp);
	kfree(lck, sizeof(lck_mtx_t));
}

/*
 *	Routine:	lck_mtx_ext_init
 */
static void
lck_mtx_ext_init(
	lck_mtx_ext_t	*lck,
	lck_grp_t	*grp,
	lck_attr_t	*attr)
{
	lck->lck_mtx.lck_mtx_ilk = 0;
	lck->lck_mtx.lck_mtx_locked = 0;
	lck->lck_mtx.lck_mtx_waiters = 0;
	lck->lck_mtx.lck_mtx_pri = 0;
	lck->lck_mtx_attr = 0;

	if ((attr->lck_attr_val) & LCK_ATTR_DEBUG) {
		lck->lck_mtx_deb.pc = 0;
		lck->lck_mtx_deb.thread = 0;
		lck->lck_mtx_deb.type = MUTEX_TAG;
		lck->lck_mtx_attr |= LCK_MTX_ATTR_DEBUG;
	}

	lck->lck_mtx_grp = grp;
}

/*
 *	Routine:	lck_mtx_init
 */
void
lck_mtx_init(
	lck_mtx_t	*lck,
	lck_grp_t	*grp,
	lck_attr_t	*attr)
{
	lck_mtx_ext_t	*lck_ext;

	if ((attr != LCK_ATTR_NULL) && ((attr->lck_attr_val) & LCK_ATTR_DEBUG)) {
		if ((lck_ext = (lck_mtx_ext_t *)kalloc(sizeof(lck_mtx_ext_t))) != 0) {
			lck_mtx_ext_init(lck_ext, grp, attr);
			lck->lck_mtx_tag = LCK_MTX_TAG_INDIRECT;
			lck->lck_mtx_ptr = lck_ext;
		}
	} else {
		lck->lck_mtx_ilk = 0;
		lck->lck_mtx_locked = 0;
		lck->lck_mtx_waiters = 0;
		lck->lck_mtx_pri = 0;
	}
	lck_grp_reference(grp);
	lck_grp_lckcnt_incr(grp, LCK_TYPE_MTX);
}

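/*
 * Illustrative sketch (not part of the original file): typical lck_mtx
 * usage.  The lock/unlock fast paths for this port live in the optimized
 * assembler (lck_mtx_lock/lck_mtx_unlock, declared in kern/locks.h); only
 * their slow paths fall back into the C routines in this file.  The group
 * name below is hypothetical.
 */
#if 0	/* usage sketch only; not compiled */
static void
mtx_usage_sketch(void)
{
	lck_grp_t	*grp = lck_grp_alloc_init("sketch", LCK_GRP_ATTR_NULL);
	lck_mtx_t	*mtx = lck_mtx_alloc_init(grp, LCK_ATTR_NULL);

	lck_mtx_lock(mtx);	/* may block; the holder is allowed to sleep */
	/* ... critical section that may block ... */
	lck_mtx_unlock(mtx);

	lck_mtx_free(mtx, grp);
	lck_grp_free(grp);
}
#endif
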
/*
 *	Routine:	lck_mtx_destroy
 */
void
lck_mtx_destroy(
	lck_mtx_t	*lck,
	lck_grp_t	*grp)
{
	boolean_t lck_is_indirect;

	if (lck->lck_mtx_tag == LCK_MTX_TAG_DESTROYED)
		return;
	lck_is_indirect = (lck->lck_mtx_tag == LCK_MTX_TAG_INDIRECT);
	lck->lck_mtx_tag = LCK_MTX_TAG_DESTROYED;
	if (lck_is_indirect)
		kfree(lck->lck_mtx_ptr, sizeof(lck_mtx_ext_t));
	lck_grp_lckcnt_decr(grp, LCK_TYPE_MTX);
	lck_grp_deallocate(grp);
	return;
}

/*
 *	Routine:	lck_mtx_assert
 */
void
lck_mtx_assert(
	__unused lck_mtx_t	*lck,
	__unused unsigned int	type)
{
}

#if	MACH_KDB

void	db_show_one_lock(lock_t *);

void
db_show_one_lock(
	lock_t	*lock)
{
	db_printf("Read_count = 0x%x, %swant_upgrade, %swant_write, ",
		  lock->read_count,
		  lock->want_upgrade ? "" : "!",
		  lock->want_write ? "" : "!");
	db_printf("%swaiting, %scan_sleep\n",
		  lock->waiting ? "" : "!", lock->can_sleep ? "" : "!");
	db_printf("Interlock:\n");
	db_show_one_simple_lock((db_expr_t)simple_lock_addr(lock->interlock),
				TRUE, (db_expr_t)0, (char *)0);
}

#endif	/* MACH_KDB */

/*
 * The C portion of the mutex package.  These routines are only invoked
 * if the optimized assembler routines can't do the work.
 */

/*
 *	Routine:	mutex_alloc
 *	Function:
 *		Allocate a mutex for external users who cannot
 *		hard-code the structure definition into their
 *		objects.
 *		For now just use kalloc, but a zone is probably
 *		warranted.
 */
mutex_t *
mutex_alloc(
	unsigned short	tag)
{
	mutex_t		*m;

	if ((m = (mutex_t *)kalloc(sizeof(mutex_t))) != 0)
		mutex_init(m, tag);
	return(m);
}

/*
 *	Routine:	mutex_free
 *	Function:
 *		Free a mutex allocated for external users.
 *		For now just use kfree, but a zone is probably
 *		warranted.
 */
void
mutex_free(
	mutex_t	*m)
{
	kfree(m, sizeof(mutex_t));
}

/*
 *	Routine:	_mutex_assert
 */
void
_mutex_assert (
	mutex_t		*mutex,
	unsigned int	what)
{

	thread_t	thread = current_thread();
	thread_t	holder;

	if (panicstr != NULL)
		return;

	holder = (thread_t) mutex->lck_mtx.lck_mtx_locked;

	switch (what) {
	case MA_OWNED:
		if (thread != holder)
			panic("mutex %x not owned\n", mutex);
		break;

	case MA_NOTOWNED:
		if (thread == holder)
			panic("mutex %x owned\n", mutex);
		break;
	}

}

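/*
 * Illustrative sketch (not part of the original file): _mutex_assert() is
 * a cheap way to document and enforce a locking precondition at the top
 * of a routine that touches state protected by the mutex.
 */
#if 0	/* usage sketch only; not compiled */
static void
needs_lock_held(mutex_t *m)
{
	_mutex_assert(m, MA_OWNED);	/* panics if the caller doesn't hold m */
	/* ... touch state protected by m ... */
}
#endif
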
#if	MACH_KDB
/*
 * Routines to print out simple_locks and mutexes in a nicely-formatted
 * fashion.
 */

char *simple_lock_labels =	"ENTRY ILK THREAD DURATION CALLER";
char *mutex_labels =		"ENTRY LOCKED WAITERS THREAD CALLER";

void
db_show_one_simple_lock (
	db_expr_t	addr,
	boolean_t	have_addr,
	db_expr_t	count,
	char		* modif)
{
	simple_lock_t	saddr = (simple_lock_t)addr;

	if (saddr == (simple_lock_t)0 || !have_addr) {
		db_error ("No simple_lock\n");
	}
#if	USLOCK_DEBUG
	else if (saddr->lock_type != USLOCK_TAG)
		db_error ("Not a simple_lock\n");
#endif	/* USLOCK_DEBUG */

	db_printf ("%s\n", simple_lock_labels);
	db_print_simple_lock (saddr);
}

void
db_print_simple_lock (
	simple_lock_t	addr)
{

	db_printf ("%08x %3d", addr, *hw_lock_addr(addr->interlock));
#if	USLOCK_DEBUG
	db_printf (" %08x", addr->debug.lock_thread);
	db_printf (" %08x ", addr->debug.duration[1]);
	db_printsym ((int)addr->debug.lock_pc, DB_STGY_ANY);
#endif	/* USLOCK_DEBUG */
	db_printf ("\n");
}

void
db_show_one_mutex (
	db_expr_t	addr,
	boolean_t	have_addr,
	db_expr_t	count,
	char		* modif)
{
	mutex_t		* maddr = (mutex_t *)addr;

	if (maddr == (mutex_t *)0 || !have_addr)
		db_error ("No mutex\n");
#if	MACH_LDEBUG
	else if (maddr->type != MUTEX_TAG)
		db_error ("Not a mutex\n");
#endif	/* MACH_LDEBUG */

	db_printf ("%s\n", mutex_labels);
	db_print_mutex (maddr);
}

void
db_print_mutex (
	mutex_t		* addr)
{
	db_printf ("%08x %6d %7d",
		   addr, *addr, addr->lck_mtx.lck_mtx_waiters);
#if	MACH_LDEBUG
	db_printf (" %08x ", addr->thread);
	db_printsym (addr->pc, DB_STGY_ANY);
#endif	/* MACH_LDEBUG */
	db_printf ("\n");
}

#endif	/* MACH_KDB */