apple/xnu.git (xnu-792.6.56) / osfmk / i386 / locks_i386.c
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 * @OSF_COPYRIGHT@
25 */
26 /*
27 * Mach Operating System
28 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
29 * All Rights Reserved.
30 *
31 * Permission to use, copy, modify and distribute this software and its
32 * documentation is hereby granted, provided that both the copyright
33 * notice and this permission notice appear in all copies of the
34 * software, derivative works or modified versions, and any portions
35 * thereof, and that both notices appear in supporting documentation.
36 *
37 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
38 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
39 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
40 *
41 * Carnegie Mellon requests users of this software to return to
42 *
43 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
44 * School of Computer Science
45 * Carnegie Mellon University
46 * Pittsburgh PA 15213-3890
47 *
48 * any improvements or extensions that they make and grant Carnegie Mellon
49 * the rights to redistribute these changes.
50 */
51 /*
52 * File: kern/lock.c
53 * Author: Avadis Tevanian, Jr., Michael Wayne Young
54 * Date: 1985
55 *
56 * Locking primitives implementation
57 */
58
59 #include <mach_kdb.h>
60 #include <mach_ldebug.h>
61
62 #include <kern/lock.h>
63 #include <kern/locks.h>
64 #include <kern/kalloc.h>
65 #include <kern/misc_protos.h>
66 #include <kern/thread.h>
67 #include <kern/processor.h>
68 #include <kern/cpu_data.h>
69 #include <kern/cpu_number.h>
70 #include <kern/sched_prim.h>
71 #include <kern/xpr.h>
72 #include <kern/debug.h>
73 #include <string.h>
74
75 #if MACH_KDB
76 #include <ddb/db_command.h>
77 #include <ddb/db_output.h>
78 #include <ddb/db_sym.h>
79 #include <ddb/db_print.h>
80 #endif /* MACH_KDB */
81
82 #ifdef __ppc__
83 #include <ppc/Firmware.h>
84 #endif
85
86 #include <sys/kdebug.h>
87
88 #define LCK_RW_LCK_EXCLUSIVE_CODE 0x100
89 #define LCK_RW_LCK_EXCLUSIVE1_CODE 0x101
90 #define LCK_RW_LCK_SHARED_CODE 0x102
91 #define LCK_RW_LCK_SH_TO_EX_CODE 0x103
92 #define LCK_RW_LCK_SH_TO_EX1_CODE 0x104
93 #define LCK_RW_LCK_EX_TO_SH_CODE 0x105
94
95
96 #define ANY_LOCK_DEBUG (USLOCK_DEBUG || LOCK_DEBUG || MUTEX_DEBUG)
97
98 unsigned int LcksOpts=0;
99 unsigned int lock_wait_time[2] = { (unsigned int)-1, 100 };
100
101 /* Forwards */
102
103 #if MACH_KDB
104 void db_print_simple_lock(
105 simple_lock_t addr);
106
107 void db_print_mutex(
108 mutex_t * addr);
109 #endif /* MACH_KDB */
110
111
112 #if USLOCK_DEBUG
113 /*
114 * Perform simple lock checks.
115 */
116 int uslock_check = 1;
117 int max_lock_loops = 100000000;
118 decl_simple_lock_data(extern , printf_lock)
119 decl_simple_lock_data(extern , panic_lock)
120 #if MACH_KDB
121 decl_simple_lock_data(extern , kdb_lock)
122 #endif /* MACH_KDB */
123 #endif /* USLOCK_DEBUG */
124
125
126 /*
127 * We often want to know the addresses of the callers
128 * of the various lock routines. However, this information
129 * is only used for debugging and statistics.
130 */
131 typedef void *pc_t;
132 #define INVALID_PC ((void *) VM_MAX_KERNEL_ADDRESS)
133 #define INVALID_THREAD ((void *) VM_MAX_KERNEL_ADDRESS)
134 #if ANY_LOCK_DEBUG
135 #define OBTAIN_PC(pc,l) ((pc) = (void *) GET_RETURN_PC(&(l)))
136 #define DECL_PC(pc) pc_t pc;
137 #else /* ANY_LOCK_DEBUG */
138 #define DECL_PC(pc)
139 #ifdef lint
140 /*
141 * Eliminate lint complaints about unused local pc variables.
142 */
143 #define OBTAIN_PC(pc,l) ++pc
144 #else /* lint */
145 #define OBTAIN_PC(pc,l)
146 #endif /* lint */
147 #endif /* ANY_LOCK_DEBUG */
148
149
150 /*
151 * Portable lock package implementation of usimple_locks.
152 */
153
154 #if USLOCK_DEBUG
155 #define USLDBG(stmt) stmt
156 void usld_lock_init(usimple_lock_t, unsigned short);
157 void usld_lock_pre(usimple_lock_t, pc_t);
158 void usld_lock_post(usimple_lock_t, pc_t);
159 void usld_unlock(usimple_lock_t, pc_t);
160 void usld_lock_try_pre(usimple_lock_t, pc_t);
161 void usld_lock_try_post(usimple_lock_t, pc_t);
162 int usld_lock_common_checks(usimple_lock_t, char *);
163 #else /* USLOCK_DEBUG */
164 #define USLDBG(stmt)
165 #endif /* USLOCK_DEBUG */
166
167 /*
168 * Routine: lck_spin_alloc_init
169 */
170 lck_spin_t *
171 lck_spin_alloc_init(
172 lck_grp_t *grp,
173 lck_attr_t *attr)
174 {
175 lck_spin_t *lck;
176
177 if ((lck = (lck_spin_t *)kalloc(sizeof(lck_spin_t))) != 0)
178 lck_spin_init(lck, grp, attr);
179
180 return(lck);
181 }
182
183 /*
184 * Routine: lck_spin_free
185 */
186 void
187 lck_spin_free(
188 lck_spin_t *lck,
189 lck_grp_t *grp)
190 {
191 lck_spin_destroy(lck, grp);
192 kfree(lck, sizeof(lck_spin_t));
193 }
194
195 /*
196 * Routine: lck_spin_init
197 */
198 void
199 lck_spin_init(
200 lck_spin_t *lck,
201 lck_grp_t *grp,
202 __unused lck_attr_t *attr)
203 {
204 usimple_lock_init((usimple_lock_t) lck, 0);
205 lck_grp_reference(grp);
206 lck_grp_lckcnt_incr(grp, LCK_TYPE_SPIN);
207 }
208
209 /*
210 * Routine: lck_spin_destroy
211 */
212 void
213 lck_spin_destroy(
214 lck_spin_t *lck,
215 lck_grp_t *grp)
216 {
217 if (lck->lck_spin_data[0] == LCK_SPIN_TAG_DESTROYED)
218 return;
219 lck->lck_spin_data[0] = LCK_SPIN_TAG_DESTROYED;
220 lck_grp_lckcnt_decr(grp, LCK_TYPE_SPIN);
221 lck_grp_deallocate(grp);
222 return;
223 }
224
225 /*
226 * Routine: lck_spin_lock
227 */
228 void
229 lck_spin_lock(
230 lck_spin_t *lck)
231 {
232 usimple_lock((usimple_lock_t) lck);
233 }
234
235 /*
236 * Routine: lck_spin_unlock
237 */
238 void
239 lck_spin_unlock(
240 lck_spin_t *lck)
241 {
242 usimple_unlock((usimple_lock_t) lck);
243 }
244
245
246 /*
247 * Routine: lck_spin_try_lock
248 */
249 boolean_t
250 lck_spin_try_lock(
251 lck_spin_t *lck)
252 {
253 return((boolean_t)usimple_lock_try((usimple_lock_t) lck));
254 }
255
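/*
 * Illustrative sketch of the spin lock interface above (not part of the
 * original source; the routine name and the group argument are hypothetical,
 * and the group would come from the lock group machinery elsewhere in the
 * kernel). A spin lock critical section runs with preemption disabled, so
 * it must be short and must never block.
 */
#if 0	/* Example */
static void
example_spin_usage(lck_grp_t *grp)
{
	lck_spin_t	*lck;

	lck = lck_spin_alloc_init(grp, LCK_ATTR_NULL);

	lck_spin_lock(lck);
	/* ... short, non-blocking critical section ... */
	lck_spin_unlock(lck);

	if (lck_spin_try_lock(lck)) {	/* non-blocking attempt */
		lck_spin_unlock(lck);
	}

	lck_spin_free(lck, grp);
}
#endif	/* Example */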
256 /*
257 * Initialize a usimple_lock.
258 *
259 * No change in preemption state.
260 */
261 void
262 usimple_lock_init(
263 usimple_lock_t l,
264 __unused unsigned short tag)
265 {
266 #ifndef MACHINE_SIMPLE_LOCK
267 USLDBG(usld_lock_init(l, tag));
268 hw_lock_init(&l->interlock);
269 #else
270 simple_lock_init((simple_lock_t)l,tag);
271 #endif
272 }
273
274
275 /*
276 * Acquire a usimple_lock.
277 *
278 * Returns with preemption disabled. Note
279 * that the hw_lock routines are responsible for
280 * maintaining preemption state.
281 */
282 void
283 usimple_lock(
284 usimple_lock_t l)
285 {
286 #ifndef MACHINE_SIMPLE_LOCK
287 pc_t pc = NULL;
288
289 OBTAIN_PC(pc, l);
290 USLDBG(usld_lock_pre(l, pc));
291
292 if(!hw_lock_to(&l->interlock, LockTimeOut)) /* Try to get the lock with a timeout */
293 panic("simple lock deadlock detection - l=%08X, cpu=%d, ret=%08X", l, cpu_number(), pc);
294
295 USLDBG(usld_lock_post(l, pc));
296 #else
297 simple_lock((simple_lock_t)l);
298 #endif
299 }
300
301
302 /*
303 * Release a usimple_lock.
304 *
305 * Returns with preemption enabled. Note
306 * that the hw_lock routines are responsible for
307 * maintaining preemption state.
308 */
309 void
310 usimple_unlock(
311 usimple_lock_t l)
312 {
313 #ifndef MACHINE_SIMPLE_LOCK
314 DECL_PC(pc);
315
316 OBTAIN_PC(pc, l);
317 USLDBG(usld_unlock(l, pc));
318 hw_lock_unlock(&l->interlock);
319 #else
320 simple_unlock_rwmb((simple_lock_t)l);
321 #endif
322 }
323
324
325 /*
326 * Conditionally acquire a usimple_lock.
327 *
328 * On success, returns with preemption disabled.
329 * On failure, returns with preemption in the same state
330 * as when first invoked. Note that the hw_lock routines
331 * are responsible for maintaining preemption state.
332 *
333 * XXX No stats are gathered on a miss; I preserved this
334 * behavior from the original assembly-language code, but
335 * doesn't it make sense to log misses? XXX
336 */
337 unsigned int
338 usimple_lock_try(
339 usimple_lock_t l)
340 {
341 #ifndef MACHINE_SIMPLE_LOCK
342 DECL_PC(pc);
343 unsigned int success;
344
345 OBTAIN_PC(pc, l);
346 USLDBG(usld_lock_try_pre(l, pc));
347 if ((success = hw_lock_try(&l->interlock))) {
348 USLDBG(usld_lock_try_post(l, pc));
349 }
350 return success;
351 #else
352 return(simple_lock_try((simple_lock_t)l));
353 #endif
354 }
355
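/*
 * Illustrative sketch (hypothetical routine, not part of the original
 * source): usimple_lock_try() returns nonzero on success and leaves
 * preemption disabled only in that case, so the unlock must be conditional.
 */
#if 0	/* Example */
static boolean_t
example_try_usimple(usimple_lock_t l)
{
	if (usimple_lock_try(l)) {
		/* ... lock held, preemption disabled ... */
		usimple_unlock(l);		/* re-enables preemption */
		return TRUE;
	}
	return FALSE;				/* preemption state unchanged */
}
#endif	/* Example */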
356 #if USLOCK_DEBUG
357 /*
358 * States of a usimple_lock. The default when initializing
359 * a usimple_lock is setting it up for debug checking.
360 */
361 #define USLOCK_CHECKED 0x0001 /* lock is being checked */
362 #define USLOCK_TAKEN 0x0002 /* lock has been taken */
363 #define USLOCK_INIT 0xBAA0 /* lock has been initialized */
364 #define USLOCK_INITIALIZED (USLOCK_INIT|USLOCK_CHECKED)
365 #define USLOCK_CHECKING(l) (uslock_check && \
366 ((l)->debug.state & USLOCK_CHECKED))
367
368 /*
369 * Trace activities of a particularly interesting lock.
370 */
371 void usl_trace(usimple_lock_t, int, pc_t, const char *);
372
373
374 /*
375 * Initialize the debugging information contained
376 * in a usimple_lock.
377 */
378 void
379 usld_lock_init(
380 usimple_lock_t l,
381 __unused unsigned short tag)
382 {
383 if (l == USIMPLE_LOCK_NULL)
384 panic("lock initialization: null lock pointer");
385 l->lock_type = USLOCK_TAG;
386 l->debug.state = uslock_check ? USLOCK_INITIALIZED : 0;
387 l->debug.lock_cpu = l->debug.unlock_cpu = 0;
388 l->debug.lock_pc = l->debug.unlock_pc = INVALID_PC;
389 l->debug.lock_thread = l->debug.unlock_thread = INVALID_THREAD;
390 l->debug.duration[0] = l->debug.duration[1] = 0;
391 l->debug.unlock_cpu = l->debug.unlock_cpu = 0;
392 l->debug.unlock_pc = l->debug.unlock_pc = INVALID_PC;
393 l->debug.unlock_thread = l->debug.unlock_thread = INVALID_THREAD;
394 }
395
396
397 /*
398 * These checks apply to all usimple_locks, not just
399 * those with USLOCK_CHECKED turned on.
400 */
401 int
402 usld_lock_common_checks(
403 usimple_lock_t l,
404 char *caller)
405 {
406 if (l == USIMPLE_LOCK_NULL)
407 panic("%s: null lock pointer", caller);
408 if (l->lock_type != USLOCK_TAG)
409 panic("%s: 0x%x is not a usimple lock", caller, (integer_t) l);
410 if (!(l->debug.state & USLOCK_INIT))
411 panic("%s: 0x%x is not an initialized lock",
412 caller, (integer_t) l);
413 return USLOCK_CHECKING(l);
414 }
415
416
417 /*
418 * Debug checks on a usimple_lock just before attempting
419 * to acquire it.
420 */
421 /* ARGSUSED */
422 void
423 usld_lock_pre(
424 usimple_lock_t l,
425 pc_t pc)
426 {
427 char caller[] = "usimple_lock";
428
429
430 if (!usld_lock_common_checks(l, caller))
431 return;
432
433 /*
434 * Note that there is a weird case where we are taking a lock while we are
435 * in the process of putting the system to sleep. We are running with no
436 * current thread, so we cannot tell whether we are trying to retake a lock
437 * we already hold or whether another processor holds it. Therefore we
438 * simply skip this check when the locking thread is 0.
439 */
440
441 if ((l->debug.state & USLOCK_TAKEN) && l->debug.lock_thread &&
442 l->debug.lock_thread == (void *) current_thread()) {
443 printf("%s: lock 0x%x already locked (at 0x%x) by",
444 caller, (integer_t) l, l->debug.lock_pc);
445 printf(" current thread 0x%x (new attempt at pc 0x%x)\n",
446 l->debug.lock_thread, pc);
447 panic(caller);
448 }
449 mp_disable_preemption();
450 usl_trace(l, cpu_number(), pc, caller);
451 mp_enable_preemption();
452 }
453
454
455 /*
456 * Debug checks on a usimple_lock just after acquiring it.
457 *
458 * Pre-emption has been disabled at this point,
459 * so we are safe in using cpu_number.
460 */
461 void
462 usld_lock_post(
463 usimple_lock_t l,
464 pc_t pc)
465 {
466 register int mycpu;
467 char caller[] = "successful usimple_lock";
468
469
470 if (!usld_lock_common_checks(l, caller))
471 return;
472
473 if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
474 panic("%s: lock 0x%x became uninitialized",
475 caller, (integer_t) l);
476 if ((l->debug.state & USLOCK_TAKEN))
477 panic("%s: lock 0x%x became TAKEN by someone else",
478 caller, (integer_t) l);
479
480 mycpu = cpu_number();
481 l->debug.lock_thread = (void *)current_thread();
482 l->debug.state |= USLOCK_TAKEN;
483 l->debug.lock_pc = pc;
484 l->debug.lock_cpu = mycpu;
485
486 usl_trace(l, mycpu, pc, caller);
487 }
488
489
490 /*
491 * Debug checks on a usimple_lock just before
492 * releasing it. Note that the caller has not
493 * yet released the hardware lock.
494 *
495 * Preemption is still disabled, so there's
496 * no problem using cpu_number.
497 */
498 void
499 usld_unlock(
500 usimple_lock_t l,
501 pc_t pc)
502 {
503 register int mycpu;
504 char caller[] = "usimple_unlock";
505
506
507 if (!usld_lock_common_checks(l, caller))
508 return;
509
510 mycpu = cpu_number();
511
512 if (!(l->debug.state & USLOCK_TAKEN))
513 panic("%s: lock 0x%x hasn't been taken",
514 caller, (integer_t) l);
515 if (l->debug.lock_thread != (void *) current_thread())
516 panic("%s: unlocking lock 0x%x, owned by thread 0x%x",
517 caller, (integer_t) l, l->debug.lock_thread);
518 if (l->debug.lock_cpu != mycpu) {
519 printf("%s: unlocking lock 0x%x on cpu 0x%x",
520 caller, (integer_t) l, mycpu);
521 printf(" (acquired on cpu 0x%x)\n", l->debug.lock_cpu);
522 panic(caller);
523 }
524 usl_trace(l, mycpu, pc, caller);
525
526 l->debug.unlock_thread = l->debug.lock_thread;
527 l->debug.lock_thread = INVALID_THREAD;
528 l->debug.state &= ~USLOCK_TAKEN;
529 l->debug.unlock_pc = pc;
530 l->debug.unlock_cpu = mycpu;
531 }
532
533
534 /*
535 * Debug checks on a usimple_lock just before
536 * attempting to acquire it.
537 *
538 * Preemption isn't guaranteed to be disabled.
539 */
540 void
541 usld_lock_try_pre(
542 usimple_lock_t l,
543 pc_t pc)
544 {
545 char caller[] = "usimple_lock_try";
546
547 if (!usld_lock_common_checks(l, caller))
548 return;
549 mp_disable_preemption();
550 usl_trace(l, cpu_number(), pc, caller);
551 mp_enable_preemption();
552 }
553
554
555 /*
556 * Debug checks on a usimple_lock just after
557 * successfully attempting to acquire it.
558 *
559 * Preemption has been disabled by the
560 * lock acquisition attempt, so it's safe
561 * to use cpu_number.
562 */
563 void
564 usld_lock_try_post(
565 usimple_lock_t l,
566 pc_t pc)
567 {
568 register int mycpu;
569 char caller[] = "successful usimple_lock_try";
570
571 if (!usld_lock_common_checks(l, caller))
572 return;
573
574 if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
575 panic("%s: lock 0x%x became uninitialized",
576 caller, (integer_t) l);
577 if ((l->debug.state & USLOCK_TAKEN))
578 panic("%s: lock 0x%x became TAKEN by someone else",
579 caller, (integer_t) l);
580
581 mycpu = cpu_number();
582 l->debug.lock_thread = (void *) current_thread();
583 l->debug.state |= USLOCK_TAKEN;
584 l->debug.lock_pc = pc;
585 l->debug.lock_cpu = mycpu;
586
587 usl_trace(l, mycpu, pc, caller);
588 }
589
590
591 /*
592 * For very special cases, set traced_lock to point to a
593 * specific lock of interest. The result is a series of
594 * XPRs showing lock operations on that lock. The lock_seq
595 * value is used to show the order of those operations.
596 */
597 usimple_lock_t traced_lock;
598 unsigned int lock_seq;
599
600 void
601 usl_trace(
602 usimple_lock_t l,
603 int mycpu,
604 pc_t pc,
605 const char * op_name)
606 {
607 if (traced_lock == l) {
608 XPR(XPR_SLOCK,
609 "seq %d, cpu %d, %s @ %x\n",
610 (integer_t) lock_seq, (integer_t) mycpu,
611 (integer_t) op_name, (integer_t) pc, 0);
612 lock_seq++;
613 }
614 }
615
616
617 #endif /* USLOCK_DEBUG */
618
619 /*
620 * Routine: lock_alloc
621 * Function:
622 * Allocate a lock for external users who cannot
623 * hard-code the structure definition into their
624 * objects.
625 * For now just use kalloc, but a zone is probably
626 * warranted.
627 */
628 lock_t *
629 lock_alloc(
630 boolean_t can_sleep,
631 unsigned short tag,
632 unsigned short tag1)
633 {
634 lock_t *l;
635
636 if ((l = (lock_t *)kalloc(sizeof(lock_t))) != 0)
637 lock_init(l, can_sleep, tag, tag1);
638 return(l);
639 }
640
641 /*
642 * Routine: lock_free
643 * Function:
644 * Free a lock allocated for external users.
645 * For now just use kfree, but a zone is probably
646 * warranted.
647 */
648 void
649 lock_free(
650 lock_t *l)
651 {
652 kfree(l, sizeof(lock_t));
653 }
654
655
656 /*
657 * Routine: lock_init
658 * Function:
659 * Initialize a lock; required before use.
660 * Note that clients declare the "struct lock"
661 * variables and then initialize them, rather
662 * than getting a new one from this module.
663 */
664 void
665 lock_init(
666 lock_t *l,
667 boolean_t can_sleep,
668 __unused unsigned short tag,
669 unsigned short tag1)
670 {
671 (void) memset((void *) l, 0, sizeof(lock_t));
672
673 simple_lock_init(&l->interlock, tag1);
674 l->want_write = FALSE;
675 l->want_upgrade = FALSE;
676 l->read_count = 0;
677 l->can_sleep = can_sleep;
678 }
679
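/*
 * Illustrative sketch of the external lock_t interface (hypothetical
 * caller, not part of the original source): clients either embed a lock_t
 * and call lock_init(), or use lock_alloc()/lock_free(), then bracket
 * readers with lock_read()/lock_done() and writers with
 * lock_write()/lock_done().
 */
#if 0	/* Example */
static void
example_lock_usage(lock_t *l)
{
	lock_init(l, TRUE, 0, 0);	/* a can_sleep lock */

	lock_read(l);
	/* ... read-only access ... */
	lock_done(l);

	lock_write(l);
	/* ... exclusive access ... */
	lock_done(l);
}
#endif	/* Example */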
680
681 /*
682 * Sleep locks. These use the same data structure and algorithm
683 * as the spin locks, but the process sleeps while it is waiting
684 * for the lock. These work on uniprocessor systems.
685 */
686
687 #define DECREMENTER_TIMEOUT 1000000
688
689 void
690 lock_write(
691 register lock_t * l)
692 {
693 register int i;
694 boolean_t lock_miss = FALSE;
695 #if MACH_LDEBUG
696 int decrementer;
697 #endif /* MACH_LDEBUG */
698
699 simple_lock(&l->interlock);
700
701 #if MACH_LDEBUG
702 decrementer = DECREMENTER_TIMEOUT;
703 #endif /* MACH_LDEBUG */
704
705 /*
706 * Try to acquire the want_write bit.
707 */
708 while (l->want_write) {
709 if (!lock_miss) {
710 lock_miss = TRUE;
711 }
712
713 i = lock_wait_time[l->can_sleep ? 1 : 0];
714 if (i != 0) {
715 simple_unlock(&l->interlock);
716 #if MACH_LDEBUG
717 if (!--decrementer)
718 Debugger("timeout - want_write");
719 #endif /* MACH_LDEBUG */
720 while (--i != 0 && l->want_write)
721 continue;
722 simple_lock(&l->interlock);
723 }
724
725 if (l->can_sleep && l->want_write) {
726 l->waiting = TRUE;
727 thread_sleep_simple_lock((event_t) l,
728 simple_lock_addr(l->interlock),
729 THREAD_UNINT);
730 /* interlock relocked */
731 }
732 }
733 l->want_write = TRUE;
734
735 /* Wait for readers (and upgrades) to finish */
736
737 #if MACH_LDEBUG
738 decrementer = DECREMENTER_TIMEOUT;
739 #endif /* MACH_LDEBUG */
740 while ((l->read_count != 0) || l->want_upgrade) {
741 if (!lock_miss) {
742 lock_miss = TRUE;
743 }
744
745 i = lock_wait_time[l->can_sleep ? 1 : 0];
746 if (i != 0) {
747 simple_unlock(&l->interlock);
748 #if MACH_LDEBUG
749 if (!--decrementer)
750 Debugger("timeout - wait for readers");
751 #endif /* MACH_LDEBUG */
752 while (--i != 0 && (l->read_count != 0 ||
753 l->want_upgrade))
754 continue;
755 simple_lock(&l->interlock);
756 }
757
758 if (l->can_sleep && (l->read_count != 0 || l->want_upgrade)) {
759 l->waiting = TRUE;
760 thread_sleep_simple_lock((event_t) l,
761 simple_lock_addr(l->interlock),
762 THREAD_UNINT);
763 /* interlock relocked */
764 }
765 }
766
767 simple_unlock(&l->interlock);
768 }
769
770 void
771 lock_done(
772 register lock_t * l)
773 {
774 boolean_t do_wakeup = FALSE;
775
776
777 simple_lock(&l->interlock);
778
779 if (l->read_count != 0) {
780 l->read_count--;
781 }
782 else
783 if (l->want_upgrade) {
784 l->want_upgrade = FALSE;
785 }
786 else {
787 l->want_write = FALSE;
788 }
789
790 /*
791 * There is no reason to wakeup a waiting thread
792 * if the read-count is non-zero. Consider:
793 * we must be dropping a read lock
794 * threads are waiting only if one wants a write lock
795 * if there are still readers, they can't proceed
796 */
797
798 if (l->waiting && (l->read_count == 0)) {
799 l->waiting = FALSE;
800 do_wakeup = TRUE;
801 }
802
803 simple_unlock(&l->interlock);
804
805 if (do_wakeup)
806 thread_wakeup((event_t) l);
807 }
808
809 void
810 lock_read(
811 register lock_t * l)
812 {
813 register int i;
814 #if MACH_LDEBUG
815 int decrementer;
816 #endif /* MACH_LDEBUG */
817
818 simple_lock(&l->interlock);
819
820 #if MACH_LDEBUG
821 decrementer = DECREMENTER_TIMEOUT;
822 #endif /* MACH_LDEBUG */
823 while (l->want_write || l->want_upgrade) {
824 i = lock_wait_time[l->can_sleep ? 1 : 0];
825
826 if (i != 0) {
827 simple_unlock(&l->interlock);
828 #if MACH_LDEBUG
829 if (!--decrementer)
830 Debugger("timeout - wait no writers");
831 #endif /* MACH_LDEBUG */
832 while (--i != 0 && (l->want_write || l->want_upgrade))
833 continue;
834 simple_lock(&l->interlock);
835 }
836
837 if (l->can_sleep && (l->want_write || l->want_upgrade)) {
838 l->waiting = TRUE;
839 thread_sleep_simple_lock((event_t) l,
840 simple_lock_addr(l->interlock),
841 THREAD_UNINT);
842 /* interlock relocked */
843 }
844 }
845
846 l->read_count++;
847
848 simple_unlock(&l->interlock);
849 }
850
851
852 /*
853 * Routine: lock_read_to_write
854 * Function:
855 * Improves a read-only lock to one with
856 * write permission. If another reader has
857 * already requested an upgrade to a write lock,
858 * no lock is held upon return.
859 *
860 * Returns TRUE if the upgrade *failed*.
861 */
862
863 boolean_t
864 lock_read_to_write(
865 register lock_t * l)
866 {
867 register int i;
868 boolean_t do_wakeup = FALSE;
869 #if MACH_LDEBUG
870 int decrementer;
871 #endif /* MACH_LDEBUG */
872
873 simple_lock(&l->interlock);
874
875 l->read_count--;
876
877 if (l->want_upgrade) {
878 /*
879 * Someone else has requested upgrade.
880 * Since we've released a read lock, wake
881 * him up.
882 */
883 if (l->waiting && (l->read_count == 0)) {
884 l->waiting = FALSE;
885 do_wakeup = TRUE;
886 }
887
888 simple_unlock(&l->interlock);
889
890 if (do_wakeup)
891 thread_wakeup((event_t) l);
892 return (TRUE);
893 }
894
895 l->want_upgrade = TRUE;
896
897 #if MACH_LDEBUG
898 decrementer = DECREMENTER_TIMEOUT;
899 #endif /* MACH_LDEBUG */
900 while (l->read_count != 0) {
901 i = lock_wait_time[l->can_sleep ? 1 : 0];
902
903 if (i != 0) {
904 simple_unlock(&l->interlock);
905 #if MACH_LDEBUG
906 if (!--decrementer)
907 Debugger("timeout - read_count");
908 #endif /* MACH_LDEBUG */
909 while (--i != 0 && l->read_count != 0)
910 continue;
911 simple_lock(&l->interlock);
912 }
913
914 if (l->can_sleep && l->read_count != 0) {
915 l->waiting = TRUE;
916 thread_sleep_simple_lock((event_t) l,
917 simple_lock_addr(l->interlock),
918 THREAD_UNINT);
919 /* interlock relocked */
920 }
921 }
922
923 simple_unlock(&l->interlock);
924
925 return (FALSE);
926 }
927
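/*
 * Illustrative sketch (hypothetical caller): lock_read_to_write() returns
 * TRUE when the upgrade fails, in which case the caller no longer holds
 * the lock at all and must reacquire it before retrying the update.
 */
#if 0	/* Example */
static void
example_upgrade(lock_t *l)
{
	lock_read(l);
	if (lock_read_to_write(l)) {
		/* upgrade lost the race: no lock is held here */
		lock_write(l);
	}
	/* ... exclusive access ... */
	lock_done(l);
}
#endif	/* Example */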
928 void
929 lock_write_to_read(
930 register lock_t * l)
931 {
932 boolean_t do_wakeup = FALSE;
933
934 simple_lock(&l->interlock);
935
936 l->read_count++;
937 if (l->want_upgrade)
938 l->want_upgrade = FALSE;
939 else
940 l->want_write = FALSE;
941
942 if (l->waiting) {
943 l->waiting = FALSE;
944 do_wakeup = TRUE;
945 }
946
947 simple_unlock(&l->interlock);
948
949 if (do_wakeup)
950 thread_wakeup((event_t) l);
951 }
952
953
954 #if 0 /* Unused */
955 /*
956 * Routine: lock_try_write
957 * Function:
958 * Tries to get a write lock.
959 *
960 * Returns FALSE if the lock is not held on return.
961 */
962
963 boolean_t
964 lock_try_write(
965 register lock_t * l)
966 {
967 pc_t pc;
968
969 simple_lock(&l->interlock);
970
971 if (l->want_write || l->want_upgrade || l->read_count) {
972 /*
973 * Can't get lock.
974 */
975 simple_unlock(&l->interlock);
976 return(FALSE);
977 }
978
979 /*
980 * Have lock.
981 */
982
983 l->want_write = TRUE;
984
985 simple_unlock(&l->interlock);
986
987 return(TRUE);
988 }
989
990 /*
991 * Routine: lock_try_read
992 * Function:
993 * Tries to get a read lock.
994 *
995 * Returns FALSE if the lock is not held on return.
996 */
997
998 boolean_t
999 lock_try_read(
1000 register lock_t * l)
1001 {
1002 pc_t pc;
1003
1004 simple_lock(&l->interlock);
1005
1006 if (l->want_write || l->want_upgrade) {
1007 simple_unlock(&l->interlock);
1008 return(FALSE);
1009 }
1010
1011 l->read_count++;
1012
1013 simple_unlock(&l->interlock);
1014
1015 return(TRUE);
1016 }
1017 #endif /* Unused */
1018
1019
1020 /*
1021 * Routine: lck_rw_alloc_init
1022 */
1023 lck_rw_t *
1024 lck_rw_alloc_init(
1025 lck_grp_t *grp,
1026 lck_attr_t *attr) {
1027 lck_rw_t *lck;
1028
1029 if ((lck = (lck_rw_t *)kalloc(sizeof(lck_rw_t))) != 0)
1030 lck_rw_init(lck, grp, attr);
1031
1032 return(lck);
1033 }
1034
1035 /*
1036 * Routine: lck_rw_free
1037 */
1038 void
1039 lck_rw_free(
1040 lck_rw_t *lck,
1041 lck_grp_t *grp) {
1042 lck_rw_destroy(lck, grp);
1043 kfree(lck, sizeof(lck_rw_t));
1044 }
1045
1046 /*
1047 * Routine: lck_rw_init
1048 */
1049 void
1050 lck_rw_init(
1051 lck_rw_t *lck,
1052 lck_grp_t *grp,
1053 __unused lck_attr_t *attr) {
1054
1055 hw_lock_init(&lck->interlock);
1056 lck->want_write = FALSE;
1057 lck->want_upgrade = FALSE;
1058 lck->read_count = 0;
1059 lck->can_sleep = TRUE;
1060 lck->lck_rw_tag = 0;
1061
1062 lck_grp_reference(grp);
1063 lck_grp_lckcnt_incr(grp, LCK_TYPE_RW);
1064 }
1065
1066 /*
1067 * Routine: lck_rw_destroy
1068 */
1069 void
1070 lck_rw_destroy(
1071 lck_rw_t *lck,
1072 lck_grp_t *grp) {
1073 if (lck->lck_rw_tag == LCK_RW_TAG_DESTROYED)
1074 return;
1075 lck->lck_rw_tag = LCK_RW_TAG_DESTROYED;
1076 lck_grp_lckcnt_decr(grp, LCK_TYPE_RW);
1077 lck_grp_deallocate(grp);
1078 return;
1079 }
1080
1081 /*
1082 * Sleep locks. These use the same data structure and algorithm
1083 * as the spin locks, but the process sleeps while it is waiting
1084 * for the lock. These work on uniprocessor systems.
1085 */
1086
1087 #define DECREMENTER_TIMEOUT 1000000
1088
1089
1090 /*
1091 * We need to disable interrupts while holding the mutex interlock
1092 * to prevent an IPI intervening.
1093 * Hence, local helper functions lck_interlock_lock()/lck_interlock_unlock().
1094 */
1095 static boolean_t
1096 lck_interlock_lock(lck_rw_t *lck)
1097 {
1098 boolean_t istate;
1099
1100 istate = ml_set_interrupts_enabled(FALSE);
1101 hw_lock_lock(&lck->interlock);
1102
1103 return istate;
1104 }
1105
1106 static void
1107 lck_interlock_unlock(lck_rw_t *lck, boolean_t istate)
1108 {
1109 hw_lock_unlock(&lck->interlock);
1110 ml_set_interrupts_enabled(istate);
1111 }
1112
1113 /*
1114 * Routine: lck_rw_lock_exclusive
1115 */
1116 void
1117 lck_rw_lock_exclusive(
1118 lck_rw_t *lck)
1119 {
1120 int i;
1121 boolean_t lock_miss = FALSE;
1122 wait_result_t res;
1123 #if MACH_LDEBUG
1124 int decrementer;
1125 #endif /* MACH_LDEBUG */
1126 boolean_t istate;
1127
1128 istate = lck_interlock_lock(lck);
1129
1130 #if MACH_LDEBUG
1131 decrementer = DECREMENTER_TIMEOUT;
1132 #endif /* MACH_LDEBUG */
1133
1134 /*
1135 * Try to acquire the want_write bit.
1136 */
1137 while (lck->want_write) {
1138 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_START, (int)lck, 0, 0, 0, 0);
1139
1140 if (!lock_miss) {
1141 lock_miss = TRUE;
1142 }
1143
1144 i = lock_wait_time[lck->can_sleep ? 1 : 0];
1145 if (i != 0) {
1146 lck_interlock_unlock(lck, istate);
1147 #if MACH_LDEBUG
1148 if (!--decrementer)
1149 Debugger("timeout - want_write");
1150 #endif /* MACH_LDEBUG */
1151 while (--i != 0 && lck->want_write)
1152 continue;
1153 istate = lck_interlock_lock(lck);
1154 }
1155
1156 if (lck->can_sleep && lck->want_write) {
1157 lck->waiting = TRUE;
1158 res = assert_wait((event_t) lck, THREAD_UNINT);
1159 if (res == THREAD_WAITING) {
1160 lck_interlock_unlock(lck, istate);
1161 res = thread_block(THREAD_CONTINUE_NULL);
1162 istate = lck_interlock_lock(lck);
1163 }
1164 }
1165 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_END, (int)lck, res, 0, 0, 0);
1166 }
1167 lck->want_write = TRUE;
1168
1169 /* Wait for readers (and upgrades) to finish */
1170
1171 #if MACH_LDEBUG
1172 decrementer = DECREMENTER_TIMEOUT;
1173 #endif /* MACH_LDEBUG */
1174 while ((lck->read_count != 0) || lck->want_upgrade) {
1175 if (!lock_miss) {
1176 lock_miss = TRUE;
1177 }
1178
1179 i = lock_wait_time[lck->can_sleep ? 1 : 0];
1180
1181 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_START,
1182 (int)lck, lck->read_count, lck->want_upgrade, i, 0);
1183
1184 if (i != 0) {
1185 lck_interlock_unlock(lck, istate);
1186 #if MACH_LDEBUG
1187 if (!--decrementer)
1188 Debugger("timeout - wait for readers");
1189 #endif /* MACH_LDEBUG */
1190 while (--i != 0 && (lck->read_count != 0 ||
1191 lck->want_upgrade))
1192 continue;
1193 istate = lck_interlock_lock(lck);
1194 }
1195
1196 if (lck->can_sleep && (lck->read_count != 0 || lck->want_upgrade)) {
1197 lck->waiting = TRUE;
1198 res = assert_wait((event_t) lck, THREAD_UNINT);
1199 if (res == THREAD_WAITING) {
1200 lck_interlock_unlock(lck, istate);
1201 res = thread_block(THREAD_CONTINUE_NULL);
1202 istate = lck_interlock_lock(lck);
1203 }
1204 }
1205 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_END,
1206 (int)lck, lck->read_count, lck->want_upgrade, res, 0);
1207 }
1208
1209 lck_interlock_unlock(lck, istate);
1210 }
1211
1212
1213 /*
1214 * Routine: lck_rw_done
1215 */
1216 lck_rw_type_t
1217 lck_rw_done(
1218 lck_rw_t *lck)
1219 {
1220 boolean_t do_wakeup = FALSE;
1221 lck_rw_type_t lck_rw_type;
1222 boolean_t istate;
1223
1224
1225 istate = lck_interlock_lock(lck);
1226
1227 if (lck->read_count != 0) {
1228 lck_rw_type = LCK_RW_TYPE_SHARED;
1229 lck->read_count--;
1230 }
1231 else {
1232 lck_rw_type = LCK_RW_TYPE_EXCLUSIVE;
1233 if (lck->want_upgrade)
1234 lck->want_upgrade = FALSE;
1235 else
1236 lck->want_write = FALSE;
1237 }
1238
1239 /*
1240 * There is no reason to wakeup a waiting thread
1241 * if the read-count is non-zero. Consider:
1242 * we must be dropping a read lock
1243 * threads are waiting only if one wants a write lock
1244 * if there are still readers, they can't proceed
1245 */
1246
1247 if (lck->waiting && (lck->read_count == 0)) {
1248 lck->waiting = FALSE;
1249 do_wakeup = TRUE;
1250 }
1251
1252 lck_interlock_unlock(lck, istate);
1253
1254 if (do_wakeup)
1255 thread_wakeup((event_t) lck);
1256 return(lck_rw_type);
1257 }
1258
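/*
 * Illustrative sketch (hypothetical caller): lck_rw_done() drops whichever
 * hold the caller has and reports its type, so a single unlock path can
 * cover both shared and exclusive holders.
 */
#if 0	/* Example */
static void
example_rw_usage(lck_rw_t *lck, boolean_t exclusive)
{
	lck_rw_lock(lck, exclusive ? LCK_RW_TYPE_EXCLUSIVE
				   : LCK_RW_TYPE_SHARED);
	/* ... access the protected data ... */
	(void) lck_rw_done(lck);
}
#endif	/* Example */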
1259
1260
1261
1262 /*
1263 * Routine: lck_rw_unlock
1264 */
1265 void
1266 lck_rw_unlock(
1267 lck_rw_t *lck,
1268 lck_rw_type_t lck_rw_type)
1269 {
1270 if (lck_rw_type == LCK_RW_TYPE_SHARED)
1271 lck_rw_unlock_shared(lck);
1272 else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
1273 lck_rw_unlock_exclusive(lck);
1274 else
1275 panic("lck_rw_unlock(): Invalid RW lock type: %d\n", lck_rw_type);
1276 }
1277
1278
1279 /*
1280 * Routine: lck_rw_unlock_shared
1281 */
1282 void
1283 lck_rw_unlock_shared(
1284 lck_rw_t *lck)
1285 {
1286 lck_rw_type_t ret;
1287
1288 ret = lck_rw_done(lck);
1289
1290 if (ret != LCK_RW_TYPE_SHARED)
1291 panic("lck_rw_unlock_shared(): lock held in mode: %d\n", ret);
1292 }
1293
1294
1295 /*
1296 * Routine: lck_rw_unlock_exclusive
1297 */
1298 void
1299 lck_rw_unlock_exclusive(
1300 lck_rw_t *lck)
1301 {
1302 lck_rw_type_t ret;
1303
1304 ret = lck_rw_done(lck);
1305
1306 if (ret != LCK_RW_TYPE_EXCLUSIVE)
1307 panic("lck_rw_unlock_exclusive(): lock held in mode: %d\n", ret);
1308 }
1309
1310
1311 /*
1312 * Routine: lck_rw_lock
1313 */
1314 void
1315 lck_rw_lock(
1316 lck_rw_t *lck,
1317 lck_rw_type_t lck_rw_type)
1318 {
1319 if (lck_rw_type == LCK_RW_TYPE_SHARED)
1320 lck_rw_lock_shared(lck);
1321 else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
1322 lck_rw_lock_exclusive(lck);
1323 else
1324 panic("lck_rw_lock(): Invalid RW lock type: %x\n", lck_rw_type);
1325 }
1326
1327
1328 /*
1329 * Routine: lck_rw_lock_shared
1330 */
1331 void
1332 lck_rw_lock_shared(
1333 lck_rw_t *lck)
1334 {
1335 int i;
1336 wait_result_t res;
1337 #if MACH_LDEBUG
1338 int decrementer;
1339 #endif /* MACH_LDEBUG */
1340 boolean_t istate;
1341
1342 istate = lck_interlock_lock(lck);
1343
1344 #if MACH_LDEBUG
1345 decrementer = DECREMENTER_TIMEOUT;
1346 #endif /* MACH_LDEBUG */
1347 while (lck->want_write || lck->want_upgrade) {
1348 i = lock_wait_time[lck->can_sleep ? 1 : 0];
1349
1350 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_START,
1351 (int)lck, lck->want_write, lck->want_upgrade, i, 0);
1352
1353 if (i != 0) {
1354 lck_interlock_unlock(lck, istate);
1355 #if MACH_LDEBUG
1356 if (!--decrementer)
1357 Debugger("timeout - wait no writers");
1358 #endif /* MACH_LDEBUG */
1359 while (--i != 0 && (lck->want_write || lck->want_upgrade))
1360 continue;
1361 istate = lck_interlock_lock(lck);
1362 }
1363
1364 if (lck->can_sleep && (lck->want_write || lck->want_upgrade)) {
1365 lck->waiting = TRUE;
1366 res = assert_wait((event_t) lck, THREAD_UNINT);
1367 if (res == THREAD_WAITING) {
1368 lck_interlock_unlock(lck, istate);
1369 res = thread_block(THREAD_CONTINUE_NULL);
1370 istate = lck_interlock_lock(lck);
1371 }
1372 }
1373 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_END,
1374 (int)lck, lck->want_write, lck->want_upgrade, res, 0);
1375 }
1376
1377 lck->read_count++;
1378
1379 lck_interlock_unlock(lck, istate);
1380 }
1381
1382
1383 /*
1384 * Routine: lck_rw_lock_shared_to_exclusive
1385 * Function:
1386 * Improves a read-only lock to one with
1387 * write permission. If another reader has
1388 * already requested an upgrade to a write lock,
1389 * no lock is held upon return.
1390 *
1391 * Returns TRUE if the upgrade *failed*.
1392 */
1393
1394 boolean_t
1395 lck_rw_lock_shared_to_exclusive(
1396 lck_rw_t *lck)
1397 {
1398 int i;
1399 boolean_t do_wakeup = FALSE;
1400 wait_result_t res;
1401 #if MACH_LDEBUG
1402 int decrementer;
1403 #endif /* MACH_LDEBUG */
1404 boolean_t istate;
1405
1406 istate = lck_interlock_lock(lck);
1407
1408 lck->read_count--;
1409
1410 if (lck->want_upgrade) {
1411 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_START,
1412 (int)lck, lck->read_count, lck->want_upgrade, 0, 0);
1413
1414 /*
1415 * Someone else has requested upgrade.
1416 * Since we've released a read lock, wake
1417 * him up.
1418 */
1419 if (lck->waiting && (lck->read_count == 0)) {
1420 lck->waiting = FALSE;
1421 do_wakeup = TRUE;
1422 }
1423
1424 lck_interlock_unlock(lck, istate);
1425
1426 if (do_wakeup)
1427 thread_wakeup((event_t) lck);
1428
1429 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_END,
1430 (int)lck, lck->read_count, lck->want_upgrade, 0, 0);
1431
1432 return (TRUE);
1433 }
1434
1435 lck->want_upgrade = TRUE;
1436
1437 #if MACH_LDEBUG
1438 decrementer = DECREMENTER_TIMEOUT;
1439 #endif /* MACH_LDEBUG */
1440 while (lck->read_count != 0) {
1441 i = lock_wait_time[lck->can_sleep ? 1 : 0];
1442
1443 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_START,
1444 (int)lck, lck->read_count, i, 0, 0);
1445
1446 if (i != 0) {
1447 lck_interlock_unlock(lck, istate);
1448 #if MACH_LDEBUG
1449 if (!--decrementer)
1450 Debugger("timeout - read_count");
1451 #endif /* MACH_LDEBUG */
1452 while (--i != 0 && lck->read_count != 0)
1453 continue;
1454 istate = lck_interlock_lock(lck);
1455 }
1456
1457 if (lck->can_sleep && lck->read_count != 0) {
1458 lck->waiting = TRUE;
1459 res = assert_wait((event_t) lck, THREAD_UNINT);
1460 if (res == THREAD_WAITING) {
1461 lck_interlock_unlock(lck, istate);
1462 res = thread_block(THREAD_CONTINUE_NULL);
1463 istate = lck_interlock_lock(lck);
1464 }
1465 }
1466 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_END,
1467 (int)lck, lck->read_count, 0, 0, 0);
1468 }
1469
1470 lck_interlock_unlock(lck, istate);
1471
1472 return (FALSE);
1473 }
1474
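/*
 * Illustrative sketch (hypothetical caller): as with lock_read_to_write()
 * above, a TRUE return from lck_rw_lock_shared_to_exclusive() means the
 * upgrade failed and the shared hold has already been dropped.
 */
#if 0	/* Example */
static void
example_rw_upgrade(lck_rw_t *lck)
{
	lck_rw_lock_shared(lck);
	if (lck_rw_lock_shared_to_exclusive(lck)) {
		/* lost the upgrade race; no lock is held here */
		lck_rw_lock_exclusive(lck);
	}
	/* ... exclusive access ... */
	lck_rw_unlock_exclusive(lck);
}
#endif	/* Example */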
1475 /*
1476 * Routine: lck_rw_lock_exclusive_to_shared
1477 */
1478 void
1479 lck_rw_lock_exclusive_to_shared(
1480 lck_rw_t *lck)
1481 {
1482 boolean_t do_wakeup = FALSE;
1483 boolean_t istate;
1484
1485 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_START,
1486 (int)lck, lck->want_write, lck->want_upgrade, 0, 0);
1487
1488 istate = lck_interlock_lock(lck);
1489
1490 lck->read_count++;
1491 if (lck->want_upgrade)
1492 lck->want_upgrade = FALSE;
1493 else
1494 lck->want_write = FALSE;
1495
1496 if (lck->waiting) {
1497 lck->waiting = FALSE;
1498 do_wakeup = TRUE;
1499 }
1500
1501 lck_interlock_unlock(lck, istate);
1502
1503 if (do_wakeup)
1504 thread_wakeup((event_t) lck);
1505
1506 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_END,
1507 (int)lck, lck->want_write, lck->want_upgrade, lck->read_count, 0);
1508
1509 }
1510
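/*
 * Illustrative sketch (hypothetical caller): a writer that has finished
 * updating but still needs a consistent view can downgrade in place
 * instead of dropping and reacquiring the lock.
 */
#if 0	/* Example */
static void
example_rw_downgrade(lck_rw_t *lck)
{
	lck_rw_lock_exclusive(lck);
	/* ... modify the protected data ... */
	lck_rw_lock_exclusive_to_shared(lck);
	/* ... continue reading the data just written ... */
	lck_rw_unlock_shared(lck);
}
#endif	/* Example */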
1511
1512 /*
1513 * Routine: lck_rw_try_lock
1514 */
1515 boolean_t
1516 lck_rw_try_lock(
1517 lck_rw_t *lck,
1518 lck_rw_type_t lck_rw_type)
1519 {
1520 if (lck_rw_type == LCK_RW_TYPE_SHARED)
1521 return(lck_rw_try_lock_shared(lck));
1522 else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
1523 return(lck_rw_try_lock_exclusive(lck));
1524 else
1525 panic("lck_rw_try_lock(): Invalid rw lock type: %x\n", lck_rw_type);
1526 return(FALSE);
1527 }
1528
1529 /*
1530 * Routine: lck_rw_try_lock_exclusive
1531 * Function:
1532 * Tries to get a write lock.
1533 *
1534 * Returns FALSE if the lock is not held on return.
1535 */
1536
1537 boolean_t
1538 lck_rw_try_lock_exclusive(
1539 lck_rw_t *lck)
1540 {
1541 boolean_t istate;
1542
1543 istate = lck_interlock_lock(lck);
1544
1545 if (lck->want_write || lck->want_upgrade || lck->read_count) {
1546 /*
1547 * Can't get lock.
1548 */
1549 lck_interlock_unlock(lck, istate);
1550 return(FALSE);
1551 }
1552
1553 /*
1554 * Have lock.
1555 */
1556
1557 lck->want_write = TRUE;
1558
1559 lck_interlock_unlock(lck, istate);
1560
1561 return(TRUE);
1562 }
1563
1564 /*
1565 * Routine: lck_rw_try_lock_shared
1566 * Function:
1567 * Tries to get a read lock.
1568 *
1569 * Returns FALSE if the lock is not held on return.
1570 */
1571
1572 boolean_t
1573 lck_rw_try_lock_shared(
1574 lck_rw_t *lck)
1575 {
1576 boolean_t istate;
1577
1578 istate = lck_interlock_lock(lck);
1579
1580 if (lck->want_write || lck->want_upgrade) {
1581 lck_interlock_unlock(lck, istate);
1582 return(FALSE);
1583 }
1584
1585 lck->read_count++;
1586
1587 lck_interlock_unlock(lck, istate);
1588
1589 return(TRUE);
1590 }
1591
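/*
 * Illustrative sketch (hypothetical caller): the try variants never block,
 * so a caller that must not sleep can attempt the lock and fall back to
 * other work when it is contended.
 */
#if 0	/* Example */
static boolean_t
example_rw_try(lck_rw_t *lck)
{
	if (!lck_rw_try_lock_shared(lck))
		return FALSE;		/* contended; caller retries later */
	/* ... read-only access ... */
	lck_rw_unlock_shared(lck);
	return TRUE;
}
#endif	/* Example */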
1592 /*
1593 * Routine: lck_mtx_alloc_init
1594 */
1595 lck_mtx_t *
1596 lck_mtx_alloc_init(
1597 lck_grp_t *grp,
1598 lck_attr_t *attr)
1599 {
1600 lck_mtx_t *lck;
1601
1602 if ((lck = (lck_mtx_t *)kalloc(sizeof(lck_mtx_t))) != 0)
1603 lck_mtx_init(lck, grp, attr);
1604
1605 return(lck);
1606 }
1607
1608 /*
1609 * Routine: lck_mtx_free
1610 */
1611 void
1612 lck_mtx_free(
1613 lck_mtx_t *lck,
1614 lck_grp_t *grp)
1615 {
1616 lck_mtx_destroy(lck, grp);
1617 kfree(lck, sizeof(lck_mtx_t));
1618 }
1619
1620 /*
1621 * Routine: lck_mtx_ext_init
1622 */
1623 static void
1624 lck_mtx_ext_init(
1625 lck_mtx_ext_t *lck,
1626 lck_grp_t *grp,
1627 lck_attr_t *attr)
1628 {
1629 lck->lck_mtx.lck_mtx_ilk = 0;
1630 lck->lck_mtx.lck_mtx_locked = 0;
1631 lck->lck_mtx.lck_mtx_waiters = 0;
1632 lck->lck_mtx.lck_mtx_pri = 0;
1633 lck->lck_mtx_attr = 0;
1634
1635 if ((attr->lck_attr_val) & LCK_ATTR_DEBUG) {
1636 lck->lck_mtx_deb.pc = 0;
1637 lck->lck_mtx_deb.thread = 0;
1638 lck->lck_mtx_deb.type = MUTEX_TAG;
1639 lck->lck_mtx_attr |= LCK_MTX_ATTR_DEBUG;
1640 }
1641
1642 lck->lck_mtx_grp = grp;
1643 }
1644
1645 /*
1646 * Routine: lck_mtx_init
1647 */
1648 void
1649 lck_mtx_init(
1650 lck_mtx_t *lck,
1651 lck_grp_t *grp,
1652 lck_attr_t *attr)
1653 {
1654 lck_mtx_ext_t *lck_ext;
1655
1656 if ((attr != LCK_ATTR_NULL) && ((attr->lck_attr_val) & LCK_ATTR_DEBUG)) {
1657 if ((lck_ext = (lck_mtx_ext_t *)kalloc(sizeof(lck_mtx_ext_t))) != 0) {
1658 lck_mtx_ext_init(lck_ext, grp, attr);
1659 lck->lck_mtx_tag = LCK_MTX_TAG_INDIRECT;
1660 lck->lck_mtx_ptr = lck_ext;
1661 }
1662 } else {
1663 lck->lck_mtx_ilk = 0;
1664 lck->lck_mtx_locked = 0;
1665 lck->lck_mtx_waiters = 0;
1666 lck->lck_mtx_pri = 0;
1667 }
1668 lck_grp_reference(grp);
1669 lck_grp_lckcnt_incr(grp, LCK_TYPE_MTX);
1670 }
1671
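/*
 * Illustrative sketch (hypothetical caller): a mutex is initialized against
 * a lock group and then locked and unlocked through lck_mtx_lock()/
 * lck_mtx_unlock(), whose fast paths live in the assembly portion of the
 * mutex package rather than in this file.
 */
#if 0	/* Example */
static void
example_mtx_usage(lck_mtx_t *mtx, lck_grp_t *grp)
{
	lck_mtx_init(mtx, grp, LCK_ATTR_NULL);

	lck_mtx_lock(mtx);
	/* ... may block; keep spin locks out of this section ... */
	lck_mtx_unlock(mtx);

	lck_mtx_destroy(mtx, grp);
}
#endif	/* Example */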
1672 /*
1673 * Routine: lck_mtx_destroy
1674 */
1675 void
1676 lck_mtx_destroy(
1677 lck_mtx_t *lck,
1678 lck_grp_t *grp)
1679 {
1680 boolean_t lck_is_indirect;
1681
1682 if (lck->lck_mtx_tag == LCK_MTX_TAG_DESTROYED)
1683 return;
1684 lck_is_indirect = (lck->lck_mtx_tag == LCK_MTX_TAG_INDIRECT);
1685 lck->lck_mtx_tag = LCK_MTX_TAG_DESTROYED;
1686 if (lck_is_indirect)
1687 kfree(lck->lck_mtx_ptr, sizeof(lck_mtx_ext_t));
1688 lck_grp_lckcnt_decr(grp, LCK_TYPE_MTX);
1689 lck_grp_deallocate(grp);
1690 return;
1691 }
1692
1693 /*
1694 * Routine: lck_mtx_assert
1695 */
1696 void
1697 lck_mtx_assert(
1698 __unused lck_mtx_t *lck,
1699 __unused unsigned int type)
1700 {
1701 }
1702
1703 #if MACH_KDB
1704
1705 void db_show_one_lock(lock_t *);
1706
1707 void
1708 db_show_one_lock(
1709 lock_t *lock)
1710 {
1711 db_printf("Read_count = 0x%x, %swant_upgrade, %swant_write, ",
1712 lock->read_count,
1713 lock->want_upgrade ? "" : "!",
1714 lock->want_write ? "" : "!");
1715 db_printf("%swaiting, %scan_sleep\n",
1716 lock->waiting ? "" : "!", lock->can_sleep ? "" : "!");
1717 db_printf("Interlock:\n");
1718 db_show_one_simple_lock((db_expr_t)simple_lock_addr(lock->interlock),
1719 TRUE, (db_expr_t)0, (char *)0);
1720 }
1721
1722 #endif /* MACH_KDB */
1723
1724 /*
1725 * The C portion of the mutex package. These routines are only invoked
1726 * if the optimized assembler routines can't do the work.
1727 */
1728
1729 /*
1730 * Routine: mutex_alloc
1731 * Function:
1732 * Allocate a mutex for external users who cannot
1733 * hard-code the structure definition into their
1734 * objects.
1735 * For now just use kalloc, but a zone is probably
1736 * warranted.
1737 */
1738 mutex_t *
1739 mutex_alloc(
1740 unsigned short tag)
1741 {
1742 mutex_t *m;
1743
1744 if ((m = (mutex_t *)kalloc(sizeof(mutex_t))) != 0)
1745 mutex_init(m, tag);
1746 return(m);
1747 }
1748
1749 /*
1750 * Routine: mutex_free
1751 * Function:
1752 * Free a mutex allocated for external users.
1753 * For now just use kfree, but a zone is probably
1754 * warranted.
1755 */
1756 void
1757 mutex_free(
1758 mutex_t *m)
1759 {
1760 kfree(m, sizeof(mutex_t));
1761 }
1762
1763 /*
1764 * Routine: _mutex_assert
1765 */
1766 void
1767 _mutex_assert (
1768 mutex_t *mutex,
1769 unsigned int what)
1770 {
1771
1772 thread_t thread = current_thread();
1773 thread_t holder;
1774
1775 if (panicstr != NULL)
1776 return;
1777
1778 holder = (thread_t) mutex->lck_mtx.lck_mtx_locked;
1779
1780 switch (what) {
1781 case MA_OWNED:
1782 if (thread != holder)
1783 panic("mutex %x not owned\n", mutex);
1784 break;
1785
1786 case MA_NOTOWNED:
1787 if (thread == holder)
1788 panic("mutex %x owned\n", mutex);
1789 break;
1790 }
1791
1792 }
1793
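/*
 * Illustrative sketch (hypothetical caller): _mutex_assert() lets a routine
 * document its locking precondition; it is typically reached through a
 * mutex_assert() wrapper, with the mutex itself taken and released by the
 * assembly portion of the mutex package.
 */
#if 0	/* Example */
static void
example_requires_mutex(mutex_t *m)
{
	_mutex_assert(m, MA_OWNED);	/* panics if m is not held by us */
	/* ... code that relies on m being held ... */
}
#endif	/* Example */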
1794 #if MACH_KDB
1795 /*
1796 * Routines to print out simple_locks and mutexes in a nicely-formatted
1797 * fashion.
1798 */
1799
1800 char *simple_lock_labels = "ENTRY ILK THREAD DURATION CALLER";
1801 char *mutex_labels = "ENTRY LOCKED WAITERS THREAD CALLER";
1802
1803 void
1804 db_show_one_simple_lock (
1805 db_expr_t addr,
1806 boolean_t have_addr,
1807 db_expr_t count,
1808 char * modif)
1809 {
1810 simple_lock_t saddr = (simple_lock_t)addr;
1811
1812 if (saddr == (simple_lock_t)0 || !have_addr) {
1813 db_error ("No simple_lock\n");
1814 }
1815 #if USLOCK_DEBUG
1816 else if (saddr->lock_type != USLOCK_TAG)
1817 db_error ("Not a simple_lock\n");
1818 #endif /* USLOCK_DEBUG */
1819
1820 db_printf ("%s\n", simple_lock_labels);
1821 db_print_simple_lock (saddr);
1822 }
1823
1824 void
1825 db_print_simple_lock (
1826 simple_lock_t addr)
1827 {
1828
1829 db_printf ("%08x %3d", addr, *hw_lock_addr(addr->interlock));
1830 #if USLOCK_DEBUG
1831 db_printf (" %08x", addr->debug.lock_thread);
1832 db_printf (" %08x ", addr->debug.duration[1]);
1833 db_printsym ((int)addr->debug.lock_pc, DB_STGY_ANY);
1834 #endif /* USLOCK_DEBUG */
1835 db_printf ("\n");
1836 }
1837
1838 void
1839 db_show_one_mutex (
1840 db_expr_t addr,
1841 boolean_t have_addr,
1842 db_expr_t count,
1843 char * modif)
1844 {
1845 mutex_t * maddr = (mutex_t *)addr;
1846
1847 if (maddr == (mutex_t *)0 || !have_addr)
1848 db_error ("No mutex\n");
1849 #if MACH_LDEBUG
1850 else if (maddr->type != MUTEX_TAG)
1851 db_error ("Not a mutex\n");
1852 #endif /* MACH_LDEBUG */
1853
1854 db_printf ("%s\n", mutex_labels);
1855 db_print_mutex (maddr);
1856 }
1857
1858 void
1859 db_print_mutex (
1860 mutex_t * addr)
1861 {
1862 db_printf ("%08x %6d %7d",
1863 addr, addr->lck_mtx.lck_mtx_locked, addr->lck_mtx.lck_mtx_waiters);
1864 #if MACH_LDEBUG
1865 db_printf (" %08x ", addr->thread);
1866 db_printsym (addr->pc, DB_STGY_ANY);
1867 #endif /* MACH_LDEBUG */
1868 db_printf ("\n");
1869 }
1870
1871 #endif /* MACH_KDB */