apple/xnu.git (xnu-792.12.6): osfmk/i386/locks_i386.c
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * @OSF_COPYRIGHT@
32 */
33 /*
34 * Mach Operating System
35 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
36 * All Rights Reserved.
37 *
38 * Permission to use, copy, modify and distribute this software and its
39 * documentation is hereby granted, provided that both the copyright
40 * notice and this permission notice appear in all copies of the
41 * software, derivative works or modified versions, and any portions
42 * thereof, and that both notices appear in supporting documentation.
43 *
44 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
45 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
46 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
47 *
48 * Carnegie Mellon requests users of this software to return to
49 *
50 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
51 * School of Computer Science
52 * Carnegie Mellon University
53 * Pittsburgh PA 15213-3890
54 *
55 * any improvements or extensions that they make and grant Carnegie Mellon
56 * the rights to redistribute these changes.
57 */
58 /*
59 * File: kern/lock.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 * Date: 1985
62 *
63 * Locking primitives implementation
64 */
65
66 #include <mach_kdb.h>
67 #include <mach_ldebug.h>
68
69 #include <kern/lock.h>
70 #include <kern/locks.h>
71 #include <kern/kalloc.h>
72 #include <kern/misc_protos.h>
73 #include <kern/thread.h>
74 #include <kern/processor.h>
75 #include <kern/cpu_data.h>
76 #include <kern/cpu_number.h>
77 #include <kern/sched_prim.h>
78 #include <kern/xpr.h>
79 #include <kern/debug.h>
80 #include <string.h>
81
82 #if MACH_KDB
83 #include <ddb/db_command.h>
84 #include <ddb/db_output.h>
85 #include <ddb/db_sym.h>
86 #include <ddb/db_print.h>
87 #endif /* MACH_KDB */
88
89 #ifdef __ppc__
90 #include <ppc/Firmware.h>
91 #endif
92
93 #include <sys/kdebug.h>
94
95 #define LCK_RW_LCK_EXCLUSIVE_CODE 0x100
96 #define LCK_RW_LCK_EXCLUSIVE1_CODE 0x101
97 #define LCK_RW_LCK_SHARED_CODE 0x102
98 #define LCK_RW_LCK_SH_TO_EX_CODE 0x103
99 #define LCK_RW_LCK_SH_TO_EX1_CODE 0x104
100 #define LCK_RW_LCK_EX_TO_SH_CODE 0x105
101
102
103 #define ANY_LOCK_DEBUG (USLOCK_DEBUG || LOCK_DEBUG || MUTEX_DEBUG)
104
105 unsigned int LcksOpts=0;
106 unsigned int lock_wait_time[2] = { (unsigned int)-1, 100 } ;
107
108 /* Forwards */
109
110 #if MACH_KDB
111 void db_print_simple_lock(
112 simple_lock_t addr);
113
114 void db_print_mutex(
115 mutex_t * addr);
116 #endif /* MACH_KDB */
117
118
119 #if USLOCK_DEBUG
120 /*
121 * Perform simple lock checks.
122 */
123 int uslock_check = 1;
124 int max_lock_loops = 100000000;
125 decl_simple_lock_data(extern , printf_lock)
126 decl_simple_lock_data(extern , panic_lock)
127 #if MACH_KDB
128 decl_simple_lock_data(extern , kdb_lock)
129 #endif /* MACH_KDB */
130 #endif /* USLOCK_DEBUG */
131
132
133 /*
134 * We often want to know the addresses of the callers
135 * of the various lock routines. However, this information
136 * is only used for debugging and statistics.
137 */
138 typedef void *pc_t;
139 #define INVALID_PC ((void *) VM_MAX_KERNEL_ADDRESS)
140 #define INVALID_THREAD ((void *) VM_MAX_KERNEL_ADDRESS)
141 #if ANY_LOCK_DEBUG
142 #define OBTAIN_PC(pc,l) ((pc) = (void *) GET_RETURN_PC(&(l)))
143 #define DECL_PC(pc) pc_t pc;
144 #else /* ANY_LOCK_DEBUG */
145 #define DECL_PC(pc)
146 #ifdef lint
147 /*
148 * Eliminate lint complaints about unused local pc variables.
149 */
150 #define OBTAIN_PC(pc,l) ++pc
151 #else /* lint */
152 #define OBTAIN_PC(pc,l)
153 #endif /* lint */
154 #endif /* ANY_LOCK_DEBUG */
155
156
157 /*
158 * Portable lock package implementation of usimple_locks.
159 */
160
161 #if USLOCK_DEBUG
162 #define USLDBG(stmt) stmt
163 void usld_lock_init(usimple_lock_t, unsigned short);
164 void usld_lock_pre(usimple_lock_t, pc_t);
165 void usld_lock_post(usimple_lock_t, pc_t);
166 void usld_unlock(usimple_lock_t, pc_t);
167 void usld_lock_try_pre(usimple_lock_t, pc_t);
168 void usld_lock_try_post(usimple_lock_t, pc_t);
169 int usld_lock_common_checks(usimple_lock_t, char *);
170 #else /* USLOCK_DEBUG */
171 #define USLDBG(stmt)
172 #endif /* USLOCK_DEBUG */
173
174 /*
175 * Routine: lck_spin_alloc_init
176 */
177 lck_spin_t *
178 lck_spin_alloc_init(
179 lck_grp_t *grp,
180 lck_attr_t *attr)
181 {
182 lck_spin_t *lck;
183
184 if ((lck = (lck_spin_t *)kalloc(sizeof(lck_spin_t))) != 0)
185 lck_spin_init(lck, grp, attr);
186
187 return(lck);
188 }
189
190 /*
191 * Routine: lck_spin_free
192 */
193 void
194 lck_spin_free(
195 lck_spin_t *lck,
196 lck_grp_t *grp)
197 {
198 lck_spin_destroy(lck, grp);
199 kfree(lck, sizeof(lck_spin_t));
200 }
201
202 /*
203 * Routine: lck_spin_init
204 */
205 void
206 lck_spin_init(
207 lck_spin_t *lck,
208 lck_grp_t *grp,
209 __unused lck_attr_t *attr)
210 {
211 usimple_lock_init((usimple_lock_t) lck, 0);
212 lck_grp_reference(grp);
213 lck_grp_lckcnt_incr(grp, LCK_TYPE_SPIN);
214 }
215
216 /*
217 * Routine: lck_spin_destroy
218 */
219 void
220 lck_spin_destroy(
221 lck_spin_t *lck,
222 lck_grp_t *grp)
223 {
224 if (lck->lck_spin_data[0] == LCK_SPIN_TAG_DESTROYED)
225 return;
226 lck->lck_spin_data[0] = LCK_SPIN_TAG_DESTROYED;
227 lck_grp_lckcnt_decr(grp, LCK_TYPE_SPIN);
228 lck_grp_deallocate(grp);
229 return;
230 }
231
232 /*
233 * Routine: lck_spin_lock
234 */
235 void
236 lck_spin_lock(
237 lck_spin_t *lck)
238 {
239 usimple_lock((usimple_lock_t) lck);
240 }
241
242 /*
243 * Routine: lck_spin_unlock
244 */
245 void
246 lck_spin_unlock(
247 lck_spin_t *lck)
248 {
249 usimple_unlock((usimple_lock_t) lck);
250 }
251
252
253 /*
254 * Routine: lck_spin_try_lock
255 */
256 boolean_t
257 lck_spin_try_lock(
258 lck_spin_t *lck)
259 {
260 return (boolean_t) usimple_lock_try((usimple_lock_t) lck);
261 }
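/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the try variant returns FALSE instead of spinning when the lock is
 * contended, so a successful try must still be paired with
 * lck_spin_unlock().  Assumes `lck` was previously set up with
 * lck_spin_init() or lck_spin_alloc_init().
 */
#if 0	/* example only */
static boolean_t
example_spin_try_update(lck_spin_t *lck, int *counter)
{
	if (!lck_spin_try_lock(lck))
		return FALSE;		/* contended: caller retries later */
	(*counter)++;			/* short critical section */
	lck_spin_unlock(lck);
	return TRUE;
}
#endif	/* example only */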
262
263 /*
264 * Initialize a usimple_lock.
265 *
266 * No change in preemption state.
267 */
268 void
269 usimple_lock_init(
270 usimple_lock_t l,
271 __unused unsigned short tag)
272 {
273 #ifndef MACHINE_SIMPLE_LOCK
274 USLDBG(usld_lock_init(l, tag));
275 hw_lock_init(&l->interlock);
276 #else
277 simple_lock_init((simple_lock_t)l,tag);
278 #endif
279 }
280
281
282 /*
283 * Acquire a usimple_lock.
284 *
285 * Returns with preemption disabled. Note
286 * that the hw_lock routines are responsible for
287 * maintaining preemption state.
288 */
289 void
290 usimple_lock(
291 usimple_lock_t l)
292 {
293 #ifndef MACHINE_SIMPLE_LOCK
294 pc_t pc = NULL;
295
296 OBTAIN_PC(pc, l);
297 USLDBG(usld_lock_pre(l, pc));
298
299 if(!hw_lock_to(&l->interlock, LockTimeOut)) /* Try to get the lock with a timeout */
300 panic("simple lock deadlock detection - l=%08X, cpu=%d, ret=%08X", l, cpu_number(), pc);
301
302 USLDBG(usld_lock_post(l, pc));
303 #else
304 simple_lock((simple_lock_t)l);
305 #endif
306 }
307
308
309 /*
310 * Release a usimple_lock.
311 *
312 * Returns with preemption enabled. Note
313 * that the hw_lock routines are responsible for
314 * maintaining preemption state.
315 */
316 void
317 usimple_unlock(
318 usimple_lock_t l)
319 {
320 #ifndef MACHINE_SIMPLE_LOCK
321 DECL_PC(pc);
322
323 OBTAIN_PC(pc, l);
324 USLDBG(usld_unlock(l, pc));
325 hw_lock_unlock(&l->interlock);
326 #else
327 simple_unlock_rwmb((simple_lock_t)l);
328 #endif
329 }
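/*
 * Illustrative sketch (editor's addition): the usimple_lock routines are
 * usually reached through the simple_lock() wrappers, but used directly
 * the pairing looks like this.  Preemption stays disabled between the
 * lock and unlock calls, so the critical section must remain short.
 * The usimple_lock_data_t type name is assumed from the machine lock
 * headers.
 */
#if 0	/* example only */
static usimple_lock_data_t	example_uslock;	/* assumed type name */
static int			protected_value;

static void
example_usimple_usage(void)
{
	usimple_lock_init(&example_uslock, 0);	/* normally done once at setup */

	usimple_lock(&example_uslock);		/* preemption now disabled */
	protected_value++;
	usimple_unlock(&example_uslock);	/* preemption restored */
}
#endif	/* example only */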
330
331
332 /*
333 * Conditionally acquire a usimple_lock.
334 *
335 * On success, returns with preemption disabled.
336 * On failure, returns with preemption in the same state
337 * as when first invoked. Note that the hw_lock routines
338 * are responsible for maintaining preemption state.
339 *
340 * XXX No stats are gathered on a miss; I preserved this
341 * behavior from the original assembly-language code, but
342 * doesn't it make sense to log misses? XXX
343 */
344 unsigned int
345 usimple_lock_try(
346 usimple_lock_t l)
347 {
348 #ifndef MACHINE_SIMPLE_LOCK
349 DECL_PC(pc);
350 unsigned int success;
351
352 OBTAIN_PC(pc, l);
353 USLDBG(usld_lock_try_pre(l, pc));
354 if ((success = hw_lock_try(&l->interlock))) {
355 USLDBG(usld_lock_try_post(l, pc));
356 }
357 return success;
358 #else
359 return(simple_lock_try((simple_lock_t)l));
360 #endif
361 }
362
363 #if USLOCK_DEBUG
364 /*
365 * States of a usimple_lock. The default when initializing
366 * a usimple_lock is setting it up for debug checking.
367 */
368 #define USLOCK_CHECKED 0x0001 /* lock is being checked */
369 #define USLOCK_TAKEN 0x0002 /* lock has been taken */
370 #define USLOCK_INIT 0xBAA0 /* lock has been initialized */
371 #define USLOCK_INITIALIZED (USLOCK_INIT|USLOCK_CHECKED)
372 #define USLOCK_CHECKING(l) (uslock_check && \
373 ((l)->debug.state & USLOCK_CHECKED))
374
375 /*
376 * Trace activities of a particularly interesting lock.
377 */
378 void usl_trace(usimple_lock_t, int, pc_t, const char *);
379
380
381 /*
382 * Initialize the debugging information contained
383 * in a usimple_lock.
384 */
385 void
386 usld_lock_init(
387 usimple_lock_t l,
388 __unused unsigned short tag)
389 {
390 if (l == USIMPLE_LOCK_NULL)
391 panic("lock initialization: null lock pointer");
392 l->lock_type = USLOCK_TAG;
393 l->debug.state = uslock_check ? USLOCK_INITIALIZED : 0;
394 l->debug.lock_cpu = l->debug.unlock_cpu = 0;
395 l->debug.lock_pc = l->debug.unlock_pc = INVALID_PC;
396 l->debug.lock_thread = l->debug.unlock_thread = INVALID_THREAD;
397 l->debug.duration[0] = l->debug.duration[1] = 0;
398 	l->debug.unlock_cpu = 0;
399 	l->debug.unlock_pc = INVALID_PC;
400 	l->debug.unlock_thread = INVALID_THREAD;
401 }
402
403
404 /*
405 * These checks apply to all usimple_locks, not just
406 * those with USLOCK_CHECKED turned on.
407 */
408 int
409 usld_lock_common_checks(
410 usimple_lock_t l,
411 char *caller)
412 {
413 if (l == USIMPLE_LOCK_NULL)
414 panic("%s: null lock pointer", caller);
415 if (l->lock_type != USLOCK_TAG)
416 panic("%s: 0x%x is not a usimple lock", caller, (integer_t) l);
417 if (!(l->debug.state & USLOCK_INIT))
418 panic("%s: 0x%x is not an initialized lock",
419 caller, (integer_t) l);
420 return USLOCK_CHECKING(l);
421 }
422
423
424 /*
425 * Debug checks on a usimple_lock just before attempting
426 * to acquire it.
427 */
428 /* ARGSUSED */
429 void
430 usld_lock_pre(
431 usimple_lock_t l,
432 pc_t pc)
433 {
434 char caller[] = "usimple_lock";
435
436
437 if (!usld_lock_common_checks(l, caller))
438 return;
439
440 /*
441 	 * Note that we have a weird case where we are getting a lock when we are
442 * in the process of putting the system to sleep. We are running with no
443 * current threads, therefore we can't tell if we are trying to retake a lock
444 * we have or someone on the other processor has it. Therefore we just
445 * ignore this test if the locking thread is 0.
446 */
447
448 if ((l->debug.state & USLOCK_TAKEN) && l->debug.lock_thread &&
449 l->debug.lock_thread == (void *) current_thread()) {
450 printf("%s: lock 0x%x already locked (at 0x%x) by",
451 caller, (integer_t) l, l->debug.lock_pc);
452 printf(" current thread 0x%x (new attempt at pc 0x%x)\n",
453 l->debug.lock_thread, pc);
454 panic(caller);
455 }
456 mp_disable_preemption();
457 usl_trace(l, cpu_number(), pc, caller);
458 mp_enable_preemption();
459 }
460
461
462 /*
463 * Debug checks on a usimple_lock just after acquiring it.
464 *
465 * Pre-emption has been disabled at this point,
466 * so we are safe in using cpu_number.
467 */
468 void
469 usld_lock_post(
470 usimple_lock_t l,
471 pc_t pc)
472 {
473 register int mycpu;
474 char caller[] = "successful usimple_lock";
475
476
477 if (!usld_lock_common_checks(l, caller))
478 return;
479
480 if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
481 panic("%s: lock 0x%x became uninitialized",
482 caller, (integer_t) l);
483 if ((l->debug.state & USLOCK_TAKEN))
484 panic("%s: lock 0x%x became TAKEN by someone else",
485 caller, (integer_t) l);
486
487 mycpu = cpu_number();
488 l->debug.lock_thread = (void *)current_thread();
489 l->debug.state |= USLOCK_TAKEN;
490 l->debug.lock_pc = pc;
491 l->debug.lock_cpu = mycpu;
492
493 usl_trace(l, mycpu, pc, caller);
494 }
495
496
497 /*
498 * Debug checks on a usimple_lock just before
499 * releasing it. Note that the caller has not
500 * yet released the hardware lock.
501 *
502 * Preemption is still disabled, so there's
503 * no problem using cpu_number.
504 */
505 void
506 usld_unlock(
507 usimple_lock_t l,
508 pc_t pc)
509 {
510 register int mycpu;
511 char caller[] = "usimple_unlock";
512
513
514 if (!usld_lock_common_checks(l, caller))
515 return;
516
517 mycpu = cpu_number();
518
519 if (!(l->debug.state & USLOCK_TAKEN))
520 panic("%s: lock 0x%x hasn't been taken",
521 caller, (integer_t) l);
522 if (l->debug.lock_thread != (void *) current_thread())
523 panic("%s: unlocking lock 0x%x, owned by thread 0x%x",
524 caller, (integer_t) l, l->debug.lock_thread);
525 if (l->debug.lock_cpu != mycpu) {
526 printf("%s: unlocking lock 0x%x on cpu 0x%x",
527 caller, (integer_t) l, mycpu);
528 printf(" (acquired on cpu 0x%x)\n", l->debug.lock_cpu);
529 panic(caller);
530 }
531 usl_trace(l, mycpu, pc, caller);
532
533 l->debug.unlock_thread = l->debug.lock_thread;
534 	l->debug.lock_thread = INVALID_THREAD;
535 l->debug.state &= ~USLOCK_TAKEN;
536 l->debug.unlock_pc = pc;
537 l->debug.unlock_cpu = mycpu;
538 }
539
540
541 /*
542 * Debug checks on a usimple_lock just before
543 * attempting to acquire it.
544 *
545 * Preemption isn't guaranteed to be disabled.
546 */
547 void
548 usld_lock_try_pre(
549 usimple_lock_t l,
550 pc_t pc)
551 {
552 char caller[] = "usimple_lock_try";
553
554 if (!usld_lock_common_checks(l, caller))
555 return;
556 mp_disable_preemption();
557 usl_trace(l, cpu_number(), pc, caller);
558 mp_enable_preemption();
559 }
560
561
562 /*
563 * Debug checks on a usimple_lock just after
564 * successfully attempting to acquire it.
565 *
566 * Preemption has been disabled by the
567 * lock acquisition attempt, so it's safe
568 * to use cpu_number.
569 */
570 void
571 usld_lock_try_post(
572 usimple_lock_t l,
573 pc_t pc)
574 {
575 register int mycpu;
576 char caller[] = "successful usimple_lock_try";
577
578 if (!usld_lock_common_checks(l, caller))
579 return;
580
581 if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
582 panic("%s: lock 0x%x became uninitialized",
583 caller, (integer_t) l);
584 if ((l->debug.state & USLOCK_TAKEN))
585 panic("%s: lock 0x%x became TAKEN by someone else",
586 caller, (integer_t) l);
587
588 mycpu = cpu_number();
589 l->debug.lock_thread = (void *) current_thread();
590 l->debug.state |= USLOCK_TAKEN;
591 l->debug.lock_pc = pc;
592 l->debug.lock_cpu = mycpu;
593
594 usl_trace(l, mycpu, pc, caller);
595 }
596
597
598 /*
599 * For very special cases, set traced_lock to point to a
600 * specific lock of interest. The result is a series of
601 * XPRs showing lock operations on that lock. The lock_seq
602 * value is used to show the order of those operations.
603 */
604 usimple_lock_t traced_lock;
605 unsigned int lock_seq;
606
607 void
608 usl_trace(
609 usimple_lock_t l,
610 int mycpu,
611 pc_t pc,
612 const char * op_name)
613 {
614 if (traced_lock == l) {
615 XPR(XPR_SLOCK,
616 "seq %d, cpu %d, %s @ %x\n",
617 (integer_t) lock_seq, (integer_t) mycpu,
618 (integer_t) op_name, (integer_t) pc, 0);
619 lock_seq++;
620 }
621 }
622
623
624 #endif /* USLOCK_DEBUG */
625
626 /*
627 * Routine: lock_alloc
628 * Function:
629 * Allocate a lock for external users who cannot
630 * hard-code the structure definition into their
631 * objects.
632 * For now just use kalloc, but a zone is probably
633 * warranted.
634 */
635 lock_t *
636 lock_alloc(
637 boolean_t can_sleep,
638 unsigned short tag,
639 unsigned short tag1)
640 {
641 lock_t *l;
642
643 if ((l = (lock_t *)kalloc(sizeof(lock_t))) != 0)
644 lock_init(l, can_sleep, tag, tag1);
645 return(l);
646 }
647
648 /*
649 * Routine: lock_free
650 * Function:
651 * Free a lock allocated for external users.
652 * For now just use kfree, but a zone is probably
653 * warranted.
654 */
655 void
656 lock_free(
657 lock_t *l)
658 {
659 kfree(l, sizeof(lock_t));
660 }
661
662
663 /*
664 * Routine: lock_init
665 * Function:
666 * Initialize a lock; required before use.
667 * Note that clients declare the "struct lock"
668 * variables and then initialize them, rather
669 * than getting a new one from this module.
670 */
671 void
672 lock_init(
673 lock_t *l,
674 boolean_t can_sleep,
675 __unused unsigned short tag,
676 unsigned short tag1)
677 {
678 (void) memset((void *) l, 0, sizeof(lock_t));
679
680 simple_lock_init(&l->interlock, tag1);
681 l->want_write = FALSE;
682 l->want_upgrade = FALSE;
683 l->read_count = 0;
684 l->can_sleep = can_sleep;
685 }
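/*
 * Illustrative sketch (editor's addition): as the comment above notes,
 * clients normally declare the lock_t themselves and call lock_init()
 * once before first use; lock_alloc()/lock_free() exist only for callers
 * that cannot embed the structure.
 */
#if 0	/* example only */
static lock_t	example_rw_lock;		/* embedded in the client's own data */

static void
example_lock_setup(void)
{
	lock_init(&example_rw_lock, TRUE /* can_sleep */, 0, 0);
}
#endif	/* example only */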
686
687
688 /*
689 * Sleep locks. These use the same data structure and algorithm
690 * as the spin locks, but the process sleeps while it is waiting
691 * for the lock. These work on uniprocessor systems.
692 */
693
694 #define DECREMENTER_TIMEOUT 1000000
695
696 void
697 lock_write(
698 register lock_t * l)
699 {
700 register int i;
701 boolean_t lock_miss = FALSE;
702 #if MACH_LDEBUG
703 int decrementer;
704 #endif /* MACH_LDEBUG */
705
706 simple_lock(&l->interlock);
707
708 #if MACH_LDEBUG
709 decrementer = DECREMENTER_TIMEOUT;
710 #endif /* MACH_LDEBUG */
711
712 /*
713 * Try to acquire the want_write bit.
714 */
715 while (l->want_write) {
716 if (!lock_miss) {
717 lock_miss = TRUE;
718 }
719
720 i = lock_wait_time[l->can_sleep ? 1 : 0];
721 if (i != 0) {
722 simple_unlock(&l->interlock);
723 #if MACH_LDEBUG
724 if (!--decrementer)
725 Debugger("timeout - want_write");
726 #endif /* MACH_LDEBUG */
727 while (--i != 0 && l->want_write)
728 continue;
729 simple_lock(&l->interlock);
730 }
731
732 if (l->can_sleep && l->want_write) {
733 l->waiting = TRUE;
734 thread_sleep_simple_lock((event_t) l,
735 simple_lock_addr(l->interlock),
736 THREAD_UNINT);
737 /* interlock relocked */
738 }
739 }
740 l->want_write = TRUE;
741
742 /* Wait for readers (and upgrades) to finish */
743
744 #if MACH_LDEBUG
745 decrementer = DECREMENTER_TIMEOUT;
746 #endif /* MACH_LDEBUG */
747 while ((l->read_count != 0) || l->want_upgrade) {
748 if (!lock_miss) {
749 lock_miss = TRUE;
750 }
751
752 i = lock_wait_time[l->can_sleep ? 1 : 0];
753 if (i != 0) {
754 simple_unlock(&l->interlock);
755 #if MACH_LDEBUG
756 if (!--decrementer)
757 Debugger("timeout - wait for readers");
758 #endif /* MACH_LDEBUG */
759 while (--i != 0 && (l->read_count != 0 ||
760 l->want_upgrade))
761 continue;
762 simple_lock(&l->interlock);
763 }
764
765 if (l->can_sleep && (l->read_count != 0 || l->want_upgrade)) {
766 l->waiting = TRUE;
767 thread_sleep_simple_lock((event_t) l,
768 simple_lock_addr(l->interlock),
769 THREAD_UNINT);
770 /* interlock relocked */
771 }
772 }
773
774 simple_unlock(&l->interlock);
775 }
776
777 void
778 lock_done(
779 register lock_t * l)
780 {
781 boolean_t do_wakeup = FALSE;
782
783
784 simple_lock(&l->interlock);
785
786 if (l->read_count != 0) {
787 l->read_count--;
788 }
789 else
790 if (l->want_upgrade) {
791 l->want_upgrade = FALSE;
792 }
793 else {
794 l->want_write = FALSE;
795 }
796
797 /*
798 * There is no reason to wakeup a waiting thread
799 * if the read-count is non-zero. Consider:
800 * we must be dropping a read lock
801 * threads are waiting only if one wants a write lock
802 * if there are still readers, they can't proceed
803 */
804
805 if (l->waiting && (l->read_count == 0)) {
806 l->waiting = FALSE;
807 do_wakeup = TRUE;
808 }
809
810 simple_unlock(&l->interlock);
811
812 if (do_wakeup)
813 thread_wakeup((event_t) l);
814 }
815
816 void
817 lock_read(
818 register lock_t * l)
819 {
820 register int i;
821 #if MACH_LDEBUG
822 int decrementer;
823 #endif /* MACH_LDEBUG */
824
825 simple_lock(&l->interlock);
826
827 #if MACH_LDEBUG
828 decrementer = DECREMENTER_TIMEOUT;
829 #endif /* MACH_LDEBUG */
830 while (l->want_write || l->want_upgrade) {
831 i = lock_wait_time[l->can_sleep ? 1 : 0];
832
833 if (i != 0) {
834 simple_unlock(&l->interlock);
835 #if MACH_LDEBUG
836 if (!--decrementer)
837 Debugger("timeout - wait no writers");
838 #endif /* MACH_LDEBUG */
839 while (--i != 0 && (l->want_write || l->want_upgrade))
840 continue;
841 simple_lock(&l->interlock);
842 }
843
844 if (l->can_sleep && (l->want_write || l->want_upgrade)) {
845 l->waiting = TRUE;
846 thread_sleep_simple_lock((event_t) l,
847 simple_lock_addr(l->interlock),
848 THREAD_UNINT);
849 /* interlock relocked */
850 }
851 }
852
853 l->read_count++;
854
855 simple_unlock(&l->interlock);
856 }
857
858
859 /*
860 * Routine: lock_read_to_write
861 * Function:
862 * Improves a read-only lock to one with
863 * write permission. If another reader has
864 * already requested an upgrade to a write lock,
865 * no lock is held upon return.
866 *
867 * Returns TRUE if the upgrade *failed*.
868 */
869
870 boolean_t
871 lock_read_to_write(
872 register lock_t * l)
873 {
874 register int i;
875 boolean_t do_wakeup = FALSE;
876 #if MACH_LDEBUG
877 int decrementer;
878 #endif /* MACH_LDEBUG */
879
880 simple_lock(&l->interlock);
881
882 l->read_count--;
883
884 if (l->want_upgrade) {
885 /*
886 * Someone else has requested upgrade.
887 * Since we've released a read lock, wake
888 * him up.
889 */
890 if (l->waiting && (l->read_count == 0)) {
891 l->waiting = FALSE;
892 do_wakeup = TRUE;
893 }
894
895 simple_unlock(&l->interlock);
896
897 if (do_wakeup)
898 thread_wakeup((event_t) l);
899 return (TRUE);
900 }
901
902 l->want_upgrade = TRUE;
903
904 #if MACH_LDEBUG
905 decrementer = DECREMENTER_TIMEOUT;
906 #endif /* MACH_LDEBUG */
907 while (l->read_count != 0) {
908 i = lock_wait_time[l->can_sleep ? 1 : 0];
909
910 if (i != 0) {
911 simple_unlock(&l->interlock);
912 #if MACH_LDEBUG
913 if (!--decrementer)
914 Debugger("timeout - read_count");
915 #endif /* MACH_LDEBUG */
916 while (--i != 0 && l->read_count != 0)
917 continue;
918 simple_lock(&l->interlock);
919 }
920
921 if (l->can_sleep && l->read_count != 0) {
922 l->waiting = TRUE;
923 thread_sleep_simple_lock((event_t) l,
924 simple_lock_addr(l->interlock),
925 THREAD_UNINT);
926 /* interlock relocked */
927 }
928 }
929
930 simple_unlock(&l->interlock);
931
932 return (FALSE);
933 }
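/*
 * Illustrative sketch (editor's addition): because lock_read_to_write()
 * returns TRUE when the upgrade fails and drops the lock entirely, a
 * caller that still needs write access must re-acquire from scratch.
 */
#if 0	/* example only */
static void
example_upgrade(lock_t *l)
{
	lock_read(l);
	/* ... discover that the data must be modified ... */
	if (lock_read_to_write(l)) {
		/* upgrade failed: no lock is held at this point */
		lock_write(l);
	}
	/* write lock held either way */
	lock_done(l);
}
#endif	/* example only */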
934
935 void
936 lock_write_to_read(
937 register lock_t * l)
938 {
939 boolean_t do_wakeup = FALSE;
940
941 simple_lock(&l->interlock);
942
943 l->read_count++;
944 if (l->want_upgrade)
945 l->want_upgrade = FALSE;
946 else
947 l->want_write = FALSE;
948
949 if (l->waiting) {
950 l->waiting = FALSE;
951 do_wakeup = TRUE;
952 }
953
954 simple_unlock(&l->interlock);
955
956 if (do_wakeup)
957 thread_wakeup((event_t) l);
958 }
959
960
961 #if 0 /* Unused */
962 /*
963 * Routine: lock_try_write
964 * Function:
965 * Tries to get a write lock.
966 *
967 * Returns FALSE if the lock is not held on return.
968 */
969
970 boolean_t
971 lock_try_write(
972 register lock_t * l)
973 {
974 pc_t pc;
975
976 simple_lock(&l->interlock);
977
978 if (l->want_write || l->want_upgrade || l->read_count) {
979 /*
980 * Can't get lock.
981 */
982 simple_unlock(&l->interlock);
983 return(FALSE);
984 }
985
986 /*
987 * Have lock.
988 */
989
990 l->want_write = TRUE;
991
992 simple_unlock(&l->interlock);
993
994 return(TRUE);
995 }
996
997 /*
998 * Routine: lock_try_read
999 * Function:
1000 * Tries to get a read lock.
1001 *
1002 * Returns FALSE if the lock is not held on return.
1003 */
1004
1005 boolean_t
1006 lock_try_read(
1007 register lock_t * l)
1008 {
1009 pc_t pc;
1010
1011 simple_lock(&l->interlock);
1012
1013 if (l->want_write || l->want_upgrade) {
1014 simple_unlock(&l->interlock);
1015 return(FALSE);
1016 }
1017
1018 l->read_count++;
1019
1020 simple_unlock(&l->interlock);
1021
1022 return(TRUE);
1023 }
1024 #endif /* Unused */
1025
1026
1027 /*
1028 * Routine: lck_rw_alloc_init
1029 */
1030 lck_rw_t *
1031 lck_rw_alloc_init(
1032 lck_grp_t *grp,
1033 lck_attr_t *attr) {
1034 lck_rw_t *lck;
1035
1036 if ((lck = (lck_rw_t *)kalloc(sizeof(lck_rw_t))) != 0)
1037 lck_rw_init(lck, grp, attr);
1038
1039 return(lck);
1040 }
1041
1042 /*
1043 * Routine: lck_rw_free
1044 */
1045 void
1046 lck_rw_free(
1047 lck_rw_t *lck,
1048 lck_grp_t *grp) {
1049 lck_rw_destroy(lck, grp);
1050 kfree(lck, sizeof(lck_rw_t));
1051 }
1052
1053 /*
1054 * Routine: lck_rw_init
1055 */
1056 void
1057 lck_rw_init(
1058 lck_rw_t *lck,
1059 lck_grp_t *grp,
1060 __unused lck_attr_t *attr) {
1061
1062 hw_lock_init(&lck->interlock);
1063 lck->want_write = FALSE;
1064 lck->want_upgrade = FALSE;
1065 lck->read_count = 0;
1066 lck->can_sleep = TRUE;
1067 lck->lck_rw_tag = 0;
1068
1069 lck_grp_reference(grp);
1070 lck_grp_lckcnt_incr(grp, LCK_TYPE_RW);
1071 }
1072
1073 /*
1074 * Routine: lck_rw_destroy
1075 */
1076 void
1077 lck_rw_destroy(
1078 lck_rw_t *lck,
1079 lck_grp_t *grp) {
1080 if (lck->lck_rw_tag == LCK_RW_TAG_DESTROYED)
1081 return;
1082 lck->lck_rw_tag = LCK_RW_TAG_DESTROYED;
1083 lck_grp_lckcnt_decr(grp, LCK_TYPE_RW);
1084 lck_grp_deallocate(grp);
1085 return;
1086 }
1087
1088 /*
1089 * Sleep locks. These use the same data structure and algorithm
1090 * as the spin locks, but the process sleeps while it is waiting
1091 * for the lock. These work on uniprocessor systems.
1092 */
1093
1094 #define DECREMENTER_TIMEOUT 1000000
1095
1096
1097 /*
1098 * We need to disable interrupts while holding the mutex interlock
1099 * to prevent an IPI intervening.
1100 * Hence, local helper functions lck_interlock_lock()/lck_interlock_unlock().
1101 */
1102 static boolean_t
1103 lck_interlock_lock(lck_rw_t *lck)
1104 {
1105 boolean_t istate;
1106
1107 istate = ml_set_interrupts_enabled(FALSE);
1108 hw_lock_lock(&lck->interlock);
1109
1110 return istate;
1111 }
1112
1113 static void
1114 lck_interlock_unlock(lck_rw_t *lck, boolean_t istate)
1115 {
1116 hw_lock_unlock(&lck->interlock);
1117 ml_set_interrupts_enabled(istate);
1118 }
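/*
 * Illustrative sketch (editor's addition): every rw-lock routine below
 * brackets its interlock critical section with this save/restore pair so
 * that the interrupt state found on entry is faithfully restored.
 */
#if 0	/* example only */
static void
example_interlock_section(lck_rw_t *lck)
{
	boolean_t	istate;

	istate = lck_interlock_lock(lck);	/* interrupts off, interlock held */
	/* ... examine or update read_count, want_write, want_upgrade ... */
	lck_interlock_unlock(lck, istate);	/* interlock dropped, interrupts restored */
}
#endif	/* example only */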
1119
1120 /*
1121 * Routine: lck_rw_lock_exclusive
1122 */
1123 void
1124 lck_rw_lock_exclusive(
1125 lck_rw_t *lck)
1126 {
1127 int i;
1128 boolean_t lock_miss = FALSE;
1129 wait_result_t res;
1130 #if MACH_LDEBUG
1131 int decrementer;
1132 #endif /* MACH_LDEBUG */
1133 boolean_t istate;
1134
1135 istate = lck_interlock_lock(lck);
1136
1137 #if MACH_LDEBUG
1138 decrementer = DECREMENTER_TIMEOUT;
1139 #endif /* MACH_LDEBUG */
1140
1141 /*
1142 * Try to acquire the want_write bit.
1143 */
1144 while (lck->want_write) {
1145 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_START, (int)lck, 0, 0, 0, 0);
1146
1147 if (!lock_miss) {
1148 lock_miss = TRUE;
1149 }
1150
1151 i = lock_wait_time[lck->can_sleep ? 1 : 0];
1152 if (i != 0) {
1153 lck_interlock_unlock(lck, istate);
1154 #if MACH_LDEBUG
1155 if (!--decrementer)
1156 Debugger("timeout - want_write");
1157 #endif /* MACH_LDEBUG */
1158 while (--i != 0 && lck->want_write)
1159 continue;
1160 istate = lck_interlock_lock(lck);
1161 }
1162
1163 if (lck->can_sleep && lck->want_write) {
1164 lck->waiting = TRUE;
1165 res = assert_wait((event_t) lck, THREAD_UNINT);
1166 if (res == THREAD_WAITING) {
1167 lck_interlock_unlock(lck, istate);
1168 res = thread_block(THREAD_CONTINUE_NULL);
1169 istate = lck_interlock_lock(lck);
1170 }
1171 }
1172 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_END, (int)lck, res, 0, 0, 0);
1173 }
1174 lck->want_write = TRUE;
1175
1176 /* Wait for readers (and upgrades) to finish */
1177
1178 #if MACH_LDEBUG
1179 decrementer = DECREMENTER_TIMEOUT;
1180 #endif /* MACH_LDEBUG */
1181 while ((lck->read_count != 0) || lck->want_upgrade) {
1182 if (!lock_miss) {
1183 lock_miss = TRUE;
1184 }
1185
1186 i = lock_wait_time[lck->can_sleep ? 1 : 0];
1187
1188 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_START,
1189 (int)lck, lck->read_count, lck->want_upgrade, i, 0);
1190
1191 if (i != 0) {
1192 lck_interlock_unlock(lck, istate);
1193 #if MACH_LDEBUG
1194 if (!--decrementer)
1195 Debugger("timeout - wait for readers");
1196 #endif /* MACH_LDEBUG */
1197 while (--i != 0 && (lck->read_count != 0 ||
1198 lck->want_upgrade))
1199 continue;
1200 istate = lck_interlock_lock(lck);
1201 }
1202
1203 if (lck->can_sleep && (lck->read_count != 0 || lck->want_upgrade)) {
1204 lck->waiting = TRUE;
1205 res = assert_wait((event_t) lck, THREAD_UNINT);
1206 if (res == THREAD_WAITING) {
1207 lck_interlock_unlock(lck, istate);
1208 res = thread_block(THREAD_CONTINUE_NULL);
1209 istate = lck_interlock_lock(lck);
1210 }
1211 }
1212 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_END,
1213 (int)lck, lck->read_count, lck->want_upgrade, res, 0);
1214 }
1215
1216 lck_interlock_unlock(lck, istate);
1217 }
1218
1219
1220 /*
1221 * Routine: lck_rw_done
1222 */
1223 lck_rw_type_t
1224 lck_rw_done(
1225 lck_rw_t *lck)
1226 {
1227 boolean_t do_wakeup = FALSE;
1228 lck_rw_type_t lck_rw_type;
1229 boolean_t istate;
1230
1231
1232 istate = lck_interlock_lock(lck);
1233
1234 if (lck->read_count != 0) {
1235 lck_rw_type = LCK_RW_TYPE_SHARED;
1236 lck->read_count--;
1237 }
1238 else {
1239 lck_rw_type = LCK_RW_TYPE_EXCLUSIVE;
1240 if (lck->want_upgrade)
1241 lck->want_upgrade = FALSE;
1242 else
1243 lck->want_write = FALSE;
1244 }
1245
1246 /*
1247 * There is no reason to wakeup a waiting thread
1248 * if the read-count is non-zero. Consider:
1249 * we must be dropping a read lock
1250 * threads are waiting only if one wants a write lock
1251 * if there are still readers, they can't proceed
1252 */
1253
1254 if (lck->waiting && (lck->read_count == 0)) {
1255 lck->waiting = FALSE;
1256 do_wakeup = TRUE;
1257 }
1258
1259 lck_interlock_unlock(lck, istate);
1260
1261 if (do_wakeup)
1262 thread_wakeup((event_t) lck);
1263 return(lck_rw_type);
1264 }
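/*
 * Illustrative sketch (editor's addition): typical lifecycle of an
 * lck_rw_t.  The lck_grp_alloc_init() call is assumed from the lck_grp
 * API in kern/locks.c; everything else is defined in this file.
 * lck_rw_done() releases either hold type, or the unlock can name the
 * type explicitly.
 */
#if 0	/* example only */
static void
example_rw_usage(void)
{
	lck_grp_t	*grp = lck_grp_alloc_init("example", LCK_GRP_ATTR_NULL);	/* assumed API */
	lck_rw_t	*lck = lck_rw_alloc_init(grp, LCK_ATTR_NULL);

	lck_rw_lock_shared(lck);			/* reader */
	/* ... read shared data ... */
	(void) lck_rw_done(lck);

	lck_rw_lock(lck, LCK_RW_TYPE_EXCLUSIVE);	/* writer, by type */
	/* ... modify shared data ... */
	lck_rw_unlock(lck, LCK_RW_TYPE_EXCLUSIVE);

	lck_rw_free(lck, grp);
}
#endif	/* example only */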
1265
1266
1267
1268
1269 /*
1270 * Routine: lck_rw_unlock
1271 */
1272 void
1273 lck_rw_unlock(
1274 lck_rw_t *lck,
1275 lck_rw_type_t lck_rw_type)
1276 {
1277 if (lck_rw_type == LCK_RW_TYPE_SHARED)
1278 lck_rw_unlock_shared(lck);
1279 else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
1280 lck_rw_unlock_exclusive(lck);
1281 else
1282 panic("lck_rw_unlock(): Invalid RW lock type: %d\n", lck_rw_type);
1283 }
1284
1285
1286 /*
1287 * Routine: lck_rw_unlock_shared
1288 */
1289 void
1290 lck_rw_unlock_shared(
1291 lck_rw_t *lck)
1292 {
1293 lck_rw_type_t ret;
1294
1295 ret = lck_rw_done(lck);
1296
1297 if (ret != LCK_RW_TYPE_SHARED)
1298 panic("lck_rw_unlock(): lock held in mode: %d\n", ret);
1299 }
1300
1301
1302 /*
1303 * Routine: lck_rw_unlock_exclusive
1304 */
1305 void
1306 lck_rw_unlock_exclusive(
1307 lck_rw_t *lck)
1308 {
1309 lck_rw_type_t ret;
1310
1311 ret = lck_rw_done(lck);
1312
1313 if (ret != LCK_RW_TYPE_EXCLUSIVE)
1314 panic("lck_rw_unlock_exclusive(): lock held in mode: %d\n", ret);
1315 }
1316
1317
1318 /*
1319 * Routine: lck_rw_lock
1320 */
1321 void
1322 lck_rw_lock(
1323 lck_rw_t *lck,
1324 lck_rw_type_t lck_rw_type)
1325 {
1326 if (lck_rw_type == LCK_RW_TYPE_SHARED)
1327 lck_rw_lock_shared(lck);
1328 else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
1329 lck_rw_lock_exclusive(lck);
1330 else
1331 panic("lck_rw_lock(): Invalid RW lock type: %x\n", lck_rw_type);
1332 }
1333
1334
1335 /*
1336 * Routine: lck_rw_lock_shared
1337 */
1338 void
1339 lck_rw_lock_shared(
1340 lck_rw_t *lck)
1341 {
1342 int i;
1343 wait_result_t res;
1344 #if MACH_LDEBUG
1345 int decrementer;
1346 #endif /* MACH_LDEBUG */
1347 boolean_t istate;
1348
1349 istate = lck_interlock_lock(lck);
1350
1351 #if MACH_LDEBUG
1352 decrementer = DECREMENTER_TIMEOUT;
1353 #endif /* MACH_LDEBUG */
1354 while (lck->want_write || lck->want_upgrade) {
1355 i = lock_wait_time[lck->can_sleep ? 1 : 0];
1356
1357 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_START,
1358 (int)lck, lck->want_write, lck->want_upgrade, i, 0);
1359
1360 if (i != 0) {
1361 lck_interlock_unlock(lck, istate);
1362 #if MACH_LDEBUG
1363 if (!--decrementer)
1364 Debugger("timeout - wait no writers");
1365 #endif /* MACH_LDEBUG */
1366 while (--i != 0 && (lck->want_write || lck->want_upgrade))
1367 continue;
1368 istate = lck_interlock_lock(lck);
1369 }
1370
1371 if (lck->can_sleep && (lck->want_write || lck->want_upgrade)) {
1372 lck->waiting = TRUE;
1373 res = assert_wait((event_t) lck, THREAD_UNINT);
1374 if (res == THREAD_WAITING) {
1375 lck_interlock_unlock(lck, istate);
1376 res = thread_block(THREAD_CONTINUE_NULL);
1377 istate = lck_interlock_lock(lck);
1378 }
1379 }
1380 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_END,
1381 (int)lck, lck->want_write, lck->want_upgrade, res, 0);
1382 }
1383
1384 lck->read_count++;
1385
1386 lck_interlock_unlock(lck, istate);
1387 }
1388
1389
1390 /*
1391 * Routine: lck_rw_lock_shared_to_exclusive
1392 * Function:
1393 * Improves a read-only lock to one with
1394 * write permission. If another reader has
1395 * already requested an upgrade to a write lock,
1396 * no lock is held upon return.
1397 *
1398 * Returns TRUE if the upgrade *failed*.
1399 */
1400
1401 boolean_t
1402 lck_rw_lock_shared_to_exclusive(
1403 lck_rw_t *lck)
1404 {
1405 int i;
1406 boolean_t do_wakeup = FALSE;
1407 wait_result_t res;
1408 #if MACH_LDEBUG
1409 int decrementer;
1410 #endif /* MACH_LDEBUG */
1411 boolean_t istate;
1412
1413 istate = lck_interlock_lock(lck);
1414
1415 lck->read_count--;
1416
1417 if (lck->want_upgrade) {
1418 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_START,
1419 (int)lck, lck->read_count, lck->want_upgrade, 0, 0);
1420
1421 /*
1422 * Someone else has requested upgrade.
1423 * Since we've released a read lock, wake
1424 * him up.
1425 */
1426 if (lck->waiting && (lck->read_count == 0)) {
1427 lck->waiting = FALSE;
1428 do_wakeup = TRUE;
1429 }
1430
1431 lck_interlock_unlock(lck, istate);
1432
1433 if (do_wakeup)
1434 thread_wakeup((event_t) lck);
1435
1436 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_END,
1437 (int)lck, lck->read_count, lck->want_upgrade, 0, 0);
1438
1439 return (TRUE);
1440 }
1441
1442 lck->want_upgrade = TRUE;
1443
1444 #if MACH_LDEBUG
1445 decrementer = DECREMENTER_TIMEOUT;
1446 #endif /* MACH_LDEBUG */
1447 while (lck->read_count != 0) {
1448 i = lock_wait_time[lck->can_sleep ? 1 : 0];
1449
1450 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_START,
1451 (int)lck, lck->read_count, i, 0, 0);
1452
1453 if (i != 0) {
1454 lck_interlock_unlock(lck, istate);
1455 #if MACH_LDEBUG
1456 if (!--decrementer)
1457 Debugger("timeout - read_count");
1458 #endif /* MACH_LDEBUG */
1459 while (--i != 0 && lck->read_count != 0)
1460 continue;
1461 istate = lck_interlock_lock(lck);
1462 }
1463
1464 if (lck->can_sleep && lck->read_count != 0) {
1465 lck->waiting = TRUE;
1466 res = assert_wait((event_t) lck, THREAD_UNINT);
1467 if (res == THREAD_WAITING) {
1468 lck_interlock_unlock(lck, istate);
1469 res = thread_block(THREAD_CONTINUE_NULL);
1470 istate = lck_interlock_lock(lck);
1471 }
1472 }
1473 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_END,
1474 (int)lck, lck->read_count, 0, 0, 0);
1475 }
1476
1477 lck_interlock_unlock(lck, istate);
1478
1479 return (FALSE);
1480 }
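/*
 * Illustrative sketch (editor's addition): as with lock_read_to_write(),
 * a TRUE return means the upgrade failed and no lock is held, so the
 * caller has to take the exclusive lock again before touching the data.
 */
#if 0	/* example only */
static void
example_rw_upgrade(lck_rw_t *lck)
{
	lck_rw_lock_shared(lck);
	/* ... decide the data must be changed ... */
	if (lck_rw_lock_shared_to_exclusive(lck)) {
		/* lost the race to another upgrader; nothing is held here */
		lck_rw_lock_exclusive(lck);
	}
	/* exclusive access in either case */
	lck_rw_lock_exclusive_to_shared(lck);	/* optionally downgrade */
	lck_rw_unlock_shared(lck);
}
#endif	/* example only */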
1481
1482 /*
1483 * Routine: lck_rw_lock_exclusive_to_shared
1484 */
1485 void
1486 lck_rw_lock_exclusive_to_shared(
1487 lck_rw_t *lck)
1488 {
1489 boolean_t do_wakeup = FALSE;
1490 boolean_t istate;
1491
1492 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_START,
1493 (int)lck, lck->want_write, lck->want_upgrade, 0, 0);
1494
1495 istate = lck_interlock_lock(lck);
1496
1497 lck->read_count++;
1498 if (lck->want_upgrade)
1499 lck->want_upgrade = FALSE;
1500 else
1501 lck->want_write = FALSE;
1502
1503 if (lck->waiting) {
1504 lck->waiting = FALSE;
1505 do_wakeup = TRUE;
1506 }
1507
1508 lck_interlock_unlock(lck, istate);
1509
1510 if (do_wakeup)
1511 thread_wakeup((event_t) lck);
1512
1513 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_END,
1514 (int)lck, lck->want_write, lck->want_upgrade, lck->read_count, 0);
1515
1516 }
1517
1518
1519 /*
1520 * Routine: lck_rw_try_lock
1521 */
1522 boolean_t
1523 lck_rw_try_lock(
1524 lck_rw_t *lck,
1525 lck_rw_type_t lck_rw_type)
1526 {
1527 if (lck_rw_type == LCK_RW_TYPE_SHARED)
1528 return(lck_rw_try_lock_shared(lck));
1529 else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
1530 return(lck_rw_try_lock_exclusive(lck));
1531 else
1532 panic("lck_rw_try_lock(): Invalid rw lock type: %x\n", lck_rw_type);
1533 return(FALSE);
1534 }
1535
1536 /*
1537 * Routine: lck_rw_try_lock_exclusive
1538 * Function:
1539 * Tries to get a write lock.
1540 *
1541 * Returns FALSE if the lock is not held on return.
1542 */
1543
1544 boolean_t
1545 lck_rw_try_lock_exclusive(
1546 lck_rw_t *lck)
1547 {
1548 boolean_t istate;
1549
1550 istate = lck_interlock_lock(lck);
1551
1552 if (lck->want_write || lck->want_upgrade || lck->read_count) {
1553 /*
1554 * Can't get lock.
1555 */
1556 lck_interlock_unlock(lck, istate);
1557 return(FALSE);
1558 }
1559
1560 /*
1561 * Have lock.
1562 */
1563
1564 lck->want_write = TRUE;
1565
1566 lck_interlock_unlock(lck, istate);
1567
1568 return(TRUE);
1569 }
1570
1571 /*
1572 * Routine: lck_rw_try_lock_shared
1573 * Function:
1574 * Tries to get a read lock.
1575 *
1576 * Returns FALSE if the lock is not held on return.
1577 */
1578
1579 boolean_t
1580 lck_rw_try_lock_shared(
1581 lck_rw_t *lck)
1582 {
1583 boolean_t istate;
1584
1585 istate = lck_interlock_lock(lck);
1586
1587 if (lck->want_write || lck->want_upgrade) {
1588 lck_interlock_unlock(lck, istate);
1589 return(FALSE);
1590 }
1591
1592 lck->read_count++;
1593
1594 lck_interlock_unlock(lck, istate);
1595
1596 return(TRUE);
1597 }
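/*
 * Illustrative sketch (editor's addition): the try variants return FALSE
 * without blocking when a writer or upgrader is pending, so callers need
 * an explicit fallback path.
 */
#if 0	/* example only */
static void
example_rw_try(lck_rw_t *lck)
{
	if (lck_rw_try_lock(lck, LCK_RW_TYPE_SHARED)) {
		/* ... read shared data ... */
		lck_rw_unlock(lck, LCK_RW_TYPE_SHARED);
	} else {
		/* contended: block for the lock instead */
		lck_rw_lock_shared(lck);
		/* ... read shared data ... */
		lck_rw_unlock_shared(lck);
	}
}
#endif	/* example only */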
1598
1599 /*
1600 * Routine: lck_mtx_alloc_init
1601 */
1602 lck_mtx_t *
1603 lck_mtx_alloc_init(
1604 lck_grp_t *grp,
1605 lck_attr_t *attr)
1606 {
1607 lck_mtx_t *lck;
1608
1609 if ((lck = (lck_mtx_t *)kalloc(sizeof(lck_mtx_t))) != 0)
1610 lck_mtx_init(lck, grp, attr);
1611
1612 return(lck);
1613 }
1614
1615 /*
1616 * Routine: lck_mtx_free
1617 */
1618 void
1619 lck_mtx_free(
1620 lck_mtx_t *lck,
1621 lck_grp_t *grp)
1622 {
1623 lck_mtx_destroy(lck, grp);
1624 kfree(lck, sizeof(lck_mtx_t));
1625 }
1626
1627 /*
1628 * Routine: lck_mtx_ext_init
1629 */
1630 static void
1631 lck_mtx_ext_init(
1632 lck_mtx_ext_t *lck,
1633 lck_grp_t *grp,
1634 lck_attr_t *attr)
1635 {
1636 lck->lck_mtx.lck_mtx_ilk = 0;
1637 lck->lck_mtx.lck_mtx_locked = 0;
1638 lck->lck_mtx.lck_mtx_waiters = 0;
1639 lck->lck_mtx.lck_mtx_pri = 0;
1640 lck->lck_mtx_attr = 0;
1641
1642 if ((attr->lck_attr_val) & LCK_ATTR_DEBUG) {
1643 lck->lck_mtx_deb.pc = 0;
1644 lck->lck_mtx_deb.thread = 0;
1645 lck->lck_mtx_deb.type = MUTEX_TAG;
1646 lck->lck_mtx_attr |= LCK_MTX_ATTR_DEBUG;
1647 }
1648
1649 lck->lck_mtx_grp = grp;
1650 }
1651
1652 /*
1653 * Routine: lck_mtx_init
1654 */
1655 void
1656 lck_mtx_init(
1657 lck_mtx_t *lck,
1658 lck_grp_t *grp,
1659 lck_attr_t *attr)
1660 {
1661 lck_mtx_ext_t *lck_ext;
1662
1663 if ((attr != LCK_ATTR_NULL) && ((attr->lck_attr_val) & LCK_ATTR_DEBUG)) {
1664 if ((lck_ext = (lck_mtx_ext_t *)kalloc(sizeof(lck_mtx_ext_t))) != 0) {
1665 lck_mtx_ext_init(lck_ext, grp, attr);
1666 lck->lck_mtx_tag = LCK_MTX_TAG_INDIRECT;
1667 lck->lck_mtx_ptr = lck_ext;
1668 }
1669 } else {
1670 lck->lck_mtx_ilk = 0;
1671 lck->lck_mtx_locked = 0;
1672 lck->lck_mtx_waiters = 0;
1673 lck->lck_mtx_pri = 0;
1674 }
1675 lck_grp_reference(grp);
1676 lck_grp_lckcnt_incr(grp, LCK_TYPE_MTX);
1677 }
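/*
 * Illustrative sketch (editor's addition): passing an attribute with
 * LCK_ATTR_DEBUG makes lck_mtx_init() allocate the extended (indirect)
 * form above.  The lck_attr_* helpers and the lck_mtx_lock()/
 * lck_mtx_unlock() entry points (optimized assembly) are assumed from
 * the rest of the lck API, not defined in this file.
 */
#if 0	/* example only */
static void
example_mtx_usage(lck_grp_t *grp)
{
	lck_attr_t	*attr = lck_attr_alloc_init();	/* assumed API from kern/locks.c */
	lck_mtx_t	*mtx;

	lck_attr_setdebug(attr);			/* assumed helper that sets LCK_ATTR_DEBUG */
	mtx = lck_mtx_alloc_init(grp, attr);

	lck_mtx_lock(mtx);				/* assumed assembly fast path */
	/* ... critical section ... */
	lck_mtx_unlock(mtx);

	lck_mtx_free(mtx, grp);
	lck_attr_free(attr);
}
#endif	/* example only */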
1678
1679 /*
1680 * Routine: lck_mtx_destroy
1681 */
1682 void
1683 lck_mtx_destroy(
1684 lck_mtx_t *lck,
1685 lck_grp_t *grp)
1686 {
1687 boolean_t lck_is_indirect;
1688
1689 if (lck->lck_mtx_tag == LCK_MTX_TAG_DESTROYED)
1690 return;
1691 lck_is_indirect = (lck->lck_mtx_tag == LCK_MTX_TAG_INDIRECT);
1692 lck->lck_mtx_tag = LCK_MTX_TAG_DESTROYED;
1693 if (lck_is_indirect)
1694 kfree(lck->lck_mtx_ptr, sizeof(lck_mtx_ext_t));
1695 lck_grp_lckcnt_decr(grp, LCK_TYPE_MTX);
1696 lck_grp_deallocate(grp);
1697 return;
1698 }
1699
1700 /*
1701 * Routine: lck_mtx_assert
1702 */
1703 void
1704 lck_mtx_assert(
1705 __unused lck_mtx_t *lck,
1706 __unused unsigned int type)
1707 {
1708 }
1709
1710 #if MACH_KDB
1711
1712 void db_show_one_lock(lock_t *);
1713
1714 void
1715 db_show_one_lock(
1716 lock_t *lock)
1717 {
1718 db_printf("Read_count = 0x%x, %swant_upgrade, %swant_write, ",
1719 lock->read_count,
1720 lock->want_upgrade ? "" : "!",
1721 lock->want_write ? "" : "!");
1722 db_printf("%swaiting, %scan_sleep\n",
1723 lock->waiting ? "" : "!", lock->can_sleep ? "" : "!");
1724 db_printf("Interlock:\n");
1725 db_show_one_simple_lock((db_expr_t)simple_lock_addr(lock->interlock),
1726 TRUE, (db_expr_t)0, (char *)0);
1727 }
1728
1729 #endif /* MACH_KDB */
1730
1731 /*
1732 * The C portion of the mutex package. These routines are only invoked
1733 * if the optimized assembler routines can't do the work.
1734 */
1735
1736 /*
1737 * Routine: mutex_alloc
1738 * Function:
1739 * Allocate a mutex for external users who cannot
1740 * hard-code the structure definition into their
1741 * objects.
1742 * For now just use kalloc, but a zone is probably
1743 * warranted.
1744 */
1745 mutex_t *
1746 mutex_alloc(
1747 unsigned short tag)
1748 {
1749 mutex_t *m;
1750
1751 if ((m = (mutex_t *)kalloc(sizeof(mutex_t))) != 0)
1752 mutex_init(m, tag);
1753 return(m);
1754 }
1755
1756 /*
1757 * Routine: mutex_free
1758 * Function:
1759 * Free a mutex allocated for external users.
1760 * For now just use kfree, but a zone is probably
1761 * warranted.
1762 */
1763 void
1764 mutex_free(
1765 mutex_t *m)
1766 {
1767 kfree(m, sizeof(mutex_t));
1768 }
1769
1770 /*
1771 * Routine: _mutex_assert
1772 */
1773 void
1774 _mutex_assert (
1775 mutex_t *mutex,
1776 unsigned int what)
1777 {
1778
1779 thread_t thread = current_thread();
1780 thread_t holder;
1781
1782 if (panicstr != NULL)
1783 return;
1784
1785 holder = (thread_t) mutex->lck_mtx.lck_mtx_locked;
1786
1787 switch (what) {
1788 case MA_OWNED:
1789 if (thread != holder)
1790 panic("mutex %x not owned\n", mutex);
1791 break;
1792
1793 case MA_NOTOWNED:
1794 if (thread == holder)
1795 panic("mutex %x owned\n", mutex);
1796 break;
1797 }
1798
1799 }
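/*
 * Illustrative sketch (editor's addition): _mutex_assert() is normally
 * reached through a mutex_assert()-style wrapper; the two checks it
 * supports are shown below.  mutex_lock()/mutex_unlock() are assumed
 * from the mutex entry points implemented outside this file.
 */
#if 0	/* example only */
static void
example_assert_usage(mutex_t *m)
{
	mutex_lock(m);				/* assumed entry point */
	_mutex_assert(m, MA_OWNED);		/* fine: the caller holds m */
	mutex_unlock(m);
	_mutex_assert(m, MA_NOTOWNED);		/* fine: m has been released */
}
#endif	/* example only */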
1800
1801 #if MACH_KDB
1802 /*
1803 * Routines to print out simple_locks and mutexes in a nicely-formatted
1804 * fashion.
1805 */
1806
1807 char *simple_lock_labels = "ENTRY ILK THREAD DURATION CALLER";
1808 char *mutex_labels = "ENTRY LOCKED WAITERS THREAD CALLER";
1809
1810 void
1811 db_show_one_simple_lock (
1812 db_expr_t addr,
1813 boolean_t have_addr,
1814 db_expr_t count,
1815 char * modif)
1816 {
1817 simple_lock_t saddr = (simple_lock_t)addr;
1818
1819 if (saddr == (simple_lock_t)0 || !have_addr) {
1820 db_error ("No simple_lock\n");
1821 }
1822 #if USLOCK_DEBUG
1823 else if (saddr->lock_type != USLOCK_TAG)
1824 db_error ("Not a simple_lock\n");
1825 #endif /* USLOCK_DEBUG */
1826
1827 db_printf ("%s\n", simple_lock_labels);
1828 db_print_simple_lock (saddr);
1829 }
1830
1831 void
1832 db_print_simple_lock (
1833 simple_lock_t addr)
1834 {
1835
1836 db_printf ("%08x %3d", addr, *hw_lock_addr(addr->interlock));
1837 #if USLOCK_DEBUG
1838 db_printf (" %08x", addr->debug.lock_thread);
1839 db_printf (" %08x ", addr->debug.duration[1]);
1840 db_printsym ((int)addr->debug.lock_pc, DB_STGY_ANY);
1841 #endif /* USLOCK_DEBUG */
1842 db_printf ("\n");
1843 }
1844
1845 void
1846 db_show_one_mutex (
1847 db_expr_t addr,
1848 boolean_t have_addr,
1849 db_expr_t count,
1850 char * modif)
1851 {
1852 mutex_t * maddr = (mutex_t *)addr;
1853
1854 if (maddr == (mutex_t *)0 || !have_addr)
1855 db_error ("No mutex\n");
1856 #if MACH_LDEBUG
1857 else if (maddr->type != MUTEX_TAG)
1858 db_error ("Not a mutex\n");
1859 #endif /* MACH_LDEBUG */
1860
1861 db_printf ("%s\n", mutex_labels);
1862 db_print_mutex (maddr);
1863 }
1864
1865 void
1866 db_print_mutex (
1867 mutex_t * addr)
1868 {
1869 db_printf ("%08x %6d %7d",
1870 addr, addr->lck_mtx.lck_mtx_locked, addr->lck_mtx.lck_mtx_waiters);
1871 #if MACH_LDEBUG
1872 db_printf (" %08x ", addr->thread);
1873 db_printsym (addr->pc, DB_STGY_ANY);
1874 #endif /* MACH_LDEBUG */
1875 db_printf ("\n");
1876 }
1877
1878 #endif /* MACH_KDB */