/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	kern/lock.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Locking primitives implementation
 */
#include <mach_ldebug.h>

#include <kern/lock.h>
#include <kern/etap_macros.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>

#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_print.h>

#include <ppc/Firmware.h>
#include <ppc/POWERMAC/mp/MPPlugIn.h>

#include <sys/kdebug.h>
#define	ANY_LOCK_DEBUG	(USLOCK_DEBUG || LOCK_DEBUG || MUTEX_DEBUG)

/*
 *	Some portions of the lock debugging code must run with
 *	interrupts disabled.  This can be machine-dependent,
 *	but we don't have any good hooks for that at the moment.
 *	If your architecture is different, add a machine-dependent
 *	ifdef here for these macros.		XXX
 */

#define	DISABLE_INTERRUPTS(s)	s = ml_set_interrupts_enabled(FALSE)
#define	ENABLE_INTERRUPTS(s)	(void)ml_set_interrupts_enabled(s)
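
/*
 *	Illustrative sketch (not part of the build):
 *	ml_set_interrupts_enabled() returns the previous interrupt
 *	state, so these macros bracket a critical section by saving
 *	that state in a caller-supplied variable:
 *
 *		boolean_t s;
 *		DISABLE_INTERRUPTS(s);
 *		... touch per-cpu debug state ...
 *		ENABLE_INTERRUPTS(s);
 */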
#if	NCPUS > 1

/*
 *	Time we loop without holding the interlock.
 *	The first entry is for when we cannot sleep, the second
 *	for when our thread can go to sleep (loop less).
 *	We shouldn't retake the interlock frequently
 *	if we cannot go to sleep, since it interferes with
 *	any other processors.  In particular, 100 is too small
 *	a number for powerpc MP systems because of cache
 *	coherency issues and differing lock fetch times between
 *	the processors.
 */
unsigned int lock_wait_time[2] = { (unsigned int)-1, 100 };

#else	/* NCPUS > 1 */

/*
 *	It is silly to spin on a uni-processor as if we
 *	thought something magical would happen to the
 *	want_write bit while we are executing.
 */
unsigned int lock_wait_time[2] = { 0, 0 };

#endif	/* NCPUS > 1 */
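
/*
 *	Illustrative use (as in lock_write() below, not an addition to
 *	the build): the spin count is selected by whether the caller
 *	may block:
 *
 *		i = lock_wait_time[l->can_sleep ? 1 : 0];
 */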
#if	MACH_KDB

/* Forward declarations */

void	db_print_simple_lock(
			simple_lock_t	addr);

void	db_print_mutex(
			mutex_t		* addr);

#endif	/* MACH_KDB */

#if	USLOCK_DEBUG
/*
 *	Perform simple lock checks.
 */
int	uslock_check = 1;
int	max_lock_loops	= 100000000;
decl_simple_lock_data(extern , printf_lock)
decl_simple_lock_data(extern , panic_lock)
#if	MACH_KDB && NCPUS > 1
decl_simple_lock_data(extern , kdb_lock)
#endif	/* MACH_KDB && NCPUS > 1 */
#endif	/* USLOCK_DEBUG */
/*
 *	We often want to know the addresses of the callers
 *	of the various lock routines.  However, this information
 *	is only used for debugging and statistics.
 */
typedef void	*pc_t;
#define	INVALID_PC	((void *) VM_MAX_KERNEL_ADDRESS)
#define	INVALID_THREAD	((void *) VM_MAX_KERNEL_ADDRESS)

#if	ANY_LOCK_DEBUG || ETAP_LOCK_TRACE
#define	OBTAIN_PC(pc,l)	((pc) = (void *) GET_RETURN_PC(&(l)))
#else	/* ANY_LOCK_DEBUG || ETAP_LOCK_TRACE */
#ifdef	lint
/*
 *	Eliminate lint complaints about unused local pc variables.
 */
#define	OBTAIN_PC(pc,l)	++pc
#else	/* lint */
#define	OBTAIN_PC(pc,l)
#endif	/* lint */
#endif	/* ANY_LOCK_DEBUG || ETAP_LOCK_TRACE */
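
/*
 *	For example (illustrative), a debug build of usimple_lock()
 *	below opens with
 *
 *		pc_t	pc;
 *		OBTAIN_PC(pc, l);
 *
 *	and records pc in the lock's debug fields; with debugging and
 *	tracing off, the macro expands to nothing.
 */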
/*
 * #ifndef	USIMPLE_LOCK_CALLS
 * The i386 production version of usimple_locks isn't ready yet.
 */
/*
 *	Portable lock package implementation of usimple_locks.
 */

#if	ETAP_LOCK_TRACE
#define	ETAPCALL(stmt)	stmt
void	etap_simplelock_init(simple_lock_t, etap_event_t);
void	etap_simplelock_unlock(simple_lock_t);
void	etap_simplelock_hold(simple_lock_t, pc_t, etap_time_t);
etap_time_t etap_simplelock_miss(simple_lock_t);

void	etap_mutex_init(mutex_t *, etap_event_t);
void	etap_mutex_unlock(mutex_t *);
void	etap_mutex_hold(mutex_t *, pc_t, etap_time_t);
etap_time_t etap_mutex_miss(mutex_t *);
#else	/* ETAP_LOCK_TRACE */
#define	ETAPCALL(stmt)
#endif	/* ETAP_LOCK_TRACE */
#if	USLOCK_DEBUG
#define	USLDBG(stmt)	stmt
void		usld_lock_init(usimple_lock_t, etap_event_t);
void		usld_lock_pre(usimple_lock_t, pc_t);
void		usld_lock_post(usimple_lock_t, pc_t);
void		usld_unlock(usimple_lock_t, pc_t);
void		usld_lock_try_pre(usimple_lock_t, pc_t);
void		usld_lock_try_post(usimple_lock_t, pc_t);
void		usld_lock_held(usimple_lock_t);
void		usld_lock_none_held(void);
int		usld_lock_common_checks(usimple_lock_t, char *);
#else	/* USLOCK_DEBUG */
#define	USLDBG(stmt)
#endif	/* USLOCK_DEBUG */
/*
 *	Initialize a usimple_lock.
 *
 *	No change in preemption state.
 */
void
usimple_lock_init(
	usimple_lock_t	l,
	etap_event_t	event)
{
	USLDBG(usld_lock_init(l, event));
	ETAPCALL(etap_simplelock_init((l), (event)));
	hw_lock_init(&l->interlock);
}
/*
 *	Acquire a usimple_lock.
 *
 *	Returns with preemption disabled.  Note
 *	that the hw_lock routines are responsible for
 *	maintaining preemption state.
 */
void
usimple_lock(
	usimple_lock_t	l)
{
	pc_t		pc;
#if	ETAP_LOCK_TRACE
	etap_time_t	start_wait_time;
	int		no_miss_info = 0;
#endif	/* ETAP_LOCK_TRACE */
#if	USLOCK_DEBUG
	int		count = 0;
#endif	/* USLOCK_DEBUG */

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_pre(l, pc));
#if	ETAP_LOCK_TRACE
	ETAP_TIME_CLEAR(start_wait_time);
#endif	/* ETAP_LOCK_TRACE */

	if (!hw_lock_to(&l->interlock, LockTimeOut))	/* Try to get the lock with a timeout */
		panic("simple lock deadlock detection - l=%08X, cpu=%d, ret=%08X",
		      l, cpu_number(), pc);

	ETAPCALL(etap_simplelock_hold(l, pc, start_wait_time));
	USLDBG(usld_lock_post(l, pc));
}
/*
 *	Release a usimple_lock.
 *
 *	Returns with preemption enabled.  Note
 *	that the hw_lock routines are responsible for
 *	maintaining preemption state.
 */
void
usimple_unlock(
	usimple_lock_t	l)
{
	pc_t	pc;

//	checkNMI();	/* (TEST/DEBUG) */

	OBTAIN_PC(pc, l);
	USLDBG(usld_unlock(l, pc));
	ETAPCALL(etap_simplelock_unlock(l));
	hw_lock_unlock(&l->interlock);
}
/*
 *	Conditionally acquire a usimple_lock.
 *
 *	On success, returns with preemption disabled.
 *	On failure, returns with preemption in the same state
 *	as when first invoked.  Note that the hw_lock routines
 *	are responsible for maintaining preemption state.
 *
 *	XXX No stats are gathered on a miss; I preserved this
 *	behavior from the original assembly-language code, but
 *	doesn't it make sense to log misses?  XXX
 */
unsigned int
usimple_lock_try(
	usimple_lock_t	l)
{
	pc_t		pc;
	unsigned int	success;
	etap_time_t	zero_time;

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_try_pre(l, pc));
	if ((success = hw_lock_try(&l->interlock))) {
		USLDBG(usld_lock_try_post(l, pc));
		ETAP_TIME_CLEAR(zero_time);
		ETAPCALL(etap_simplelock_hold(l, pc, zero_time));
	}
	return success;
}
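
/*
 *	Illustrative caller sketch (not part of the build): try-acquire
 *	with a fallback, for a hypothetical usimple_lock "mylock":
 *
 *		if (usimple_lock_try(&mylock)) {
 *			... critical section ...
 *			usimple_unlock(&mylock);
 *		} else {
 *			... take a slow path; preemption is unchanged ...
 *		}
 */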
#if	ETAP_LOCK_TRACE
void
simple_lock_no_trace(
	simple_lock_t	l)
{
	pc_t	pc;

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_pre(l, pc));
	while (!hw_lock_try(&l->interlock)) {
		while (hw_lock_held(&l->interlock)) {
			/*
			 *	Spin watching the lock value in cache,
			 *	without consuming external bus cycles.
			 *	On most SMP architectures, the atomic
			 *	instruction(s) used by hw_lock_try
			 *	cost much, much more than an ordinary
			 *	memory read.
			 */
		}
	}
	USLDBG(usld_lock_post(l, pc));
}
void
simple_unlock_no_trace(
	simple_lock_t	l)
{
	pc_t	pc;

	OBTAIN_PC(pc, l);
	USLDBG(usld_unlock(l, pc));
	hw_lock_unlock(&l->interlock);
}
unsigned int
simple_lock_try_no_trace(
	simple_lock_t	l)
{
	pc_t		pc;
	unsigned int	success;

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_try_pre(l, pc));
	if ((success = hw_lock_try(&l->interlock))) {
		USLDBG(usld_lock_try_post(l, pc));
	}
	return success;
}
#endif	/* ETAP_LOCK_TRACE */
#if	USLOCK_DEBUG
/*
 *	Verify that the lock is locked and owned by
 *	the current thread.
 */
void
usimple_lock_held(
	usimple_lock_t	l)
{
	usld_lock_held(l);
}


/*
 *	Verify that no usimple_locks are held by
 *	this processor.  Typically used in a
 *	trap handler when returning to user mode
 *	or in a path known to relinquish the processor.
 */
void
usimple_lock_none_held(void)
{
	usld_lock_none_held();
}
#endif	/* USLOCK_DEBUG */
#if	USLOCK_DEBUG
/*
 *	States of a usimple_lock.  The default when initializing
 *	a usimple_lock is setting it up for debug checking.
 */
#define	USLOCK_CHECKED		0x0001		/* lock is being checked */
#define	USLOCK_TAKEN		0x0002		/* lock has been taken */
#define	USLOCK_INIT		0xBAA0		/* lock has been initialized */
#define	USLOCK_INITIALIZED	(USLOCK_INIT|USLOCK_CHECKED)
#define	USLOCK_CHECKING(l)	(uslock_check &&			\
				 ((l)->debug.state & USLOCK_CHECKED))
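
/*
 *	Worked example (illustrative): a lock initialized with checking
 *	enabled starts with debug.state == USLOCK_INITIALIZED, i.e.
 *	0xBAA0 | 0x0001 == 0xBAA1; acquiring it then ORs in
 *	USLOCK_TAKEN, yielding 0xBAA3, and releasing it clears that
 *	bit again.
 */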
/*
 *	Maintain a per-cpu stack of acquired usimple_locks.
 */
void	usl_stack_push(usimple_lock_t, int);
void	usl_stack_pop(usimple_lock_t, int);

/*
 *	Trace activities of a particularly interesting lock.
 */
void	usl_trace(usimple_lock_t, int, pc_t, const char *);
/*
 *	Initialize the debugging information contained
 *	in a usimple_lock.
 */
void
usld_lock_init(
	usimple_lock_t	l,
	etap_event_t	type)
{
	if (l == USIMPLE_LOCK_NULL)
		panic("lock initialization:  null lock pointer");
	l->lock_type = USLOCK_TAG;
	l->debug.state = uslock_check ? USLOCK_INITIALIZED : 0;
	l->debug.lock_cpu = l->debug.unlock_cpu = 0;
	l->debug.lock_pc = l->debug.unlock_pc = INVALID_PC;
	l->debug.lock_thread = l->debug.unlock_thread = INVALID_THREAD;
	l->debug.duration[0] = l->debug.duration[1] = 0;
}
/*
 *	These checks apply to all usimple_locks, not just
 *	those with USLOCK_CHECKED turned on.
 */
int
usld_lock_common_checks(
	usimple_lock_t	l,
	char		*caller)
{
	if (l == USIMPLE_LOCK_NULL)
		panic("%s:  null lock pointer", caller);
	if (l->lock_type != USLOCK_TAG)
		panic("%s:  0x%x is not a usimple lock", caller, (integer_t) l);
	if (!(l->debug.state & USLOCK_INIT))
		panic("%s:  0x%x is not an initialized lock",
		      caller, (integer_t) l);
	return USLOCK_CHECKING(l);
}
/*
 *	Debug checks on a usimple_lock just before attempting
 *	to acquire it.
 */
void
usld_lock_pre(
	usimple_lock_t	l,
	pc_t		pc)
{
	char	*caller = "usimple_lock";

#if 0
	printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n",	/* (TEST/DEBUG) */
		l->debug.lock_pc,
		l->debug.lock_thread,
		l->debug.state,
		l->debug.lock_cpu,
		l->debug.unlock_pc,
		l->debug.unlock_cpu,
		l->debug.unlock_thread,
		caller);
#endif

	if (!usld_lock_common_checks(l, caller))
		return;

/*
 *	Note that we have a weird case where we are getting a lock when we are
 *	in the process of putting the system to sleep.  We are running with no
 *	current threads, therefore we can't tell if we are trying to retake a lock
 *	we have or someone on the other processor has it.  Therefore we just
 *	ignore this test if the locking thread is 0.
 */

	if ((l->debug.state & USLOCK_TAKEN) && l->debug.lock_thread &&
	    l->debug.lock_thread == (void *) current_thread()) {
		printf("%s:  lock 0x%x already locked (at 0x%x) by",
		       caller, (integer_t) l, l->debug.lock_pc);
		printf(" current thread 0x%x (new attempt at pc 0x%x)\n",
		       l->debug.lock_thread, pc);
		panic(caller);
	}
	mp_disable_preemption();
	usl_trace(l, cpu_number(), pc, caller);
	mp_enable_preemption();
}
/*
 *	Debug checks on a usimple_lock just after acquiring it.
 *
 *	Pre-emption has been disabled at this point,
 *	so we are safe in using cpu_number.
 */
void
usld_lock_post(
	usimple_lock_t	l,
	pc_t		pc)
{
	register int	mycpu;
	char	*caller = "successful usimple_lock";

#if 0
	printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n",	/* (TEST/DEBUG) */
		l->debug.lock_pc,
		l->debug.lock_thread,
		l->debug.state,
		l->debug.lock_cpu,
		l->debug.unlock_pc,
		l->debug.unlock_cpu,
		l->debug.unlock_thread,
		caller);
#endif

	if (!usld_lock_common_checks(l, caller))
		return;

	if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
		panic("%s:  lock 0x%x became uninitialized",
		      caller, (integer_t) l);
	if ((l->debug.state & USLOCK_TAKEN))
		panic("%s:  lock 0x%x became TAKEN by someone else",
		      caller, (integer_t) l);

	mycpu = cpu_number();
	l->debug.lock_thread = (void *)current_thread();
	l->debug.state |= USLOCK_TAKEN;
	l->debug.lock_pc = pc;
	l->debug.lock_cpu = mycpu;

	usl_stack_push(l, mycpu);
	usl_trace(l, mycpu, pc, caller);
}
/*
 *	Debug checks on a usimple_lock just before
 *	releasing it.  Note that the caller has not
 *	yet released the hardware lock.
 *
 *	Preemption is still disabled, so there's
 *	no problem using cpu_number.
 */
void
usld_unlock(
	usimple_lock_t	l,
	pc_t		pc)
{
	register int	mycpu;
	char	*caller = "usimple_unlock";

#if 0
	printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n",	/* (TEST/DEBUG) */
		l->debug.lock_pc,
		l->debug.lock_thread,
		l->debug.state,
		l->debug.lock_cpu,
		l->debug.unlock_pc,
		l->debug.unlock_cpu,
		l->debug.unlock_thread,
		caller);
#endif

	if (!usld_lock_common_checks(l, caller))
		return;

	mycpu = cpu_number();

	if (!(l->debug.state & USLOCK_TAKEN))
		panic("%s:  lock 0x%x hasn't been taken",
		      caller, (integer_t) l);
	if (l->debug.lock_thread != (void *) current_thread())
		panic("%s:  unlocking lock 0x%x, owned by thread 0x%x",
		      caller, (integer_t) l, l->debug.lock_thread);
	if (l->debug.lock_cpu != mycpu) {
		printf("%s:  unlocking lock 0x%x on cpu 0x%x",
		       caller, (integer_t) l, mycpu);
		printf(" (acquired on cpu 0x%x)\n", l->debug.lock_cpu);
		panic(caller);
	}
	usl_trace(l, mycpu, pc, caller);
	usl_stack_pop(l, mycpu);

	l->debug.unlock_thread = l->debug.lock_thread;
	l->debug.lock_thread = INVALID_THREAD;
	l->debug.state &= ~USLOCK_TAKEN;
	l->debug.unlock_pc = pc;
	l->debug.unlock_cpu = mycpu;
}
/*
 *	Debug checks on a usimple_lock just before
 *	attempting to acquire it.
 *
 *	Preemption isn't guaranteed to be disabled.
 */
void
usld_lock_try_pre(
	usimple_lock_t	l,
	pc_t		pc)
{
	char	*caller = "usimple_lock_try";

	if (!usld_lock_common_checks(l, caller))
		return;
	mp_disable_preemption();
	usl_trace(l, cpu_number(), pc, caller);
	mp_enable_preemption();
}
/*
 *	Debug checks on a usimple_lock just after
 *	successfully attempting to acquire it.
 *
 *	Preemption has been disabled by the
 *	lock acquisition attempt, so it's safe
 *	to use cpu_number.
 */
void
usld_lock_try_post(
	usimple_lock_t	l,
	pc_t		pc)
{
	register int	mycpu;
	char	*caller = "successful usimple_lock_try";

	if (!usld_lock_common_checks(l, caller))
		return;

	if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
		panic("%s:  lock 0x%x became uninitialized",
		      caller, (integer_t) l);
	if ((l->debug.state & USLOCK_TAKEN))
		panic("%s:  lock 0x%x became TAKEN by someone else",
		      caller, (integer_t) l);

	mycpu = cpu_number();
	l->debug.lock_thread = (void *) current_thread();
	l->debug.state |= USLOCK_TAKEN;
	l->debug.lock_pc = pc;
	l->debug.lock_cpu = mycpu;

#if 0
	printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n",	/* (TEST/DEBUG) */
		l->debug.lock_pc,
		l->debug.lock_thread,
		l->debug.state,
		l->debug.lock_cpu,
		l->debug.unlock_pc,
		l->debug.unlock_cpu,
		l->debug.unlock_thread,
		caller);
#endif

	usl_stack_push(l, mycpu);
	usl_trace(l, mycpu, pc, caller);
}
/*
 *	Determine whether the lock in question is owned
 *	by the current thread.
 */
void
usld_lock_held(
	usimple_lock_t	l)
{
	char	*caller = "usimple_lock_held";

#if 0
	printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n",	/* (TEST/DEBUG) */
		l->debug.lock_pc,
		l->debug.lock_thread,
		l->debug.state,
		l->debug.lock_cpu,
		l->debug.unlock_pc,
		l->debug.unlock_cpu,
		l->debug.unlock_thread,
		caller);
#endif

	if (!usld_lock_common_checks(l, caller))
		return;

	if (!(l->debug.state & USLOCK_TAKEN))
		panic("%s:  lock 0x%x hasn't been taken",
		      caller, (integer_t) l);
	if (l->debug.lock_thread != (void *) current_thread())
		panic("%s:  lock 0x%x is owned by thread 0x%x", caller,
		      (integer_t) l, (integer_t) l->debug.lock_thread);

	/*
	 *	The usimple_lock is active, so preemption
	 *	is disabled and the current cpu should
	 *	match the one recorded at lock acquisition time.
	 */
	if (l->debug.lock_cpu != cpu_number())
		panic("%s:  current cpu 0x%x isn't acquiring cpu 0x%x",
		      caller, cpu_number(), (integer_t) l->debug.lock_cpu);
}
/*
 *	Per-cpu stack of currently active usimple_locks.
 *	Requires spl protection so that interrupt-level
 *	locks plug-n-play with their thread-context friends.
 */
#define	USLOCK_STACK_DEPTH	20
usimple_lock_t	uslock_stack[NCPUS][USLOCK_STACK_DEPTH];
unsigned int	uslock_stack_index[NCPUS];
boolean_t	uslock_stack_enabled = FALSE;
/*
 *	Record a usimple_lock just acquired on
 *	the current processor.
 *
 *	Preemption has been disabled by lock
 *	acquisition, so it's safe to use the cpu number
 *	specified by the caller.
 */
void
usl_stack_push(
	usimple_lock_t	l,
	int		mycpu)
{
	boolean_t	s;

	if (uslock_stack_enabled == FALSE)
		return;

	DISABLE_INTERRUPTS(s);
	assert(uslock_stack_index[mycpu] >= 0);
	assert(uslock_stack_index[mycpu] < USLOCK_STACK_DEPTH);
	if (uslock_stack_index[mycpu] >= USLOCK_STACK_DEPTH) {
		printf("usl_stack_push (cpu 0x%x):  too many locks (%d)",
		       mycpu, uslock_stack_index[mycpu]);
		printf(" disabling stacks\n");
		uslock_stack_enabled = FALSE;
		ENABLE_INTERRUPTS(s);
		return;
	}
	uslock_stack[mycpu][uslock_stack_index[mycpu]] = l;
	uslock_stack_index[mycpu]++;
	ENABLE_INTERRUPTS(s);
}
/*
 *	Eliminate the entry for a usimple_lock
 *	that had been active on the current processor.
 *
 *	Preemption has been disabled by lock
 *	acquisition, and we haven't yet actually
 *	released the hardware lock associated with
 *	this usimple_lock, so it's safe to use the
 *	cpu number supplied by the caller.
 */
void
usl_stack_pop(
	usimple_lock_t	l,
	int		mycpu)
{
	unsigned int	i, index;
	boolean_t	s;

	if (uslock_stack_enabled == FALSE)
		return;

	DISABLE_INTERRUPTS(s);
	assert(uslock_stack_index[mycpu] > 0);
	assert(uslock_stack_index[mycpu] <= USLOCK_STACK_DEPTH);
	if (uslock_stack_index[mycpu] == 0) {
		printf("usl_stack_pop (cpu 0x%x):  not enough locks (%d)",
		       mycpu, uslock_stack_index[mycpu]);
		printf(" disabling stacks\n");
		uslock_stack_enabled = FALSE;
		ENABLE_INTERRUPTS(s);
		return;
	}
	index = --uslock_stack_index[mycpu];
	for (i = 0; i <= index; ++i) {
		if (uslock_stack[mycpu][i] == l) {
			if (i != index)
				uslock_stack[mycpu][i] =
					uslock_stack[mycpu][index];
			ENABLE_INTERRUPTS(s);
			return;
		}
	}
	ENABLE_INTERRUPTS(s);
	panic("usl_stack_pop:  can't find usimple_lock 0x%x", l);
}
/*
 *	Determine whether any usimple_locks are currently held.
 *
 *	Caller's preemption state is uncertain.  If
 *	preemption has been disabled, this check is accurate.
 *	Otherwise, this check is just a guess.  We do the best
 *	we can by disabling scheduler interrupts, so at least
 *	the check is accurate w.r.t. whatever cpu we're running
 *	on while in this routine.
 */
void
usld_lock_none_held(void)
{
	register int	mycpu;
	boolean_t	s;
	unsigned int	locks_held;
	char	*caller = "usimple_lock_none_held";

	DISABLE_INTERRUPTS(s);
	mp_disable_preemption();
	mycpu = cpu_number();
	locks_held = uslock_stack_index[mycpu];
	mp_enable_preemption();
	ENABLE_INTERRUPTS(s);

	if (locks_held > 0)
		panic("%s:  no locks should be held (0x%x locks held)",
		      caller, (integer_t) locks_held);
}
/*
 *	For very special cases, set traced_lock to point to a
 *	specific lock of interest.  The result is a series of
 *	XPRs showing lock operations on that lock.  The lock_seq
 *	value is used to show the order of those operations.
 */
usimple_lock_t	traced_lock;
unsigned int	lock_seq;

void
usl_trace(
	usimple_lock_t	l,
	int		mycpu,
	pc_t		pc,
	const char *	op_name)
{
	if (traced_lock == l) {
		XPR(XPR_SLOCK,
		    "seq %d, cpu %d, %s @ %x\n",
		    (integer_t) lock_seq, (integer_t) mycpu,
		    (integer_t) op_name, (integer_t) pc, 0);
		lock_seq++;
	}
}
#if	MACH_KDB
#define	printf	kdbprintf
void	db_show_all_slocks(void);
void
db_show_all_slocks(void)
{
	unsigned int	i, index;
	int		mycpu = cpu_number();
	usimple_lock_t	l;

	if (uslock_stack_enabled == FALSE) {
		printf("Lock stack not enabled\n");
		return;
	}

#if 0
	if (!mach_slocks_init)
		iprintf("WARNING: simple locks stack may not be accurate\n");
#endif
	assert(uslock_stack_index[mycpu] >= 0);
	assert(uslock_stack_index[mycpu] <= USLOCK_STACK_DEPTH);
	index = uslock_stack_index[mycpu];
	for (i = 0; i < index; ++i) {
		l = uslock_stack[mycpu][i];
		iprintf("%d: ", i);
		db_printsym((vm_offset_t)l, DB_STGY_ANY);
		if (l->debug.lock_pc != INVALID_PC) {
			printf(" locked by ");
			db_printsym((int)l->debug.lock_pc, DB_STGY_PROC);
		}
		printf("\n");
	}
}
#endif	/* MACH_KDB */
#endif	/* USLOCK_DEBUG */

/* #endif	USIMPLE_LOCK_CALLS */
/*
 *	Routine:	lock_alloc
 *	Function:
 *		Allocate a lock for external users who cannot
 *		hard-code the structure definition into their
 *		objects.
 *		For now just use kalloc, but a zone is probably
 *		warranted.
 */
lock_t *
lock_alloc(
	boolean_t	can_sleep,
	etap_event_t	event,
	etap_event_t	i_event)
{
	lock_t		*l;

	if ((l = (lock_t *)kalloc(sizeof(lock_t))) != 0)
		lock_init(l, can_sleep, event, i_event);
	return(l);
}
/*
 *	Routine:	lock_free
 *	Function:
 *		Free a lock allocated for external users.
 *		For now just use kfree, but a zone is probably
 *		warranted.
 */
void
lock_free(
	lock_t		*l)
{
	kfree((vm_offset_t)l, sizeof(lock_t));
}
/*
 *	Routine:	lock_init
 *	Function:
 *		Initialize a lock; required before use.
 *		Note that clients declare the "struct lock"
 *		variables and then initialize them, rather
 *		than getting a new one from this module.
 */
void
lock_init(
	lock_t		*l,
	boolean_t	can_sleep,
	etap_event_t	event,
	etap_event_t	i_event)
{
	(void) memset((void *) l, 0, sizeof(lock_t));

#if	ETAP_LOCK_TRACE
	etap_event_table_assign(&l->u.event_table_chain, event);
	l->u.s.start_list = SD_ENTRY_NULL;
#endif	/* ETAP_LOCK_TRACE */

	simple_lock_init(&l->interlock, i_event);
	l->want_write = FALSE;
	l->want_upgrade = FALSE;
	l->read_count = 0;
	l->can_sleep = can_sleep;

#if	ETAP_LOCK_ACCUMULATE
	l->cbuff_write = etap_cbuff_reserve(lock_event_table(l));
	if (l->cbuff_write != CBUFF_ENTRY_NULL) {
		l->cbuff_write->event    = event;
		l->cbuff_write->instance = (unsigned long) l;
		l->cbuff_write->kind     = WRITE_LOCK;
	}
	l->cbuff_read = CBUFF_ENTRY_NULL;
#endif	/* ETAP_LOCK_ACCUMULATE */
}
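
/*
 *	Illustrative client sketch (not part of the build; the ETAP
 *	event arguments shown are placeholders): the lock is declared
 *	by the client and initialized before first use:
 *
 *		struct lock	my_lock;
 *
 *		lock_init(&my_lock, TRUE, my_event, my_interlock_event);
 *		lock_write(&my_lock);
 *		... exclusive access ...
 *		lock_done(&my_lock);
 */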
/*
 *	Sleep locks.  These use the same data structure and algorithm
 *	as the spin locks, but the process sleeps while it is waiting
 *	for the lock.  These work on uniprocessor systems.
 */

#define DECREMENTER_TIMEOUT	1000000
void
lock_write(
	register lock_t	* l)
{
	register int	   i;
	start_data_node_t  entry     = {0};
	boolean_t	   lock_miss = FALSE;
	unsigned short	   dynamic   = 0;
	unsigned short	   trace     = 0;
	etap_time_t	   total_time;
	etap_time_t	   stop_wait_time;
	pc_t		   pc;
#if	MACH_LDEBUG
	int		   decrementer;
#endif	/* MACH_LDEBUG */

	ETAP_STAMP(lock_event_table(l), trace, dynamic);
	ETAP_CREATE_ENTRY(entry, trace);
	MON_ASSIGN_PC(entry->start_pc, pc, trace);

	simple_lock(&l->interlock);

	/*
	 *	Link the new start_list entry
	 */
	ETAP_LINK_ENTRY(l, entry, trace);

#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */

	/*
	 *	Try to acquire the want_write bit.
	 */
	while (l->want_write) {
		if (!lock_miss) {
			ETAP_CONTENTION_TIMESTAMP(entry, trace);
			lock_miss = TRUE;
		}

		i = lock_wait_time[l->can_sleep ? 1 : 0];
		if (i != 0) {
			simple_unlock(&l->interlock);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - want_write");
#endif	/* MACH_LDEBUG */
			while (--i != 0 && l->want_write)
				continue;
			simple_lock(&l->interlock);
		}

		if (l->can_sleep && l->want_write) {
			l->waiting = TRUE;
			ETAP_SET_REASON(current_thread(),
					BLOCKED_ON_COMPLEX_LOCK);
			thread_sleep_simple_lock((event_t) l,
					simple_lock_addr(l->interlock),
					THREAD_UNINT);
			/* interlock relocked */
		}
	}
	l->want_write = TRUE;

	/* Wait for readers (and upgrades) to finish */

#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */
	while ((l->read_count != 0) || l->want_upgrade) {
		if (!lock_miss) {
			ETAP_CONTENTION_TIMESTAMP(entry, trace);
			lock_miss = TRUE;
		}

		i = lock_wait_time[l->can_sleep ? 1 : 0];
		if (i != 0) {
			simple_unlock(&l->interlock);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - wait for readers");
#endif	/* MACH_LDEBUG */
			while (--i != 0 && (l->read_count != 0 ||
					    l->want_upgrade))
				continue;
			simple_lock(&l->interlock);
		}

		if (l->can_sleep && (l->read_count != 0 || l->want_upgrade)) {
			l->waiting = TRUE;
			ETAP_SET_REASON(current_thread(),
					BLOCKED_ON_COMPLEX_LOCK);
			thread_sleep_simple_lock((event_t) l,
					simple_lock_addr(l->interlock),
					THREAD_UNINT);
			/* interlock relocked */
		}
	}

	/*
	 *	do not collect wait data if either the lock
	 *	was free or no wait traces are enabled.
	 */

	if (lock_miss && ETAP_CONTENTION_ENABLED(trace)) {
		ETAP_TIMESTAMP(stop_wait_time);
		ETAP_TOTAL_TIME(total_time,
				stop_wait_time,
				entry->start_wait_time);
		CUM_WAIT_ACCUMULATE(l->cbuff_write, total_time, dynamic, trace);
		MON_DATA_COLLECT(l,
				 entry,
				 total_time,
				 WRITE_LOCK,
				 MON_CONTENTION,
				 trace);
	}

	simple_unlock(&l->interlock);

	/*
	 *	Set start hold time if some type of hold tracing is enabled.
	 *
	 *	Note: if the stop_wait_time was already stamped, use
	 *	      it as the start_hold_time instead of doing an
	 *	      expensive bus access.
	 */

	if (lock_miss && ETAP_CONTENTION_ENABLED(trace))
		ETAP_COPY_START_HOLD_TIME(entry, stop_wait_time, trace);
	else
		ETAP_DURATION_TIMESTAMP(entry, trace);
}
void
lock_done(
	register lock_t	* l)
{
	boolean_t	   do_wakeup = FALSE;
	start_data_node_t  entry;
	unsigned short	   dynamic   = 0;
	unsigned short	   trace     = 0;
	etap_time_t	   stop_hold_time;
	etap_time_t	   total_time;
	unsigned long	   lock_kind;
	pc_t		   pc;

	ETAP_STAMP(lock_event_table(l), trace, dynamic);

	simple_lock(&l->interlock);

	if (l->read_count != 0) {
		l->read_count--;
		lock_kind = READ_LOCK;
	}
	else
	if (l->want_upgrade) {
		l->want_upgrade = FALSE;
		lock_kind = WRITE_LOCK;
	}
	else {
		l->want_write = FALSE;
		lock_kind = WRITE_LOCK;
	}

	/*
	 *	There is no reason to wakeup a waiting thread
	 *	if the read-count is non-zero.  Consider:
	 *		we must be dropping a read lock
	 *		threads are waiting only if one wants a write lock
	 *		if there are still readers, they can't proceed
	 */

	if (l->waiting && (l->read_count == 0)) {
		l->waiting = FALSE;
		do_wakeup = TRUE;
	}
	/*
	 *	Collect hold data if hold tracing is
	 *	enabled.
	 *
	 *	NOTE: All complex locks whose tracing was on when the
	 *	      lock was acquired will have an entry in the start_data
	 *	      list.
	 */
	ETAP_UNLINK_ENTRY(l, entry);
	if (ETAP_DURATION_ENABLED(trace) && entry != SD_ENTRY_NULL) {
		ETAP_TIMESTAMP(stop_hold_time);
		ETAP_TOTAL_TIME(total_time,
				stop_hold_time,
				entry->start_hold_time);

		if (lock_kind & WRITE_LOCK)
			CUM_HOLD_ACCUMULATE(l->cbuff_write,
					    total_time,
					    dynamic,
					    trace);
		else {
			CUM_READ_ENTRY_RESERVE(l, l->cbuff_read, trace);
			CUM_HOLD_ACCUMULATE(l->cbuff_read,
					    total_time,
					    dynamic,
					    trace);
		}
		MON_ASSIGN_PC(entry->end_pc, pc, trace);
		MON_DATA_COLLECT(l, entry,
				 total_time,
				 lock_kind,
				 MON_DURATION,
				 trace);
	}

	simple_unlock(&l->interlock);

	ETAP_DESTROY_ENTRY(entry);

	if (do_wakeup)
		thread_wakeup((event_t) l);
}
void
lock_read(
	register lock_t	* l)
{
	register int	   i;
	start_data_node_t  entry     = {0};
	boolean_t	   lock_miss = FALSE;
	unsigned short	   dynamic   = 0;
	unsigned short	   trace     = 0;
	etap_time_t	   total_time;
	etap_time_t	   stop_wait_time;
	pc_t		   pc;
#if	MACH_LDEBUG
	int		   decrementer;
#endif	/* MACH_LDEBUG */

	ETAP_STAMP(lock_event_table(l), trace, dynamic);
	ETAP_CREATE_ENTRY(entry, trace);
	MON_ASSIGN_PC(entry->start_pc, pc, trace);

	simple_lock(&l->interlock);

	/*
	 *	Link the new start_list entry
	 */
	ETAP_LINK_ENTRY(l, entry, trace);

#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */
	while (l->want_write || l->want_upgrade) {
		if (!lock_miss) {
			ETAP_CONTENTION_TIMESTAMP(entry, trace);
			lock_miss = TRUE;
		}

		i = lock_wait_time[l->can_sleep ? 1 : 0];
		if (i != 0) {
			simple_unlock(&l->interlock);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - wait no writers");
#endif	/* MACH_LDEBUG */
			while (--i != 0 && (l->want_write || l->want_upgrade))
				continue;
			simple_lock(&l->interlock);
		}

		if (l->can_sleep && (l->want_write || l->want_upgrade)) {
			l->waiting = TRUE;
			thread_sleep_simple_lock((event_t) l,
					simple_lock_addr(l->interlock),
					THREAD_UNINT);
			/* interlock relocked */
		}
	}

	l->read_count++;

	/*
	 *	Do not collect wait data if the lock was free
	 *	or if no wait traces are enabled.
	 */

	if (lock_miss && ETAP_CONTENTION_ENABLED(trace)) {
		ETAP_TIMESTAMP(stop_wait_time);
		ETAP_TOTAL_TIME(total_time,
				stop_wait_time,
				entry->start_wait_time);
		CUM_READ_ENTRY_RESERVE(l, l->cbuff_read, trace);
		CUM_WAIT_ACCUMULATE(l->cbuff_read, total_time, dynamic, trace);
		MON_DATA_COLLECT(l,
				 entry,
				 total_time,
				 READ_LOCK,
				 MON_CONTENTION,
				 trace);
	}
	simple_unlock(&l->interlock);

	/*
	 *	Set start hold time if some type of hold tracing is enabled.
	 *
	 *	Note: if the stop_wait_time was already stamped, use
	 *	      it instead of doing an expensive bus access.
	 */

	if (lock_miss && ETAP_CONTENTION_ENABLED(trace))
		ETAP_COPY_START_HOLD_TIME(entry, stop_wait_time, trace);
	else
		ETAP_DURATION_TIMESTAMP(entry, trace);
}
/*
 *	Routine:	lock_read_to_write
 *	Function:
 *		Improves a read-only lock to one with
 *		write permission.  If another reader has
 *		already requested an upgrade to a write lock,
 *		no lock is held upon return.
 *
 *		Returns TRUE if the upgrade *failed*.
 */
boolean_t
lock_read_to_write(
	register lock_t	* l)
{
	register int	   i;
	boolean_t	   do_wakeup = FALSE;
	start_data_node_t  entry     = {0};
	boolean_t	   lock_miss = FALSE;
	unsigned short	   dynamic   = 0;
	unsigned short	   trace     = 0;
	etap_time_t	   total_time;
	etap_time_t	   stop_time;
	pc_t		   pc;
#if	MACH_LDEBUG
	int		   decrementer;
#endif	/* MACH_LDEBUG */

	ETAP_STAMP(lock_event_table(l), trace, dynamic);

	simple_lock(&l->interlock);

	l->read_count--;

	/*
	 *	Since the read lock is lost whether the write lock
	 *	is acquired or not, read hold data is collected here.
	 *	This, of course, is assuming some type of hold
	 *	tracing is enabled.
	 *
	 *	Note: trace is set to zero if the entry does not exist.
	 */

	ETAP_FIND_ENTRY(l, entry, trace);

	if (ETAP_DURATION_ENABLED(trace)) {
		ETAP_TIMESTAMP(stop_time);
		ETAP_TOTAL_TIME(total_time, stop_time, entry->start_hold_time);
		CUM_HOLD_ACCUMULATE(l->cbuff_read, total_time, dynamic, trace);
		MON_ASSIGN_PC(entry->end_pc, pc, trace);
		MON_DATA_COLLECT(l,
				 entry,
				 total_time,
				 READ_LOCK,
				 MON_DURATION,
				 trace);
	}

	if (l->want_upgrade) {
		/*
		 *	Someone else has requested upgrade.
		 *	Since we've released a read lock, wake
		 *	him up.
		 */
		if (l->waiting && (l->read_count == 0)) {
			l->waiting = FALSE;
			do_wakeup = TRUE;
		}

		ETAP_UNLINK_ENTRY(l, entry);
		simple_unlock(&l->interlock);
		ETAP_DESTROY_ENTRY(entry);

		if (do_wakeup)
			thread_wakeup((event_t) l);
		return (TRUE);
	}

	l->want_upgrade = TRUE;

	MON_ASSIGN_PC(entry->start_pc, pc, trace);

#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */
	while (l->read_count != 0) {
		if (!lock_miss) {
			ETAP_CONTENTION_TIMESTAMP(entry, trace);
			lock_miss = TRUE;
		}

		i = lock_wait_time[l->can_sleep ? 1 : 0];
		if (i != 0) {
			simple_unlock(&l->interlock);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - read_count");
#endif	/* MACH_LDEBUG */
			while (--i != 0 && l->read_count != 0)
				continue;
			simple_lock(&l->interlock);
		}

		if (l->can_sleep && l->read_count != 0) {
			l->waiting = TRUE;
			thread_sleep_simple_lock((event_t) l,
					simple_lock_addr(l->interlock),
					THREAD_UNINT);
			/* interlock relocked */
		}
	}

	/*
	 *	Do not collect wait data if the lock was free
	 *	or if no wait traces are enabled.
	 */

	if (lock_miss && ETAP_CONTENTION_ENABLED(trace)) {
		ETAP_TIMESTAMP(stop_time);
		ETAP_TOTAL_TIME(total_time, stop_time, entry->start_wait_time);
		CUM_WAIT_ACCUMULATE(l->cbuff_write, total_time, dynamic, trace);
		MON_DATA_COLLECT(l,
				 entry,
				 total_time,
				 WRITE_LOCK,
				 MON_CONTENTION,
				 trace);
	}

	simple_unlock(&l->interlock);

	/*
	 *	Set start hold time if some type of hold tracing is enabled.
	 *
	 *	Note: if the stop_time was already stamped, use
	 *	      it as the new start_hold_time instead of doing
	 *	      an expensive VME access.
	 */

	if (lock_miss && ETAP_CONTENTION_ENABLED(trace))
		ETAP_COPY_START_HOLD_TIME(entry, stop_time, trace);
	else
		ETAP_DURATION_TIMESTAMP(entry, trace);

	return (FALSE);
}
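
/*
 *	Illustrative caller sketch (not part of the build).  On a
 *	failed upgrade the read lock has already been dropped, so the
 *	caller must reacquire and revalidate, e.g. with a hypothetical
 *	lock "lk":
 *
 *		lock_read(&lk);
 *		...
 *		if (lock_read_to_write(&lk)) {
 *			lock_write(&lk);  (upgrade failed: nothing held here)
 *			... revalidate state ...
 *		}
 *		... exclusive access ...
 *		lock_done(&lk);
 */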
void
lock_write_to_read(
	register lock_t	* l)
{
	boolean_t	   do_wakeup = FALSE;
	start_data_node_t  entry     = {0};
	unsigned short	   dynamic   = 0;
	unsigned short	   trace     = 0;
	etap_time_t	   stop_hold_time;
	etap_time_t	   total_time;
	pc_t		   pc;

	ETAP_STAMP(lock_event_table(l), trace, dynamic);

	simple_lock(&l->interlock);

	l->read_count++;
	if (l->want_upgrade)
		l->want_upgrade = FALSE;
	else
		l->want_write = FALSE;

	if (l->waiting) {
		l->waiting = FALSE;
		do_wakeup = TRUE;
	}

	/*
	 *	Since we are switching from a write lock to a read lock,
	 *	the write lock data is stored and the read lock data
	 *	collection begins.
	 *
	 *	Note: trace is set to zero if the entry does not exist.
	 */

	ETAP_FIND_ENTRY(l, entry, trace);

	if (ETAP_DURATION_ENABLED(trace)) {
		ETAP_TIMESTAMP(stop_hold_time);
		ETAP_TOTAL_TIME(total_time, stop_hold_time,
				entry->start_hold_time);
		CUM_HOLD_ACCUMULATE(l->cbuff_write, total_time, dynamic, trace);
		MON_ASSIGN_PC(entry->end_pc, pc, trace);
		MON_DATA_COLLECT(l,
				 entry,
				 total_time,
				 WRITE_LOCK,
				 MON_DURATION,
				 trace);
	}

	simple_unlock(&l->interlock);

	/*
	 *	Set start hold time if some type of hold tracing is enabled.
	 *
	 *	Note: if the stop_hold_time was already stamped, use
	 *	      it as the new start_hold_time instead of doing
	 *	      an expensive bus access.
	 */

	if (ETAP_DURATION_ENABLED(trace))
		ETAP_COPY_START_HOLD_TIME(entry, stop_hold_time, trace);
	else
		ETAP_DURATION_TIMESTAMP(entry, trace);

	MON_ASSIGN_PC(entry->start_pc, pc, trace);

	if (do_wakeup)
		thread_wakeup((event_t) l);
}
/*
 *	Routine:	lock_try_write
 *	Function:
 *		Tries to get a write lock.
 *
 *		Returns FALSE if the lock is not held on return.
 */
boolean_t
lock_try_write(
	register lock_t	* l)
{
	start_data_node_t  entry = {0};
	unsigned short	   trace = 0;
	pc_t		   pc;

	ETAP_STAMP(lock_event_table(l), trace, trace);
	ETAP_CREATE_ENTRY(entry, trace);

	simple_lock(&l->interlock);

	if (l->want_write || l->want_upgrade || l->read_count) {
		/*
		 *	Can't get lock.
		 */
		simple_unlock(&l->interlock);
		ETAP_DESTROY_ENTRY(entry);
		return(FALSE);
	}

	/*
	 *	Have lock.
	 */

	l->want_write = TRUE;

	ETAP_LINK_ENTRY(l, entry, trace);

	simple_unlock(&l->interlock);

	MON_ASSIGN_PC(entry->start_pc, pc, trace);
	ETAP_DURATION_TIMESTAMP(entry, trace);

	return(TRUE);
}
/*
 *	Routine:	lock_try_read
 *	Function:
 *		Tries to get a read lock.
 *
 *		Returns FALSE if the lock is not held on return.
 */
boolean_t
lock_try_read(
	register lock_t	* l)
{
	start_data_node_t  entry = {0};
	unsigned short	   trace = 0;
	pc_t		   pc;

	ETAP_STAMP(lock_event_table(l), trace, trace);
	ETAP_CREATE_ENTRY(entry, trace);

	simple_lock(&l->interlock);

	if (l->want_write || l->want_upgrade) {
		simple_unlock(&l->interlock);
		ETAP_DESTROY_ENTRY(entry);
		return(FALSE);
	}

	l->read_count++;

	ETAP_LINK_ENTRY(l, entry, trace);

	simple_unlock(&l->interlock);

	MON_ASSIGN_PC(entry->start_pc, pc, trace);
	ETAP_DURATION_TIMESTAMP(entry, trace);

	return(TRUE);
}
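
/*
 *	Illustrative caller sketch (not part of the build): the try
 *	variants never block, so callers must handle failure:
 *
 *		if (lock_try_read(&lk)) {
 *			... read-side work ...
 *			lock_done(&lk);
 *		} else {
 *			... defer, or block with lock_read(&lk) ...
 *		}
 */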
#if	MACH_KDB

void	db_show_one_lock(lock_t *);

void
db_show_one_lock(
	lock_t	*lock)
{
	db_printf("Read_count = 0x%x, %swant_upgrade, %swant_write, ",
		  lock->read_count,
		  lock->want_upgrade ? "" : "!",
		  lock->want_write ? "" : "!");
	db_printf("%swaiting, %scan_sleep\n",
		  lock->waiting ? "" : "!", lock->can_sleep ? "" : "!");
	db_printf("Interlock:\n");
	db_show_one_simple_lock((db_expr_t)simple_lock_addr(lock->interlock),
				TRUE, (db_expr_t)0, (char *)0);
}
#endif	/* MACH_KDB */
/*
 *	The C portion of the mutex package.  These routines are only invoked
 *	if the optimized assembler routines can't do the work.
 */

/*
 *	Routine:	mutex_alloc
 *	Function:
 *		Allocate a mutex for external users who cannot
 *		hard-code the structure definition into their
 *		objects.
 *		For now just use kalloc, but a zone is probably
 *		warranted.
 */
mutex_t *
mutex_alloc(
	etap_event_t	event)
{
	mutex_t		*m;

	if ((m = (mutex_t *)kalloc(sizeof(mutex_t))) != 0)
		mutex_init(m, event);
	return(m);
}
/*
 *	Routine:	mutex_free
 *	Function:
 *		Free a mutex allocated for external users.
 *		For now just use kfree, but a zone is probably
 *		warranted.
 */
void
mutex_free(
	mutex_t		*m)
{
	kfree((vm_offset_t)m, sizeof(mutex_t));
}
/*
 *	Routine:	mutex_lock_wait
 *
 *	Invoked in order to wait on contention.
 *
 *	Called with the interlock locked and
 *	returns it unlocked.
 */
void
mutex_lock_wait (
	mutex_t			*mutex,
	thread_act_t		holder)
{
	thread_t	thread, self = current_thread();
	integer_t	priority;
	spl_t		s = splsched();

	priority = self->last_processor->current_pri;
	if (priority < self->priority)
		priority = self->priority;
	if (priority > MINPRI_KERNEL)
		priority = MINPRI_KERNEL;
	else
	if (priority < BASEPRI_DEFAULT)
		priority = BASEPRI_DEFAULT;

	thread = holder->thread;
	assert(thread->top_act == holder);	/* XXX */
	thread_lock(thread);
	if (mutex->promoted_pri == 0)
		thread->promotions++;
	if (thread->priority < MINPRI_KERNEL) {
		thread->sched_mode |= TH_MODE_PROMOTED;
		if (mutex->promoted_pri < priority &&
		    thread->sched_pri < priority) {
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED, MACH_PROMOTE) | DBG_FUNC_NONE,
				thread->sched_pri, priority, (int)thread, (int)mutex, 0);

			set_sched_pri(thread, priority);
		}
	}
	thread_unlock(thread);
	splx(s);

	if (mutex->promoted_pri < priority)
		mutex->promoted_pri = priority;

	if (self->pending_promoter[self->pending_promoter_index] == NULL) {
		self->pending_promoter[self->pending_promoter_index] = mutex;
		mutex->waiters++;
	}
	else
	if (self->pending_promoter[self->pending_promoter_index] != mutex) {
		self->pending_promoter[++self->pending_promoter_index] = mutex;
		mutex->waiters++;
	}

	assert_wait(mutex, THREAD_UNINT);
	interlock_unlock(&mutex->interlock);

	thread_block(THREAD_CONTINUE_NULL);
}
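
/*
 *	Worked example of the promotion computation above (illustrative
 *	numbers only): if the waiter is running at current_pri 60 with a
 *	base priority of 31, the holder is promoted toward 60, clamped
 *	into the [BASEPRI_DEFAULT, MINPRI_KERNEL] band, and only if that
 *	actually raises the holder's sched_pri.  The promotion is undone
 *	in mutex_unlock_wakeup() below.
 */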
/*
 *	Routine:	mutex_lock_acquire
 *
 *	Invoked on acquiring the mutex when there is
 *	contention.
 *
 *	Returns the current number of waiters.
 *
 *	Called with the interlock locked.
 */
int
mutex_lock_acquire(
	mutex_t		*mutex)
{
	thread_t	thread = current_thread();

	if (thread->pending_promoter[thread->pending_promoter_index] == mutex) {
		thread->pending_promoter[thread->pending_promoter_index] = NULL;
		if (thread->pending_promoter_index > 0)
			thread->pending_promoter_index--;
		mutex->waiters--;
	}

	if (mutex->waiters > 0) {
		integer_t	priority = mutex->promoted_pri;
		spl_t		s = splsched();

		thread_lock(thread);
		thread->promotions++;
		if (thread->priority < MINPRI_KERNEL) {
			thread->sched_mode |= TH_MODE_PROMOTED;
			if (thread->sched_pri < priority) {
				KERNEL_DEBUG_CONSTANT(
					MACHDBG_CODE(DBG_MACH_SCHED, MACH_PROMOTE) | DBG_FUNC_NONE,
					thread->sched_pri, priority, 0, (int)mutex, 0);

				set_sched_pri(thread, priority);
			}
		}
		thread_unlock(thread);
		splx(s);
	}
	else
		mutex->promoted_pri = 0;

	return (mutex->waiters);
}
/*
 *	Routine:	mutex_unlock_wakeup
 *
 *	Invoked on unlock when there is contention.
 *
 *	Called with the interlock locked.
 */
void
mutex_unlock_wakeup (
	mutex_t			*mutex,
	thread_act_t		holder)
{
	thread_t	thread = current_thread();

	if (thread->top_act != holder)
		panic("mutex_unlock_wakeup: mutex %x holder %x\n", mutex, holder);

	if (thread->promotions > 0) {
		spl_t	s = splsched();

		thread_lock(thread);
		if (--thread->promotions == 0 &&
		    (thread->sched_mode & TH_MODE_PROMOTED)) {
			thread->sched_mode &= ~TH_MODE_PROMOTED;
			if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
				KERNEL_DEBUG_CONSTANT(
					MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEMOTE) | DBG_FUNC_NONE,
					thread->sched_pri, DEPRESSPRI, 0, (int)mutex, 0);

				set_sched_pri(thread, DEPRESSPRI);
			}
			else {
				if (thread->priority < thread->sched_pri) {
					KERNEL_DEBUG_CONSTANT(
						MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEMOTE) | DBG_FUNC_NONE,
						thread->sched_pri, thread->priority, 0, (int)mutex, 0);
				}

				compute_priority(thread, FALSE);
			}
		}
		thread_unlock(thread);
		splx(s);
	}

	assert(mutex->waiters > 0);
	thread_wakeup_one(mutex);
}
/*
 *	mutex_pause: Called by former callers of simple_lock_pause().
 */
void
mutex_pause(void)
{
	wait_result_t	wait_result;

	wait_result = assert_wait_timeout(1, THREAD_UNINT);
	assert(wait_result == THREAD_WAITING);

	ETAP_SET_REASON(current_thread(), BLOCKED_ON_MUTEX_LOCK);

	wait_result = thread_block(THREAD_CONTINUE_NULL);
	assert(wait_result == THREAD_TIMED_OUT);
}
#if	MACH_KDB
/*
 *	Routines to print out simple_locks and mutexes in a nicely-formatted
 *	fashion.
 */

char *simple_lock_labels =	"ENTRY    ILK THREAD   DURATION CALLER";
char *mutex_labels =		"ENTRY    LOCKED WAITERS   THREAD CALLER";

void
db_show_one_simple_lock (
	db_expr_t	addr,
	boolean_t	have_addr,
	db_expr_t	count,
	char		* modif)
{
	simple_lock_t	saddr = (simple_lock_t)addr;

	if (saddr == (simple_lock_t)0 || !have_addr) {
		db_error ("No simple_lock\n");
	}
#if	USLOCK_DEBUG
	else if (saddr->lock_type != USLOCK_TAG)
		db_error ("Not a simple_lock\n");
#endif	/* USLOCK_DEBUG */

	db_printf ("%s\n", simple_lock_labels);
	db_print_simple_lock (saddr);
}
void
db_print_simple_lock (
	simple_lock_t	addr)
{
	db_printf ("%08x %3d", addr, *hw_lock_addr(addr->interlock));
#if	USLOCK_DEBUG
	db_printf (" %08x", addr->debug.lock_thread);
	db_printf (" %08x ", addr->debug.duration[1]);
	db_printsym ((int)addr->debug.lock_pc, DB_STGY_ANY);
#endif	/* USLOCK_DEBUG */
	db_printf ("\n");
}
void
db_show_one_mutex (
	db_expr_t	addr,
	boolean_t	have_addr,
	db_expr_t	count,
	char		* modif)
{
	mutex_t		* maddr = (mutex_t *)addr;

	if (maddr == (mutex_t *)0 || !have_addr)
		db_error ("No mutex\n");
#if	MACH_LDEBUG
	else if (maddr->type != MUTEX_TAG)
		db_error ("Not a mutex\n");
#endif	/* MACH_LDEBUG */

	db_printf ("%s\n", mutex_labels);
	db_print_mutex (maddr);
}
void
db_print_mutex (
	mutex_t		* addr)
{
	db_printf ("%08x %6d %7d",
		   addr, *hw_lock_addr(addr->locked), addr->waiters);
#if	MACH_LDEBUG
	db_printf (" %08x ", addr->thread);
	db_printsym (addr->pc, DB_STGY_ANY);
#endif	/* MACH_LDEBUG */
	db_printf ("\n");
}
#endif	/* MACH_KDB */
#if	MACH_LDEBUG
extern void	meter_simple_lock (
			simple_lock_t	l);
extern void	meter_simple_unlock (
			simple_lock_t	l);
extern void	cyctm05_stamp (
			unsigned long * start);
extern void	cyctm05_diff (
			unsigned long * start,
			unsigned long * end,
			unsigned long * diff);

simple_lock_data_t	loser;

void
meter_simple_lock(
	simple_lock_t	lp)
{
	cyctm05_stamp (lp->duration);
}

int	long_simple_lock_crash;
int	long_simple_lock_time = 0x600;
/*
 *	This is pretty gawd-awful.  XXX
 */
decl_simple_lock_data(extern,kd_tty)

void
meter_simple_unlock(
	simple_lock_t	lp)
{
	unsigned long	stime[2], etime[2], delta[2];

	if (lp == &kd_tty)			/* XXX */
		return;

	stime[0] = lp->duration[0];
	stime[1] = lp->duration[1];

	cyctm05_stamp (etime);

	if (etime[1] < stime[1])		/* XXX */
		return;

	cyctm05_diff (stime, etime, delta);

	if (delta[1] >= 0x10000)		/* XXX */
		return;

	lp->duration[0] = delta[0];
	lp->duration[1] = delta[1];

	if (loser.duration[1] < lp->duration[1])
		loser = *lp;

	assert (!long_simple_lock_crash || delta[1] < long_simple_lock_time);
}
#endif	/* MACH_LDEBUG */
#if	ETAP_LOCK_TRACE

/*
 * ==============================================================
 * ETAP hook when initializing a usimple_lock.  May be invoked
 * from the portable lock package or from an optimized machine-
 * dependent implementation.
 * ==============================================================
 */

void
etap_simplelock_init (
	simple_lock_t	l,
	etap_event_t	event)
{
	ETAP_CLEAR_TRACE_DATA(l);
	etap_event_table_assign(&l->u.event_table_chain, event);

#if	ETAP_LOCK_ACCUMULATE
	/* reserve an entry in the cumulative buffer */
	l->cbuff_entry = etap_cbuff_reserve(lock_event_table(l));
	/* initialize the entry if one was returned */
	if (l->cbuff_entry != CBUFF_ENTRY_NULL) {
		l->cbuff_entry->event    = event;
		l->cbuff_entry->instance = (unsigned long) l;
		l->cbuff_entry->kind     = SPIN_LOCK;
	}
#endif	/* ETAP_LOCK_ACCUMULATE */
}
void
etap_simplelock_unlock(
	simple_lock_t	l)
{
	unsigned short	dynamic = 0;
	unsigned short	trace   = 0;
	etap_time_t	total_time;
	etap_time_t	stop_hold_time;
	pc_t		pc;

	OBTAIN_PC(pc, l);
	ETAP_STAMP(lock_event_table(l), trace, dynamic);

	/*
	 *	Calculate & collect hold time data only if
	 *	the hold tracing was enabled throughout the
	 *	whole operation.  This prevents collection of
	 *	bogus data caused by mid-operation trace changes.
	 */

	if (ETAP_DURATION_ENABLED(trace) && ETAP_WHOLE_OP(l)) {
		ETAP_TIMESTAMP (stop_hold_time);
		ETAP_TOTAL_TIME(total_time, stop_hold_time,
				l->u.s.start_hold_time);
		CUM_HOLD_ACCUMULATE(l->cbuff_entry, total_time, dynamic, trace);
		MON_ASSIGN_PC(l->end_pc, pc, trace);
		MON_DATA_COLLECT(l,
				 &l->u.s,
				 total_time,
				 SPIN_LOCK,
				 MON_DURATION,
				 trace);
	}
	ETAP_CLEAR_TRACE_DATA(l);
}
/* ========================================================================
 *	Since the simple_lock() routine is machine dependent, it must always
 *	be coded in assembly.  The two hook routines below are used to collect
 *	lock data.
 * ========================================================================
 */

/*
 * ROUTINE:	etap_simplelock_miss()
 *
 * FUNCTION:	This spin lock routine is called upon the first
 *		spin (miss) of the lock.
 *
 *		A timestamp is taken at the beginning of the wait period,
 *		if wait tracing is enabled.
 *
 * PARAMETERS:
 *	- lock address.
 *	- timestamp address.
 *
 * RETURNS:	Wait timestamp value.  The timestamp value is later used
 *		by etap_simplelock_hold().
 *
 * NOTES:	This routine is NOT ALWAYS called.  The lock may be free
 *		(never spinning).  For this reason the pc is collected in
 *		etap_simplelock_hold().
 */
etap_time_t
etap_simplelock_miss (
	simple_lock_t	l)
{
	unsigned short	trace   = 0;
	unsigned short	dynamic = 0;
	etap_time_t	start_miss_time;

	ETAP_STAMP(lock_event_table(l), trace, dynamic);

	if (trace & ETAP_CONTENTION)
		ETAP_TIMESTAMP(start_miss_time);
	else
		ETAP_TIME_CLEAR(start_miss_time);

	return(start_miss_time);
}
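
/*
 *	Illustrative call sequence (a sketch of what the machine-
 *	dependent simple_lock() assembly is expected to do, not code
 *	from this file):
 *
 *		etap_time_t t;
 *		ETAP_TIME_CLEAR(t);
 *		while (!hw_lock_try(&l->interlock))
 *			(on the first miss only)  t = etap_simplelock_miss(l);
 *		etap_simplelock_hold(l, pc, t);   (t stays zero if never contended)
 */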
/*
 * ROUTINE:	etap_simplelock_hold()
 *
 * FUNCTION:	This spin lock routine is ALWAYS called once the lock
 *		is acquired.  Here, the contention time is calculated and
 *		the start hold time is stamped.
 *
 * PARAMETERS:
 *	- lock address.
 *	- PC of the calling function.
 *	- start wait timestamp.
 */

void
etap_simplelock_hold (
	simple_lock_t	l,
	pc_t		pc,
	etap_time_t	start_hold_time)
{
	unsigned short	dynamic = 0;
	unsigned short	trace   = 0;
	etap_time_t	total_time;
	etap_time_t	stop_hold_time;

	ETAP_STAMP(lock_event_table(l), trace, dynamic);

	MON_ASSIGN_PC(l->start_pc, pc, trace);

	/* do not collect wait data if lock was free */
	if (!ETAP_TIME_IS_ZERO(start_hold_time) && (trace & ETAP_CONTENTION)) {
		ETAP_TIMESTAMP(stop_hold_time);
		ETAP_TOTAL_TIME(total_time,
				stop_hold_time,
				start_hold_time);
		CUM_WAIT_ACCUMULATE(l->cbuff_entry, total_time, dynamic, trace);
		MON_DATA_COLLECT(l,
				 &l->u.s,
				 total_time,
				 SPIN_LOCK,
				 MON_CONTENTION,
				 trace);
		ETAP_COPY_START_HOLD_TIME(&l->u.s, stop_hold_time, trace);
	}
	else
		ETAP_DURATION_TIMESTAMP(&l->u.s, trace);
}
void
etap_mutex_init (
	mutex_t		*l,
	etap_event_t	event)
{
	ETAP_CLEAR_TRACE_DATA(l);
	etap_event_table_assign(&l->u.event_table_chain, event);

#if	ETAP_LOCK_ACCUMULATE
	/* reserve an entry in the cumulative buffer */
	l->cbuff_entry = etap_cbuff_reserve(lock_event_table(l));
	/* initialize the entry if one was returned */
	if (l->cbuff_entry != CBUFF_ENTRY_NULL) {
		l->cbuff_entry->event    = event;
		l->cbuff_entry->instance = (unsigned long) l;
		l->cbuff_entry->kind     = MUTEX_LOCK;
	}
#endif	/* ETAP_LOCK_ACCUMULATE */
}
etap_time_t
etap_mutex_miss (
	mutex_t		*l)
{
	unsigned short	trace   = 0;
	unsigned short	dynamic = 0;
	etap_time_t	start_miss_time;

	ETAP_STAMP(lock_event_table(l), trace, dynamic);

	if (trace & ETAP_CONTENTION)
		ETAP_TIMESTAMP(start_miss_time);
	else
		ETAP_TIME_CLEAR(start_miss_time);

	return(start_miss_time);
}
void
etap_mutex_hold (
	mutex_t		*l,
	pc_t		pc,
	etap_time_t	start_hold_time)
{
	unsigned short	dynamic = 0;
	unsigned short	trace   = 0;
	etap_time_t	total_time;
	etap_time_t	stop_hold_time;

	ETAP_STAMP(lock_event_table(l), trace, dynamic);

	MON_ASSIGN_PC(l->start_pc, pc, trace);

	/* do not collect wait data if lock was free */
	if (!ETAP_TIME_IS_ZERO(start_hold_time) && (trace & ETAP_CONTENTION)) {
		ETAP_TIMESTAMP(stop_hold_time);
		ETAP_TOTAL_TIME(total_time,
				stop_hold_time,
				start_hold_time);
		CUM_WAIT_ACCUMULATE(l->cbuff_entry, total_time, dynamic, trace);
		MON_DATA_COLLECT(l,
				 &l->u.s,
				 total_time,
				 MUTEX_LOCK,
				 MON_CONTENTION,
				 trace);
		ETAP_COPY_START_HOLD_TIME(&l->u.s, stop_hold_time, trace);
	}
	else
		ETAP_DURATION_TIMESTAMP(&l->u.s, trace);
}
void
etap_mutex_unlock(
	mutex_t		*l)
{
	unsigned short	dynamic = 0;
	unsigned short	trace   = 0;
	etap_time_t	total_time;
	etap_time_t	stop_hold_time;
	pc_t		pc;

	OBTAIN_PC(pc, l);
	ETAP_STAMP(lock_event_table(l), trace, dynamic);

	/*
	 *	Calculate & collect hold time data only if
	 *	the hold tracing was enabled throughout the
	 *	whole operation.  This prevents collection of
	 *	bogus data caused by mid-operation trace changes.
	 */

	if (ETAP_DURATION_ENABLED(trace) && ETAP_WHOLE_OP(l)) {
		ETAP_TIMESTAMP(stop_hold_time);
		ETAP_TOTAL_TIME(total_time, stop_hold_time,
				l->u.s.start_hold_time);
		CUM_HOLD_ACCUMULATE(l->cbuff_entry, total_time, dynamic, trace);
		MON_ASSIGN_PC(l->end_pc, pc, trace);
		MON_DATA_COLLECT(l,
				 &l->u.s,
				 total_time,
				 MUTEX_LOCK,
				 MON_DURATION,
				 trace);
	}
	ETAP_CLEAR_TRACE_DATA(l);
}

#endif	/* ETAP_LOCK_TRACE */