/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Locking primitives implementation
 */
#include <mach_ldebug.h>

#include <kern/lock.h>
#include <kern/etap_macros.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>

#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_print.h>

#include <ppc/Firmware.h>
#include <ppc/POWERMAC/mp/MPPlugIn.h>

#include <sys/kdebug.h>
#define	ANY_LOCK_DEBUG	(USLOCK_DEBUG || LOCK_DEBUG || MUTEX_DEBUG)

/*
 * Some portions of the lock debugging code must run with
 * interrupts disabled.  This can be machine-dependent,
 * but we don't have any good hooks for that at the moment.
 * If your architecture is different, add a machine-dependent
 * ifdef here for these macros.			XXX
 */

#define	DISABLE_INTERRUPTS(s)	s = ml_set_interrupts_enabled(FALSE)
#define	ENABLE_INTERRUPTS(s)	(void)ml_set_interrupts_enabled(s)
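/*
 * Illustrative usage sketch (not part of the original source): the
 * macros bracket a short critical region, saving the previous
 * interrupt-enable state in a caller-supplied boolean_t and
 * restoring exactly that state afterwards:
 *
 *	boolean_t s;
 *	DISABLE_INTERRUPTS(s);
 *	...touch per-cpu debug state...
 *	ENABLE_INTERRUPTS(s);
 */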
#if	NCPUS > 1

/*
 * Time we loop without holding the interlock.
 * The former is for when we cannot sleep, the latter
 * for when our thread can go to sleep (loop less).
 * We shouldn't retake the interlock too frequently
 * if we cannot go to sleep, since it interferes with
 * any other processors.  In particular, 100 is too small
 * a number for powerpc MP systems because of cache
 * coherency issues and differing lock fetch times between
 * the processors.
 */
unsigned int lock_wait_time[2] = { (unsigned int)-1, 100 };

#else	/* NCPUS > 1 */

/*
 * It is silly to spin on a uni-processor as if we
 * thought something magical would happen to the
 * want_write bit while we are executing.
 */
unsigned int lock_wait_time[2] = { 0, 0 };

#endif	/* NCPUS > 1 */
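/*
 * Illustrative sketch (not in the original source): the array is
 * indexed by whether the blocked path may sleep, exactly as the
 * lock_write() wait loop below does:
 *
 *	i = lock_wait_time[l->can_sleep ? 1 : 0];
 *	while (--i != 0 && l->want_write)
 *		continue;	// spin a while before retaking the interlock
 */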
#if	MACH_KDB
void	db_print_simple_lock(
			simple_lock_t	addr);
#endif	/* MACH_KDB */

#if	USLOCK_DEBUG
/*
 * Perform simple lock checks.
 */
int	uslock_check = 1;
int	max_lock_loops = 100000000;
decl_simple_lock_data(extern, printf_lock)
decl_simple_lock_data(extern, panic_lock)
#if	MACH_KDB && NCPUS > 1
decl_simple_lock_data(extern, kdb_lock)
#endif	/* MACH_KDB && NCPUS > 1 */
#endif	/* USLOCK_DEBUG */
/*
 * We often want to know the addresses of the callers
 * of the various lock routines.  However, this information
 * is only used for debugging and statistics.
 */
#define	INVALID_PC	((void *) VM_MAX_KERNEL_ADDRESS)
#define	INVALID_THREAD	((void *) VM_MAX_KERNEL_ADDRESS)
#if	ANY_LOCK_DEBUG || ETAP_LOCK_TRACE
#define	OBTAIN_PC(pc,l)	((pc) = (void *) GET_RETURN_PC(&(l)))
#else	/* ANY_LOCK_DEBUG || ETAP_LOCK_TRACE */
#ifdef	lint
/*
 * Eliminate lint complaints about unused local pc variables.
 */
#define	OBTAIN_PC(pc,l)	++pc
#else	/* lint */
#define	OBTAIN_PC(pc,l)
#endif	/* lint */
#endif	/* ANY_LOCK_DEBUG || ETAP_LOCK_TRACE */
/* #ifndef USIMPLE_LOCK_CALLS
 * The i386 production version of usimple_locks isn't ready yet.
 */
/*
 *	Portable lock package implementation of usimple_locks.
 */

#if	ETAP_LOCK_TRACE
#define	ETAPCALL(stmt)	stmt
void		etap_simplelock_init(simple_lock_t, etap_event_t);
void		etap_simplelock_unlock(simple_lock_t);
void		etap_simplelock_hold(simple_lock_t, pc_t, etap_time_t);
etap_time_t	etap_simplelock_miss(simple_lock_t);

void		etap_mutex_init(mutex_t*, etap_event_t);
void		etap_mutex_unlock(mutex_t*);
void		etap_mutex_hold(mutex_t*, pc_t, etap_time_t);
etap_time_t	etap_mutex_miss(mutex_t*);
#else	/* ETAP_LOCK_TRACE */
#define	ETAPCALL(stmt)
#endif	/* ETAP_LOCK_TRACE */
#if	USLOCK_DEBUG
#define	USLDBG(stmt)	stmt
void		usld_lock_init(usimple_lock_t, etap_event_t);
void		usld_lock_pre(usimple_lock_t, pc_t);
void		usld_lock_post(usimple_lock_t, pc_t);
void		usld_unlock(usimple_lock_t, pc_t);
void		usld_lock_try_pre(usimple_lock_t, pc_t);
void		usld_lock_try_post(usimple_lock_t, pc_t);
void		usld_lock_held(usimple_lock_t);
void		usld_lock_none_held(void);
int		usld_lock_common_checks(usimple_lock_t, char *);
#else	/* USLOCK_DEBUG */
#define	USLDBG(stmt)
#endif	/* USLOCK_DEBUG */
/*
 * Initialize a usimple_lock.
 *
 * No change in preemption state.
 */
void
usimple_lock_init(
	usimple_lock_t	l,
	etap_event_t	event)
{
	USLDBG(usld_lock_init(l, event));
	ETAPCALL(etap_simplelock_init((l),(event)));
	hw_lock_init(&l->interlock);
}
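/*
 * Illustrative usage (sketch, not in the original file; the event
 * name is a placeholder, real etap events are defined elsewhere):
 *
 *	decl_simple_lock_data(static, my_lock)
 *	...
 *	simple_lock_init(&my_lock, ETAP_SOME_EVENT);
 *	simple_lock(&my_lock);
 *	...critical section, preemption disabled...
 *	simple_unlock(&my_lock);
 */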
/*
 * Acquire a usimple_lock.
 *
 * Returns with preemption disabled.  Note
 * that the hw_lock routines are responsible for
 * maintaining preemption state.
 */
void
usimple_lock(
	usimple_lock_t	l)
{
	pc_t		pc;
#if	ETAP_LOCK_TRACE
	etap_time_t	start_wait_time;
	int		no_miss_info = 0;
#endif	/* ETAP_LOCK_TRACE */
#if	USLOCK_DEBUG
	int		count = 0;
#endif	/* USLOCK_DEBUG */

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_pre(l, pc));
#if	ETAP_LOCK_TRACE
	ETAP_TIME_CLEAR(start_wait_time);
#endif	/* ETAP_LOCK_TRACE */

	if(!hw_lock_to(&l->interlock, LockTimeOut))	/* Try to get the lock with a timeout */
		panic("simple lock deadlock detection - l=%08X, cpu=%d, ret=%08X", l, cpu_number(), pc);

	ETAPCALL(etap_simplelock_hold(l, pc, start_wait_time));
	USLDBG(usld_lock_post(l, pc));
}
/*
 * Release a usimple_lock.
 *
 * Returns with preemption enabled.  Note
 * that the hw_lock routines are responsible for
 * maintaining preemption state.
 */
void
usimple_unlock(
	usimple_lock_t	l)
{
	pc_t	pc;

//	checkNMI();					/* (TEST/DEBUG) */

	OBTAIN_PC(pc, l);
	USLDBG(usld_unlock(l, pc));
	ETAPCALL(etap_simplelock_unlock(l));
	hw_lock_unlock(&l->interlock);
}
/*
 * Conditionally acquire a usimple_lock.
 *
 * On success, returns with preemption disabled.
 * On failure, returns with preemption in the same state
 * as when first invoked.  Note that the hw_lock routines
 * are responsible for maintaining preemption state.
 *
 * XXX No stats are gathered on a miss; I preserved this
 * behavior from the original assembly-language code, but
 * doesn't it make sense to log misses?  XXX
 */
unsigned int
usimple_lock_try(
	usimple_lock_t	l)
{
	pc_t		pc;
	unsigned int	success;
	etap_time_t	zero_time;

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_try_pre(l, pc));
	if ((success = hw_lock_try(&l->interlock))) {
		USLDBG(usld_lock_try_post(l, pc));
		ETAP_TIME_CLEAR(zero_time);
		ETAPCALL(etap_simplelock_hold(l, pc, zero_time));
	}
	return success;
}
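/*
 * Typical try-lock pattern (illustrative sketch, not in the original
 * file):
 *
 *	if (usimple_lock_try(&l)) {
 *		...got the lock, preemption disabled...
 *		usimple_unlock(&l);
 *	} else {
 *		...lock busy; caller must not spin or block here...
 *	}
 */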
#if	ETAP_LOCK_TRACE
void
simple_lock_no_trace(
	simple_lock_t	l)
{
	pc_t	pc;

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_pre(l, pc));
	while (!hw_lock_try(&l->interlock)) {
		while (hw_lock_held(&l->interlock)) {
			/*
			 * Spin watching the lock value in cache,
			 * without consuming external bus cycles.
			 * On most SMP architectures, the atomic
			 * instruction(s) used by hw_lock_try
			 * cost much, much more than an ordinary
			 * memory read.
			 */
		}
	}
	USLDBG(usld_lock_post(l, pc));
}

void
simple_unlock_no_trace(
	simple_lock_t	l)
{
	pc_t	pc;

	OBTAIN_PC(pc, l);
	USLDBG(usld_unlock(l, pc));
	hw_lock_unlock(&l->interlock);
}
unsigned int
simple_lock_try_no_trace(
	simple_lock_t	l)
{
	pc_t		pc;
	unsigned int	success;

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_try_pre(l, pc));
	if ((success = hw_lock_try(&l->interlock))) {
		USLDBG(usld_lock_try_post(l, pc));
	}
	return success;
}
#endif	/* ETAP_LOCK_TRACE */
#if	USLOCK_DEBUG
/*
 * Verify that the lock is locked and owned by
 * the current thread.
 */
void
usimple_lock_held(
	usimple_lock_t	l)
{
	usld_lock_held(l);
}

/*
 * Verify that no usimple_locks are held by
 * this processor.  Typically used in a
 * trap handler when returning to user mode
 * or in a path known to relinquish the processor.
 */
void
usimple_lock_none_held(void)
{
	usld_lock_none_held();
}
#endif	/* USLOCK_DEBUG */
#if	USLOCK_DEBUG
/*
 * States of a usimple_lock.  The default when initializing
 * a usimple_lock is setting it up for debug checking.
 */
#define	USLOCK_CHECKED		0x0001		/* lock is being checked */
#define	USLOCK_TAKEN		0x0002		/* lock has been taken */
#define	USLOCK_INIT		0xBAA0		/* lock has been initialized */
#define	USLOCK_INITIALIZED	(USLOCK_INIT|USLOCK_CHECKED)
#define	USLOCK_CHECKING(l)	(uslock_check &&			\
				 ((l)->debug.state & USLOCK_CHECKED))
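/*
 * Worked example (illustrative): a freshly initialized, checked lock
 * has state == USLOCK_INITIALIZED, i.e. 0xBAA0|0x0001 == 0xBAA1.
 * Taking it ORs in USLOCK_TAKEN (giving 0xBAA3); releasing it clears
 * that bit again.  USLOCK_CHECKING(l) is the gate each usld_* routine
 * consults before doing any expensive checking.
 */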
/*
 * Maintain a per-cpu stack of acquired usimple_locks.
 */
void	usl_stack_push(usimple_lock_t, int);
void	usl_stack_pop(usimple_lock_t, int);

/*
 * Trace activities of a particularly interesting lock.
 */
void	usl_trace(usimple_lock_t, int, pc_t, const char *);
/*
 * Initialize the debugging information contained
 * in a usimple_lock.
 */
void
usld_lock_init(
	usimple_lock_t	l,
	etap_event_t	type)
{
	if (l == USIMPLE_LOCK_NULL)
		panic("lock initialization:  null lock pointer");
	l->lock_type = USLOCK_TAG;
	l->debug.state = uslock_check ? USLOCK_INITIALIZED : 0;
	l->debug.lock_cpu = l->debug.unlock_cpu = 0;
	l->debug.lock_pc = l->debug.unlock_pc = INVALID_PC;
	l->debug.lock_thread = l->debug.unlock_thread = INVALID_THREAD;
	l->debug.duration[0] = l->debug.duration[1] = 0;
}
/*
 * These checks apply to all usimple_locks, not just
 * those with USLOCK_CHECKED turned on.
 */
int
usld_lock_common_checks(
	usimple_lock_t	l,
	char		*caller)
{
	if (l == USIMPLE_LOCK_NULL)
		panic("%s:  null lock pointer", caller);
	if (l->lock_type != USLOCK_TAG)
		panic("%s:  0x%x is not a usimple lock", caller, (integer_t) l);
	if (!(l->debug.state & USLOCK_INIT))
		panic("%s:  0x%x is not an initialized lock",
		      caller, (integer_t) l);
	return USLOCK_CHECKING(l);
}
/*
 * Debug checks on a usimple_lock just before attempting
 * to acquire it.
 */
void
usld_lock_pre(
	usimple_lock_t	l,
	pc_t		pc)
{
	char	*caller = "usimple_lock";

#if 0
	printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n",	/* (TEST/DEBUG) */
		l->debug.lock_pc,
		l->debug.lock_thread,
		l->debug.state,
		l->debug.lock_cpu,
		l->debug.unlock_pc,
		l->debug.unlock_cpu,
		l->debug.unlock_thread,
		caller);
#endif

	if (!usld_lock_common_checks(l, caller))
		return;

	/*
	 * Note that we have a weird case where we are getting a lock when we are
	 * in the process of putting the system to sleep.  We are running with no
	 * current threads, therefore we can't tell if we are trying to retake a lock
	 * we have or someone on the other processor has it.  Therefore we just
	 * ignore this test if the locking thread is 0.
	 */

	if ((l->debug.state & USLOCK_TAKEN) && l->debug.lock_thread &&
	    l->debug.lock_thread == (void *) current_thread()) {
		printf("%s:  lock 0x%x already locked (at 0x%x) by",
		      caller, (integer_t) l, l->debug.lock_pc);
		printf(" current thread 0x%x (new attempt at pc 0x%x)\n",
		       l->debug.lock_thread, pc);
		panic(caller);
	}
	mp_disable_preemption();
	usl_trace(l, cpu_number(), pc, caller);
	mp_enable_preemption();
}
/*
 * Debug checks on a usimple_lock just after acquiring it.
 *
 * Pre-emption has been disabled at this point,
 * so we are safe in using cpu_number.
 */
void
usld_lock_post(
	usimple_lock_t	l,
	pc_t		pc)
{
	register int	mycpu;
	char	*caller = "successful usimple_lock";

#if 0
	printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n",	/* (TEST/DEBUG) */
		l->debug.lock_pc,
		l->debug.lock_thread,
		l->debug.state,
		l->debug.lock_cpu,
		l->debug.unlock_pc,
		l->debug.unlock_cpu,
		l->debug.unlock_thread,
		caller);
#endif

	if (!usld_lock_common_checks(l, caller))
		return;

	if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
		panic("%s:  lock 0x%x became uninitialized",
		      caller, (integer_t) l);
	if ((l->debug.state & USLOCK_TAKEN))
		panic("%s:  lock 0x%x became TAKEN by someone else",
		      caller, (integer_t) l);

	mycpu = cpu_number();
	l->debug.lock_thread = (void *)current_thread();
	l->debug.state |= USLOCK_TAKEN;
	l->debug.lock_pc = pc;
	l->debug.lock_cpu = mycpu;

	usl_stack_push(l, mycpu);
	usl_trace(l, mycpu, pc, caller);
}
/*
 * Debug checks on a usimple_lock just before
 * releasing it.  Note that the caller has not
 * yet released the hardware lock.
 *
 * Preemption is still disabled, so there's
 * no problem using cpu_number.
 */
void
usld_unlock(
	usimple_lock_t	l,
	pc_t		pc)
{
	register int	mycpu;
	char	*caller = "usimple_unlock";

#if 0
	printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n",	/* (TEST/DEBUG) */
		l->debug.lock_pc,
		l->debug.lock_thread,
		l->debug.state,
		l->debug.lock_cpu,
		l->debug.unlock_pc,
		l->debug.unlock_cpu,
		l->debug.unlock_thread,
		caller);
#endif

	if (!usld_lock_common_checks(l, caller))
		return;

	mycpu = cpu_number();

	if (!(l->debug.state & USLOCK_TAKEN))
		panic("%s:  lock 0x%x hasn't been taken",
		      caller, (integer_t) l);
	if (l->debug.lock_thread != (void *) current_thread())
		panic("%s:  unlocking lock 0x%x, owned by thread 0x%x",
		      caller, (integer_t) l, l->debug.lock_thread);
	if (l->debug.lock_cpu != mycpu) {
		printf("%s:  unlocking lock 0x%x on cpu 0x%x",
		       caller, (integer_t) l, mycpu);
		printf(" (acquired on cpu 0x%x)\n", l->debug.lock_cpu);
		panic(caller);
	}
	usl_trace(l, mycpu, pc, caller);
	usl_stack_pop(l, mycpu);

	l->debug.unlock_thread = l->debug.lock_thread;
	l->debug.lock_thread = INVALID_THREAD;
	l->debug.state &= ~USLOCK_TAKEN;
	l->debug.unlock_pc = pc;
	l->debug.unlock_cpu = mycpu;
}
/*
 * Debug checks on a usimple_lock just before
 * attempting to acquire it.
 *
 * Preemption isn't guaranteed to be disabled.
 */
void
usld_lock_try_pre(
	usimple_lock_t	l,
	pc_t		pc)
{
	char	*caller = "usimple_lock_try";

	if (!usld_lock_common_checks(l, caller))
		return;
	mp_disable_preemption();
	usl_trace(l, cpu_number(), pc, caller);
	mp_enable_preemption();
}
/*
 * Debug checks on a usimple_lock just after
 * successfully attempting to acquire it.
 *
 * Preemption has been disabled by the
 * lock acquisition attempt, so it's safe
 * to use cpu_number.
 */
void
usld_lock_try_post(
	usimple_lock_t	l,
	pc_t		pc)
{
	register int	mycpu;
	char	*caller = "successful usimple_lock_try";

	if (!usld_lock_common_checks(l, caller))
		return;

	if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
		panic("%s:  lock 0x%x became uninitialized",
		      caller, (integer_t) l);
	if ((l->debug.state & USLOCK_TAKEN))
		panic("%s:  lock 0x%x became TAKEN by someone else",
		      caller, (integer_t) l);

	mycpu = cpu_number();
	l->debug.lock_thread = (void *) current_thread();
	l->debug.state |= USLOCK_TAKEN;
	l->debug.lock_pc = pc;
	l->debug.lock_cpu = mycpu;

#if 0
	printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n",	/* (TEST/DEBUG) */
		l->debug.lock_pc,
		l->debug.lock_thread,
		l->debug.state,
		l->debug.lock_cpu,
		l->debug.unlock_pc,
		l->debug.unlock_cpu,
		l->debug.unlock_thread,
		caller);
#endif

	usl_stack_push(l, mycpu);
	usl_trace(l, mycpu, pc, caller);
}
/*
 * Determine whether the lock in question is owned
 * by the current thread.
 */
void
usld_lock_held(
	usimple_lock_t	l)
{
	char	*caller = "usimple_lock_held";

#if 0
	printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n",	/* (TEST/DEBUG) */
		l->debug.lock_pc,
		l->debug.lock_thread,
		l->debug.state,
		l->debug.lock_cpu,
		l->debug.unlock_pc,
		l->debug.unlock_cpu,
		l->debug.unlock_thread,
		caller);
#endif

	if (!usld_lock_common_checks(l, caller))
		return;

	if (!(l->debug.state & USLOCK_TAKEN))
		panic("%s:  lock 0x%x hasn't been taken",
		      caller, (integer_t) l);
	if (l->debug.lock_thread != (void *) current_thread())
		panic("%s:  lock 0x%x is owned by thread 0x%x", caller,
		      (integer_t) l, (integer_t) l->debug.lock_thread);

	/*
	 * The usimple_lock is active, so preemption
	 * is disabled and the current cpu should
	 * match the one recorded at lock acquisition time.
	 */
	if (l->debug.lock_cpu != cpu_number())
		panic("%s:  current cpu 0x%x isn't acquiring cpu 0x%x",
		      caller, cpu_number(), (integer_t) l->debug.lock_cpu);
}
/*
 * Per-cpu stack of currently active usimple_locks.
 * Requires spl protection so that interrupt-level
 * locks plug-n-play with their thread-context friends.
 */
#define	USLOCK_STACK_DEPTH	20
usimple_lock_t	uslock_stack[NCPUS][USLOCK_STACK_DEPTH];
unsigned int	uslock_stack_index[NCPUS];
boolean_t	uslock_stack_enabled = FALSE;
/*
 * Record a usimple_lock just acquired on
 * the current processor.
 *
 * Preemption has been disabled by lock
 * acquisition, so it's safe to use the cpu number
 * specified by the caller.
 */
void
usl_stack_push(
	usimple_lock_t	l,
	int		mycpu)
{
	boolean_t	s;

	if (uslock_stack_enabled == FALSE)
		return;

	DISABLE_INTERRUPTS(s);
	assert(uslock_stack_index[mycpu] >= 0);
	assert(uslock_stack_index[mycpu] < USLOCK_STACK_DEPTH);
	if (uslock_stack_index[mycpu] >= USLOCK_STACK_DEPTH) {
		printf("usl_stack_push (cpu 0x%x):  too many locks (%d)",
		       mycpu, uslock_stack_index[mycpu]);
		printf(" disabling stacks\n");
		uslock_stack_enabled = FALSE;
		ENABLE_INTERRUPTS(s);
		return;
	}
	uslock_stack[mycpu][uslock_stack_index[mycpu]] = l;
	uslock_stack_index[mycpu]++;
	ENABLE_INTERRUPTS(s);
}
/*
 * Eliminate the entry for a usimple_lock
 * that had been active on the current processor.
 *
 * Preemption has been disabled by lock
 * acquisition, and we haven't yet actually
 * released the hardware lock associated with
 * this usimple_lock, so it's safe to use the
 * cpu number supplied by the caller.
 */
void
usl_stack_pop(
	usimple_lock_t	l,
	int		mycpu)
{
	unsigned int	i, index;
	boolean_t	s;

	if (uslock_stack_enabled == FALSE)
		return;

	DISABLE_INTERRUPTS(s);
	assert(uslock_stack_index[mycpu] > 0);
	assert(uslock_stack_index[mycpu] <= USLOCK_STACK_DEPTH);
	if (uslock_stack_index[mycpu] == 0) {
		printf("usl_stack_pop (cpu 0x%x):  not enough locks (%d)",
		       mycpu, uslock_stack_index[mycpu]);
		printf(" disabling stacks\n");
		uslock_stack_enabled = FALSE;
		ENABLE_INTERRUPTS(s);
		return;
	}
	index = --uslock_stack_index[mycpu];
	for (i = 0; i <= index; ++i) {
		if (uslock_stack[mycpu][i] == l) {
			if (i != index)
				uslock_stack[mycpu][i] =
					uslock_stack[mycpu][index];
			ENABLE_INTERRUPTS(s);
			return;
		}
	}
	ENABLE_INTERRUPTS(s);
	panic("usl_stack_pop:  can't find usimple_lock 0x%x", l);
}
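/*
 * Illustrative example (not in the original source): if cpu 0's stack
 * holds [A, B, C] and B is released first, the loop finds B at slot 1
 * and copies C (the former top entry) into it, leaving [A, C].
 * Out-of-order releases are thus handled without shifting the array.
 */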
/*
 * Determine whether any usimple_locks are currently held.
 *
 * Caller's preemption state is uncertain.  If
 * preemption has been disabled, this check is accurate.
 * Otherwise, this check is just a guess.  We do the best
 * we can by disabling scheduler interrupts, so at least
 * the check is accurate w.r.t. whatever cpu we're running
 * on while in this routine.
 */
void
usld_lock_none_held()
{
	register int	mycpu;
	boolean_t	s;
	unsigned int	locks_held;
	char		*caller = "usimple_lock_none_held";

	DISABLE_INTERRUPTS(s);
	mp_disable_preemption();
	mycpu = cpu_number();
	locks_held = uslock_stack_index[mycpu];
	mp_enable_preemption();
	ENABLE_INTERRUPTS(s);

	if (locks_held > 0)
		panic("%s:  no locks should be held (0x%x locks held)",
		      caller, (integer_t) locks_held);
}
/*
 * For very special cases, set traced_lock to point to a
 * specific lock of interest.  The result is a series of
 * XPRs showing lock operations on that lock.  The lock_seq
 * value is used to show the order of those operations.
 */
usimple_lock_t		traced_lock;
unsigned int		lock_seq;

void
usl_trace(
	usimple_lock_t	l,
	int		mycpu,
	pc_t		pc,
	const char *	op_name)
{
	if (traced_lock == l) {
		XPR(XPR_SLOCK,
		    "seq %d, cpu %d, %s @ %x\n",
		    (integer_t) lock_seq, (integer_t) mycpu,
		    (integer_t) op_name, (integer_t) pc, 0);
		lock_seq++;
	}
}
#if	MACH_KDB
#define printf	kdbprintf
void	db_show_all_slocks(void);
void
db_show_all_slocks(void)
{
	unsigned int	i, index;
	int		mycpu = cpu_number();
	usimple_lock_t	l;

	if (uslock_stack_enabled == FALSE) {
		printf("Lock stack not enabled\n");
		return;
	}

	if (!mach_slocks_init)
		iprintf("WARNING: simple locks stack may not be accurate\n");
	assert(uslock_stack_index[mycpu] >= 0);
	assert(uslock_stack_index[mycpu] <= USLOCK_STACK_DEPTH);
	index = uslock_stack_index[mycpu];
	for (i = 0; i < index; ++i) {
		l = uslock_stack[mycpu][i];
		iprintf("%d: ", i);
		db_printsym((vm_offset_t)l, DB_STGY_ANY);
		if (l->debug.lock_pc != INVALID_PC) {
			printf(" locked by ");
			db_printsym((int)l->debug.lock_pc, DB_STGY_PROC);
		}
		printf("\n");
	}
}
#endif	/* MACH_KDB */
#endif	/* USLOCK_DEBUG */
/* #endif USIMPLE_LOCK_CALLS */
/*
 * Routine:	lock_alloc
 * Function:
 *	Allocate a lock for external users who cannot
 *	hard-code the structure definition into their
 *	objects.
 *	For now just use kalloc, but a zone is probably
 *	warranted.
 */
lock_t *
lock_alloc(
	boolean_t	can_sleep,
	etap_event_t	event,
	etap_event_t	i_event)
{
	lock_t		*l;

	if ((l = (lock_t *)kalloc(sizeof(lock_t))) != 0)
		lock_init(l, can_sleep, event, i_event);
	return(l);
}

/*
 * Routine:	lock_free
 * Function:
 *	Free a lock allocated for external users.
 *	For now just use kfree, but a zone is probably
 *	warranted.
 */
void
lock_free(
	lock_t	*l)
{
	kfree((vm_offset_t)l, sizeof(lock_t));
}
/*
 * Routine:	lock_init
 * Function:
 *	Initialize a lock; required before use.
 *	Note that clients declare the "struct lock"
 *	variables and then initialize them, rather
 *	than getting a new one from this module.
 */
void
lock_init(
	lock_t		*l,
	boolean_t	can_sleep,
	etap_event_t	event,
	etap_event_t	i_event)
{
	(void) memset((void *) l, 0, sizeof(lock_t));

#if	ETAP_LOCK_TRACE
	etap_event_table_assign(&l->u.event_table_chain, event);
	l->u.s.start_list = SD_ENTRY_NULL;
#endif	/* ETAP_LOCK_TRACE */

	simple_lock_init(&l->interlock, i_event);
	l->want_write = FALSE;
	l->want_upgrade = FALSE;
	l->read_count = 0;
	l->can_sleep = can_sleep;

#if	ETAP_LOCK_ACCUMULATE
	l->cbuff_write = etap_cbuff_reserve(lock_event_table(l));
	if (l->cbuff_write != CBUFF_ENTRY_NULL) {
		l->cbuff_write->event = event;
		l->cbuff_write->instance = (unsigned long) l;
		l->cbuff_write->kind = WRITE_LOCK;
	}
	l->cbuff_read = CBUFF_ENTRY_NULL;
#endif	/* ETAP_LOCK_ACCUMULATE */
}
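/*
 * Illustrative usage (sketch, not in the original file; the event
 * names are placeholders):
 *
 *	struct lock	map_lock;
 *	...
 *	lock_init(&map_lock, TRUE, ETAP_SOME_EVENT, ETAP_SOME_EVENT_I);
 *	lock_read(&map_lock);
 *	...shared section...
 *	lock_done(&map_lock);
 */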
/*
 * Sleep locks.  These use the same data structure and algorithm
 * as the spin locks, but the process sleeps while it is waiting
 * for the lock.  These work on uniprocessor systems.
 */

#define	DECREMENTER_TIMEOUT	1000000
void
lock_write(
	register lock_t	* l)
{
	register int	   i;
	start_data_node_t  entry = {0};
	boolean_t	   lock_miss = FALSE;
	unsigned short	   dynamic = 0;
	unsigned short	   trace = 0;
	etap_time_t	   total_time;
	etap_time_t	   stop_wait_time;
	pc_t		   pc;
#if	MACH_LDEBUG
	int		   decrementer;
#endif	/* MACH_LDEBUG */

	ETAP_STAMP(lock_event_table(l), trace, dynamic);
	ETAP_CREATE_ENTRY(entry, trace);
	MON_ASSIGN_PC(entry->start_pc, pc, trace);

	simple_lock(&l->interlock);

	/*
	 * Link the new start_list entry
	 */
	ETAP_LINK_ENTRY(l, entry, trace);

#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */

	/*
	 * Try to acquire the want_write bit.
	 */
	while (l->want_write) {
		if (!lock_miss) {
			ETAP_CONTENTION_TIMESTAMP(entry, trace);
			lock_miss = TRUE;
		}

		i = lock_wait_time[l->can_sleep ? 1 : 0];
		if (i != 0) {
			simple_unlock(&l->interlock);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - want_write");
#endif	/* MACH_LDEBUG */
			while (--i != 0 && l->want_write)
				continue;
			simple_lock(&l->interlock);
		}

		if (l->can_sleep && l->want_write) {
			l->waiting = TRUE;
			ETAP_SET_REASON(current_thread(),
					BLOCKED_ON_COMPLEX_LOCK);
			thread_sleep_simple_lock((event_t) l,
					simple_lock_addr(l->interlock),
					THREAD_UNINT);
			/* interlock relocked */
		}
	}
	l->want_write = TRUE;

	/* Wait for readers (and upgrades) to finish */

#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */
	while ((l->read_count != 0) || l->want_upgrade) {
		if (!lock_miss) {
			ETAP_CONTENTION_TIMESTAMP(entry, trace);
			lock_miss = TRUE;
		}

		i = lock_wait_time[l->can_sleep ? 1 : 0];
		if (i != 0) {
			simple_unlock(&l->interlock);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - wait for readers");
#endif	/* MACH_LDEBUG */
			while (--i != 0 && (l->read_count != 0 ||
					    l->want_upgrade))
				continue;
			simple_lock(&l->interlock);
		}

		if (l->can_sleep && (l->read_count != 0 || l->want_upgrade)) {
			l->waiting = TRUE;
			ETAP_SET_REASON(current_thread(),
					BLOCKED_ON_COMPLEX_LOCK);
			thread_sleep_simple_lock((event_t) l,
					simple_lock_addr(l->interlock),
					THREAD_UNINT);
			/* interlock relocked */
		}
	}

	/*
	 * do not collect wait data if either the lock
	 * was free or no wait traces are enabled.
	 */
	if (lock_miss && ETAP_CONTENTION_ENABLED(trace)) {
		ETAP_TIMESTAMP(stop_wait_time);
		ETAP_TOTAL_TIME(total_time,
				stop_wait_time,
				entry->start_wait_time);
		CUM_WAIT_ACCUMULATE(l->cbuff_write, total_time, dynamic, trace);
		MON_DATA_COLLECT(l,
				 entry,
				 total_time,
				 WRITE_LOCK,
				 MON_CONTENTION,
				 trace);
	}

	simple_unlock(&l->interlock);

	/*
	 * Set start hold time if some type of hold tracing is enabled.
	 *
	 * Note: if the stop_wait_time was already stamped, use
	 *       it as the start_hold_time instead of doing an
	 *       expensive bus access.
	 */
	if (lock_miss && ETAP_CONTENTION_ENABLED(trace))
		ETAP_COPY_START_HOLD_TIME(entry, stop_wait_time, trace);
	else
		ETAP_DURATION_TIMESTAMP(entry, trace);
}
void
lock_done(
	register lock_t	* l)
{
	boolean_t	   do_wakeup = FALSE;
	start_data_node_t  entry;
	unsigned short	   dynamic = 0;
	unsigned short	   trace = 0;
	etap_time_t	   stop_hold_time;
	etap_time_t	   total_time;
	unsigned long	   lock_kind;
	pc_t		   pc;

	ETAP_STAMP(lock_event_table(l), trace, dynamic);

	simple_lock(&l->interlock);

	if (l->read_count != 0) {
		l->read_count--;
		lock_kind = READ_LOCK;
	}
	else
	if (l->want_upgrade) {
		l->want_upgrade = FALSE;
		lock_kind = WRITE_LOCK;
	}
	else {
		l->want_write = FALSE;
		lock_kind = WRITE_LOCK;
	}

	/*
	 * There is no reason to wakeup a waiting thread
	 * if the read-count is non-zero.  Consider:
	 *	we must be dropping a read lock
	 *	threads are waiting only if one wants a write lock
	 *	if there are still readers, they can't proceed
	 */
	if (l->waiting && (l->read_count == 0)) {
		l->waiting = FALSE;
		do_wakeup = TRUE;
	}
	/*
	 * Collect hold data if hold tracing is enabled.
	 *
	 * NOTE: All complex locks whose tracing was on when the
	 *	 lock was acquired will have an entry in the start_data
	 *	 list.
	 */
	ETAP_UNLINK_ENTRY(l, entry);
	if (ETAP_DURATION_ENABLED(trace) && entry != SD_ENTRY_NULL) {
		ETAP_TIMESTAMP(stop_hold_time);
		ETAP_TOTAL_TIME(total_time,
				stop_hold_time,
				entry->start_hold_time);
		if (lock_kind & WRITE_LOCK)
			CUM_HOLD_ACCUMULATE(l->cbuff_write,
					    total_time,
					    dynamic,
					    trace);
		else {
			CUM_READ_ENTRY_RESERVE(l, l->cbuff_read, trace);
			CUM_HOLD_ACCUMULATE(l->cbuff_read,
					    total_time,
					    dynamic,
					    trace);
		}
		MON_ASSIGN_PC(entry->end_pc, pc, trace);
		MON_DATA_COLLECT(l, entry,
				 total_time,
				 lock_kind,
				 MON_DURATION,
				 trace);
	}

	simple_unlock(&l->interlock);

	ETAP_DESTROY_ENTRY(entry);

	if (do_wakeup)
		thread_wakeup((event_t) l);
}
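/*
 * Worked example of the wakeup test above (illustrative): three
 * readers hold the lock while a writer waits.  The first two
 * lock_done() calls leave read_count > 0, so no wakeup is issued;
 * only the third, which brings read_count to zero, wakes the writer.
 */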
void
lock_read(
	register lock_t	* l)
{
	register int	   i;
	start_data_node_t  entry = {0};
	boolean_t	   lock_miss = FALSE;
	unsigned short	   dynamic = 0;
	unsigned short	   trace = 0;
	etap_time_t	   total_time;
	etap_time_t	   stop_wait_time;
	pc_t		   pc;
#if	MACH_LDEBUG
	int		   decrementer;
#endif	/* MACH_LDEBUG */

	ETAP_STAMP(lock_event_table(l), trace, dynamic);
	ETAP_CREATE_ENTRY(entry, trace);
	MON_ASSIGN_PC(entry->start_pc, pc, trace);

	simple_lock(&l->interlock);

	/*
	 * Link the new start_list entry
	 */
	ETAP_LINK_ENTRY(l, entry, trace);

#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */
	while (l->want_write || l->want_upgrade) {
		if (!lock_miss) {
			ETAP_CONTENTION_TIMESTAMP(entry, trace);
			lock_miss = TRUE;
		}

		i = lock_wait_time[l->can_sleep ? 1 : 0];
		if (i != 0) {
			simple_unlock(&l->interlock);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - wait no writers");
#endif	/* MACH_LDEBUG */
			while (--i != 0 && (l->want_write || l->want_upgrade))
				continue;
			simple_lock(&l->interlock);
		}

		if (l->can_sleep && (l->want_write || l->want_upgrade)) {
			l->waiting = TRUE;
			thread_sleep_simple_lock((event_t) l,
					simple_lock_addr(l->interlock),
					THREAD_UNINT);
			/* interlock relocked */
		}
	}

	l->read_count++;

	/*
	 * Do not collect wait data if the lock was free
	 * or if no wait traces are enabled.
	 */
	if (lock_miss && ETAP_CONTENTION_ENABLED(trace)) {
		ETAP_TIMESTAMP(stop_wait_time);
		ETAP_TOTAL_TIME(total_time,
				stop_wait_time,
				entry->start_wait_time);
		CUM_READ_ENTRY_RESERVE(l, l->cbuff_read, trace);
		CUM_WAIT_ACCUMULATE(l->cbuff_read, total_time, dynamic, trace);
		MON_DATA_COLLECT(l,
				 entry,
				 total_time,
				 READ_LOCK,
				 MON_CONTENTION,
				 trace);
	}

	simple_unlock(&l->interlock);

	/*
	 * Set start hold time if some type of hold tracing is enabled.
	 *
	 * Note: if the stop_wait_time was already stamped, use
	 *	 it instead of doing an expensive bus access.
	 */
	if (lock_miss && ETAP_CONTENTION_ENABLED(trace))
		ETAP_COPY_START_HOLD_TIME(entry, stop_wait_time, trace);
	else
		ETAP_DURATION_TIMESTAMP(entry, trace);
}
/*
 * Routine:	lock_read_to_write
 * Function:
 *	Improves a read-only lock to one with
 *	write permission.  If another reader has
 *	already requested an upgrade to a write lock,
 *	no lock is held upon return.
 *
 *	Returns TRUE if the upgrade *failed*.
 */
boolean_t
lock_read_to_write(
	register lock_t	* l)
{
	register int	   i;
	boolean_t	   do_wakeup = FALSE;
	start_data_node_t  entry = {0};
	boolean_t	   lock_miss = FALSE;
	unsigned short	   dynamic = 0;
	unsigned short	   trace = 0;
	etap_time_t	   total_time;
	etap_time_t	   stop_time;
	pc_t		   pc;
#if	MACH_LDEBUG
	int		   decrementer;
#endif	/* MACH_LDEBUG */

	ETAP_STAMP(lock_event_table(l), trace, dynamic);

	simple_lock(&l->interlock);

	l->read_count--;

	/*
	 * Since the read lock is lost whether the write lock
	 * is acquired or not, read hold data is collected here.
	 * This, of course, is assuming some type of hold
	 * tracing is enabled.
	 *
	 * Note: trace is set to zero if the entry does not exist.
	 */
	ETAP_FIND_ENTRY(l, entry, trace);

	if (ETAP_DURATION_ENABLED(trace)) {
		ETAP_TIMESTAMP(stop_time);
		ETAP_TOTAL_TIME(total_time, stop_time, entry->start_hold_time);
		CUM_HOLD_ACCUMULATE(l->cbuff_read, total_time, dynamic, trace);
		MON_ASSIGN_PC(entry->end_pc, pc, trace);
		MON_DATA_COLLECT(l,
				 entry,
				 total_time,
				 READ_LOCK,
				 MON_DURATION,
				 trace);
	}

	if (l->want_upgrade) {
		/*
		 * Someone else has requested upgrade.
		 * Since we've released a read lock, wake
		 * him up.
		 */
		if (l->waiting && (l->read_count == 0)) {
			l->waiting = FALSE;
			do_wakeup = TRUE;
		}

		ETAP_UNLINK_ENTRY(l, entry);
		simple_unlock(&l->interlock);
		ETAP_DESTROY_ENTRY(entry);

		if (do_wakeup)
			thread_wakeup((event_t) l);
		return (TRUE);
	}

	l->want_upgrade = TRUE;

	MON_ASSIGN_PC(entry->start_pc, pc, trace);

#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */
	while (l->read_count != 0) {
		if (!lock_miss) {
			ETAP_CONTENTION_TIMESTAMP(entry, trace);
			lock_miss = TRUE;
		}

		i = lock_wait_time[l->can_sleep ? 1 : 0];
		if (i != 0) {
			simple_unlock(&l->interlock);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - read_count");
#endif	/* MACH_LDEBUG */
			while (--i != 0 && l->read_count != 0)
				continue;
			simple_lock(&l->interlock);
		}

		if (l->can_sleep && l->read_count != 0) {
			l->waiting = TRUE;
			thread_sleep_simple_lock((event_t) l,
					simple_lock_addr(l->interlock),
					THREAD_UNINT);
			/* interlock relocked */
		}
	}

	/*
	 * do not collect wait data if the lock was free
	 * or if no wait traces are enabled.
	 */
	if (lock_miss && ETAP_CONTENTION_ENABLED(trace)) {
		ETAP_TIMESTAMP (stop_time);
		ETAP_TOTAL_TIME(total_time, stop_time, entry->start_wait_time);
		CUM_WAIT_ACCUMULATE(l->cbuff_write, total_time, dynamic, trace);
		MON_DATA_COLLECT(l,
				 entry,
				 total_time,
				 WRITE_LOCK,
				 MON_CONTENTION,
				 trace);
	}

	simple_unlock(&l->interlock);

	/*
	 * Set start hold time if some type of hold tracing is enabled
	 *
	 * Note: if the stop_time was already stamped, use
	 *	 it as the new start_hold_time instead of doing
	 *	 an expensive VME access.
	 */
	if (lock_miss && ETAP_CONTENTION_ENABLED(trace))
		ETAP_COPY_START_HOLD_TIME(entry, stop_time, trace);
	else
		ETAP_DURATION_TIMESTAMP(entry, trace);

	return (FALSE);
}
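/*
 * Caller pattern (illustrative sketch, not in the original file):
 *
 *	lock_read(&l);
 *	...decide we need to modify...
 *	if (lock_read_to_write(&l)) {
 *		// Upgrade failed: the read lock is already gone,
 *		// so reacquire from scratch as a writer.
 *		lock_write(&l);
 *	}
 */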
void
lock_write_to_read(
	register lock_t	* l)
{
	boolean_t	   do_wakeup = FALSE;
	start_data_node_t  entry = {0};
	unsigned short	   dynamic = 0;
	unsigned short	   trace = 0;
	etap_time_t	   stop_hold_time;
	etap_time_t	   total_time;
	pc_t		   pc;

	ETAP_STAMP(lock_event_table(l), trace, dynamic);

	simple_lock(&l->interlock);

	l->read_count++;
	if (l->want_upgrade)
		l->want_upgrade = FALSE;
	else
		l->want_write = FALSE;

	if (l->waiting) {
		l->waiting = FALSE;
		do_wakeup = TRUE;
	}

	/*
	 * Since we are switching from a write lock to a read lock,
	 * the write lock data is stored and the read lock data
	 * collection begins.
	 *
	 * Note: trace is set to zero if the entry does not exist.
	 */
	ETAP_FIND_ENTRY(l, entry, trace);

	if (ETAP_DURATION_ENABLED(trace)) {
		ETAP_TIMESTAMP (stop_hold_time);
		ETAP_TOTAL_TIME(total_time, stop_hold_time, entry->start_hold_time);
		CUM_HOLD_ACCUMULATE(l->cbuff_write, total_time, dynamic, trace);
		MON_ASSIGN_PC(entry->end_pc, pc, trace);
		MON_DATA_COLLECT(l,
				 entry,
				 total_time,
				 WRITE_LOCK,
				 MON_DURATION,
				 trace);
	}

	simple_unlock(&l->interlock);

	/*
	 * Set start hold time if some type of hold tracing is enabled
	 *
	 * Note: if the stop_hold_time was already stamped, use
	 *	 it as the new start_hold_time instead of doing
	 *	 an expensive bus access.
	 */
	if (ETAP_DURATION_ENABLED(trace))
		ETAP_COPY_START_HOLD_TIME(entry, stop_hold_time, trace);
	else
		ETAP_DURATION_TIMESTAMP(entry, trace);

	MON_ASSIGN_PC(entry->start_pc, pc, trace);

	if (do_wakeup)
		thread_wakeup((event_t) l);
}
/*
 * Routine:	lock_try_write
 * Function:
 *	Tries to get a write lock.
 *
 *	Returns FALSE if the lock is not held on return.
 */
boolean_t
lock_try_write(
	register lock_t	* l)
{
	start_data_node_t  entry = {0};
	unsigned short	   trace = 0;
	pc_t		   pc;

	ETAP_STAMP(lock_event_table(l), trace, trace);
	ETAP_CREATE_ENTRY(entry, trace);

	simple_lock(&l->interlock);

	if (l->want_write || l->want_upgrade || l->read_count) {
		/*
		 * Can't get lock.
		 */
		simple_unlock(&l->interlock);
		ETAP_DESTROY_ENTRY(entry);
		return(FALSE);
	}

	/*
	 * Have lock.
	 */

	l->want_write = TRUE;

	ETAP_LINK_ENTRY(l, entry, trace);

	simple_unlock(&l->interlock);

	MON_ASSIGN_PC(entry->start_pc, pc, trace);
	ETAP_DURATION_TIMESTAMP(entry, trace);

	return(TRUE);
}
/*
 * Routine:	lock_try_read
 * Function:
 *	Tries to get a read lock.
 *
 *	Returns FALSE if the lock is not held on return.
 */
boolean_t
lock_try_read(
	register lock_t	* l)
{
	start_data_node_t  entry = {0};
	unsigned short	   trace = 0;
	pc_t		   pc;

	ETAP_STAMP(lock_event_table(l), trace, trace);
	ETAP_CREATE_ENTRY(entry, trace);

	simple_lock(&l->interlock);

	if (l->want_write || l->want_upgrade) {
		simple_unlock(&l->interlock);
		ETAP_DESTROY_ENTRY(entry);
		return(FALSE);
	}

	l->read_count++;

	ETAP_LINK_ENTRY(l, entry, trace);

	simple_unlock(&l->interlock);

	MON_ASSIGN_PC(entry->start_pc, pc, trace);
	ETAP_DURATION_TIMESTAMP(entry, trace);

	return(TRUE);
}
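/*
 * Conditional-acquire pattern shared by both try routines
 * (illustrative sketch, not in the original file):
 *
 *	if (!lock_try_write(&l)) {
 *		...couldn't get it without blocking; do something
 *		   else, or fall back to the blocking lock_write(&l)...
 *	}
 */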
#if	MACH_KDB

void	db_show_one_lock(lock_t *);

void
db_show_one_lock(
	lock_t	*lock)
{
	db_printf("Read_count = 0x%x, %swant_upgrade, %swant_write, ",
		  lock->read_count,
		  lock->want_upgrade ? "" : "!",
		  lock->want_write ? "" : "!");
	db_printf("%swaiting, %scan_sleep\n",
		  lock->waiting ? "" : "!", lock->can_sleep ? "" : "!");
	db_printf("Interlock:\n");
	db_show_one_simple_lock((db_expr_t)simple_lock_addr(lock->interlock),
			TRUE, (db_expr_t)0, (char *)0);
}
#endif	/* MACH_KDB */
/*
 * The C portion of the mutex package.  These routines are only invoked
 * if the optimized assembler routines can't do the work.
 */

/*
 * Routine:	mutex_alloc
 * Function:
 *	Allocate a mutex for external users who cannot
 *	hard-code the structure definition into their
 *	objects.
 *	For now just use kalloc, but a zone is probably
 *	warranted.
 */
mutex_t *
mutex_alloc(
	etap_event_t	event)
{
	mutex_t		*m;

	if ((m = (mutex_t *)kalloc(sizeof(mutex_t))) != 0)
		mutex_init(m, event);
	return(m);
}

/*
 * Routine:	mutex_free
 * Function:
 *	Free a mutex allocated for external users.
 *	For now just use kfree, but a zone is probably
 *	warranted.
 */
void
mutex_free(
	mutex_t	*m)
{
	kfree((vm_offset_t)m, sizeof(mutex_t));
}
/*
 * mutex_lock_wait
 *
 * Invoked in order to wait on contention.
 *
 * Called with the interlock locked and
 * returns it unlocked.
 */
void
mutex_lock_wait (
	mutex_t		*mutex,
	thread_act_t	holder)
{
	thread_t	thread, self = current_thread();
	integer_t	priority;
	spl_t		s = splsched();

	priority = self->last_processor->current_pri;
	if (priority < self->priority)
		priority = self->priority;
	if (priority > MINPRI_KERNEL)
		priority = MINPRI_KERNEL;
	else
	if (priority < BASEPRI_DEFAULT)
		priority = BASEPRI_DEFAULT;

	thread = holder->thread;
	assert(thread->top_act == holder);	/* XXX */
	thread_lock(thread);
	if (mutex->promoted_pri == 0)
		thread->promotions++;
	if (thread->priority < MINPRI_KERNEL) {
		thread->sched_mode |= TH_MODE_PROMOTED;
		if (	mutex->promoted_pri < priority	&&
				thread->sched_pri < priority	) {
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
					thread->sched_pri, priority, (int)thread, (int)mutex, 0);

			set_sched_pri(thread, priority);
		}
	}
	thread_unlock(thread);
	splx(s);

	if (mutex->promoted_pri < priority)
		mutex->promoted_pri = priority;

	if (self->pending_promoter[self->pending_promoter_index] == NULL) {
		self->pending_promoter[self->pending_promoter_index] = mutex;
		mutex->waiters++;
	}
	else
	if (self->pending_promoter[self->pending_promoter_index] != mutex) {
		self->pending_promoter[++self->pending_promoter_index] = mutex;
		mutex->waiters++;
	}

	assert_wait(mutex, THREAD_UNINT);
	interlock_unlock(&mutex->interlock);

	thread_block(THREAD_CONTINUE_NULL);
}
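/*
 * Worked example (illustrative, numbers arbitrary): the holder runs
 * at sched_pri 40 while a waiter's effective priority works out to
 * 60.  mutex_lock_wait() bumps the holder's promotion count, sets
 * TH_MODE_PROMOTED, and raises it to 60 via set_sched_pri(); the
 * promotion is undone in mutex_unlock_wakeup(), so a medium-priority
 * thread cannot starve the waiter (classic priority inversion).
 */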
/*
 * mutex_lock_acquire
 *
 * Invoked on acquiring the mutex when there is
 * contention.
 *
 * Returns the current number of waiters.
 *
 * Called with the interlock locked.
 */
int
mutex_lock_acquire(
	mutex_t		*mutex)
{
	thread_t	thread = current_thread();

	if (thread->pending_promoter[thread->pending_promoter_index] == mutex) {
		thread->pending_promoter[thread->pending_promoter_index] = NULL;
		if (thread->pending_promoter_index > 0)
			thread->pending_promoter_index--;
		mutex->waiters--;
	}

	if (mutex->waiters > 0) {
		integer_t	priority = mutex->promoted_pri;
		spl_t		s = splsched();

		thread_lock(thread);
		thread->promotions++;
		if (thread->priority < MINPRI_KERNEL) {
			thread->sched_mode |= TH_MODE_PROMOTED;
			if (thread->sched_pri < priority) {
				KERNEL_DEBUG_CONSTANT(
					MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
						thread->sched_pri, priority, 0, (int)mutex, 0);

				set_sched_pri(thread, priority);
			}
		}
		thread_unlock(thread);
		splx(s);
	}
	else
		mutex->promoted_pri = 0;

	return (mutex->waiters);
}
/*
 * mutex_unlock_wakeup
 *
 * Invoked on unlock when there is contention.
 *
 * Called with the interlock locked.
 */
void
mutex_unlock_wakeup (
	mutex_t		*mutex,
	thread_act_t	holder)
{
	thread_t	thread = current_thread();

	if (thread->top_act != holder)
		panic("mutex_unlock_wakeup: mutex %x holder %x\n", mutex, holder);

	if (thread->promotions > 0) {
		spl_t	s = splsched();

		thread_lock(thread);
		if (	--thread->promotions == 0		&&
				(thread->sched_mode & TH_MODE_PROMOTED)	) {
			thread->sched_mode &= ~TH_MODE_PROMOTED;
			if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
				KERNEL_DEBUG_CONSTANT(
					MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
						thread->sched_pri, DEPRESSPRI, 0, (int)mutex, 0);

				set_sched_pri(thread, DEPRESSPRI);
			}
			else {
				if (thread->priority < thread->sched_pri) {
					KERNEL_DEBUG_CONSTANT(
						MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) |
									DBG_FUNC_NONE,
							thread->sched_pri, thread->priority,
									0, (int)mutex, 0);
				}

				compute_priority(thread, FALSE);
			}
		}
		thread_unlock(thread);
		splx(s);
	}

	assert(mutex->waiters > 0);
	thread_wakeup_one(mutex);
}
/*
 * mutex_pause: Called by former callers of simple_lock_pause().
 */
void
mutex_pause(void)
{
	wait_result_t	wait_result;

	wait_result = assert_wait_timeout( 1, THREAD_UNINT);
	assert(wait_result == THREAD_WAITING);

	ETAP_SET_REASON(current_thread(), BLOCKED_ON_MUTEX_LOCK);

	wait_result = thread_block(THREAD_CONTINUE_NULL);
	assert(wait_result == THREAD_TIMED_OUT);
}
#if	MACH_KDB
/*
 * Routines to print out simple_locks and mutexes in a nicely-formatted
 * fashion.
 */

char *simple_lock_labels =	"ENTRY    ILK THREAD   DURATION CALLER";
char *mutex_labels =		"ENTRY    LOCKED WAITERS   THREAD CALLER";

void
db_show_one_simple_lock (
	db_expr_t	addr,
	boolean_t	have_addr,
	db_expr_t	count,
	char		* modif)
{
	simple_lock_t	saddr = (simple_lock_t)addr;

	if (saddr == (simple_lock_t)0 || !have_addr) {
		db_error ("No simple_lock\n");
	}
#if	USLOCK_DEBUG
	else if (saddr->lock_type != USLOCK_TAG)
		db_error ("Not a simple_lock\n");
#endif	/* USLOCK_DEBUG */

	db_printf ("%s\n", simple_lock_labels);
	db_print_simple_lock (saddr);
}

void
db_print_simple_lock (
	simple_lock_t	addr)
{
	db_printf ("%08x %3d", addr, *hw_lock_addr(addr->interlock));
#if	USLOCK_DEBUG
	db_printf (" %08x", addr->debug.lock_thread);
	db_printf (" %08x ", addr->debug.duration[1]);
	db_printsym ((int)addr->debug.lock_pc, DB_STGY_ANY);
#endif	/* USLOCK_DEBUG */
	db_printf ("\n");
}

void
db_show_one_mutex (
	db_expr_t	addr,
	boolean_t	have_addr,
	db_expr_t	count,
	char		* modif)
{
	mutex_t		* maddr = (mutex_t *)addr;

	if (maddr == (mutex_t *)0 || !have_addr)
		db_error ("No mutex\n");
#if	MACH_LDEBUG
	else if (maddr->type != MUTEX_TAG)
		db_error ("Not a mutex\n");
#endif	/* MACH_LDEBUG */

	db_printf ("%s\n", mutex_labels);
	db_print_mutex (maddr);
}

void
db_print_mutex (
	mutex_t		* addr)
{
	db_printf ("%08x %6d %7d",
		   addr, *hw_lock_addr(addr->locked), addr->waiters);
#if	MACH_LDEBUG
	db_printf (" %08x ", addr->thread);
	db_printsym (addr->pc, DB_STGY_ANY);
#endif	/* MACH_LDEBUG */
	db_printf ("\n");
}
#endif	/* MACH_KDB */
#if	MACH_LDEBUG
extern void	meter_simple_lock (
			simple_lock_t l);
extern void	meter_simple_unlock (
			simple_lock_t l);
extern void	cyctm05_stamp (
			unsigned long * start);
extern void	cyctm05_diff (
			unsigned long * start,
			unsigned long * end,
			unsigned long * diff);

simple_lock_data_t	loser;

void
meter_simple_lock(
	simple_lock_t	lp)
{
	cyctm05_stamp (lp->duration);
}

int	long_simple_lock_crash;
int	long_simple_lock_time = 0x600;
/*
 * This is pretty gawd-awful.  XXX
 */
decl_simple_lock_data(extern,kd_tty)

void
meter_simple_unlock(
	simple_lock_t	lp)
{
	unsigned long stime[2], etime[2], delta[2];

	if (lp == &kd_tty)			/* XXX */
		return;

	stime[0] = lp->duration[0];
	stime[1] = lp->duration[1];

	cyctm05_stamp (etime);

	if (etime[1] < stime[1])		/* XXX */
		return;

	cyctm05_diff (stime, etime, delta);

	if (delta[1] >= 0x10000)		/* XXX */
		return;

	lp->duration[0] = delta[0];
	lp->duration[1] = delta[1];

	if (loser.duration[1] < lp->duration[1])
		loser = *lp;

	assert (!long_simple_lock_crash || delta[1] < long_simple_lock_time);
}
#endif	/* MACH_LDEBUG */
#if	ETAP_LOCK_TRACE

/*
 * ==============================================================
 * ETAP hook when initializing a usimple_lock.  May be invoked
 * from the portable lock package or from an optimized machine-
 * dependent implementation.
 * ==============================================================
 */

void
etap_simplelock_init (
	simple_lock_t	l,
	etap_event_t	event)
{
	ETAP_CLEAR_TRACE_DATA(l);
	etap_event_table_assign(&l->u.event_table_chain, event);

#if	ETAP_LOCK_ACCUMULATE
	/* reserve an entry in the cumulative buffer */
	l->cbuff_entry = etap_cbuff_reserve(lock_event_table(l));
	/* initialize the entry if one was returned */
	if (l->cbuff_entry != CBUFF_ENTRY_NULL) {
		l->cbuff_entry->event = event;
		l->cbuff_entry->instance = (unsigned long) l;
		l->cbuff_entry->kind = SPIN_LOCK;
	}
#endif	/* ETAP_LOCK_ACCUMULATE */
}
void
etap_simplelock_unlock(
	simple_lock_t	l)
{
	unsigned short	dynamic = 0;
	unsigned short	trace = 0;
	etap_time_t	total_time;
	etap_time_t	stop_hold_time;
	pc_t		pc;

	OBTAIN_PC(pc, l);
	ETAP_STAMP(lock_event_table(l), trace, dynamic);

	/*
	 * Calculate & collect hold time data only if
	 * the hold tracing was enabled throughout the
	 * whole operation.  This prevents collection of
	 * bogus data caused by mid-operation trace changes.
	 */

	if (ETAP_DURATION_ENABLED(trace) && ETAP_WHOLE_OP(l)) {
		ETAP_TIMESTAMP (stop_hold_time);
		ETAP_TOTAL_TIME(total_time, stop_hold_time,
			l->u.s.start_hold_time);
		CUM_HOLD_ACCUMULATE(l->cbuff_entry, total_time, dynamic, trace);
		MON_ASSIGN_PC(l->end_pc, pc, trace);
		MON_DATA_COLLECT(l,
				 &l->u.s,
				 total_time,
				 SPIN_LOCK,
				 MON_DURATION,
				 trace);
	}
	ETAP_CLEAR_TRACE_DATA(l);
}
/* ========================================================================
 * Since the simple_lock() routine is machine dependent, it must always
 * be coded in assembly.  The two hook routines below are used to collect
 * lock statistics.
 * ========================================================================
 */

/*
 * ROUTINE:	etap_simplelock_miss()
 *
 * FUNCTION:	This spin lock routine is called upon the first
 *		spin (miss) of the lock.
 *
 *		A timestamp is taken at the beginning of the wait period,
 *		if wait tracing is enabled.
 *
 * PARAMETERS:
 *	- lock address.
 *	- timestamp address.
 *
 * RETURNS:	Wait timestamp value.  The timestamp value is later used
 *		by etap_simplelock_hold().
 *
 * NOTES:	This routine is NOT ALWAYS called.  The lock may be free
 *		(never spinning).  For this reason the pc is collected in
 *		etap_simplelock_hold().
 *
 */
etap_time_t
etap_simplelock_miss (
	simple_lock_t	l)
{
	unsigned short	trace = 0;
	unsigned short	dynamic = 0;
	etap_time_t	start_miss_time;

	ETAP_STAMP(lock_event_table(l), trace, dynamic);

	if (trace & ETAP_CONTENTION)
		ETAP_TIMESTAMP(start_miss_time);
	else
		ETAP_TIME_CLEAR(start_miss_time);

	return(start_miss_time);
}
/*
 * ROUTINE:	etap_simplelock_hold()
 *
 * FUNCTION:	This spin lock routine is ALWAYS called once the lock
 *		is acquired.  Here, the contention time is calculated and
 *		the start hold time is stamped.
 *
 * PARAMETERS:
 *	- lock address.
 *	- PC of the calling function.
 *	- start wait timestamp.
 *
 */
void
etap_simplelock_hold (
	simple_lock_t	l,
	pc_t		pc,
	etap_time_t	start_hold_time)
{
	unsigned short	dynamic = 0;
	unsigned short	trace = 0;
	etap_time_t	total_time;
	etap_time_t	stop_hold_time;

	ETAP_STAMP(lock_event_table(l), trace, dynamic);

	MON_ASSIGN_PC(l->start_pc, pc, trace);

	/* do not collect wait data if lock was free */
	if (!ETAP_TIME_IS_ZERO(start_hold_time) && (trace & ETAP_CONTENTION)) {
		ETAP_TIMESTAMP(stop_hold_time);
		ETAP_TOTAL_TIME(total_time,
				stop_hold_time,
				start_hold_time);
		CUM_WAIT_ACCUMULATE(l->cbuff_entry, total_time, dynamic, trace);
		MON_DATA_COLLECT(l,
				 &l->u.s,
				 total_time,
				 SPIN_LOCK,
				 MON_CONTENTION,
				 trace);
		ETAP_COPY_START_HOLD_TIME(&l->u.s, stop_hold_time, trace);
	}
	else
		ETAP_DURATION_TIMESTAMP(&l->u.s, trace);
}
void
etap_mutex_init (
	mutex_t		*l,
	etap_event_t	event)
{
	ETAP_CLEAR_TRACE_DATA(l);
	etap_event_table_assign(&l->u.event_table_chain, event);

#if	ETAP_LOCK_ACCUMULATE
	/* reserve an entry in the cumulative buffer */
	l->cbuff_entry = etap_cbuff_reserve(lock_event_table(l));
	/* initialize the entry if one was returned */
	if (l->cbuff_entry != CBUFF_ENTRY_NULL) {
		l->cbuff_entry->event = event;
		l->cbuff_entry->instance = (unsigned long) l;
		l->cbuff_entry->kind = MUTEX_LOCK;
	}
#endif	/* ETAP_LOCK_ACCUMULATE */
}
etap_time_t
etap_mutex_miss (
	mutex_t		*l)
{
	unsigned short	trace = 0;
	unsigned short	dynamic = 0;
	etap_time_t	start_miss_time;

	ETAP_STAMP(lock_event_table(l), trace, dynamic);

	if (trace & ETAP_CONTENTION)
		ETAP_TIMESTAMP(start_miss_time);
	else
		ETAP_TIME_CLEAR(start_miss_time);

	return(start_miss_time);
}
void
etap_mutex_hold (
	mutex_t		*l,
	pc_t		pc,
	etap_time_t	start_hold_time)
{
	unsigned short	dynamic = 0;
	unsigned short	trace = 0;
	etap_time_t	total_time;
	etap_time_t	stop_hold_time;

	ETAP_STAMP(lock_event_table(l), trace, dynamic);

	MON_ASSIGN_PC(l->start_pc, pc, trace);

	/* do not collect wait data if lock was free */
	if (!ETAP_TIME_IS_ZERO(start_hold_time) && (trace & ETAP_CONTENTION)) {
		ETAP_TIMESTAMP(stop_hold_time);
		ETAP_TOTAL_TIME(total_time,
				stop_hold_time,
				start_hold_time);
		CUM_WAIT_ACCUMULATE(l->cbuff_entry, total_time, dynamic, trace);
		MON_DATA_COLLECT(l,
				 &l->u.s,
				 total_time,
				 MUTEX_LOCK,
				 MON_CONTENTION,
				 trace);
		ETAP_COPY_START_HOLD_TIME(&l->u.s, stop_hold_time, trace);
	}
	else
		ETAP_DURATION_TIMESTAMP(&l->u.s, trace);
}
void
etap_mutex_unlock (
	mutex_t		*l)
{
	unsigned short	dynamic = 0;
	unsigned short	trace = 0;
	etap_time_t	total_time;
	etap_time_t	stop_hold_time;
	pc_t		pc;

	OBTAIN_PC(pc, l);
	ETAP_STAMP(lock_event_table(l), trace, dynamic);

	/*
	 * Calculate & collect hold time data only if
	 * the hold tracing was enabled throughout the
	 * whole operation.  This prevents collection of
	 * bogus data caused by mid-operation trace changes.
	 */

	if (ETAP_DURATION_ENABLED(trace) && ETAP_WHOLE_OP(l)) {
		ETAP_TIMESTAMP(stop_hold_time);
		ETAP_TOTAL_TIME(total_time, stop_hold_time,
			l->u.s.start_hold_time);
		CUM_HOLD_ACCUMULATE(l->cbuff_entry, total_time, dynamic, trace);
		MON_ASSIGN_PC(l->end_pc, pc, trace);
		MON_DATA_COLLECT(l,
				 &l->u.s,
				 total_time,
				 MUTEX_LOCK,
				 MON_DURATION,
				 trace);
	}
	ETAP_CLEAR_TRACE_DATA(l);
}
#endif	/* ETAP_LOCK_TRACE */