/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Locking primitives implementation
 */
#include <mach_ldebug.h>

#include <kern/lock.h>
#include <kern/etap_macros.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>

#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_print.h>

#include <ppc/Firmware.h>
#include <ppc/POWERMAC/mp/MPPlugIn.h>
#define	ANY_LOCK_DEBUG	(USLOCK_DEBUG || LOCK_DEBUG || MUTEX_DEBUG)

/*
 *	Some portions of the lock debugging code must run with
 *	interrupts disabled.  This can be machine-dependent,
 *	but we don't have any good hooks for that at the moment.
 *	If your architecture is different, add a machine-dependent
 *	ifdef here for these macros.		XXX
 */

#define	DISABLE_INTERRUPTS(s)	s = ml_set_interrupts_enabled(FALSE)
#define	ENABLE_INTERRUPTS(s)	(void)ml_set_interrupts_enabled(s)
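/*
 * A minimal usage sketch (illustrative, not part of the original file),
 * following the pattern used by usl_stack_push()/usl_stack_pop() below:
 * the variable handed to DISABLE_INTERRUPTS() receives the previous
 * interrupt-enable state so ENABLE_INTERRUPTS() can restore it.
 */
#if 0	/* example only */
	{
		boolean_t	s;

		DISABLE_INTERRUPTS(s);	/* remember old state, disable interrupts */
		/* ... examine or update per-cpu lock-debug state ... */
		ENABLE_INTERRUPTS(s);	/* restore the remembered state */
	}
#endif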
#if	NCPUS > 1

/* Time we loop without holding the interlock.
 * The former is for when we cannot sleep, the latter
 * for when our thread can go to sleep (loop less).
 * If we cannot go to sleep, we should not retake the
 * interlock too frequently, since doing so interferes
 * with the other processors.  In particular, 100 is too
 * small a number for powerpc MP systems because of cache
 * coherency issues and differing lock fetch times between
 * the processors.
 */
unsigned int lock_wait_time[2] = { (unsigned int)-1, 100 };

#else	/* NCPUS > 1 */

/*
 * It is silly to spin on a uni-processor as if we
 * thought something magical would happen to the
 * want_write bit while we are executing.
 */
unsigned int lock_wait_time[2] = { 0, 0 };

#endif	/* NCPUS > 1 */
#if	MACH_KDB
void	db_print_simple_lock(
			simple_lock_t	addr);
#endif	/* MACH_KDB */

#if	USLOCK_DEBUG
/*
 *	Perform simple lock checks.
 */
int	uslock_check = 1;
int	max_lock_loops = 100000000;
decl_simple_lock_data(extern , printf_lock)
decl_simple_lock_data(extern , panic_lock)
#if	MACH_KDB && NCPUS > 1
decl_simple_lock_data(extern , kdb_lock)
#endif	/* MACH_KDB && NCPUS > 1 */
#endif	/* USLOCK_DEBUG */
/*
 *	We often want to know the addresses of the callers
 *	of the various lock routines.  However, this information
 *	is only used for debugging and statistics.
 */
#define	INVALID_PC	((void *) VM_MAX_KERNEL_ADDRESS)
#define	INVALID_THREAD	((void *) VM_MAX_KERNEL_ADDRESS)
#if	ANY_LOCK_DEBUG || ETAP_LOCK_TRACE
#define	OBTAIN_PC(pc,l)	((pc) = (void *) GET_RETURN_PC(&(l)))
#else	/* ANY_LOCK_DEBUG || ETAP_LOCK_TRACE */
#ifdef	lint
/*
 *	Eliminate lint complaints about unused local pc variables.
 */
#define	OBTAIN_PC(pc,l)	++pc
#else	/* lint */
#define	OBTAIN_PC(pc,l)
#endif	/* lint */
#endif	/* ANY_LOCK_DEBUG || ETAP_LOCK_TRACE */
/* #ifndef	USIMPLE_LOCK_CALLS
 * The i386 production version of usimple_locks isn't ready yet.
 */

/*
 *	Portable lock package implementation of usimple_locks.
 */

#if	ETAP_LOCK_TRACE
#define	ETAPCALL(stmt)	stmt
void		etap_simplelock_init(simple_lock_t, etap_event_t);
void		etap_simplelock_unlock(simple_lock_t);
void		etap_simplelock_hold(simple_lock_t, pc_t, etap_time_t);
etap_time_t	etap_simplelock_miss(simple_lock_t);

void		etap_mutex_init(mutex_t *, etap_event_t);
void		etap_mutex_unlock(mutex_t *);
void		etap_mutex_hold(mutex_t *, pc_t, etap_time_t);
etap_time_t	etap_mutex_miss(mutex_t *);
#else	/* ETAP_LOCK_TRACE */
#define	ETAPCALL(stmt)
#endif	/* ETAP_LOCK_TRACE */
#if	USLOCK_DEBUG
#define	USLDBG(stmt)	stmt
void		usld_lock_init(usimple_lock_t, etap_event_t);
void		usld_lock_pre(usimple_lock_t, pc_t);
void		usld_lock_post(usimple_lock_t, pc_t);
void		usld_unlock(usimple_lock_t, pc_t);
void		usld_lock_try_pre(usimple_lock_t, pc_t);
void		usld_lock_try_post(usimple_lock_t, pc_t);
void		usld_lock_held(usimple_lock_t);
void		usld_lock_none_held(void);
int		usld_lock_common_checks(usimple_lock_t, char *);
#else	/* USLOCK_DEBUG */
#define	USLDBG(stmt)
#endif	/* USLOCK_DEBUG */
/*
 *	Initialize a usimple_lock.
 *
 *	No change in preemption state.
 */
void
usimple_lock_init(
	usimple_lock_t	l,
	etap_event_t	event)
{
	USLDBG(usld_lock_init(l, event));
	ETAPCALL(etap_simplelock_init((l), (event)));
	hw_lock_init(&l->interlock);
}
/*
 *	Acquire a usimple_lock.
 *
 *	Returns with preemption disabled.  Note
 *	that the hw_lock routines are responsible for
 *	maintaining preemption state.
 */
void
usimple_lock(
	usimple_lock_t	l)
{
	pc_t		pc;
#if	ETAP_LOCK_TRACE
	etap_time_t	start_wait_time;
	int		no_miss_info = 0;
#endif	/* ETAP_LOCK_TRACE */
#if	USLOCK_DEBUG
	int		count = 0;
#endif	/* USLOCK_DEBUG */

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_pre(l, pc));
#if	ETAP_LOCK_TRACE
	ETAP_TIME_CLEAR(start_wait_time);
#endif	/* ETAP_LOCK_TRACE */

	if(!hw_lock_to(&l->interlock, LockTimeOut)) {	/* Try to get the lock with a timeout */

		panic("simple lock deadlock detection - l=%08X, cpu=%d, ret=%08X", l, cpu_number(), pc);

		while (!hw_lock_try(&l->interlock)) {
			ETAPCALL(if (no_miss_info++ == 0)
				start_wait_time = etap_simplelock_miss(l));
			while (hw_lock_held(&l->interlock)) {
				/*
				 *	Spin watching the lock value in cache,
				 *	without consuming external bus cycles.
				 *	On most SMP architectures, the atomic
				 *	instruction(s) used by hw_lock_try
				 *	cost much, much more than an ordinary
				 *	memory read.
				 */
#if	USLOCK_DEBUG
				if (count++ > max_lock_loops
#if	MACH_KDB && NCPUS > 1
				    && l != &kdb_lock
#endif	/* MACH_KDB && NCPUS > 1 */
				    ) {
					if (l == &printf_lock) {
						return;
					}
					mp_disable_preemption();
					panic("simple lock deadlock detection - l=%08X (=%08X), cpu=%d, ret=%08X",
					      l, *hw_lock_addr(l->interlock), cpu_number(), pc);
					mp_enable_preemption();
				}
#endif	/* USLOCK_DEBUG */
			}
		}
	}
	ETAPCALL(etap_simplelock_hold(l, pc, start_wait_time));
	USLDBG(usld_lock_post(l, pc));
}
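/*
 * A stand-alone sketch (illustrative, not part of the original file) of the
 * "spin watching the lock value in cache" idiom used above: retry the
 * expensive atomic hw_lock_try() only after the cheap hw_lock_held() read
 * says the lock looks free again.
 */
#if 0	/* example only */
	while (!hw_lock_try(&l->interlock)) {	/* atomic attempt (costly)   */
		while (hw_lock_held(&l->interlock))
			continue;		/* local cached read (cheap) */
	}
#endif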
/*
 *	Release a usimple_lock.
 *
 *	Returns with preemption enabled.  Note
 *	that the hw_lock routines are responsible for
 *	maintaining preemption state.
 */
void
usimple_unlock(
	usimple_lock_t	l)
{
	pc_t	pc;

//	checkNMI();					/* (TEST/DEBUG) */

	OBTAIN_PC(pc, l);
	USLDBG(usld_unlock(l, pc));
	ETAPCALL(etap_simplelock_unlock(l));
	hw_lock_unlock(&l->interlock);
}
/*
 *	Conditionally acquire a usimple_lock.
 *
 *	On success, returns with preemption disabled.
 *	On failure, returns with preemption in the same state
 *	as when first invoked.  Note that the hw_lock routines
 *	are responsible for maintaining preemption state.
 *
 *	XXX No stats are gathered on a miss; I preserved this
 *	behavior from the original assembly-language code, but
 *	doesn't it make sense to log misses?  XXX
 */
unsigned int
usimple_lock_try(
	usimple_lock_t	l)
{
	pc_t		pc;
	unsigned int	success;
	etap_time_t	zero_time;

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_try_pre(l, pc));
	if (success = hw_lock_try(&l->interlock)) {
		USLDBG(usld_lock_try_post(l, pc));
		ETAP_TIME_CLEAR(zero_time);
		ETAPCALL(etap_simplelock_hold(l, pc, zero_time));
	}
	return success;
}
#if	ETAP_LOCK_TRACE
void
simple_lock_no_trace(
	simple_lock_t	l)
{
	pc_t	pc;

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_pre(l, pc));
	while (!hw_lock_try(&l->interlock)) {
		while (hw_lock_held(&l->interlock)) {
			/*
			 *	Spin watching the lock value in cache,
			 *	without consuming external bus cycles.
			 *	On most SMP architectures, the atomic
			 *	instruction(s) used by hw_lock_try
			 *	cost much, much more than an ordinary
			 *	memory read.
			 */
		}
	}
	USLDBG(usld_lock_post(l, pc));
}

void
simple_unlock_no_trace(
	simple_lock_t	l)
{
	pc_t	pc;

	OBTAIN_PC(pc, l);
	USLDBG(usld_unlock(l, pc));
	hw_lock_unlock(&l->interlock);
}

int
simple_lock_try_no_trace(
	simple_lock_t	l)
{
	pc_t		pc;
	unsigned int	success;

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_try_pre(l, pc));
	if (success = hw_lock_try(&l->interlock)) {
		USLDBG(usld_lock_try_post(l, pc));
	}
	return success;
}
#endif	/* ETAP_LOCK_TRACE */
#if	USLOCK_DEBUG
/*
 *	Verify that the lock is locked and owned by
 *	the current thread.
 */
void
usimple_lock_held(
	usimple_lock_t	l)
{
	usld_lock_held(l);
}

/*
 *	Verify that no usimple_locks are held by
 *	this processor.  Typically used in a
 *	trap handler when returning to user mode
 *	or in a path known to relinquish the processor.
 */
void
usimple_lock_none_held(void)
{
	usld_lock_none_held();
}
#endif	/* USLOCK_DEBUG */
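/*
 * Illustrative sketch (not part of the original file): a typical caller of
 * usimple_lock_none_held() is a trap handler verifying, just before it
 * returns to user mode, that no simple locks were leaked.  The return path
 * named below is hypothetical.
 */
#if 0	/* example only */
	/* ... end of trap handling ... */
	usimple_lock_none_held();	/* panics if any lock is still on the per-cpu stack */
	return_to_user();		/* hypothetical return path */
#endif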
/*
 *	States of a usimple_lock.  The default when initializing
 *	a usimple_lock is setting it up for debug checking.
 */
#define	USLOCK_CHECKED		0x0001		/* lock is being checked */
#define	USLOCK_TAKEN		0x0002		/* lock has been taken */
#define	USLOCK_INIT		0xBAA0		/* lock has been initialized */
#define	USLOCK_INITIALIZED	(USLOCK_INIT|USLOCK_CHECKED)
#define	USLOCK_CHECKING(l)	(uslock_check &&			\
				 ((l)->debug.state & USLOCK_CHECKED))
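/*
 * Worked example (not part of the original file): with the values above,
 * USLOCK_INITIALIZED == 0xBAA0 | 0x0001 == 0xBAA1, so a freshly initialized,
 * checked lock has debug.state == 0xBAA1, and the state becomes 0xBAA3
 * (the USLOCK_TAKEN bit 0x0002 set) while the lock is held.
 */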
/*
 *	Maintain a per-cpu stack of acquired usimple_locks.
 */
void	usl_stack_push(usimple_lock_t, int);
void	usl_stack_pop(usimple_lock_t, int);

/*
 *	Trace activities of a particularly interesting lock.
 */
void	usl_trace(usimple_lock_t, int, pc_t, const char *);
/*
 *	Initialize the debugging information contained
 *	in a usimple_lock.
 */
void
usld_lock_init(
	usimple_lock_t	l,
	etap_event_t	type)
{
	if (l == USIMPLE_LOCK_NULL)
		panic("lock initialization: null lock pointer");
	l->lock_type = USLOCK_TAG;
	l->debug.state = uslock_check ? USLOCK_INITIALIZED : 0;
	l->debug.lock_cpu = l->debug.unlock_cpu = 0;
	l->debug.lock_pc = l->debug.unlock_pc = INVALID_PC;
	l->debug.lock_thread = l->debug.unlock_thread = INVALID_THREAD;
	l->debug.duration[0] = l->debug.duration[1] = 0;
	l->debug.unlock_cpu = l->debug.unlock_cpu = 0;
	l->debug.unlock_pc = l->debug.unlock_pc = INVALID_PC;
	l->debug.unlock_thread = l->debug.unlock_thread = INVALID_THREAD;
}
/*
 *	These checks apply to all usimple_locks, not just
 *	those with USLOCK_CHECKED turned on.
 */
int
usld_lock_common_checks(
	usimple_lock_t	l,
	char		*caller)
{
	if (l == USIMPLE_LOCK_NULL)
		panic("%s: null lock pointer", caller);
	if (l->lock_type != USLOCK_TAG)
		panic("%s: 0x%x is not a usimple lock", caller, (integer_t) l);
	if (!(l->debug.state & USLOCK_INIT))
		panic("%s: 0x%x is not an initialized lock",
		      caller, (integer_t) l);
	return USLOCK_CHECKING(l);
}
/*
 *	Debug checks on a usimple_lock just before
 *	attempting to acquire it.
 */
void
usld_lock_pre(
	usimple_lock_t	l,
	pc_t		pc)
{
	char	*caller = "usimple_lock";

	printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n",	/* (TEST/DEBUG) */
		l->debug.lock_thread,
		l->debug.unlock_thread,
		caller);

	if (!usld_lock_common_checks(l, caller))
		return;

	/*
	 *	Note that we have a weird case where we are getting a lock when we are
	 *	in the process of putting the system to sleep.  We are running with no
	 *	current threads, therefore we can't tell if we are trying to retake a lock
	 *	we have or someone on the other processor has it.  Therefore we just
	 *	ignore this test if the locking thread is 0.
	 */
	if ((l->debug.state & USLOCK_TAKEN) && l->debug.lock_thread &&
	    l->debug.lock_thread == (void *) current_thread()) {
		printf("%s: lock 0x%x already locked (at 0x%x) by",
		       caller, (integer_t) l, l->debug.lock_pc);
		printf(" current thread 0x%x (new attempt at pc 0x%x)\n",
		       l->debug.lock_thread, pc);
	}
	mp_disable_preemption();
	usl_trace(l, cpu_number(), pc, caller);
	mp_enable_preemption();
}
/*
 *	Debug checks on a usimple_lock just after acquiring it.
 *
 *	Pre-emption has been disabled at this point,
 *	so we are safe in using cpu_number.
 */
void
usld_lock_post(
	usimple_lock_t	l,
	pc_t		pc)
{
	register int	mycpu;
	char		*caller = "successful usimple_lock";

	printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n",	/* (TEST/DEBUG) */
		l->debug.lock_thread,
		l->debug.unlock_thread,
		caller);

	if (!usld_lock_common_checks(l, caller))
		return;

	if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
		panic("%s: lock 0x%x became uninitialized",
		      caller, (integer_t) l);
	if ((l->debug.state & USLOCK_TAKEN))
		panic("%s: lock 0x%x became TAKEN by someone else",
		      caller, (integer_t) l);

	mycpu = cpu_number();
	l->debug.lock_thread = (void *)current_thread();
	l->debug.state |= USLOCK_TAKEN;
	l->debug.lock_pc = pc;
	l->debug.lock_cpu = mycpu;

	usl_stack_push(l, mycpu);
	usl_trace(l, mycpu, pc, caller);
}
/*
 *	Debug checks on a usimple_lock just before
 *	releasing it.  Note that the caller has not
 *	yet released the hardware lock.
 *
 *	Preemption is still disabled, so there's
 *	no problem using cpu_number.
 */
void
usld_unlock(
	usimple_lock_t	l,
	pc_t		pc)
{
	register int	mycpu;
	char		*caller = "usimple_unlock";

	printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n",	/* (TEST/DEBUG) */
		l->debug.lock_thread,
		l->debug.unlock_thread,
		caller);

	if (!usld_lock_common_checks(l, caller))
		return;

	mycpu = cpu_number();

	if (!(l->debug.state & USLOCK_TAKEN))
		panic("%s: lock 0x%x hasn't been taken",
		      caller, (integer_t) l);
	if (l->debug.lock_thread != (void *) current_thread())
		panic("%s: unlocking lock 0x%x, owned by thread 0x%x",
		      caller, (integer_t) l, l->debug.lock_thread);
	if (l->debug.lock_cpu != mycpu) {
		printf("%s: unlocking lock 0x%x on cpu 0x%x",
		       caller, (integer_t) l, mycpu);
		printf(" (acquired on cpu 0x%x)\n", l->debug.lock_cpu);
	}
	usl_trace(l, mycpu, pc, caller);
	usl_stack_pop(l, mycpu);

	l->debug.unlock_thread = l->debug.lock_thread;
	l->debug.lock_thread = INVALID_THREAD;
	l->debug.state &= ~USLOCK_TAKEN;
	l->debug.unlock_pc = pc;
	l->debug.unlock_cpu = mycpu;
}
/*
 *	Debug checks on a usimple_lock just before
 *	attempting to acquire it.
 *
 *	Preemption isn't guaranteed to be disabled.
 */
void
usld_lock_try_pre(
	usimple_lock_t	l,
	pc_t		pc)
{
	char	*caller = "usimple_lock_try";

	if (!usld_lock_common_checks(l, caller))
		return;
	mp_disable_preemption();
	usl_trace(l, cpu_number(), pc, caller);
	mp_enable_preemption();
}
/*
 *	Debug checks on a usimple_lock just after
 *	successfully attempting to acquire it.
 *
 *	Preemption has been disabled by the
 *	lock acquisition attempt, so it's safe
 *	to use cpu_number.
 */
void
usld_lock_try_post(
	usimple_lock_t	l,
	pc_t		pc)
{
	register int	mycpu;
	char		*caller = "successful usimple_lock_try";

	if (!usld_lock_common_checks(l, caller))
		return;

	if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
		panic("%s: lock 0x%x became uninitialized",
		      caller, (integer_t) l);
	if ((l->debug.state & USLOCK_TAKEN))
		panic("%s: lock 0x%x became TAKEN by someone else",
		      caller, (integer_t) l);

	mycpu = cpu_number();
	l->debug.lock_thread = (void *) current_thread();
	l->debug.state |= USLOCK_TAKEN;
	l->debug.lock_pc = pc;
	l->debug.lock_cpu = mycpu;

	printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n",	/* (TEST/DEBUG) */
		l->debug.lock_thread,
		l->debug.unlock_thread,
		caller);

	usl_stack_push(l, mycpu);
	usl_trace(l, mycpu, pc, caller);
}
/*
 *	Determine whether the lock in question is owned
 *	by the current thread.
 */
void
usld_lock_held(
	usimple_lock_t	l)
{
	char	*caller = "usimple_lock_held";

	printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n",	/* (TEST/DEBUG) */
		l->debug.lock_thread,
		l->debug.unlock_thread,
		caller);

	if (!usld_lock_common_checks(l, caller))
		return;

	if (!(l->debug.state & USLOCK_TAKEN))
		panic("%s: lock 0x%x hasn't been taken",
		      caller, (integer_t) l);
	if (l->debug.lock_thread != (void *) current_thread())
		panic("%s: lock 0x%x is owned by thread 0x%x", caller,
		      (integer_t) l, (integer_t) l->debug.lock_thread);

	/*
	 *	The usimple_lock is active, so preemption
	 *	is disabled and the current cpu should
	 *	match the one recorded at lock acquisition time.
	 */
	if (l->debug.lock_cpu != cpu_number())
		panic("%s: current cpu 0x%x isn't acquiring cpu 0x%x",
		      caller, cpu_number(), (integer_t) l->debug.lock_cpu);
}
/*
 *	Per-cpu stack of currently active usimple_locks.
 *	Requires spl protection so that interrupt-level
 *	locks plug-n-play with their thread-context friends.
 */
#define	USLOCK_STACK_DEPTH	20
usimple_lock_t	uslock_stack[NCPUS][USLOCK_STACK_DEPTH];
unsigned int	uslock_stack_index[NCPUS];
boolean_t	uslock_stack_enabled = FALSE;


/*
 *	Record a usimple_lock just acquired on
 *	the current processor.
 *
 *	Preemption has been disabled by lock
 *	acquisition, so it's safe to use the cpu number
 *	specified by the caller.
 */
void
usl_stack_push(
	usimple_lock_t	l,
	int		mycpu)
{
	boolean_t	s;

	if (uslock_stack_enabled == FALSE)
		return;

	DISABLE_INTERRUPTS(s);
	assert(uslock_stack_index[mycpu] >= 0);
	assert(uslock_stack_index[mycpu] < USLOCK_STACK_DEPTH);
	if (uslock_stack_index[mycpu] >= USLOCK_STACK_DEPTH) {
		printf("usl_stack_push (cpu 0x%x): too many locks (%d)",
		       mycpu, uslock_stack_index[mycpu]);
		printf(" disabling stacks\n");
		uslock_stack_enabled = FALSE;
		ENABLE_INTERRUPTS(s);
		return;
	}
	uslock_stack[mycpu][uslock_stack_index[mycpu]] = l;
	uslock_stack_index[mycpu]++;
	ENABLE_INTERRUPTS(s);
}
/*
 *	Eliminate the entry for a usimple_lock
 *	that had been active on the current processor.
 *
 *	Preemption has been disabled by lock
 *	acquisition, and we haven't yet actually
 *	released the hardware lock associated with
 *	this usimple_lock, so it's safe to use the
 *	cpu number supplied by the caller.
 */
void
usl_stack_pop(
	usimple_lock_t	l,
	int		mycpu)
{
	unsigned int	i, index;
	boolean_t	s;

	if (uslock_stack_enabled == FALSE)
		return;

	DISABLE_INTERRUPTS(s);
	assert(uslock_stack_index[mycpu] > 0);
	assert(uslock_stack_index[mycpu] <= USLOCK_STACK_DEPTH);
	if (uslock_stack_index[mycpu] == 0) {
		printf("usl_stack_pop (cpu 0x%x): not enough locks (%d)",
		       mycpu, uslock_stack_index[mycpu]);
		printf(" disabling stacks\n");
		uslock_stack_enabled = FALSE;
		ENABLE_INTERRUPTS(s);
		return;
	}
	index = --uslock_stack_index[mycpu];
	for (i = 0; i <= index; ++i) {
		if (uslock_stack[mycpu][i] == l) {
			uslock_stack[mycpu][i] =
				uslock_stack[mycpu][index];
			ENABLE_INTERRUPTS(s);
			return;
		}
	}
	ENABLE_INTERRUPTS(s);
	panic("usl_stack_pop: can't find usimple_lock 0x%x", l);
}
/*
 *	Determine whether any usimple_locks are currently held.
 *
 *	Caller's preemption state is uncertain.  If
 *	preemption has been disabled, this check is accurate.
 *	Otherwise, this check is just a guess.  We do the best
 *	we can by disabling scheduler interrupts, so at least
 *	the check is accurate w.r.t. whatever cpu we're running
 *	on while in this routine.
 */
void
usld_lock_none_held()
{
	register int	mycpu;
	boolean_t	s;
	unsigned int	locks_held;
	char		*caller = "usimple_lock_none_held";

	DISABLE_INTERRUPTS(s);
	mp_disable_preemption();
	mycpu = cpu_number();
	locks_held = uslock_stack_index[mycpu];
	mp_enable_preemption();
	ENABLE_INTERRUPTS(s);
	if (locks_held > 0)
		panic("%s: no locks should be held (0x%x locks held)",
		      caller, (integer_t) locks_held);
}
/*
 *	For very special cases, set traced_lock to point to a
 *	specific lock of interest.  The result is a series of
 *	XPRs showing lock operations on that lock.  The lock_seq
 *	value is used to show the order of those operations.
 */
usimple_lock_t	traced_lock;
unsigned int	lock_seq;

void
usl_trace(
	usimple_lock_t	l,
	int		mycpu,
	pc_t		pc,
	const char *	op_name)
{
	if (traced_lock == l) {
		XPR(XPR_SLOCK,
		    "seq %d, cpu %d, %s @ %x\n",
		    (integer_t) lock_seq, (integer_t) mycpu,
		    (integer_t) op_name, (integer_t) pc, 0);
		lock_seq++;
	}
}
#if	MACH_KDB
#define	printf	kdbprintf
void	db_show_all_slocks(void);

void
db_show_all_slocks(void)
{
	unsigned int	i, index;
	int		mycpu = cpu_number();
	usimple_lock_t	l;

	if (uslock_stack_enabled == FALSE) {
		printf("Lock stack not enabled\n");
		return;
	}

	if (!mach_slocks_init)
		iprintf("WARNING: simple locks stack may not be accurate\n");
	assert(uslock_stack_index[mycpu] >= 0);
	assert(uslock_stack_index[mycpu] <= USLOCK_STACK_DEPTH);
	index = uslock_stack_index[mycpu];
	for (i = 0; i < index; ++i) {
		l = uslock_stack[mycpu][i];
		db_printsym((vm_offset_t)l, DB_STGY_ANY);
		if (l->debug.lock_pc != INVALID_PC) {
			printf(" locked by ");
			db_printsym((int)l->debug.lock_pc, DB_STGY_PROC);
		}
	}
}
#endif	/* MACH_KDB */

#endif	/* USLOCK_DEBUG */

/* #endif	USIMPLE_LOCK_CALLS */
/*
 *	Routine:	lock_alloc
 *
 *	Allocate a lock for external users who cannot
 *	hard-code the structure definition into their
 *	objects.
 *	For now just use kalloc, but a zone is probably
 *	warranted.
 */
lock_t *
lock_alloc(
	boolean_t	can_sleep,
	etap_event_t	event,
	etap_event_t	i_event)
{
	lock_t	*l;

	if ((l = (lock_t *)kalloc(sizeof(lock_t))) != 0)
		lock_init(l, can_sleep, event, i_event);
	return(l);
}

/*
 *	Routine:	lock_free
 *
 *	Free a lock allocated for external users.
 *	For now just use kfree, but a zone is probably
 *	warranted.
 */
void
lock_free(
	lock_t	*l)
{
	kfree((vm_offset_t)l, sizeof(lock_t));
}

/*
 *	Routine:	lock_init
 *
 *	Initialize a lock; required before use.
 *	Note that clients declare the "struct lock"
 *	variables and then initialize them, rather
 *	than getting a new one from this module.
 */
void
lock_init(
	lock_t		*l,
	boolean_t	can_sleep,
	etap_event_t	event,
	etap_event_t	i_event)
{
	(void) memset((void *) l, 0, sizeof(lock_t));

#if	ETAP_LOCK_TRACE
	etap_event_table_assign(&l->u.event_table_chain, event);
	l->u.s.start_list = SD_ENTRY_NULL;
#endif	/* ETAP_LOCK_TRACE */

	simple_lock_init(&l->interlock, i_event);
	l->want_write = FALSE;
	l->want_upgrade = FALSE;
	l->can_sleep = can_sleep;

#if	ETAP_LOCK_ACCUMULATE
	l->cbuff_write = etap_cbuff_reserve(lock_event_table(l));
	if (l->cbuff_write != CBUFF_ENTRY_NULL) {
		l->cbuff_write->event    = event;
		l->cbuff_write->instance = (unsigned long) l;
		l->cbuff_write->kind     = WRITE_LOCK;
	}
	l->cbuff_read = CBUFF_ENTRY_NULL;
#endif	/* ETAP_LOCK_ACCUMULATE */
}
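/*
 * Illustrative usage sketch (not part of the original file): an external
 * client that cannot embed a struct lock in its own object can allocate one
 * dynamically.  The event/i_event arguments are placeholders assumed for the
 * example, and the acquire/release routine names follow the standard Mach
 * complex-lock interface implemented later in this file.
 */
#if 0	/* example only */
	lock_t	*lck;

	lck = lock_alloc(TRUE, event, i_event);	/* sleepable lock   */
	lock_write(lck);			/* exclusive access */
	/* ... modify the protected data ... */
	lock_done(lck);				/* release          */
	lock_free(lck);
#endif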
/*
 *	Sleep locks.  These use the same data structure and algorithm
 *	as the spin locks, but the process sleeps while it is waiting
 *	for the lock.  These work on uniprocessor systems.
 */

#define	DECREMENTER_TIMEOUT	1000000
1031 register lock_t
* l
)
1034 start_data_node_t entry
= {0};
1035 boolean_t lock_miss
= FALSE
;
1036 unsigned short dynamic
= 0;
1037 unsigned short trace
= 0;
1038 etap_time_t total_time
;
1039 etap_time_t stop_wait_time
;
1043 #endif /* MACH_LDEBUG */
1046 ETAP_STAMP(lock_event_table(l
), trace
, dynamic
);
1047 ETAP_CREATE_ENTRY(entry
, trace
);
1048 MON_ASSIGN_PC(entry
->start_pc
, pc
, trace
);
1050 simple_lock(&l
->interlock
);
1053 * Link the new start_list entry
1055 ETAP_LINK_ENTRY(l
, entry
, trace
);
1058 decrementer
= DECREMENTER_TIMEOUT
;
1059 #endif /* MACH_LDEBUG */
1062 * Try to acquire the want_write bit.
1064 while (l
->want_write
) {
1066 ETAP_CONTENTION_TIMESTAMP(entry
, trace
);
1070 i
= lock_wait_time
[l
->can_sleep
? 1 : 0];
1072 simple_unlock(&l
->interlock
);
1075 Debugger("timeout - want_write");
1076 #endif /* MACH_LDEBUG */
1077 while (--i
!= 0 && l
->want_write
)
1079 simple_lock(&l
->interlock
);
1082 if (l
->can_sleep
&& l
->want_write
) {
1084 ETAP_SET_REASON(current_thread(),
1085 BLOCKED_ON_COMPLEX_LOCK
);
1086 thread_sleep_simple_lock((event_t
) l
,
1087 simple_lock_addr(l
->interlock
), FALSE
);
1088 simple_lock(&l
->interlock
);
1091 l
->want_write
= TRUE
;
1093 /* Wait for readers (and upgrades) to finish */
1096 decrementer
= DECREMENTER_TIMEOUT
;
1097 #endif /* MACH_LDEBUG */
1098 while ((l
->read_count
!= 0) || l
->want_upgrade
) {
1100 ETAP_CONTENTION_TIMESTAMP(entry
,trace
);
1104 i
= lock_wait_time
[l
->can_sleep
? 1 : 0];
1106 simple_unlock(&l
->interlock
);
1109 Debugger("timeout - wait for readers");
1110 #endif /* MACH_LDEBUG */
1111 while (--i
!= 0 && (l
->read_count
!= 0 ||
1114 simple_lock(&l
->interlock
);
1117 if (l
->can_sleep
&& (l
->read_count
!= 0 || l
->want_upgrade
)) {
1119 ETAP_SET_REASON(current_thread(),
1120 BLOCKED_ON_COMPLEX_LOCK
);
1121 thread_sleep_simple_lock((event_t
) l
,
1122 simple_lock_addr(l
->interlock
), FALSE
);
1123 simple_lock(&l
->interlock
);
1128 * do not collect wait data if either the lock
1129 * was free or no wait traces are enabled.
1132 if (lock_miss
&& ETAP_CONTENTION_ENABLED(trace
)) {
1133 ETAP_TIMESTAMP(stop_wait_time
);
1134 ETAP_TOTAL_TIME(total_time
,
1136 entry
->start_wait_time
);
1137 CUM_WAIT_ACCUMULATE(l
->cbuff_write
, total_time
, dynamic
, trace
);
1146 simple_unlock(&l
->interlock
);
1149 * Set start hold time if some type of hold tracing is enabled.
1151 * Note: if the stop_wait_time was already stamped, use
1152 * it as the start_hold_time instead of doing an
1153 * expensive bus access.
1157 if (lock_miss
&& ETAP_CONTENTION_ENABLED(trace
))
1158 ETAP_COPY_START_HOLD_TIME(entry
, stop_wait_time
, trace
);
1160 ETAP_DURATION_TIMESTAMP(entry
, trace
);
1166 register lock_t
* l
)
1168 boolean_t do_wakeup
= FALSE
;
1169 start_data_node_t entry
;
1170 unsigned short dynamic
= 0;
1171 unsigned short trace
= 0;
1172 etap_time_t stop_hold_time
;
1173 etap_time_t total_time
;
1174 unsigned long lock_kind
;
1178 ETAP_STAMP(lock_event_table(l
), trace
, dynamic
);
1180 simple_lock(&l
->interlock
);
1182 if (l
->read_count
!= 0) {
1184 lock_kind
= READ_LOCK
;
1187 if (l
->want_upgrade
) {
1188 l
->want_upgrade
= FALSE
;
1189 lock_kind
= WRITE_LOCK
;
1192 l
->want_write
= FALSE
;
1193 lock_kind
= WRITE_LOCK
;
1197 * There is no reason to wakeup a waiting thread
1198 * if the read-count is non-zero. Consider:
1199 * we must be dropping a read lock
1200 * threads are waiting only if one wants a write lock
1201 * if there are still readers, they can't proceed
1204 if (l
->waiting
&& (l
->read_count
== 0)) {
1209 * Collect hold data if hold tracing is
1214 * NOTE: All complex locks whose tracing was on when the
1215 * lock was acquired will have an entry in the start_data
1219 ETAP_UNLINK_ENTRY(l
,entry
);
1220 if (ETAP_DURATION_ENABLED(trace
) && entry
!= SD_ENTRY_NULL
) {
1221 ETAP_TIMESTAMP (stop_hold_time
);
1222 ETAP_TOTAL_TIME (total_time
,
1224 entry
->start_hold_time
);
1226 if (lock_kind
& WRITE_LOCK
)
1227 CUM_HOLD_ACCUMULATE (l
->cbuff_write
,
1232 CUM_READ_ENTRY_RESERVE(l
,l
->cbuff_read
,trace
);
1233 CUM_HOLD_ACCUMULATE (l
->cbuff_read
,
1238 MON_ASSIGN_PC(entry
->end_pc
,pc
,trace
);
1239 MON_DATA_COLLECT(l
,entry
,
1246 simple_unlock(&l
->interlock
);
1248 ETAP_DESTROY_ENTRY(entry
);
1251 thread_wakeup((event_t
) l
);
1256 register lock_t
* l
)
1259 start_data_node_t entry
= {0};
1260 boolean_t lock_miss
= FALSE
;
1261 unsigned short dynamic
= 0;
1262 unsigned short trace
= 0;
1263 etap_time_t total_time
;
1264 etap_time_t stop_wait_time
;
1268 #endif /* MACH_LDEBUG */
1270 ETAP_STAMP(lock_event_table(l
), trace
, dynamic
);
1271 ETAP_CREATE_ENTRY(entry
, trace
);
1272 MON_ASSIGN_PC(entry
->start_pc
, pc
, trace
);
1274 simple_lock(&l
->interlock
);
1277 * Link the new start_list entry
1279 ETAP_LINK_ENTRY(l
,entry
,trace
);
1282 decrementer
= DECREMENTER_TIMEOUT
;
1283 #endif /* MACH_LDEBUG */
1284 while (l
->want_write
|| l
->want_upgrade
) {
1286 ETAP_CONTENTION_TIMESTAMP(entry
, trace
);
1290 i
= lock_wait_time
[l
->can_sleep
? 1 : 0];
1293 simple_unlock(&l
->interlock
);
1296 Debugger("timeout - wait no writers");
1297 #endif /* MACH_LDEBUG */
1298 while (--i
!= 0 && (l
->want_write
|| l
->want_upgrade
))
1300 simple_lock(&l
->interlock
);
1303 if (l
->can_sleep
&& (l
->want_write
|| l
->want_upgrade
)) {
1305 thread_sleep_simple_lock((event_t
) l
,
1306 simple_lock_addr(l
->interlock
), FALSE
);
1307 simple_lock(&l
->interlock
);
1314 * Do not collect wait data if the lock was free
1315 * or if no wait traces are enabled.
1318 if (lock_miss
&& ETAP_CONTENTION_ENABLED(trace
)) {
1319 ETAP_TIMESTAMP(stop_wait_time
);
1320 ETAP_TOTAL_TIME(total_time
,
1322 entry
->start_wait_time
);
1323 CUM_READ_ENTRY_RESERVE(l
, l
->cbuff_read
, trace
);
1324 CUM_WAIT_ACCUMULATE(l
->cbuff_read
, total_time
, dynamic
, trace
);
1332 simple_unlock(&l
->interlock
);
1335 * Set start hold time if some type of hold tracing is enabled.
1337 * Note: if the stop_wait_time was already stamped, use
1338 * it instead of doing an expensive bus access.
1342 if (lock_miss
&& ETAP_CONTENTION_ENABLED(trace
))
1343 ETAP_COPY_START_HOLD_TIME(entry
, stop_wait_time
, trace
);
1345 ETAP_DURATION_TIMESTAMP(entry
,trace
);
1350 * Routine: lock_read_to_write
1352 * Improves a read-only lock to one with
1353 * write permission. If another reader has
1354 * already requested an upgrade to a write lock,
1355 * no lock is held upon return.
1357 * Returns TRUE if the upgrade *failed*.
1362 register lock_t
* l
)
1365 boolean_t do_wakeup
= FALSE
;
1366 start_data_node_t entry
= {0};
1367 boolean_t lock_miss
= FALSE
;
1368 unsigned short dynamic
= 0;
1369 unsigned short trace
= 0;
1370 etap_time_t total_time
;
1371 etap_time_t stop_time
;
1375 #endif /* MACH_LDEBUG */
1378 ETAP_STAMP(lock_event_table(l
), trace
, dynamic
);
1380 simple_lock(&l
->interlock
);
1385 * Since the read lock is lost whether the write lock
1386 * is acquired or not, read hold data is collected here.
1387 * This, of course, is assuming some type of hold
1388 * tracing is enabled.
1390 * Note: trace is set to zero if the entry does not exist.
1393 ETAP_FIND_ENTRY(l
, entry
, trace
);
1395 if (ETAP_DURATION_ENABLED(trace
)) {
1396 ETAP_TIMESTAMP(stop_time
);
1397 ETAP_TOTAL_TIME(total_time
, stop_time
, entry
->start_hold_time
);
1398 CUM_HOLD_ACCUMULATE(l
->cbuff_read
, total_time
, dynamic
, trace
);
1399 MON_ASSIGN_PC(entry
->end_pc
, pc
, trace
);
1408 if (l
->want_upgrade
) {
1410 * Someone else has requested upgrade.
1411 * Since we've released a read lock, wake
1414 if (l
->waiting
&& (l
->read_count
== 0)) {
1419 ETAP_UNLINK_ENTRY(l
, entry
);
1420 simple_unlock(&l
->interlock
);
1421 ETAP_DESTROY_ENTRY(entry
);
1424 thread_wakeup((event_t
) l
);
1428 l
->want_upgrade
= TRUE
;
1430 MON_ASSIGN_PC(entry
->start_pc
, pc
, trace
);
1433 decrementer
= DECREMENTER_TIMEOUT
;
1434 #endif /* MACH_LDEBUG */
1435 while (l
->read_count
!= 0) {
1437 ETAP_CONTENTION_TIMESTAMP(entry
, trace
);
1441 i
= lock_wait_time
[l
->can_sleep
? 1 : 0];
1444 simple_unlock(&l
->interlock
);
1447 Debugger("timeout - read_count");
1448 #endif /* MACH_LDEBUG */
1449 while (--i
!= 0 && l
->read_count
!= 0)
1451 simple_lock(&l
->interlock
);
1454 if (l
->can_sleep
&& l
->read_count
!= 0) {
1456 thread_sleep_simple_lock((event_t
) l
,
1457 simple_lock_addr(l
->interlock
), FALSE
);
1458 simple_lock(&l
->interlock
);
1463 * do not collect wait data if the lock was free
1464 * or if no wait traces are enabled.
1467 if (lock_miss
&& ETAP_CONTENTION_ENABLED(trace
)) {
1468 ETAP_TIMESTAMP (stop_time
);
1469 ETAP_TOTAL_TIME(total_time
, stop_time
, entry
->start_wait_time
);
1470 CUM_WAIT_ACCUMULATE(l
->cbuff_write
, total_time
, dynamic
, trace
);
1479 simple_unlock(&l
->interlock
);
1482 * Set start hold time if some type of hold tracing is enabled
1484 * Note: if the stop_time was already stamped, use
1485 * it as the new start_hold_time instead of doing
1486 * an expensive VME access.
1490 if (lock_miss
&& ETAP_CONTENTION_ENABLED(trace
))
1491 ETAP_COPY_START_HOLD_TIME(entry
, stop_time
, trace
);
1493 ETAP_DURATION_TIMESTAMP(entry
, trace
);
1500 register lock_t
* l
)
1502 boolean_t do_wakeup
= FALSE
;
1503 start_data_node_t entry
= {0};
1504 unsigned short dynamic
= 0;
1505 unsigned short trace
= 0;
1506 etap_time_t stop_hold_time
;
1507 etap_time_t total_time
;
1510 ETAP_STAMP(lock_event_table(l
), trace
,dynamic
);
1512 simple_lock(&l
->interlock
);
1515 if (l
->want_upgrade
)
1516 l
->want_upgrade
= FALSE
;
1518 l
->want_write
= FALSE
;
1526 * Since we are switching from a write lock to a read lock,
1527 * the write lock data is stored and the read lock data
1528 * collection begins.
1530 * Note: trace is set to zero if the entry does not exist.
1533 ETAP_FIND_ENTRY(l
, entry
, trace
);
1535 if (ETAP_DURATION_ENABLED(trace
)) {
1536 ETAP_TIMESTAMP (stop_hold_time
);
1537 ETAP_TOTAL_TIME(total_time
, stop_hold_time
, entry
->start_hold_time
);
1538 CUM_HOLD_ACCUMULATE(l
->cbuff_write
, total_time
, dynamic
, trace
);
1539 MON_ASSIGN_PC(entry
->end_pc
, pc
, trace
);
1548 simple_unlock(&l
->interlock
);
1551 * Set start hold time if some type of hold tracing is enabled
1553 * Note: if the stop_hold_time was already stamped, use
1554 * it as the new start_hold_time instead of doing
1555 * an expensive bus access.
1559 if (ETAP_DURATION_ENABLED(trace
))
1560 ETAP_COPY_START_HOLD_TIME(entry
, stop_hold_time
, trace
);
1562 ETAP_DURATION_TIMESTAMP(entry
, trace
);
1564 MON_ASSIGN_PC(entry
->start_pc
, pc
, trace
);
1567 thread_wakeup((event_t
) l
);
1573 * Routine: lock_try_write
1575 * Tries to get a write lock.
1577 * Returns FALSE if the lock is not held on return.
1582 register lock_t
* l
)
1584 start_data_node_t entry
= {0};
1585 unsigned short trace
= 0;
1588 ETAP_STAMP(lock_event_table(l
), trace
, trace
);
1589 ETAP_CREATE_ENTRY(entry
, trace
);
1591 simple_lock(&l
->interlock
);
1593 if (l
->want_write
|| l
->want_upgrade
|| l
->read_count
) {
1597 simple_unlock(&l
->interlock
);
1598 ETAP_DESTROY_ENTRY(entry
);
1606 l
->want_write
= TRUE
;
1608 ETAP_LINK_ENTRY(l
, entry
, trace
);
1610 simple_unlock(&l
->interlock
);
1612 MON_ASSIGN_PC(entry
->start_pc
, pc
, trace
);
1613 ETAP_DURATION_TIMESTAMP(entry
, trace
);
1619 * Routine: lock_try_read
1621 * Tries to get a read lock.
1623 * Returns FALSE if the lock is not held on return.
1628 register lock_t
* l
)
1630 start_data_node_t entry
= {0};
1631 unsigned short trace
= 0;
1634 ETAP_STAMP(lock_event_table(l
), trace
, trace
);
1635 ETAP_CREATE_ENTRY(entry
, trace
);
1637 simple_lock(&l
->interlock
);
1639 if (l
->want_write
|| l
->want_upgrade
) {
1640 simple_unlock(&l
->interlock
);
1641 ETAP_DESTROY_ENTRY(entry
);
1647 ETAP_LINK_ENTRY(l
, entry
, trace
);
1649 simple_unlock(&l
->interlock
);
1651 MON_ASSIGN_PC(entry
->start_pc
, pc
, trace
);
1652 ETAP_DURATION_TIMESTAMP(entry
, trace
);
1660 void db_show_one_lock(lock_t
*);
1667 db_printf("Read_count = 0x%x, %swant_upgrade, %swant_write, ",
1669 lock
->want_upgrade
? "" : "!",
1670 lock
->want_write
? "" : "!");
1671 db_printf("%swaiting, %scan_sleep\n",
1672 lock
->waiting
? "" : "!", lock
->can_sleep
? "" : "!");
1673 db_printf("Interlock:\n");
1674 db_show_one_simple_lock((db_expr_t
)simple_lock_addr(lock
->interlock
),
1675 TRUE
, (db_expr_t
)0, (char *)0);
1677 #endif /* MACH_KDB */
/*
 *	The C portion of the mutex package.  These routines are only invoked
 *	if the optimized assembler routines can't do the work.
 */

/*
 *	Routine:	mutex_alloc
 *
 *	Allocate a mutex for external users who cannot
 *	hard-code the structure definition into their
 *	objects.
 *	For now just use kalloc, but a zone is probably
 *	warranted.
 */
1699 if ((m
= (mutex_t
*)kalloc(sizeof(mutex_t
))) != 0)
1700 mutex_init(m
, event
);
1705 * Routine: mutex_free
1707 * Free a mutex allocated for external users.
1708 * For now just use kfree, but a zone is probably
1715 kfree((vm_offset_t
)m
, sizeof(mutex_t
));
1720 * mutex_lock_wait: Invoked if the assembler routine mutex_lock () fails
1721 * because the mutex is already held by another thread. Called with the
1722 * interlock locked and returns with the interlock unlocked.
1730 ETAP_SET_REASON(current_thread(), BLOCKED_ON_MUTEX_LOCK
);
1731 thread_sleep_interlock ((event_t
) m
, &m
->interlock
, THREAD_UNINT
);
1735 * mutex_unlock_wakeup: Invoked if the assembler routine mutex_unlock ()
1736 * fails because there are thread(s) waiting for this mutex. Called and
1737 * returns with the interlock locked.
1741 mutex_unlock_wakeup (
1746 thread_wakeup_one ((event_t
) m
);
1750 * mutex_pause: Called by former callers of simple_lock_pause().
1758 assert_wait_timeout( 1, THREAD_INTERRUPTIBLE
);
1759 ETAP_SET_REASON(current_thread(), BLOCKED_ON_MUTEX_LOCK
);
1760 wait_result
= thread_block((void (*)(void))0);
1761 if (wait_result
!= THREAD_TIMED_OUT
)
1762 thread_cancel_timer();
1767 * Routines to print out simple_locks and mutexes in a nicely-formatted
1771 char *simple_lock_labels
= "ENTRY ILK THREAD DURATION CALLER";
1772 char *mutex_labels
= "ENTRY LOCKED WAITERS THREAD CALLER";
1775 db_show_one_simple_lock (
1777 boolean_t have_addr
,
1781 simple_lock_t saddr
= (simple_lock_t
)addr
;
1783 if (saddr
== (simple_lock_t
)0 || !have_addr
) {
1784 db_error ("No simple_lock\n");
1787 else if (saddr
->lock_type
!= USLOCK_TAG
)
1788 db_error ("Not a simple_lock\n");
1789 #endif /* USLOCK_DEBUG */
1791 db_printf ("%s\n", simple_lock_labels
);
1792 db_print_simple_lock (saddr
);
1796 db_print_simple_lock (
1800 db_printf ("%08x %3d", addr
, *hw_lock_addr(addr
->interlock
));
1802 db_printf (" %08x", addr
->debug
.lock_thread
);
1803 db_printf (" %08x ", addr
->debug
.duration
[1]);
1804 db_printsym ((int)addr
->debug
.lock_pc
, DB_STGY_ANY
);
1805 #endif /* USLOCK_DEBUG */
1812 boolean_t have_addr
,
1816 mutex_t
* maddr
= (mutex_t
*)addr
;
1818 if (maddr
== (mutex_t
*)0 || !have_addr
)
1819 db_error ("No mutex\n");
1821 else if (maddr
->type
!= MUTEX_TAG
)
1822 db_error ("Not a mutex\n");
1823 #endif /* MACH_LDEBUG */
1825 db_printf ("%s\n", mutex_labels
);
1826 db_print_mutex (maddr
);
1833 db_printf ("%08x %6d %7d",
1834 addr
, *hw_lock_addr(addr
->locked
), addr
->waiters
);
1836 db_printf (" %08x ", addr
->thread
);
1837 db_printsym (addr
->pc
, DB_STGY_ANY
);
1838 #endif /* MACH_LDEBUG */
1841 #endif /* MACH_KDB */
1844 extern void meter_simple_lock (
1846 extern void meter_simple_unlock (
1848 extern void cyctm05_stamp (
1849 unsigned long * start
);
1850 extern void cyctm05_diff (
1851 unsigned long * start
,
1852 unsigned long * end
,
1853 unsigned long * diff
);
1856 simple_lock_data_t loser
;
1864 cyctm05_stamp (lp
->duration
);
1868 int long_simple_lock_crash
;
1869 int long_simple_lock_time
= 0x600;
1871 * This is pretty gawd-awful. XXX
1873 decl_simple_lock_data(extern,kd_tty
)
1876 meter_simple_unlock(
1880 unsigned long stime
[2], etime
[2], delta
[2];
1882 if (lp
== &kd_tty
) /* XXX */
1885 stime
[0] = lp
->duration
[0];
1886 stime
[1] = lp
->duration
[1];
1888 cyctm05_stamp (etime
);
1890 if (etime
[1] < stime
[1]) /* XXX */
1893 cyctm05_diff (stime
, etime
, delta
);
1895 if (delta
[1] >= 0x10000) /* XXX */
1898 lp
->duration
[0] = delta
[0];
1899 lp
->duration
[1] = delta
[1];
1901 if (loser
.duration
[1] < lp
->duration
[1])
1904 assert (!long_simple_lock_crash
|| delta
[1] < long_simple_lock_time
);
1907 #endif /* MACH_LDEBUG */
1913 * ==============================================================
1914 * ETAP hook when initializing a usimple_lock. May be invoked
1915 * from the portable lock package or from an optimized machine-
1916 * dependent implementation.
1917 * ==============================================================
1921 etap_simplelock_init (
1925 ETAP_CLEAR_TRACE_DATA(l
);
1926 etap_event_table_assign(&l
->u
.event_table_chain
, event
);
1928 #if ETAP_LOCK_ACCUMULATE
1929 /* reserve an entry in the cumulative buffer */
1930 l
->cbuff_entry
= etap_cbuff_reserve(lock_event_table(l
));
1931 /* initialize the entry if one was returned */
1932 if (l
->cbuff_entry
!= CBUFF_ENTRY_NULL
) {
1933 l
->cbuff_entry
->event
= event
;
1934 l
->cbuff_entry
->instance
= (unsigned long) l
;
1935 l
->cbuff_entry
->kind
= SPIN_LOCK
;
1937 #endif /* ETAP_LOCK_ACCUMULATE */
1942 etap_simplelock_unlock(
1945 unsigned short dynamic
= 0;
1946 unsigned short trace
= 0;
1947 etap_time_t total_time
;
1948 etap_time_t stop_hold_time
;
1952 ETAP_STAMP(lock_event_table(l
), trace
, dynamic
);
1955 * Calculate & collect hold time data only if
1956 * the hold tracing was enabled throughout the
1957 * whole operation. This prevents collection of
1958 * bogus data caused by mid-operation trace changes.
1962 if (ETAP_DURATION_ENABLED(trace
) && ETAP_WHOLE_OP(l
)) {
1963 ETAP_TIMESTAMP (stop_hold_time
);
1964 ETAP_TOTAL_TIME(total_time
, stop_hold_time
,
1965 l
->u
.s
.start_hold_time
);
1966 CUM_HOLD_ACCUMULATE(l
->cbuff_entry
, total_time
, dynamic
, trace
);
1967 MON_ASSIGN_PC(l
->end_pc
, pc
, trace
);
1975 ETAP_CLEAR_TRACE_DATA(l
);
/* ========================================================================
 * Since the simple_lock() routine is machine dependent, it must always
 * be coded in assembly.  The two hook routines below are used to collect
 * the ETAP lock statistics.
 * ========================================================================
 */
1986 * ROUTINE: etap_simplelock_miss()
1988 * FUNCTION: This spin lock routine is called upon the first
1989 * spin (miss) of the lock.
1991 * A timestamp is taken at the beginning of the wait period,
1992 * if wait tracing is enabled.
1997 * - timestamp address.
1999 * RETURNS: Wait timestamp value. The timestamp value is later used
2000 * by etap_simplelock_hold().
2002 * NOTES: This routine is NOT ALWAYS called. The lock may be free
2003 * (never spinning). For this reason the pc is collected in
2004 * etap_simplelock_hold().
2008 etap_simplelock_miss (
2012 unsigned short trace
= 0;
2013 unsigned short dynamic
= 0;
2014 etap_time_t start_miss_time
;
2016 ETAP_STAMP(lock_event_table(l
), trace
, dynamic
);
2018 if (trace
& ETAP_CONTENTION
)
2019 ETAP_TIMESTAMP(start_miss_time
);
2021 return(start_miss_time
);
2025 * ROUTINE: etap_simplelock_hold()
2027 * FUNCTION: This spin lock routine is ALWAYS called once the lock
2028 * is acquired. Here, the contention time is calculated and
2029 * the start hold time is stamped.
2033 * - PC of the calling function.
2034 * - start wait timestamp.
2039 etap_simplelock_hold (
2042 etap_time_t start_hold_time
)
2044 unsigned short dynamic
= 0;
2045 unsigned short trace
= 0;
2046 etap_time_t total_time
;
2047 etap_time_t stop_hold_time
;
2049 ETAP_STAMP(lock_event_table(l
), trace
, dynamic
);
2051 MON_ASSIGN_PC(l
->start_pc
, pc
, trace
);
2053 /* do not collect wait data if lock was free */
2054 if (ETAP_TIME_IS_ZERO(start_hold_time
) && (trace
& ETAP_CONTENTION
)) {
2055 ETAP_TIMESTAMP(stop_hold_time
);
2056 ETAP_TOTAL_TIME(total_time
,
2059 CUM_WAIT_ACCUMULATE(l
->cbuff_entry
, total_time
, dynamic
, trace
);
2066 ETAP_COPY_START_HOLD_TIME(&l
->u
.s
, stop_hold_time
, trace
);
2069 ETAP_DURATION_TIMESTAMP(&l
->u
.s
, trace
);
2077 ETAP_CLEAR_TRACE_DATA(l
);
2078 etap_event_table_assign(&l
->u
.event_table_chain
, event
);
2080 #if ETAP_LOCK_ACCUMULATE
2081 /* reserve an entry in the cumulative buffer */
2082 l
->cbuff_entry
= etap_cbuff_reserve(lock_event_table(l
));
2083 /* initialize the entry if one was returned */
2084 if (l
->cbuff_entry
!= CBUFF_ENTRY_NULL
) {
2085 l
->cbuff_entry
->event
= event
;
2086 l
->cbuff_entry
->instance
= (unsigned long) l
;
2087 l
->cbuff_entry
->kind
= MUTEX_LOCK
;
2089 #endif /* ETAP_LOCK_ACCUMULATE */
2096 unsigned short trace
= 0;
2097 unsigned short dynamic
= 0;
2098 etap_time_t start_miss_time
;
2100 ETAP_STAMP(lock_event_table(l
), trace
, dynamic
);
2102 if (trace
& ETAP_CONTENTION
)
2103 ETAP_TIMESTAMP(start_miss_time
);
2105 ETAP_TIME_CLEAR(start_miss_time
);
2107 return(start_miss_time
);
2114 etap_time_t start_hold_time
)
2116 unsigned short dynamic
= 0;
2117 unsigned short trace
= 0;
2118 etap_time_t total_time
;
2119 etap_time_t stop_hold_time
;
2121 ETAP_STAMP(lock_event_table(l
), trace
, dynamic
);
2123 MON_ASSIGN_PC(l
->start_pc
, pc
, trace
);
2125 /* do not collect wait data if lock was free */
2126 if (!ETAP_TIME_IS_ZERO(start_hold_time
) && (trace
& ETAP_CONTENTION
)) {
2127 ETAP_TIMESTAMP(stop_hold_time
);
2128 ETAP_TOTAL_TIME(total_time
,
2131 CUM_WAIT_ACCUMULATE(l
->cbuff_entry
, total_time
, dynamic
, trace
);
2138 ETAP_COPY_START_HOLD_TIME(&l
->u
.s
, stop_hold_time
, trace
);
2141 ETAP_DURATION_TIMESTAMP(&l
->u
.s
, trace
);
2148 unsigned short dynamic
= 0;
2149 unsigned short trace
= 0;
2150 etap_time_t total_time
;
2151 etap_time_t stop_hold_time
;
2155 ETAP_STAMP(lock_event_table(l
), trace
, dynamic
);
2158 * Calculate & collect hold time data only if
2159 * the hold tracing was enabled throughout the
2160 * whole operation. This prevents collection of
2161 * bogus data caused by mid-operation trace changes.
2165 if (ETAP_DURATION_ENABLED(trace
) && ETAP_WHOLE_OP(l
)) {
2166 ETAP_TIMESTAMP(stop_hold_time
);
2167 ETAP_TOTAL_TIME(total_time
, stop_hold_time
,
2168 l
->u
.s
.start_hold_time
);
2169 CUM_HOLD_ACCUMULATE(l
->cbuff_entry
, total_time
, dynamic
, trace
);
2170 MON_ASSIGN_PC(l
->end_pc
, pc
, trace
);
2178 ETAP_CLEAR_TRACE_DATA(l
);
2181 #endif /* ETAP_LOCK_TRACE */