1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * @OSF_COPYRIGHT@
24 */
25/*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50/*
51 * File: kern/lock.c
52 * Author: Avadis Tevanian, Jr., Michael Wayne Young
53 * Date: 1985
54 *
55 * Locking primitives implementation
56 */
57
58#include <cpus.h>
59#include <mach_kdb.h>
60#include <mach_ldebug.h>
61
62#include <kern/lock.h>
63#include <kern/etap_macros.h>
64#include <kern/misc_protos.h>
65#include <kern/thread.h>
 66#include <kern/processor.h>
67#include <kern/sched_prim.h>
68#include <kern/xpr.h>
69#include <kern/debug.h>
70#include <string.h>
71
72#if MACH_KDB
73#include <ddb/db_command.h>
74#include <ddb/db_output.h>
75#include <ddb/db_sym.h>
76#include <ddb/db_print.h>
77#endif /* MACH_KDB */
78
79#ifdef __ppc__
80#include <ppc/Firmware.h>
81#include <ppc/POWERMAC/mp/MPPlugIn.h>
82#endif
83
84#include <sys/kdebug.h>
85
86#define ANY_LOCK_DEBUG (USLOCK_DEBUG || LOCK_DEBUG || MUTEX_DEBUG)
87
88/*
89 * Some portions of the lock debugging code must run with
90 * interrupts disabled. This can be machine-dependent,
91 * but we don't have any good hooks for that at the moment.
92 * If your architecture is different, add a machine-dependent
93 * ifdef here for these macros. XXX
94 */
95
96#define DISABLE_INTERRUPTS(s) s = ml_set_interrupts_enabled(FALSE)
97#define ENABLE_INTERRUPTS(s) (void)ml_set_interrupts_enabled(s)
98
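
/*
 * Illustrative sketch, not part of the original file: the save/restore
 * pattern these macros expect, as used by the debug code further below.
 * The function and counter names ("example_*") are hypothetical.
 */
#if 0	/* illustrative sketch -- not compiled */
static void
example_critical_update(
	unsigned int	*counter)
{
	boolean_t	s;

	DISABLE_INTERRUPTS(s);		/* remember the previous interrupt state */
	(*counter)++;			/* work that must not be interrupted */
	ENABLE_INTERRUPTS(s);		/* restore the saved state, not simply TRUE */
}
#endif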
99#if NCPUS > 1
100/* Time we loop without holding the interlock.
101 * The former is for when we cannot sleep, the latter
102 * for when our thread can go to sleep (loop less).
103 * We shouldn't retake the interlock frequently
104 * if we cannot go to sleep, since that interferes with
105 * the other processors. In particular, 100 is too small
106 * a number for powerpc MP systems because of cache
107 * coherency issues and differing lock fetch times between
108 * the processors.
109 */
110unsigned int lock_wait_time[2] = { (unsigned int)-1, 100 } ;
111#else /* NCPUS > 1 */
112
113 /*
114 * It is silly to spin on a uni-processor as if we
115 * thought something magical would happen to the
116 * want_write bit while we are executing.
117 */
118
119unsigned int lock_wait_time[2] = { 0, 0 };
120#endif /* NCPUS > 1 */
121
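
/*
 * Illustrative sketch, not part of the original file: how the complex
 * lock paths below consume lock_wait_time[].  Index 0 bounds the pure
 * spin case, index 1 the case where the caller may block; on UP builds
 * both entries are 0, so no spinning occurs.  The helper name is
 * hypothetical; the body mirrors the loops in lock_write() below.
 */
#if 0	/* illustrative sketch -- not compiled */
static void
example_bounded_spin(
	lock_t		*l)
{
	int	i = lock_wait_time[l->can_sleep ? 1 : 0];

	if (i != 0) {
		simple_unlock(&l->interlock);		/* don't spin holding the interlock */
		while (--i != 0 && l->want_write)	/* bounded spin on a cached flag */
			continue;
		simple_lock(&l->interlock);		/* retake before re-checking or sleeping */
	}
}
#endif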
122/* Forwards */
123
124#if MACH_KDB
125void db_print_simple_lock(
126 simple_lock_t addr);
127
128void db_print_mutex(
129 mutex_t * addr);
130#endif /* MACH_KDB */
131
132
133#if USLOCK_DEBUG
134/*
135 * Perform simple lock checks.
136 */
137int uslock_check = 1;
138int max_lock_loops = 100000000;
139decl_simple_lock_data(extern , printf_lock)
140decl_simple_lock_data(extern , panic_lock)
141#if MACH_KDB && NCPUS > 1
142decl_simple_lock_data(extern , kdb_lock)
143#endif /* MACH_KDB && NCPUS >1 */
144#endif /* USLOCK_DEBUG */
145
146
147/*
148 * We often want to know the addresses of the callers
149 * of the various lock routines. However, this information
150 * is only used for debugging and statistics.
151 */
152typedef void *pc_t;
153#define INVALID_PC ((void *) VM_MAX_KERNEL_ADDRESS)
154#define INVALID_THREAD ((void *) VM_MAX_KERNEL_ADDRESS)
155#if ANY_LOCK_DEBUG || ETAP_LOCK_TRACE
156#define OBTAIN_PC(pc,l) ((pc) = (void *) GET_RETURN_PC(&(l)))
157#else /* ANY_LOCK_DEBUG || ETAP_LOCK_TRACE */
158#ifdef lint
159/*
160 * Eliminate lint complaints about unused local pc variables.
161 */
162#define OBTAIN_PC(pc,l) ++pc
163#else /* lint */
164#define OBTAIN_PC(pc,l)
165#endif /* lint */
166#endif /* ANY_LOCK_DEBUG || ETAP_LOCK_TRACE */
167
168
169/* #ifndef USIMPLE_LOCK_CALLS
170 * The i386 production version of usimple_locks isn't ready yet.
171 */
172/*
173 * Portable lock package implementation of usimple_locks.
174 */
175
176#if ETAP_LOCK_TRACE
177#define ETAPCALL(stmt) stmt
178void etap_simplelock_init(simple_lock_t, etap_event_t);
179void etap_simplelock_unlock(simple_lock_t);
180void etap_simplelock_hold(simple_lock_t, pc_t, etap_time_t);
181etap_time_t etap_simplelock_miss(simple_lock_t);
182
183void etap_mutex_init(mutex_t*, etap_event_t);
184void etap_mutex_unlock(mutex_t*);
185void etap_mutex_hold(mutex_t*, pc_t, etap_time_t);
186etap_time_t etap_mutex_miss(mutex_t*);
187#else /* ETAP_LOCK_TRACE */
188#define ETAPCALL(stmt)
189#endif /* ETAP_LOCK_TRACE */
190
191#if USLOCK_DEBUG
192#define USLDBG(stmt) stmt
193void usld_lock_init(usimple_lock_t, etap_event_t);
194void usld_lock_pre(usimple_lock_t, pc_t);
195void usld_lock_post(usimple_lock_t, pc_t);
196void usld_unlock(usimple_lock_t, pc_t);
197void usld_lock_try_pre(usimple_lock_t, pc_t);
198void usld_lock_try_post(usimple_lock_t, pc_t);
199void usld_lock_held(usimple_lock_t);
200void usld_lock_none_held(void);
201int usld_lock_common_checks(usimple_lock_t, char *);
202#else /* USLOCK_DEBUG */
203#define USLDBG(stmt)
204#endif /* USLOCK_DEBUG */
205
206/*
207 * Initialize a usimple_lock.
208 *
209 * No change in preemption state.
210 */
211void
212usimple_lock_init(
213 usimple_lock_t l,
214 etap_event_t event)
215{
216 USLDBG(usld_lock_init(l, event));
217 ETAPCALL(etap_simplelock_init((l),(event)));
218 hw_lock_init(&l->interlock);
219}
220
221
222/*
223 * Acquire a usimple_lock.
224 *
225 * Returns with preemption disabled. Note
226 * that the hw_lock routines are responsible for
227 * maintaining preemption state.
228 */
229void
230usimple_lock(
231 usimple_lock_t l)
232{
233 int i;
234 pc_t pc;
235#if ETAP_LOCK_TRACE
236 etap_time_t start_wait_time;
237 int no_miss_info = 0;
238#endif /* ETAP_LOCK_TRACE */
239#if USLOCK_DEBUG
240 int count = 0;
241#endif /* USLOCK_DEBUG */
242
243 OBTAIN_PC(pc, l);
244 USLDBG(usld_lock_pre(l, pc));
245#if ETAP_LOCK_TRACE
246 ETAP_TIME_CLEAR(start_wait_time);
247#endif /* ETAP_LOCK_TRACE */
248
249	if(!hw_lock_to(&l->interlock, LockTimeOut))	/* Try to get the lock with a timeout */
250 panic("simple lock deadlock detection - l=%08X, cpu=%d, ret=%08X", l, cpu_number(), pc);
251
252 ETAPCALL(etap_simplelock_hold(l, pc, start_wait_time));
253 USLDBG(usld_lock_post(l, pc));
254}
255
256
257/*
258 * Release a usimple_lock.
259 *
260 * Returns with preemption enabled. Note
261 * that the hw_lock routines are responsible for
262 * maintaining preemption state.
263 */
264void
265usimple_unlock(
266 usimple_lock_t l)
267{
268 pc_t pc;
269
270// checkNMI(); /* (TEST/DEBUG) */
271
272 OBTAIN_PC(pc, l);
273 USLDBG(usld_unlock(l, pc));
274 ETAPCALL(etap_simplelock_unlock(l));
275 hw_lock_unlock(&l->interlock);
276}
277
278
279/*
280 * Conditionally acquire a usimple_lock.
281 *
282 * On success, returns with preemption disabled.
283 * On failure, returns with preemption in the same state
284 * as when first invoked. Note that the hw_lock routines
285 * are responsible for maintaining preemption state.
286 *
287 * XXX No stats are gathered on a miss; I preserved this
288 * behavior from the original assembly-language code, but
289 * doesn't it make sense to log misses? XXX
290 */
291unsigned int
292usimple_lock_try(
293 usimple_lock_t l)
294{
295 pc_t pc;
296 unsigned int success;
297 etap_time_t zero_time;
298
299 OBTAIN_PC(pc, l);
300 USLDBG(usld_lock_try_pre(l, pc));
301 if (success = hw_lock_try(&l->interlock)) {
302 USLDBG(usld_lock_try_post(l, pc));
303 ETAP_TIME_CLEAR(zero_time);
304 ETAPCALL(etap_simplelock_hold(l, pc, zero_time));
305 }
306 return success;
307}
308
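
/*
 * Illustrative sketch, not part of the original file: typical use of the
 * portable usimple_lock interface above.  The caller is assumed to have
 * set the lock up with usimple_lock_init(); the function name is
 * hypothetical.
 */
#if 0	/* illustrative sketch -- not compiled */
void
example_usimple_use(
	usimple_lock_t	l)
{
	usimple_lock(l);			/* spins; returns with preemption disabled */
	/* ... protected work ... */
	usimple_unlock(l);			/* preemption re-enabled */

	if (usimple_lock_try(l)) {		/* non-blocking attempt */
		/* ... protected work ... */
		usimple_unlock(l);
	}
}
#endif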
309#if ETAP_LOCK_TRACE
310void
311simple_lock_no_trace(
312 simple_lock_t l)
313{
314 pc_t pc;
315
316 OBTAIN_PC(pc, l);
317 USLDBG(usld_lock_pre(l, pc));
318 while (!hw_lock_try(&l->interlock)) {
319 while (hw_lock_held(&l->interlock)) {
320 /*
321 * Spin watching the lock value in cache,
322 * without consuming external bus cycles.
323 * On most SMP architectures, the atomic
324 * instruction(s) used by hw_lock_try
325 * cost much, much more than an ordinary
326 * memory read.
327 */
328 }
329 }
330 USLDBG(usld_lock_post(l, pc));
331}
332
333void
334simple_unlock_no_trace(
335 simple_lock_t l)
336{
337 pc_t pc;
338
339 OBTAIN_PC(pc, l);
340 USLDBG(usld_unlock(l, pc));
341 hw_lock_unlock(&l->interlock);
342}
343
344int
345simple_lock_try_no_trace(
346 simple_lock_t l)
347{
348 pc_t pc;
349 unsigned int success;
350
351 OBTAIN_PC(pc, l);
352 USLDBG(usld_lock_try_pre(l, pc));
353 if (success = hw_lock_try(&l->interlock)) {
354 USLDBG(usld_lock_try_post(l, pc));
355 }
356 return success;
357}
358#endif /* ETAP_LOCK_TRACE */
359
360
361#if USLOCK_DEBUG
362/*
363 * Verify that the lock is locked and owned by
364 * the current thread.
365 */
366void
367usimple_lock_held(
368 usimple_lock_t l)
369{
370 usld_lock_held(l);
371}
372
373
374/*
375 * Verify that no usimple_locks are held by
376 * this processor. Typically used in a
377 * trap handler when returning to user mode
378 * or in a path known to relinquish the processor.
379 */
380void
381usimple_lock_none_held(void)
382{
383 usld_lock_none_held();
384}
385#endif /* USLOCK_DEBUG */
386
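/*
 * Illustrative sketch, not part of the original file: the debug wrapper
 * above is intended for sanity checks like this one in a (hypothetical)
 * return-to-user path.
 */
#if 0	/* illustrative sketch -- not compiled */
void
example_return_to_user_check(void)
{
#if USLOCK_DEBUG
	usimple_lock_none_held();	/* panics if this cpu still holds a usimple_lock */
#endif
}
#endif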
387
388#if USLOCK_DEBUG
389/*
390 * States of a usimple_lock. The default when initializing
391 * a usimple_lock is setting it up for debug checking.
392 */
393#define USLOCK_CHECKED 0x0001 /* lock is being checked */
394#define USLOCK_TAKEN 0x0002 /* lock has been taken */
395#define USLOCK_INIT 0xBAA0 /* lock has been initialized */
396#define USLOCK_INITIALIZED (USLOCK_INIT|USLOCK_CHECKED)
397#define USLOCK_CHECKING(l) (uslock_check && \
398 ((l)->debug.state & USLOCK_CHECKED))
399
400/*
401 * Maintain a per-cpu stack of acquired usimple_locks.
402 */
403void usl_stack_push(usimple_lock_t, int);
404void usl_stack_pop(usimple_lock_t, int);
405
406/*
407 * Trace activities of a particularly interesting lock.
408 */
409void usl_trace(usimple_lock_t, int, pc_t, const char *);
410
411
412/*
413 * Initialize the debugging information contained
414 * in a usimple_lock.
415 */
416void
417usld_lock_init(
418 usimple_lock_t l,
419 etap_event_t type)
420{
421 if (l == USIMPLE_LOCK_NULL)
422 panic("lock initialization: null lock pointer");
423 l->lock_type = USLOCK_TAG;
424 l->debug.state = uslock_check ? USLOCK_INITIALIZED : 0;
425 l->debug.lock_cpu = l->debug.unlock_cpu = 0;
426 l->debug.lock_pc = l->debug.unlock_pc = INVALID_PC;
427 l->debug.lock_thread = l->debug.unlock_thread = INVALID_THREAD;
428 l->debug.duration[0] = l->debug.duration[1] = 0;
429 l->debug.unlock_cpu = l->debug.unlock_cpu = 0;
430 l->debug.unlock_pc = l->debug.unlock_pc = INVALID_PC;
431 l->debug.unlock_thread = l->debug.unlock_thread = INVALID_THREAD;
432}
433
434
435/*
436 * These checks apply to all usimple_locks, not just
437 * those with USLOCK_CHECKED turned on.
438 */
439int
440usld_lock_common_checks(
441 usimple_lock_t l,
442 char *caller)
443{
444 if (l == USIMPLE_LOCK_NULL)
445 panic("%s: null lock pointer", caller);
446 if (l->lock_type != USLOCK_TAG)
447 panic("%s: 0x%x is not a usimple lock", caller, (integer_t) l);
448 if (!(l->debug.state & USLOCK_INIT))
449 panic("%s: 0x%x is not an initialized lock",
450 caller, (integer_t) l);
451 return USLOCK_CHECKING(l);
452}
453
454
455/*
456 * Debug checks on a usimple_lock just before attempting
457 * to acquire it.
458 */
459/* ARGSUSED */
460void
461usld_lock_pre(
462 usimple_lock_t l,
463 pc_t pc)
464{
465 char *caller = "usimple_lock";
466
467
468#if 0
469 printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n", /* (TEST/DEBUG) */
470 l->debug.lock_pc,
471 l->debug.lock_thread,
472 l->debug.state,
473 l->debug.lock_cpu,
474 l->debug.unlock_thread,
475 l->debug.unlock_cpu,
476 l->debug.unlock_pc,
477 caller);
478#endif
479
480 if (!usld_lock_common_checks(l, caller))
481 return;
482
483/*
484 * Note that we have a weird case where we are getting a lock when we are
485 * in the process of putting the system to sleep. We are running with no
486 * current threads, therefore we can't tell if we are trying to retake a lock
487 * we already hold or if another processor holds it. Therefore we just
488 * ignore this test if the locking thread is 0.
489 */
490
491 if ((l->debug.state & USLOCK_TAKEN) && l->debug.lock_thread &&
492 l->debug.lock_thread == (void *) current_thread()) {
493 printf("%s: lock 0x%x already locked (at 0x%x) by",
494 caller, (integer_t) l, l->debug.lock_pc);
495 printf(" current thread 0x%x (new attempt at pc 0x%x)\n",
496 l->debug.lock_thread, pc);
497 panic(caller);
498 }
499 mp_disable_preemption();
500 usl_trace(l, cpu_number(), pc, caller);
501 mp_enable_preemption();
502}
503
504
505/*
506 * Debug checks on a usimple_lock just after acquiring it.
507 *
508 * Pre-emption has been disabled at this point,
509 * so we are safe in using cpu_number.
510 */
511void
512usld_lock_post(
513 usimple_lock_t l,
514 pc_t pc)
515{
516 register int mycpu;
517 char *caller = "successful usimple_lock";
518
519
520#if 0
521 printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n", /* (TEST/DEBUG) */
522 l->debug.lock_pc,
523 l->debug.lock_thread,
524 l->debug.state,
525 l->debug.lock_cpu,
526 l->debug.unlock_thread,
527 l->debug.unlock_cpu,
528 l->debug.unlock_pc,
529 caller);
530#endif
531
532 if (!usld_lock_common_checks(l, caller))
533 return;
534
535 if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
536 panic("%s: lock 0x%x became uninitialized",
537 caller, (integer_t) l);
538 if ((l->debug.state & USLOCK_TAKEN))
539 panic("%s: lock 0x%x became TAKEN by someone else",
540 caller, (integer_t) l);
541
542 mycpu = cpu_number();
543 l->debug.lock_thread = (void *)current_thread();
544 l->debug.state |= USLOCK_TAKEN;
545 l->debug.lock_pc = pc;
546 l->debug.lock_cpu = mycpu;
547
548 usl_stack_push(l, mycpu);
549 usl_trace(l, mycpu, pc, caller);
550}
551
552
553/*
554 * Debug checks on a usimple_lock just before
555 * releasing it. Note that the caller has not
556 * yet released the hardware lock.
557 *
558 * Preemption is still disabled, so there's
559 * no problem using cpu_number.
560 */
561void
562usld_unlock(
563 usimple_lock_t l,
564 pc_t pc)
565{
566 register int mycpu;
567 char *caller = "usimple_unlock";
568
569
570#if 0
571 printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n", /* (TEST/DEBUG) */
572 l->debug.lock_pc,
573 l->debug.lock_thread,
574 l->debug.state,
575 l->debug.lock_cpu,
576 l->debug.unlock_thread,
577 l->debug.unlock_cpu,
578 l->debug.unlock_pc,
579 caller);
580#endif
581
582 if (!usld_lock_common_checks(l, caller))
583 return;
584
585 mycpu = cpu_number();
586
587 if (!(l->debug.state & USLOCK_TAKEN))
588 panic("%s: lock 0x%x hasn't been taken",
589 caller, (integer_t) l);
590 if (l->debug.lock_thread != (void *) current_thread())
591 panic("%s: unlocking lock 0x%x, owned by thread 0x%x",
592 caller, (integer_t) l, l->debug.lock_thread);
593 if (l->debug.lock_cpu != mycpu) {
594 printf("%s: unlocking lock 0x%x on cpu 0x%x",
595 caller, (integer_t) l, mycpu);
596 printf(" (acquired on cpu 0x%x)\n", l->debug.lock_cpu);
597 panic(caller);
598 }
599 usl_trace(l, mycpu, pc, caller);
600 usl_stack_pop(l, mycpu);
601
602 l->debug.unlock_thread = l->debug.lock_thread;
603	l->debug.lock_thread = INVALID_THREAD;
604 l->debug.state &= ~USLOCK_TAKEN;
605 l->debug.unlock_pc = pc;
606 l->debug.unlock_cpu = mycpu;
607}
608
609
610/*
611 * Debug checks on a usimple_lock just before
612 * attempting to acquire it.
613 *
614 * Preemption isn't guaranteed to be disabled.
615 */
616void
617usld_lock_try_pre(
618 usimple_lock_t l,
619 pc_t pc)
620{
621 char *caller = "usimple_lock_try";
622
623 if (!usld_lock_common_checks(l, caller))
624 return;
625 mp_disable_preemption();
626 usl_trace(l, cpu_number(), pc, caller);
627 mp_enable_preemption();
628}
629
630
631/*
632 * Debug checks on a usimple_lock just after
633 * successfully attempting to acquire it.
634 *
635 * Preemption has been disabled by the
636 * lock acquisition attempt, so it's safe
637 * to use cpu_number.
638 */
639void
640usld_lock_try_post(
641 usimple_lock_t l,
642 pc_t pc)
643{
644 register int mycpu;
645 char *caller = "successful usimple_lock_try";
646
647 if (!usld_lock_common_checks(l, caller))
648 return;
649
650 if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
651 panic("%s: lock 0x%x became uninitialized",
652 caller, (integer_t) l);
653 if ((l->debug.state & USLOCK_TAKEN))
654 panic("%s: lock 0x%x became TAKEN by someone else",
655 caller, (integer_t) l);
656
657 mycpu = cpu_number();
658 l->debug.lock_thread = (void *) current_thread();
659 l->debug.state |= USLOCK_TAKEN;
660 l->debug.lock_pc = pc;
661 l->debug.lock_cpu = mycpu;
662
663#if 0
664 printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n", /* (TEST/DEBUG) */
665 l->debug.lock_pc,
666 l->debug.lock_thread,
667 l->debug.state,
668 l->debug.lock_cpu,
669 l->debug.unlock_thread,
670 l->debug.unlock_cpu,
671 l->debug.unlock_pc,
672 caller);
673#endif
674
675 usl_stack_push(l, mycpu);
676 usl_trace(l, mycpu, pc, caller);
677}
678
679
680/*
681 * Determine whether the lock in question is owned
682 * by the current thread.
683 */
684void
685usld_lock_held(
686 usimple_lock_t l)
687{
688 char *caller = "usimple_lock_held";
689
690
691#if 0
692 printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n", /* (TEST/DEBUG) */
693 l->debug.lock_pc,
694 l->debug.lock_thread,
695 l->debug.state,
696 l->debug.lock_cpu,
697 l->debug.unlock_thread,
698 l->debug.unlock_cpu,
699 l->debug.unlock_pc,
700 caller);
701#endif
702
703 if (!usld_lock_common_checks(l, caller))
704 return;
705
706 if (!(l->debug.state & USLOCK_TAKEN))
707 panic("%s: lock 0x%x hasn't been taken",
708 caller, (integer_t) l);
709 if (l->debug.lock_thread != (void *) current_thread())
710 panic("%s: lock 0x%x is owned by thread 0x%x", caller,
711 (integer_t) l, (integer_t) l->debug.lock_thread);
712
713 /*
714 * The usimple_lock is active, so preemption
715 * is disabled and the current cpu should
716 * match the one recorded at lock acquisition time.
717 */
718 if (l->debug.lock_cpu != cpu_number())
719 panic("%s: current cpu 0x%x isn't acquiring cpu 0x%x",
720 caller, cpu_number(), (integer_t) l->debug.lock_cpu);
721}
722
723
724/*
725 * Per-cpu stack of currently active usimple_locks.
726 * Requires spl protection so that interrupt-level
727 * locks plug-n-play with their thread-context friends.
728 */
729#define USLOCK_STACK_DEPTH 20
730usimple_lock_t uslock_stack[NCPUS][USLOCK_STACK_DEPTH];
731unsigned int uslock_stack_index[NCPUS];
732boolean_t uslock_stack_enabled = FALSE;
733
734
735/*
736 * Record a usimple_lock just acquired on
737 * the current processor.
738 *
739 * Preemption has been disabled by lock
740 * acquisition, so it's safe to use the cpu number
741 * specified by the caller.
742 */
743void
744usl_stack_push(
745 usimple_lock_t l,
746 int mycpu)
747{
748 boolean_t s;
749
750 if (uslock_stack_enabled == FALSE)
751 return;
752
753 DISABLE_INTERRUPTS(s);
754 assert(uslock_stack_index[mycpu] >= 0);
755 assert(uslock_stack_index[mycpu] < USLOCK_STACK_DEPTH);
756 if (uslock_stack_index[mycpu] >= USLOCK_STACK_DEPTH) {
757 printf("usl_stack_push (cpu 0x%x): too many locks (%d)",
758 mycpu, uslock_stack_index[mycpu]);
759 printf(" disabling stacks\n");
760 uslock_stack_enabled = FALSE;
761 ENABLE_INTERRUPTS(s);
762 return;
763 }
764 uslock_stack[mycpu][uslock_stack_index[mycpu]] = l;
765 uslock_stack_index[mycpu]++;
766 ENABLE_INTERRUPTS(s);
767}
768
769
770/*
771 * Eliminate the entry for a usimple_lock
772 * that had been active on the current processor.
773 *
774 * Preemption has been disabled by lock
775 * acquisition, and we haven't yet actually
776 * released the hardware lock associated with
777 * this usimple_lock, so it's safe to use the
778 * cpu number supplied by the caller.
779 */
780void
781usl_stack_pop(
782 usimple_lock_t l,
783 int mycpu)
784{
785 unsigned int i, index;
786 boolean_t s;
787
788 if (uslock_stack_enabled == FALSE)
789 return;
790
791 DISABLE_INTERRUPTS(s);
792 assert(uslock_stack_index[mycpu] > 0);
793 assert(uslock_stack_index[mycpu] <= USLOCK_STACK_DEPTH);
794 if (uslock_stack_index[mycpu] == 0) {
795 printf("usl_stack_pop (cpu 0x%x): not enough locks (%d)",
796 mycpu, uslock_stack_index[mycpu]);
797 printf(" disabling stacks\n");
798 uslock_stack_enabled = FALSE;
799 ENABLE_INTERRUPTS(s);
800 return;
801 }
802 index = --uslock_stack_index[mycpu];
803 for (i = 0; i <= index; ++i) {
804 if (uslock_stack[mycpu][i] == l) {
805 if (i != index)
806 uslock_stack[mycpu][i] =
807 uslock_stack[mycpu][index];
808 ENABLE_INTERRUPTS(s);
809 return;
810 }
811 }
812 ENABLE_INTERRUPTS(s);
813 panic("usl_stack_pop: can't find usimple_lock 0x%x", l);
814}
815
816
817/*
818 * Determine whether any usimple_locks are currently held.
819 *
820 * Caller's preemption state is uncertain. If
821 * preemption has been disabled, this check is accurate.
822 * Otherwise, this check is just a guess. We do the best
823 * we can by disabling scheduler interrupts, so at least
824 * the check is accurate w.r.t. whatever cpu we're running
825 * on while in this routine.
826 */
827void
828usld_lock_none_held()
829{
830 register int mycpu;
831 boolean_t s;
832 unsigned int locks_held;
833 char *caller = "usimple_lock_none_held";
834
835 DISABLE_INTERRUPTS(s);
836 mp_disable_preemption();
837 mycpu = cpu_number();
838 locks_held = uslock_stack_index[mycpu];
839 mp_enable_preemption();
840 ENABLE_INTERRUPTS(s);
841 if (locks_held > 0)
842 panic("%s: no locks should be held (0x%x locks held)",
843 caller, (integer_t) locks_held);
844}
845
846
847/*
848 * For very special cases, set traced_lock to point to a
849 * specific lock of interest. The result is a series of
850 * XPRs showing lock operations on that lock. The lock_seq
851 * value is used to show the order of those operations.
852 */
853usimple_lock_t traced_lock;
854unsigned int lock_seq;
855
856void
857usl_trace(
858 usimple_lock_t l,
859 int mycpu,
860 pc_t pc,
861 const char * op_name)
862{
863 if (traced_lock == l) {
864 XPR(XPR_SLOCK,
865 "seq %d, cpu %d, %s @ %x\n",
866 (integer_t) lock_seq, (integer_t) mycpu,
867 (integer_t) op_name, (integer_t) pc, 0);
868 lock_seq++;
869 }
870}
871
872
873
874#if MACH_KDB
875#define printf kdbprintf
876void db_show_all_slocks(void);
877void
878db_show_all_slocks(void)
879{
880 unsigned int i, index;
881 int mycpu = cpu_number();
882 usimple_lock_t l;
883
884 if (uslock_stack_enabled == FALSE) {
885 printf("Lock stack not enabled\n");
886 return;
887 }
888
889#if 0
890 if (!mach_slocks_init)
891 iprintf("WARNING: simple locks stack may not be accurate\n");
892#endif
893 assert(uslock_stack_index[mycpu] >= 0);
894 assert(uslock_stack_index[mycpu] <= USLOCK_STACK_DEPTH);
895 index = uslock_stack_index[mycpu];
896 for (i = 0; i < index; ++i) {
897 l = uslock_stack[mycpu][i];
898 iprintf("%d: ", i);
899 db_printsym((vm_offset_t)l, DB_STGY_ANY);
900 if (l->debug.lock_pc != INVALID_PC) {
901 printf(" locked by ");
902 db_printsym((int)l->debug.lock_pc, DB_STGY_PROC);
903 }
904 printf("\n");
905 }
906}
907#endif /* MACH_KDB */
908
909#endif /* USLOCK_DEBUG */
910
911/* #endif USIMPLE_LOCK_CALLS */
912
913/*
914 * Routine: lock_alloc
915 * Function:
916 * Allocate a lock for external users who cannot
917 * hard-code the structure definition into their
918 * objects.
919 * For now just use kalloc, but a zone is probably
920 * warranted.
921 */
922lock_t *
923lock_alloc(
924 boolean_t can_sleep,
925 etap_event_t event,
926 etap_event_t i_event)
927{
928 lock_t *l;
929
930 if ((l = (lock_t *)kalloc(sizeof(lock_t))) != 0)
931 lock_init(l, can_sleep, event, i_event);
932 return(l);
933}
934
935/*
936 * Routine: lock_free
937 * Function:
938 * Free a lock allocated for external users.
939 * For now just use kfree, but a zone is probably
940 * warranted.
941 */
942void
943lock_free(
944 lock_t *l)
945{
946 kfree((vm_offset_t)l, sizeof(lock_t));
947}
948
949
950/*
951 * Routine: lock_init
952 * Function:
953 * Initialize a lock; required before use.
954 * Note that clients declare the "struct lock"
955 * variables and then initialize them, rather
956 * than getting a new one from this module.
957 */
958void
959lock_init(
960 lock_t *l,
961 boolean_t can_sleep,
962 etap_event_t event,
963 etap_event_t i_event)
964{
965 (void) memset((void *) l, 0, sizeof(lock_t));
966
967#if ETAP_LOCK_TRACE
968 etap_event_table_assign(&l->u.event_table_chain, event);
969 l->u.s.start_list = SD_ENTRY_NULL;
970#endif /* ETAP_LOCK_TRACE */
971
972 simple_lock_init(&l->interlock, i_event);
973 l->want_write = FALSE;
974 l->want_upgrade = FALSE;
975 l->read_count = 0;
976 l->can_sleep = can_sleep;
977
978#if ETAP_LOCK_ACCUMULATE
979 l->cbuff_write = etap_cbuff_reserve(lock_event_table(l));
980 if (l->cbuff_write != CBUFF_ENTRY_NULL) {
981 l->cbuff_write->event = event;
982 l->cbuff_write->instance = (unsigned long) l;
983 l->cbuff_write->kind = WRITE_LOCK;
984 }
985 l->cbuff_read = CBUFF_ENTRY_NULL;
986#endif /* ETAP_LOCK_ACCUMULATE */
987}
988
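
/*
 * Illustrative sketch, not part of the original file: the two ways a
 * client obtains a read/write lock -- embedding a struct lock and
 * calling lock_init(), or using lock_alloc()/lock_free().  The structure
 * and function names ("example_*") and the event arguments are
 * hypothetical.
 */
#if 0	/* illustrative sketch -- not compiled */
struct example_object {
	lock_t		lock;			/* embedded lock */
	/* ... other fields ... */
};

void
example_setup(
	struct example_object	*obj,
	etap_event_t		event,
	etap_event_t		i_event)
{
	lock_init(&obj->lock, TRUE, event, i_event);	/* sleepable lock */
}

lock_t *
example_alloc(
	etap_event_t		event,
	etap_event_t		i_event)
{
	return (lock_alloc(TRUE, event, i_event));	/* kalloc'd and initialized */
}
#endif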
989
990/*
991 * Sleep locks. These use the same data structure and algorithm
992 * as the spin locks, but the process sleeps while it is waiting
993 * for the lock. These work on uniprocessor systems.
994 */
995
996#define DECREMENTER_TIMEOUT 1000000
997
998void
999lock_write(
1000 register lock_t * l)
1001{
1002 register int i;
1003 start_data_node_t entry = {0};
1004 boolean_t lock_miss = FALSE;
1005 unsigned short dynamic = 0;
1006 unsigned short trace = 0;
1007 etap_time_t total_time;
1008 etap_time_t stop_wait_time;
1009 pc_t pc;
1010#if MACH_LDEBUG
1011 int decrementer;
1012#endif /* MACH_LDEBUG */
1013
1014
1015 ETAP_STAMP(lock_event_table(l), trace, dynamic);
1016 ETAP_CREATE_ENTRY(entry, trace);
1017 MON_ASSIGN_PC(entry->start_pc, pc, trace);
1018
1019 simple_lock(&l->interlock);
1020
1021 /*
1022 * Link the new start_list entry
1023 */
1024 ETAP_LINK_ENTRY(l, entry, trace);
1025
1026#if MACH_LDEBUG
1027 decrementer = DECREMENTER_TIMEOUT;
1028#endif /* MACH_LDEBUG */
1029
1030 /*
1031 * Try to acquire the want_write bit.
1032 */
1033 while (l->want_write) {
1034 if (!lock_miss) {
1035 ETAP_CONTENTION_TIMESTAMP(entry, trace);
1036 lock_miss = TRUE;
1037 }
1038
1039 i = lock_wait_time[l->can_sleep ? 1 : 0];
1040 if (i != 0) {
1041 simple_unlock(&l->interlock);
1042#if MACH_LDEBUG
1043 if (!--decrementer)
1044 Debugger("timeout - want_write");
1045#endif /* MACH_LDEBUG */
1046 while (--i != 0 && l->want_write)
1047 continue;
1048 simple_lock(&l->interlock);
1049 }
1050
1051 if (l->can_sleep && l->want_write) {
1052 l->waiting = TRUE;
1053 ETAP_SET_REASON(current_thread(),
1054 BLOCKED_ON_COMPLEX_LOCK);
1055 thread_sleep_simple_lock((event_t) l,
1056 simple_lock_addr(l->interlock),
1057 THREAD_UNINT);
1058 /* interlock relocked */
1059 }
1060 }
1061 l->want_write = TRUE;
1062
1063 /* Wait for readers (and upgrades) to finish */
1064
1065#if MACH_LDEBUG
1066 decrementer = DECREMENTER_TIMEOUT;
1067#endif /* MACH_LDEBUG */
1068 while ((l->read_count != 0) || l->want_upgrade) {
1069 if (!lock_miss) {
1070 ETAP_CONTENTION_TIMESTAMP(entry,trace);
1071 lock_miss = TRUE;
1072 }
1073
1074 i = lock_wait_time[l->can_sleep ? 1 : 0];
1075 if (i != 0) {
1076 simple_unlock(&l->interlock);
1077#if MACH_LDEBUG
1078 if (!--decrementer)
1079 Debugger("timeout - wait for readers");
1080#endif /* MACH_LDEBUG */
1081 while (--i != 0 && (l->read_count != 0 ||
1082 l->want_upgrade))
1083 continue;
1084 simple_lock(&l->interlock);
1085 }
1086
1087 if (l->can_sleep && (l->read_count != 0 || l->want_upgrade)) {
1088 l->waiting = TRUE;
1089 ETAP_SET_REASON(current_thread(),
1090 BLOCKED_ON_COMPLEX_LOCK);
1091 thread_sleep_simple_lock((event_t) l,
1092 simple_lock_addr(l->interlock),
1093 THREAD_UNINT);
1094 /* interlock relocked */
1095 }
1096 }
1097
1098 /*
1099 * do not collect wait data if either the lock
1100 * was free or no wait traces are enabled.
1101 */
1102
1103 if (lock_miss && ETAP_CONTENTION_ENABLED(trace)) {
1104 ETAP_TIMESTAMP(stop_wait_time);
1105 ETAP_TOTAL_TIME(total_time,
1106 stop_wait_time,
1107 entry->start_wait_time);
1108 CUM_WAIT_ACCUMULATE(l->cbuff_write, total_time, dynamic, trace);
1109 MON_DATA_COLLECT(l,
1110 entry,
1111 total_time,
1112 WRITE_LOCK,
1113 MON_CONTENTION,
1114 trace);
1115 }
1116
1117 simple_unlock(&l->interlock);
1118
1119 /*
1120 * Set start hold time if some type of hold tracing is enabled.
1121 *
1122 * Note: if the stop_wait_time was already stamped, use
1123 * it as the start_hold_time instead of doing an
1124 * expensive bus access.
1125 *
1126 */
1127
1128 if (lock_miss && ETAP_CONTENTION_ENABLED(trace))
1129 ETAP_COPY_START_HOLD_TIME(entry, stop_wait_time, trace);
1130 else
1131 ETAP_DURATION_TIMESTAMP(entry, trace);
1132
1133}
1134
1135void
1136lock_done(
1137 register lock_t * l)
1138{
1139 boolean_t do_wakeup = FALSE;
1140 start_data_node_t entry;
1141 unsigned short dynamic = 0;
1142 unsigned short trace = 0;
1143 etap_time_t stop_hold_time;
1144 etap_time_t total_time;
1145 unsigned long lock_kind;
1146 pc_t pc;
1147
1148
1149 ETAP_STAMP(lock_event_table(l), trace, dynamic);
1150
1151 simple_lock(&l->interlock);
1152
1153 if (l->read_count != 0) {
1154 l->read_count--;
1155 lock_kind = READ_LOCK;
1156 }
1157 else
1158 if (l->want_upgrade) {
1159 l->want_upgrade = FALSE;
1160 lock_kind = WRITE_LOCK;
1161 }
1162 else {
1163 l->want_write = FALSE;
1164 lock_kind = WRITE_LOCK;
1165 }
1166
1167 /*
1168 * There is no reason to wakeup a waiting thread
1169 * if the read-count is non-zero. Consider:
1170 * we must be dropping a read lock
1171 * threads are waiting only if one wants a write lock
1172 * if there are still readers, they can't proceed
1173 */
1174
1175 if (l->waiting && (l->read_count == 0)) {
1176 l->waiting = FALSE;
1177 do_wakeup = TRUE;
1178 }
1179 /*
1180 * Collect hold data if hold tracing is
1181 * enabled.
1182 */
1183
1184 /*
1185 * NOTE: All complex locks whose tracing was on when the
1186 * lock was acquired will have an entry in the start_data
1187 * list.
1188 */
1189
1190 ETAP_UNLINK_ENTRY(l,entry);
1191 if (ETAP_DURATION_ENABLED(trace) && entry != SD_ENTRY_NULL) {
1192 ETAP_TIMESTAMP (stop_hold_time);
1193 ETAP_TOTAL_TIME (total_time,
1194 stop_hold_time,
1195 entry->start_hold_time);
1196
1197 if (lock_kind & WRITE_LOCK)
1198 CUM_HOLD_ACCUMULATE (l->cbuff_write,
1199 total_time,
1200 dynamic,
1201 trace);
1202 else {
1203 CUM_READ_ENTRY_RESERVE(l,l->cbuff_read,trace);
1204 CUM_HOLD_ACCUMULATE (l->cbuff_read,
1205 total_time,
1206 dynamic,
1207 trace);
1208 }
1209 MON_ASSIGN_PC(entry->end_pc,pc,trace);
1210 MON_DATA_COLLECT(l,entry,
1211 total_time,
1212 lock_kind,
1213 MON_DURATION,
1214 trace);
1215 }
1216
1217 simple_unlock(&l->interlock);
1218
1219 ETAP_DESTROY_ENTRY(entry);
1220
1221 if (do_wakeup)
1222 thread_wakeup((event_t) l);
1223}
1224
1225void
1226lock_read(
1227 register lock_t * l)
1228{
1229 register int i;
1230 start_data_node_t entry = {0};
1231 boolean_t lock_miss = FALSE;
1232 unsigned short dynamic = 0;
1233 unsigned short trace = 0;
1234 etap_time_t total_time;
1235 etap_time_t stop_wait_time;
1236 pc_t pc;
1237#if MACH_LDEBUG
1238 int decrementer;
1239#endif /* MACH_LDEBUG */
1240
1241 ETAP_STAMP(lock_event_table(l), trace, dynamic);
1242 ETAP_CREATE_ENTRY(entry, trace);
1243 MON_ASSIGN_PC(entry->start_pc, pc, trace);
1244
1245 simple_lock(&l->interlock);
1246
1247 /*
1248 * Link the new start_list entry
1249 */
1250 ETAP_LINK_ENTRY(l,entry,trace);
1251
1252#if MACH_LDEBUG
1253 decrementer = DECREMENTER_TIMEOUT;
1254#endif /* MACH_LDEBUG */
1255 while (l->want_write || l->want_upgrade) {
1256 if (!lock_miss) {
1257 ETAP_CONTENTION_TIMESTAMP(entry, trace);
1258 lock_miss = TRUE;
1259 }
1260
1261 i = lock_wait_time[l->can_sleep ? 1 : 0];
1262
1263 if (i != 0) {
1264 simple_unlock(&l->interlock);
1265#if MACH_LDEBUG
1266 if (!--decrementer)
1267 Debugger("timeout - wait no writers");
1268#endif /* MACH_LDEBUG */
1269 while (--i != 0 && (l->want_write || l->want_upgrade))
1270 continue;
1271 simple_lock(&l->interlock);
1272 }
1273
1274 if (l->can_sleep && (l->want_write || l->want_upgrade)) {
1275 l->waiting = TRUE;
1276 thread_sleep_simple_lock((event_t) l,
1277 simple_lock_addr(l->interlock),
1278 THREAD_UNINT);
1279 /* interlock relocked */
1280 }
1281 }
1282
1283 l->read_count++;
1284
1285 /*
1286 * Do not collect wait data if the lock was free
1287 * or if no wait traces are enabled.
1288 */
1289
1290 if (lock_miss && ETAP_CONTENTION_ENABLED(trace)) {
1291 ETAP_TIMESTAMP(stop_wait_time);
1292 ETAP_TOTAL_TIME(total_time,
1293 stop_wait_time,
1294 entry->start_wait_time);
1295 CUM_READ_ENTRY_RESERVE(l, l->cbuff_read, trace);
1296 CUM_WAIT_ACCUMULATE(l->cbuff_read, total_time, dynamic, trace);
1297 MON_DATA_COLLECT(l,
1298 entry,
1299 total_time,
1300 READ_LOCK,
1301 MON_CONTENTION,
1302 trace);
1303 }
1304 simple_unlock(&l->interlock);
1305
1306 /*
1307 * Set start hold time if some type of hold tracing is enabled.
1308 *
1309 * Note: if the stop_wait_time was already stamped, use
1310 * it instead of doing an expensive bus access.
1311 *
1312 */
1313
1314 if (lock_miss && ETAP_CONTENTION_ENABLED(trace))
1315 ETAP_COPY_START_HOLD_TIME(entry, stop_wait_time, trace);
1316 else
1317 ETAP_DURATION_TIMESTAMP(entry,trace);
1318}
1319
1320
1321/*
1322 * Routine: lock_read_to_write
1323 * Function:
1324 * Improves a read-only lock to one with
1325 * write permission. If another reader has
1326 * already requested an upgrade to a write lock,
1327 * no lock is held upon return.
1328 *
1329 * Returns TRUE if the upgrade *failed*.
1330 */
1331
1332boolean_t
1333lock_read_to_write(
1334 register lock_t * l)
1335{
1336 register int i;
1337 boolean_t do_wakeup = FALSE;
1338 start_data_node_t entry = {0};
1339 boolean_t lock_miss = FALSE;
1340 unsigned short dynamic = 0;
1341 unsigned short trace = 0;
1342 etap_time_t total_time;
1343 etap_time_t stop_time;
1344 pc_t pc;
1345#if MACH_LDEBUG
1346 int decrementer;
1347#endif /* MACH_LDEBUG */
1348
1349
1350 ETAP_STAMP(lock_event_table(l), trace, dynamic);
1351
1352 simple_lock(&l->interlock);
1353
1354 l->read_count--;
1355
1356 /*
1357 * Since the read lock is lost whether the write lock
1358 * is acquired or not, read hold data is collected here.
1359 * This, of course, is assuming some type of hold
1360 * tracing is enabled.
1361 *
1362 * Note: trace is set to zero if the entry does not exist.
1363 */
1364
1365 ETAP_FIND_ENTRY(l, entry, trace);
1366
1367 if (ETAP_DURATION_ENABLED(trace)) {
1368 ETAP_TIMESTAMP(stop_time);
1369 ETAP_TOTAL_TIME(total_time, stop_time, entry->start_hold_time);
1370 CUM_HOLD_ACCUMULATE(l->cbuff_read, total_time, dynamic, trace);
1371 MON_ASSIGN_PC(entry->end_pc, pc, trace);
1372 MON_DATA_COLLECT(l,
1373 entry,
1374 total_time,
1375 READ_LOCK,
1376 MON_DURATION,
1377 trace);
1378 }
1379
1380 if (l->want_upgrade) {
1381 /*
1382 * Someone else has requested upgrade.
1383 * Since we've released a read lock, wake
1384 * him up.
1385 */
1386 if (l->waiting && (l->read_count == 0)) {
1387 l->waiting = FALSE;
1388 do_wakeup = TRUE;
1389 }
1390
1391 ETAP_UNLINK_ENTRY(l, entry);
1392 simple_unlock(&l->interlock);
1393 ETAP_DESTROY_ENTRY(entry);
1394
1395 if (do_wakeup)
1396 thread_wakeup((event_t) l);
1397 return (TRUE);
1398 }
1399
1400 l->want_upgrade = TRUE;
1401
1402 MON_ASSIGN_PC(entry->start_pc, pc, trace);
1403
1404#if MACH_LDEBUG
1405 decrementer = DECREMENTER_TIMEOUT;
1406#endif /* MACH_LDEBUG */
1407 while (l->read_count != 0) {
1408 if (!lock_miss) {
1409 ETAP_CONTENTION_TIMESTAMP(entry, trace);
1410 lock_miss = TRUE;
1411 }
1412
1413 i = lock_wait_time[l->can_sleep ? 1 : 0];
1414
1415 if (i != 0) {
1416 simple_unlock(&l->interlock);
1417#if MACH_LDEBUG
1418 if (!--decrementer)
1419 Debugger("timeout - read_count");
1420#endif /* MACH_LDEBUG */
1421 while (--i != 0 && l->read_count != 0)
1422 continue;
1423 simple_lock(&l->interlock);
1424 }
1425
1426 if (l->can_sleep && l->read_count != 0) {
1427 l->waiting = TRUE;
1428 thread_sleep_simple_lock((event_t) l,
1429 simple_lock_addr(l->interlock),
1430 THREAD_UNINT);
1431 /* interlock relocked */
1432 }
1433 }
1434
1435 /*
1436 * do not collect wait data if the lock was free
1437 * or if no wait traces are enabled.
1438 */
1439
1440 if (lock_miss && ETAP_CONTENTION_ENABLED(trace)) {
1441 ETAP_TIMESTAMP (stop_time);
1442 ETAP_TOTAL_TIME(total_time, stop_time, entry->start_wait_time);
1443 CUM_WAIT_ACCUMULATE(l->cbuff_write, total_time, dynamic, trace);
1444 MON_DATA_COLLECT(l,
1445 entry,
1446 total_time,
1447 WRITE_LOCK,
1448 MON_CONTENTION,
1449 trace);
1450 }
1451
1452 simple_unlock(&l->interlock);
1453
1454 /*
1455 * Set start hold time if some type of hold tracing is enabled
1456 *
1457 * Note: if the stop_time was already stamped, use
1458 * it as the new start_hold_time instead of doing
1459 * an expensive VME access.
1460 *
1461 */
1462
1463 if (lock_miss && ETAP_CONTENTION_ENABLED(trace))
1464 ETAP_COPY_START_HOLD_TIME(entry, stop_time, trace);
1465 else
1466 ETAP_DURATION_TIMESTAMP(entry, trace);
1467
1468 return (FALSE);
1469}
1470
1471void
1472lock_write_to_read(
1473 register lock_t * l)
1474{
1475 boolean_t do_wakeup = FALSE;
1476 start_data_node_t entry = {0};
1477 unsigned short dynamic = 0;
1478 unsigned short trace = 0;
1479 etap_time_t stop_hold_time;
1480 etap_time_t total_time;
1481 pc_t pc;
1482
1483 ETAP_STAMP(lock_event_table(l), trace,dynamic);
1484
1485 simple_lock(&l->interlock);
1486
1487 l->read_count++;
1488 if (l->want_upgrade)
1489 l->want_upgrade = FALSE;
1490 else
1491 l->want_write = FALSE;
1492
1493 if (l->waiting) {
1494 l->waiting = FALSE;
1495 do_wakeup = TRUE;
1496 }
1497
1498 /*
1499 * Since we are switching from a write lock to a read lock,
1500 * the write lock data is stored and the read lock data
1501 * collection begins.
1502 *
1503 * Note: trace is set to zero if the entry does not exist.
1504 */
1505
1506 ETAP_FIND_ENTRY(l, entry, trace);
1507
1508 if (ETAP_DURATION_ENABLED(trace)) {
1509 ETAP_TIMESTAMP (stop_hold_time);
1510 ETAP_TOTAL_TIME(total_time, stop_hold_time, entry->start_hold_time);
1511 CUM_HOLD_ACCUMULATE(l->cbuff_write, total_time, dynamic, trace);
1512 MON_ASSIGN_PC(entry->end_pc, pc, trace);
1513 MON_DATA_COLLECT(l,
1514 entry,
1515 total_time,
1516 WRITE_LOCK,
1517 MON_DURATION,
1518 trace);
1519 }
1520
1521 simple_unlock(&l->interlock);
1522
1523 /*
1524 * Set start hold time if some type of hold tracing is enabled
1525 *
1526 * Note: if the stop_hold_time was already stamped, use
1527 * it as the new start_hold_time instead of doing
1528 * an expensive bus access.
1529 *
1530 */
1531
1532 if (ETAP_DURATION_ENABLED(trace))
1533 ETAP_COPY_START_HOLD_TIME(entry, stop_hold_time, trace);
1534 else
1535 ETAP_DURATION_TIMESTAMP(entry, trace);
1536
1537 MON_ASSIGN_PC(entry->start_pc, pc, trace);
1538
1539 if (do_wakeup)
1540 thread_wakeup((event_t) l);
1541}
1542
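/*
 * Illustrative sketch, not part of the original file: typical use of the
 * read/write interfaces above, including the upgrade protocol.  Note
 * that lock_read_to_write() returns TRUE when the upgrade FAILED, in
 * which case the caller no longer holds the lock at all and must start
 * over with lock_write().  The function name is hypothetical.
 */
#if 0	/* illustrative sketch -- not compiled */
void
example_modify(
	lock_t		*l)
{
	lock_read(l);				/* shared access */
	/* ... inspect the protected data ... */

	if (lock_read_to_write(l)) {		/* TRUE => upgrade failed, lock dropped */
		lock_write(l);			/* reacquire exclusively from scratch */
		/* ... re-validate the data before modifying ... */
	}

	/* ... modify the protected data (write lock held) ... */

	lock_write_to_read(l);			/* downgrade; readers may proceed */
	/* ... further read-only work ... */
	lock_done(l);				/* releases whichever lock is held */
}
#endif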
1543
1544#if 0 /* Unused */
1545/*
1546 * Routine: lock_try_write
1547 * Function:
1548 * Tries to get a write lock.
1549 *
1550 * Returns FALSE if the lock is not held on return.
1551 */
1552
1553boolean_t
1554lock_try_write(
1555 register lock_t * l)
1556{
1557 start_data_node_t entry = {0};
1558 unsigned short trace = 0;
1559 pc_t pc;
1560
1561 ETAP_STAMP(lock_event_table(l), trace, trace);
1562 ETAP_CREATE_ENTRY(entry, trace);
1563
1564 simple_lock(&l->interlock);
1565
1566 if (l->want_write || l->want_upgrade || l->read_count) {
1567 /*
1568 * Can't get lock.
1569 */
1570 simple_unlock(&l->interlock);
1571 ETAP_DESTROY_ENTRY(entry);
1572 return(FALSE);
1573 }
1574
1575 /*
1576 * Have lock.
1577 */
1578
1579 l->want_write = TRUE;
1580
1581 ETAP_LINK_ENTRY(l, entry, trace);
1582
1583 simple_unlock(&l->interlock);
1584
1585 MON_ASSIGN_PC(entry->start_pc, pc, trace);
1586 ETAP_DURATION_TIMESTAMP(entry, trace);
1587
1588 return(TRUE);
1589}
1590
1591/*
1592 * Routine: lock_try_read
1593 * Function:
1594 * Tries to get a read lock.
1595 *
1596 * Returns FALSE if the lock is not held on return.
1597 */
1598
1599boolean_t
1600lock_try_read(
1601 register lock_t * l)
1602{
1603 start_data_node_t entry = {0};
1604 unsigned short trace = 0;
1605 pc_t pc;
1606
1607 ETAP_STAMP(lock_event_table(l), trace, trace);
1608 ETAP_CREATE_ENTRY(entry, trace);
1609
1610 simple_lock(&l->interlock);
1611
1612 if (l->want_write || l->want_upgrade) {
1613 simple_unlock(&l->interlock);
1614 ETAP_DESTROY_ENTRY(entry);
1615 return(FALSE);
1616 }
1617
1618 l->read_count++;
1619
1620 ETAP_LINK_ENTRY(l, entry, trace);
1621
1622 simple_unlock(&l->interlock);
1623
1624 MON_ASSIGN_PC(entry->start_pc, pc, trace);
1625 ETAP_DURATION_TIMESTAMP(entry, trace);
1626
1627 return(TRUE);
1628}
1629#endif /* Unused */
1630
1631#if MACH_KDB
1632
1633void db_show_one_lock(lock_t *);
1634
1635
1636void
1637db_show_one_lock(
1638 lock_t *lock)
1639{
1640 db_printf("Read_count = 0x%x, %swant_upgrade, %swant_write, ",
1641 lock->read_count,
1642 lock->want_upgrade ? "" : "!",
1643 lock->want_write ? "" : "!");
1644 db_printf("%swaiting, %scan_sleep\n",
1645 lock->waiting ? "" : "!", lock->can_sleep ? "" : "!");
1646 db_printf("Interlock:\n");
1647 db_show_one_simple_lock((db_expr_t)simple_lock_addr(lock->interlock),
1648 TRUE, (db_expr_t)0, (char *)0);
1649}
1650#endif /* MACH_KDB */
1651
1652/*
1653 * The C portion of the mutex package. These routines are only invoked
1654 * if the optimized assembler routines can't do the work.
1655 */
1656
1657/*
1658 * Routine:	mutex_alloc
1659 * Function:
1660 * Allocate a mutex for external users who cannot
1661 * hard-code the structure definition into their
1662 * objects.
1663 * For now just use kalloc, but a zone is probably
1664 * warranted.
1665 */
1666mutex_t *
1667mutex_alloc(
1668 etap_event_t event)
1669{
1670 mutex_t *m;
1671
1672 if ((m = (mutex_t *)kalloc(sizeof(mutex_t))) != 0)
1673 mutex_init(m, event);
1674 return(m);
1675}
1676
1677/*
1678 * Routine: mutex_free
1679 * Function:
1680 * Free a mutex allocated for external users.
1681 * For now just use kfree, but a zone is probably
1682 * warranted.
1683 */
1684void
1685mutex_free(
1686 mutex_t *m)
1687{
1688 kfree((vm_offset_t)m, sizeof(mutex_t));
1689}
1690
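/*
 * Illustrative sketch, not part of the original file: how a client is
 * expected to use the mutex package, assuming the usual mutex_lock()/
 * mutex_unlock() fast-path entry points (implemented elsewhere); the
 * routines below are their contention slow paths.  The function name
 * and event argument are hypothetical.
 */
#if 0	/* illustrative sketch -- not compiled */
void
example_mutex_use(
	etap_event_t	event)
{
	mutex_t		*m;

	if ((m = mutex_alloc(event)) == (mutex_t *)0)
		return;

	mutex_lock(m);			/* may block; slow path is mutex_lock_wait() */
	/* ... protected work ... */
	mutex_unlock(m);		/* slow path is mutex_unlock_wakeup() */

	mutex_free(m);
}
#endif
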
1691/*
1692 * mutex_lock_wait
1693 *
1694 * Invoked in order to wait on contention.
1695 *
1696 * Called with the interlock locked and
1697 * returns it unlocked.
1698 */
1699void
1700mutex_lock_wait (
1701 mutex_t *mutex,
1702 thread_act_t holder)
1703{
1704 thread_t thread, self = current_thread();
1705#if !defined(i386)
1706 integer_t priority;
1707 spl_t s = splsched();
1708
1709 priority = self->last_processor->current_pri;
1710 if (priority < self->priority)
1711 priority = self->priority;
1712 if (priority > MINPRI_KERNEL)
1713 priority = MINPRI_KERNEL;
1714 else
1715 if (priority < BASEPRI_DEFAULT)
1716 priority = BASEPRI_DEFAULT;
1717
1718 thread = holder->thread;
1719 assert(thread->top_act == holder); /* XXX */
1720 thread_lock(thread);
1721 if (mutex->promoted_pri == 0)
1722 thread->promotions++;
1723 if (thread->priority < MINPRI_KERNEL) {
1724 thread->sched_mode |= TH_MODE_PROMOTED;
1725 if ( mutex->promoted_pri < priority &&
1726 thread->sched_pri < priority ) {
1727 KERNEL_DEBUG_CONSTANT(
1728 MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
1729 thread->sched_pri, priority, (int)thread, (int)mutex, 0);
1730
1731 set_sched_pri(thread, priority);
1732 }
1733 }
1734 thread_unlock(thread);
1735 splx(s);
1736
1737 if (mutex->promoted_pri < priority)
1738 mutex->promoted_pri = priority;
1739#endif
1740
1741 if (self->pending_promoter[self->pending_promoter_index] == NULL) {
1742 self->pending_promoter[self->pending_promoter_index] = mutex;
1743 mutex->waiters++;
1744 }
1745 else
1746 if (self->pending_promoter[self->pending_promoter_index] != mutex) {
1747 self->pending_promoter[++self->pending_promoter_index] = mutex;
1748 mutex->waiters++;
1749 }
1750
1751 assert_wait(mutex, THREAD_UNINT);
1752 interlock_unlock(&mutex->interlock);
1753
1754 thread_block(THREAD_CONTINUE_NULL);
1755}
1756
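/*
 * Illustrative sketch, not part of the original file: the priority a
 * waiter donates in mutex_lock_wait() above, restated as a helper.  It
 * is the higher of the waiter's current processor priority and its own
 * priority, clamped into [BASEPRI_DEFAULT, MINPRI_KERNEL].  The helper
 * name is hypothetical.
 */
#if 0	/* illustrative sketch -- not compiled */
static integer_t
example_promotion_pri(
	thread_t	self)
{
	integer_t	priority = self->last_processor->current_pri;

	if (priority < self->priority)
		priority = self->priority;
	if (priority > MINPRI_KERNEL)
		priority = MINPRI_KERNEL;
	else
	if (priority < BASEPRI_DEFAULT)
		priority = BASEPRI_DEFAULT;

	return (priority);
}
#endif
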
1757/*
1758 * mutex_lock_acquire
1759 *
1760 * Invoked on acquiring the mutex when there is
1761 * contention.
1762 *
1763 * Returns the current number of waiters.
1764 *
1765 * Called with the interlock locked.
1766 */
1767int
1768mutex_lock_acquire(
1769 mutex_t *mutex)
1770{
1771 thread_t thread = current_thread();
1772
1773 if (thread->pending_promoter[thread->pending_promoter_index] == mutex) {
1774 thread->pending_promoter[thread->pending_promoter_index] = NULL;
1775 if (thread->pending_promoter_index > 0)
1776 thread->pending_promoter_index--;
1777 mutex->waiters--;
1778 }
1779
1780#if !defined(i386)
1781 if (mutex->waiters > 0) {
1782 integer_t priority = mutex->promoted_pri;
1783 spl_t s = splsched();
1784
1785 thread_lock(thread);
1786 thread->promotions++;
1787 if (thread->priority < MINPRI_KERNEL) {
1788 thread->sched_mode |= TH_MODE_PROMOTED;
1789 if (thread->sched_pri < priority) {
1790 KERNEL_DEBUG_CONSTANT(
1791 MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
1792 thread->sched_pri, priority, 0, (int)mutex, 0);
1793
1794 set_sched_pri(thread, priority);
1795 }
1796 }
1797 thread_unlock(thread);
1798 splx(s);
1799 }
1800 else
1801 mutex->promoted_pri = 0;
1802#endif
1803
1804 return (mutex->waiters);
1805}
1806
1807/*
1808 * mutex_unlock_wakeup
1809 *
1810 * Invoked on unlock when there is contention.
1811 *
1812 * Called with the interlock locked.
1813 */
1814void
1815mutex_unlock_wakeup (
1816 mutex_t *mutex,
1817 thread_act_t holder)
1818{
1819#if !defined(i386)
1820 thread_t thread = current_thread();
1821
1822 if (thread->top_act != holder)
1823 panic("mutex_unlock_wakeup: mutex %x holder %x\n", mutex, holder);
1824
1825 if (thread->promotions > 0) {
1826 spl_t s = splsched();
1827
1828 thread_lock(thread);
1829 if ( --thread->promotions == 0 &&
1830 (thread->sched_mode & TH_MODE_PROMOTED) ) {
1831 thread->sched_mode &= ~TH_MODE_PROMOTED;
1832 if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
1833 KERNEL_DEBUG_CONSTANT(
1834 MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
1835 thread->sched_pri, DEPRESSPRI, 0, (int)mutex, 0);
1836
1837 set_sched_pri(thread, DEPRESSPRI);
1838 }
1839 else {
1840 if (thread->priority < thread->sched_pri) {
1841 KERNEL_DEBUG_CONSTANT(
1842 MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) |
1843 DBG_FUNC_NONE,
1844 thread->sched_pri, thread->priority,
1845 0, (int)mutex, 0);
1846 }
1847
1848 compute_priority(thread, FALSE);
1849 }
1850 }
1851 thread_unlock(thread);
1852 splx(s);
1853 }
1854#endif
1855
1856 assert(mutex->waiters > 0);
1857 thread_wakeup_one(mutex);
1858}
1859
1860/*
1861 * mutex_pause: Called by former callers of simple_lock_pause().
1862 */
1863
1864void
1865mutex_pause(void)
1866{
1867 wait_result_t wait_result;
1868
1869 wait_result = assert_wait_timeout( 1, THREAD_UNINT);
1870 assert(wait_result == THREAD_WAITING);
1871
1872	ETAP_SET_REASON(current_thread(), BLOCKED_ON_MUTEX_LOCK);
1873
1874 wait_result = thread_block(THREAD_CONTINUE_NULL);
1875 assert(wait_result == THREAD_TIMED_OUT);
1876}
1877
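/*
 * Illustrative sketch, not part of the original file: the kind of
 * try/back-off loop mutex_pause() is meant for, assuming the usual
 * mutex_try() entry point.  The function name is hypothetical.
 */
#if 0	/* illustrative sketch -- not compiled */
void
example_try_loop(
	mutex_t		*m)
{
	while (!mutex_try(m))		/* don't block while other locks are held */
		mutex_pause();		/* sleep briefly, then retry */
	/* ... protected work ... */
	mutex_unlock(m);
}
#endif
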
1878#if MACH_KDB
1879/*
1880 * Routines to print out simple_locks and mutexes in a nicely-formatted
1881 * fashion.
1882 */
1883
1884char *simple_lock_labels = "ENTRY ILK THREAD DURATION CALLER";
1885char *mutex_labels = "ENTRY LOCKED WAITERS THREAD CALLER";
1886
1887void
1888db_show_one_simple_lock (
1889 db_expr_t addr,
1890 boolean_t have_addr,
1891 db_expr_t count,
1892 char * modif)
1893{
1894 simple_lock_t saddr = (simple_lock_t)addr;
1895
1896 if (saddr == (simple_lock_t)0 || !have_addr) {
1897 db_error ("No simple_lock\n");
1898 }
1899#if USLOCK_DEBUG
1900 else if (saddr->lock_type != USLOCK_TAG)
1901 db_error ("Not a simple_lock\n");
1902#endif /* USLOCK_DEBUG */
1903
1904 db_printf ("%s\n", simple_lock_labels);
1905 db_print_simple_lock (saddr);
1906}
1907
1908void
1909db_print_simple_lock (
1910 simple_lock_t addr)
1911{
1912
1913 db_printf ("%08x %3d", addr, *hw_lock_addr(addr->interlock));
1914#if USLOCK_DEBUG
1915 db_printf (" %08x", addr->debug.lock_thread);
1916 db_printf (" %08x ", addr->debug.duration[1]);
1917 db_printsym ((int)addr->debug.lock_pc, DB_STGY_ANY);
1918#endif /* USLOCK_DEBUG */
1919 db_printf ("\n");
1920}
1921
1922void
1923db_show_one_mutex (
1924 db_expr_t addr,
1925 boolean_t have_addr,
1926 db_expr_t count,
1927 char * modif)
1928{
1929 mutex_t * maddr = (mutex_t *)addr;
1930
1931 if (maddr == (mutex_t *)0 || !have_addr)
1932 db_error ("No mutex\n");
1933#if MACH_LDEBUG
1934 else if (maddr->type != MUTEX_TAG)
1935 db_error ("Not a mutex\n");
1936#endif /* MACH_LDEBUG */
1937
1938 db_printf ("%s\n", mutex_labels);
1939 db_print_mutex (maddr);
1940}
1941
1942void
1943db_print_mutex (
1944 mutex_t * addr)
1945{
1946 db_printf ("%08x %6d %7d",
1947 addr, *hw_lock_addr(addr->locked), addr->waiters);
1948#if MACH_LDEBUG
1949 db_printf (" %08x ", addr->thread);
1950 db_printsym (addr->pc, DB_STGY_ANY);
1951#endif /* MACH_LDEBUG */
1952 db_printf ("\n");
1953}
1954#endif /* MACH_KDB */
1955
1956#if MACH_LDEBUG
1957extern void meter_simple_lock (
1958 simple_lock_t l);
1959extern void meter_simple_unlock (
1960 simple_lock_t l);
1961extern void cyctm05_stamp (
1962 unsigned long * start);
1963extern void cyctm05_diff (
1964 unsigned long * start,
1965 unsigned long * end,
1966 unsigned long * diff);
1967
1968#if 0
1969simple_lock_data_t loser;
1970#endif
1971
1972void
1973meter_simple_lock(
1974 simple_lock_t lp)
1975{
1976#if 0
1977 cyctm05_stamp (lp->duration);
1978#endif
1979}
1980
1981int long_simple_lock_crash;
1982int long_simple_lock_time = 0x600;
1983/*
1984 * This is pretty gawd-awful. XXX
1985 */
1986decl_simple_lock_data(extern,kd_tty)
1987
1988void
1989meter_simple_unlock(
1990 simple_lock_t lp)
1991{
1992#if 0
1993 unsigned long stime[2], etime[2], delta[2];
1994
1995 if (lp == &kd_tty) /* XXX */
1996 return; /* XXX */
1997
1998 stime[0] = lp->duration[0];
1999 stime[1] = lp->duration[1];
2000
2001 cyctm05_stamp (etime);
2002
2003 if (etime[1] < stime[1]) /* XXX */
2004 return; /* XXX */
2005
2006 cyctm05_diff (stime, etime, delta);
2007
2008 if (delta[1] >= 0x10000) /* XXX */
2009 return; /* XXX */
2010
2011 lp->duration[0] = delta[0];
2012 lp->duration[1] = delta[1];
2013
2014 if (loser.duration[1] < lp->duration[1])
2015 loser = *lp;
2016
2017 assert (!long_simple_lock_crash || delta[1] < long_simple_lock_time);
2018#endif
2019}
2020#endif /* MACH_LDEBUG */
2021
2022
2023#if ETAP_LOCK_TRACE
2024
2025/*
2026 * ==============================================================
2027 * ETAP hook when initializing a usimple_lock. May be invoked
2028 * from the portable lock package or from an optimized machine-
2029 * dependent implementation.
2030 * ==============================================================
2031 */
2032
2033void
2034etap_simplelock_init (
2035 simple_lock_t l,
2036 etap_event_t event)
2037{
2038 ETAP_CLEAR_TRACE_DATA(l);
2039 etap_event_table_assign(&l->u.event_table_chain, event);
2040
2041#if ETAP_LOCK_ACCUMULATE
2042 /* reserve an entry in the cumulative buffer */
2043 l->cbuff_entry = etap_cbuff_reserve(lock_event_table(l));
2044 /* initialize the entry if one was returned */
2045 if (l->cbuff_entry != CBUFF_ENTRY_NULL) {
2046 l->cbuff_entry->event = event;
2047 l->cbuff_entry->instance = (unsigned long) l;
2048 l->cbuff_entry->kind = SPIN_LOCK;
2049 }
2050#endif /* ETAP_LOCK_ACCUMULATE */
2051}
2052
2053
2054void
2055etap_simplelock_unlock(
2056 simple_lock_t l)
2057{
2058 unsigned short dynamic = 0;
2059 unsigned short trace = 0;
2060 etap_time_t total_time;
2061 etap_time_t stop_hold_time;
2062 pc_t pc;
2063
2064 OBTAIN_PC(pc, l);
2065 ETAP_STAMP(lock_event_table(l), trace, dynamic);
2066
2067 /*
2068 * Calculate & collect hold time data only if
2069 * the hold tracing was enabled throughout the
2070 * whole operation. This prevents collection of
2071 * bogus data caused by mid-operation trace changes.
2072 *
2073 */
2074
2075 if (ETAP_DURATION_ENABLED(trace) && ETAP_WHOLE_OP(l)) {
2076 ETAP_TIMESTAMP (stop_hold_time);
2077 ETAP_TOTAL_TIME(total_time, stop_hold_time,
2078 l->u.s.start_hold_time);
2079 CUM_HOLD_ACCUMULATE(l->cbuff_entry, total_time, dynamic, trace);
2080 MON_ASSIGN_PC(l->end_pc, pc, trace);
2081 MON_DATA_COLLECT(l,
2082 l,
2083 total_time,
2084 SPIN_LOCK,
2085 MON_DURATION,
2086 trace);
2087 }
2088 ETAP_CLEAR_TRACE_DATA(l);
2089}
2090
2091/* ========================================================================
2092 * Since the simple_lock() routine is machine dependent, it must always
2093 * be coded in assembly. The two hook routines below are used to collect
2094 * lock_stat data.
2095 * ========================================================================
2096 */
2097
2098/*
2099 * ROUTINE: etap_simplelock_miss()
2100 *
2101 * FUNCTION: This spin lock routine is called upon the first
2102 * spin (miss) of the lock.
2103 *
2104 * A timestamp is taken at the beginning of the wait period,
2105 * if wait tracing is enabled.
2106 *
2107 *
2108 * PARAMETERS:
2109 * - lock address.
2110 * - timestamp address.
2111 *
2112 * RETURNS: Wait timestamp value. The timestamp value is later used
2113 * by etap_simplelock_hold().
2114 *
2115 * NOTES: This routine is NOT ALWAYS called. The lock may be free
2116 * (never spinning). For this reason the pc is collected in
2117 * etap_simplelock_hold().
2118 *
2119 */
2120etap_time_t
2121etap_simplelock_miss (
2122 simple_lock_t l)
2123
2124{
2125 unsigned short trace = 0;
2126 unsigned short dynamic = 0;
2127 etap_time_t start_miss_time;
2128
2129 ETAP_STAMP(lock_event_table(l), trace, dynamic);
2130
2131 if (trace & ETAP_CONTENTION)
2132 ETAP_TIMESTAMP(start_miss_time);
2133
2134 return(start_miss_time);
2135}
2136
2137/*
2138 * ROUTINE: etap_simplelock_hold()
2139 *
2140 * FUNCTION: This spin lock routine is ALWAYS called once the lock
2141 * is acquired. Here, the contention time is calculated and
2142 * the start hold time is stamped.
2143 *
2144 * PARAMETERS:
2145 * - lock address.
2146 * - PC of the calling function.
2147 * - start wait timestamp.
2148 *
2149 */
2150
2151void
2152etap_simplelock_hold (
2153 simple_lock_t l,
2154 pc_t pc,
2155 etap_time_t start_hold_time)
2156{
2157 unsigned short dynamic = 0;
2158 unsigned short trace = 0;
2159 etap_time_t total_time;
2160 etap_time_t stop_hold_time;
2161
2162 ETAP_STAMP(lock_event_table(l), trace, dynamic);
2163
2164 MON_ASSIGN_PC(l->start_pc, pc, trace);
2165
2166 /* do not collect wait data if lock was free */
2167 if (ETAP_TIME_IS_ZERO(start_hold_time) && (trace & ETAP_CONTENTION)) {
2168 ETAP_TIMESTAMP(stop_hold_time);
2169 ETAP_TOTAL_TIME(total_time,
2170 stop_hold_time,
2171 start_hold_time);
2172 CUM_WAIT_ACCUMULATE(l->cbuff_entry, total_time, dynamic, trace);
2173 MON_DATA_COLLECT(l,
2174 l,
2175 total_time,
2176 SPIN_LOCK,
2177 MON_CONTENTION,
2178 trace);
2179 ETAP_COPY_START_HOLD_TIME(&l->u.s, stop_hold_time, trace);
2180 }
2181 else
2182 ETAP_DURATION_TIMESTAMP(&l->u.s, trace);
2183}
2184
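/*
 * Illustrative sketch, not part of the original file: the calling
 * protocol the two hooks above expect from a machine-dependent
 * simple_lock() implementation (normally coded in assembly).
 * etap_simplelock_miss() is called only on the first miss; the
 * timestamp it returns is handed to etap_simplelock_hold(), which is
 * always called once the lock is acquired.  The function name is
 * hypothetical.
 */
#if 0	/* illustrative sketch -- not compiled */
void
example_simple_lock(
	simple_lock_t	l)
{
	etap_time_t	start_wait_time;
	pc_t		pc;

	ETAP_TIME_CLEAR(start_wait_time);
	if (!hw_lock_try(&l->interlock)) {
		start_wait_time = etap_simplelock_miss(l);	/* first miss only */
		while (!hw_lock_try(&l->interlock))
			continue;				/* spin */
	}
	OBTAIN_PC(pc, l);
	etap_simplelock_hold(l, pc, start_wait_time);		/* always called */
}
#endif
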
2185void
2186etap_mutex_init (
2187 mutex_t *l,
2188 etap_event_t event)
2189{
2190 ETAP_CLEAR_TRACE_DATA(l);
2191 etap_event_table_assign(&l->u.event_table_chain, event);
2192
2193#if ETAP_LOCK_ACCUMULATE
2194 /* reserve an entry in the cumulative buffer */
2195 l->cbuff_entry = etap_cbuff_reserve(lock_event_table(l));
2196 /* initialize the entry if one was returned */
2197 if (l->cbuff_entry != CBUFF_ENTRY_NULL) {
2198 l->cbuff_entry->event = event;
2199 l->cbuff_entry->instance = (unsigned long) l;
2200 l->cbuff_entry->kind = MUTEX_LOCK;
2201 }
2202#endif /* ETAP_LOCK_ACCUMULATE */
2203}
2204
2205etap_time_t
2206etap_mutex_miss (
2207 mutex_t *l)
2208{
2209 unsigned short trace = 0;
2210 unsigned short dynamic = 0;
2211 etap_time_t start_miss_time;
2212
2213 ETAP_STAMP(lock_event_table(l), trace, dynamic);
2214
2215 if (trace & ETAP_CONTENTION)
2216 ETAP_TIMESTAMP(start_miss_time);
2217 else
2218 ETAP_TIME_CLEAR(start_miss_time);
2219
2220 return(start_miss_time);
2221}
2222
2223void
2224etap_mutex_hold (
2225 mutex_t *l,
2226 pc_t pc,
2227 etap_time_t start_hold_time)
2228{
2229 unsigned short dynamic = 0;
2230 unsigned short trace = 0;
2231 etap_time_t total_time;
2232 etap_time_t stop_hold_time;
2233
2234 ETAP_STAMP(lock_event_table(l), trace, dynamic);
2235
2236 MON_ASSIGN_PC(l->start_pc, pc, trace);
2237
2238 /* do not collect wait data if lock was free */
2239 if (!ETAP_TIME_IS_ZERO(start_hold_time) && (trace & ETAP_CONTENTION)) {
2240 ETAP_TIMESTAMP(stop_hold_time);
2241 ETAP_TOTAL_TIME(total_time,
2242 stop_hold_time,
2243 start_hold_time);
2244 CUM_WAIT_ACCUMULATE(l->cbuff_entry, total_time, dynamic, trace);
2245 MON_DATA_COLLECT(l,
2246 l,
2247 total_time,
2248 MUTEX_LOCK,
2249 MON_CONTENTION,
2250 trace);
2251 ETAP_COPY_START_HOLD_TIME(&l->u.s, stop_hold_time, trace);
2252 }
2253 else
2254 ETAP_DURATION_TIMESTAMP(&l->u.s, trace);
2255}
2256
2257void
2258etap_mutex_unlock(
2259 mutex_t *l)
2260{
2261 unsigned short dynamic = 0;
2262 unsigned short trace = 0;
2263 etap_time_t total_time;
2264 etap_time_t stop_hold_time;
2265 pc_t pc;
2266
2267 OBTAIN_PC(pc, l);
2268 ETAP_STAMP(lock_event_table(l), trace, dynamic);
2269
2270 /*
2271 * Calculate & collect hold time data only if
2272 * the hold tracing was enabled throughout the
2273 * whole operation. This prevents collection of
2274 * bogus data caused by mid-operation trace changes.
2275 *
2276 */
2277
2278 if (ETAP_DURATION_ENABLED(trace) && ETAP_WHOLE_OP(l)) {
2279 ETAP_TIMESTAMP(stop_hold_time);
2280 ETAP_TOTAL_TIME(total_time, stop_hold_time,
2281 l->u.s.start_hold_time);
2282 CUM_HOLD_ACCUMULATE(l->cbuff_entry, total_time, dynamic, trace);
2283 MON_ASSIGN_PC(l->end_pc, pc, trace);
2284 MON_DATA_COLLECT(l,
2285 l,
2286 total_time,
2287 MUTEX_LOCK,
2288 MON_DURATION,
2289 trace);
2290 }
2291 ETAP_CLEAR_TRACE_DATA(l);
2292}
2293
2294#endif /* ETAP_LOCK_TRACE */