1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25/*
26 * @OSF_COPYRIGHT@
27 */
28/*
29 * Mach Operating System
30 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
31 * All Rights Reserved.
32 *
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation.
38 *
39 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
40 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
41 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
42 *
43 * Carnegie Mellon requests users of this software to return to
44 *
45 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
46 * School of Computer Science
47 * Carnegie Mellon University
48 * Pittsburgh PA 15213-3890
49 *
50 * any improvements or extensions that they make and grant Carnegie Mellon
51 * the rights to redistribute these changes.
52 */
53/*
54 * File: kern/lock.c
55 * Author: Avadis Tevanian, Jr., Michael Wayne Young
56 * Date: 1985
57 *
58 * Locking primitives implementation
59 */
60
61#include <cpus.h>
62#include <mach_kdb.h>
63#include <mach_ldebug.h>
64
65#include <kern/lock.h>
66#include <kern/etap_macros.h>
67#include <kern/misc_protos.h>
68#include <kern/thread.h>
69#include <kern/processor.h>
70#include <kern/sched_prim.h>
71#include <kern/xpr.h>
72#include <kern/debug.h>
73#include <string.h>
74
75#if MACH_KDB
76#include <ddb/db_command.h>
77#include <ddb/db_output.h>
78#include <ddb/db_sym.h>
79#include <ddb/db_print.h>
80#endif /* MACH_KDB */
81
82#ifdef __ppc__
83#include <ppc/Firmware.h>
84#endif
85
86#include <sys/kdebug.h>
87
88#define ANY_LOCK_DEBUG (USLOCK_DEBUG || LOCK_DEBUG || MUTEX_DEBUG)
89
90/*
91 * Some portions of the lock debugging code must run with
92 * interrupts disabled. This can be machine-dependent,
93 * but we don't have any good hooks for that at the moment.
94 * If your architecture is different, add a machine-dependent
95 * ifdef here for these macros. XXX
96 */
97
98#define DISABLE_INTERRUPTS(s) s = ml_set_interrupts_enabled(FALSE)
99#define ENABLE_INTERRUPTS(s) (void)ml_set_interrupts_enabled(s)
100
101#if NCPUS > 1
102/* Time we loop without holding the interlock.
 103 * The former is for when we cannot sleep, the latter
 104 * for when our thread can go to sleep (loop less).
 105 * We shouldn't retake the interlock very often if
 106 * we cannot go to sleep, since it interferes with
 107 * any other processors. In particular, 100 is too small
 108 * a number for powerpc MP systems because of cache
 109 * coherency issues and differing lock fetch times between
 110 * the processors.
 111 */
112unsigned int lock_wait_time[2] = { (unsigned int)-1, 100 } ;
113#else /* NCPUS > 1 */
114
115 /*
116 * It is silly to spin on a uni-processor as if we
117 * thought something magical would happen to the
118 * want_write bit while we are executing.
119 */
120
121unsigned int lock_wait_time[2] = { 0, 0 };
122#endif /* NCPUS > 1 */
123
124/* Forwards */
125
126#if MACH_KDB
127void db_print_simple_lock(
128 simple_lock_t addr);
129
130void db_print_mutex(
131 mutex_t * addr);
132#endif /* MACH_KDB */
133
134
135#if USLOCK_DEBUG
136/*
137 * Perform simple lock checks.
138 */
139int uslock_check = 1;
140int max_lock_loops = 100000000;
141decl_simple_lock_data(extern , printf_lock)
142decl_simple_lock_data(extern , panic_lock)
143#if MACH_KDB && NCPUS > 1
144decl_simple_lock_data(extern , kdb_lock)
145#endif /* MACH_KDB && NCPUS >1 */
146#endif /* USLOCK_DEBUG */
147
148
149/*
150 * We often want to know the addresses of the callers
151 * of the various lock routines. However, this information
152 * is only used for debugging and statistics.
153 */
154typedef void *pc_t;
155#define INVALID_PC ((void *) VM_MAX_KERNEL_ADDRESS)
156#define INVALID_THREAD ((void *) VM_MAX_KERNEL_ADDRESS)
157#if ANY_LOCK_DEBUG || ETAP_LOCK_TRACE
158#define OBTAIN_PC(pc,l) ((pc) = (void *) GET_RETURN_PC(&(l)))
159#else /* ANY_LOCK_DEBUG || ETAP_LOCK_TRACE */
160#ifdef lint
161/*
162 * Eliminate lint complaints about unused local pc variables.
163 */
164#define OBTAIN_PC(pc,l) ++pc
165#else /* lint */
166#define OBTAIN_PC(pc,l)
167#endif /* lint */
168#endif /* ANY_LOCK_DEBUG || ETAP_LOCK_TRACE */
169
170
171/* #ifndef USIMPLE_LOCK_CALLS
172 * The i386 production version of usimple_locks isn't ready yet.
173 */
174/*
175 * Portable lock package implementation of usimple_locks.
176 */
177
178#if ETAP_LOCK_TRACE
179#define ETAPCALL(stmt) stmt
180void etap_simplelock_init(simple_lock_t, etap_event_t);
181void etap_simplelock_unlock(simple_lock_t);
182void etap_simplelock_hold(simple_lock_t, pc_t, etap_time_t);
183etap_time_t etap_simplelock_miss(simple_lock_t);
184
185void etap_mutex_init(mutex_t*, etap_event_t);
186void etap_mutex_unlock(mutex_t*);
187void etap_mutex_hold(mutex_t*, pc_t, etap_time_t);
188etap_time_t etap_mutex_miss(mutex_t*);
189#else /* ETAP_LOCK_TRACE */
190#define ETAPCALL(stmt)
191#endif /* ETAP_LOCK_TRACE */
192
193#if USLOCK_DEBUG
194#define USLDBG(stmt) stmt
195void usld_lock_init(usimple_lock_t, etap_event_t);
196void usld_lock_pre(usimple_lock_t, pc_t);
197void usld_lock_post(usimple_lock_t, pc_t);
198void usld_unlock(usimple_lock_t, pc_t);
199void usld_lock_try_pre(usimple_lock_t, pc_t);
200void usld_lock_try_post(usimple_lock_t, pc_t);
201void usld_lock_held(usimple_lock_t);
202void usld_lock_none_held(void);
203int usld_lock_common_checks(usimple_lock_t, char *);
204#else /* USLOCK_DEBUG */
205#define USLDBG(stmt)
206#endif /* USLOCK_DEBUG */
207
208/*
209 * Initialize a usimple_lock.
210 *
211 * No change in preemption state.
212 */
213void
214usimple_lock_init(
215 usimple_lock_t l,
216 etap_event_t event)
217{
218#ifndef MACHINE_SIMPLE_LOCK
219 USLDBG(usld_lock_init(l, event));
220 ETAPCALL(etap_simplelock_init((l),(event)));
221 hw_lock_init(&l->interlock);
222#else
223 simple_lock_init(l,event);
224#endif
225}
226
227
228/*
229 * Acquire a usimple_lock.
230 *
231 * Returns with preemption disabled. Note
232 * that the hw_lock routines are responsible for
233 * maintaining preemption state.
234 */
235void
236usimple_lock(
237 usimple_lock_t l)
238{
239#ifndef MACHINE_SIMPLE_LOCK
240 int i;
241 pc_t pc;
242#if ETAP_LOCK_TRACE
243 etap_time_t start_wait_time;
244 int no_miss_info = 0;
245#endif /* ETAP_LOCK_TRACE */
246#if USLOCK_DEBUG
247 int count = 0;
248#endif /* USLOCK_DEBUG */
249
250 OBTAIN_PC(pc, l);
251 USLDBG(usld_lock_pre(l, pc));
252#if ETAP_LOCK_TRACE
253 ETAP_TIME_CLEAR(start_wait_time);
254#endif /* ETAP_LOCK_TRACE */
255
256 if(!hw_lock_to(&l->interlock, LockTimeOut)) /* Try to get the lock with a timeout */
257 panic("simple lock deadlock detection - l=%08X, cpu=%d, ret=%08X", l, cpu_number(), pc);
258
259 ETAPCALL(etap_simplelock_hold(l, pc, start_wait_time));
260 USLDBG(usld_lock_post(l, pc));
261#else
262 simple_lock(l);
263#endif
264}
265
266
267/*
268 * Release a usimple_lock.
269 *
270 * Returns with preemption enabled. Note
271 * that the hw_lock routines are responsible for
272 * maintaining preemption state.
273 */
274void
275usimple_unlock(
276 usimple_lock_t l)
277{
278#ifndef MACHINE_SIMPLE_LOCK
279 pc_t pc;
280
281// checkNMI(); /* (TEST/DEBUG) */
282
283 OBTAIN_PC(pc, l);
284 USLDBG(usld_unlock(l, pc));
285 ETAPCALL(etap_simplelock_unlock(l));
286#ifdef __ppc__
287 sync();
288#endif
289 hw_lock_unlock(&l->interlock);
290#else
291 simple_unlock_rwmb(l);
292#endif
293}
294
295
296/*
297 * Conditionally acquire a usimple_lock.
298 *
299 * On success, returns with preemption disabled.
300 * On failure, returns with preemption in the same state
301 * as when first invoked. Note that the hw_lock routines
302 * are responsible for maintaining preemption state.
303 *
304 * XXX No stats are gathered on a miss; I preserved this
305 * behavior from the original assembly-language code, but
306 * doesn't it make sense to log misses? XXX
307 */
308unsigned int
309usimple_lock_try(
310 usimple_lock_t l)
311{
312#ifndef MACHINE_SIMPLE_LOCK
313 pc_t pc;
314 unsigned int success;
315 etap_time_t zero_time;
316
317 OBTAIN_PC(pc, l);
318 USLDBG(usld_lock_try_pre(l, pc));
319 if (success = hw_lock_try(&l->interlock)) {
320 USLDBG(usld_lock_try_post(l, pc));
321 ETAP_TIME_CLEAR(zero_time);
322 ETAPCALL(etap_simplelock_hold(l, pc, zero_time));
323 }
324 return success;
325#else
326 return(simple_lock_try(l));
327#endif
328}
329
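/*
 * Usage sketch (illustrative only; "example_lock" and "EXAMPLE_EVENT"
 * are hypothetical names, not part of this file):
 *
 *	simple_lock_data_t	example_lock;
 *
 *	simple_lock_init(&example_lock, EXAMPLE_EVENT);
 *
 *	simple_lock(&example_lock);
 *	... critical section runs with preemption disabled ...
 *	simple_unlock(&example_lock);
 *
 *	if (simple_lock_try(&example_lock)) {
 *		... lock was free and is now held ...
 *		simple_unlock(&example_lock);
 *	}
 */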
330#if ETAP_LOCK_TRACE
331void
332simple_lock_no_trace(
333 simple_lock_t l)
334{
335 pc_t pc;
336
337 OBTAIN_PC(pc, l);
338 USLDBG(usld_lock_pre(l, pc));
339 while (!hw_lock_try(&l->interlock)) {
340 while (hw_lock_held(&l->interlock)) {
341 /*
342 * Spin watching the lock value in cache,
343 * without consuming external bus cycles.
344 * On most SMP architectures, the atomic
345 * instruction(s) used by hw_lock_try
346 * cost much, much more than an ordinary
347 * memory read.
348 */
349 }
350 }
351 USLDBG(usld_lock_post(l, pc));
352}
353
354void
355simple_unlock_no_trace(
356 simple_lock_t l)
357{
358 pc_t pc;
359
360 OBTAIN_PC(pc, l);
361 USLDBG(usld_unlock(l, pc));
362 hw_lock_unlock(&l->interlock);
363}
364
365int
366simple_lock_try_no_trace(
367 simple_lock_t l)
368{
369 pc_t pc;
370 unsigned int success;
371
372 OBTAIN_PC(pc, l);
373 USLDBG(usld_lock_try_pre(l, pc));
374 if (success = hw_lock_try(&l->interlock)) {
375 USLDBG(usld_lock_try_post(l, pc));
376 }
377 return success;
378}
379#endif /* ETAP_LOCK_TRACE */
380
381
382#if USLOCK_DEBUG
383/*
384 * Verify that the lock is locked and owned by
385 * the current thread.
386 */
387void
388usimple_lock_held(
389 usimple_lock_t l)
390{
391 usld_lock_held(l);
392}
393
394
395/*
396 * Verify that no usimple_locks are held by
397 * this processor. Typically used in a
398 * trap handler when returning to user mode
399 * or in a path known to relinquish the processor.
400 */
401void
402usimple_lock_none_held(void)
403{
404 usld_lock_none_held();
405}
406#endif /* USLOCK_DEBUG */
407
408
409#if USLOCK_DEBUG
410/*
411 * States of a usimple_lock. The default when initializing
412 * a usimple_lock is setting it up for debug checking.
413 */
414#define USLOCK_CHECKED 0x0001 /* lock is being checked */
415#define USLOCK_TAKEN 0x0002 /* lock has been taken */
416#define USLOCK_INIT 0xBAA0 /* lock has been initialized */
417#define USLOCK_INITIALIZED (USLOCK_INIT|USLOCK_CHECKED)
418#define USLOCK_CHECKING(l) (uslock_check && \
419 ((l)->debug.state & USLOCK_CHECKED))
420
421/*
422 * Maintain a per-cpu stack of acquired usimple_locks.
423 */
424void usl_stack_push(usimple_lock_t, int);
425void usl_stack_pop(usimple_lock_t, int);
426
427/*
428 * Trace activities of a particularly interesting lock.
429 */
430void usl_trace(usimple_lock_t, int, pc_t, const char *);
431
432
433/*
434 * Initialize the debugging information contained
435 * in a usimple_lock.
436 */
437void
438usld_lock_init(
439 usimple_lock_t l,
440 etap_event_t type)
441{
442 if (l == USIMPLE_LOCK_NULL)
443 panic("lock initialization: null lock pointer");
444 l->lock_type = USLOCK_TAG;
445 l->debug.state = uslock_check ? USLOCK_INITIALIZED : 0;
446 l->debug.lock_cpu = l->debug.unlock_cpu = 0;
447 l->debug.lock_pc = l->debug.unlock_pc = INVALID_PC;
448 l->debug.lock_thread = l->debug.unlock_thread = INVALID_THREAD;
449 l->debug.duration[0] = l->debug.duration[1] = 0;
450 l->debug.unlock_cpu = l->debug.unlock_cpu = 0;
451 l->debug.unlock_pc = l->debug.unlock_pc = INVALID_PC;
452 l->debug.unlock_thread = l->debug.unlock_thread = INVALID_THREAD;
453}
454
455
456/*
457 * These checks apply to all usimple_locks, not just
458 * those with USLOCK_CHECKED turned on.
459 */
460int
461usld_lock_common_checks(
462 usimple_lock_t l,
463 char *caller)
464{
465 if (l == USIMPLE_LOCK_NULL)
466 panic("%s: null lock pointer", caller);
467 if (l->lock_type != USLOCK_TAG)
468 panic("%s: 0x%x is not a usimple lock", caller, (integer_t) l);
469 if (!(l->debug.state & USLOCK_INIT))
470 panic("%s: 0x%x is not an initialized lock",
471 caller, (integer_t) l);
472 return USLOCK_CHECKING(l);
473}
474
475
476/*
477 * Debug checks on a usimple_lock just before attempting
478 * to acquire it.
479 */
480/* ARGSUSED */
481void
482usld_lock_pre(
483 usimple_lock_t l,
484 pc_t pc)
485{
486 char *caller = "usimple_lock";
487
488
489#if 0
490 printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n", /* (TEST/DEBUG) */
491 l->debug.lock_pc,
492 l->debug.lock_thread,
493 l->debug.state,
494 l->debug.lock_cpu,
495 l->debug.unlock_thread,
496 l->debug.unlock_cpu,
497 l->debug.unlock_pc,
498 caller);
499#endif
500
501 if (!usld_lock_common_checks(l, caller))
502 return;
503
504/*
505 * Note that we have a weird case where we are getting a lock when we are
506 * in the process of putting the system to sleep. We are running with no
507 * current threads, therefore we can't tell if we are trying to retake a lock
508 * we have or someone on the other processor has it. Therefore we just
509 * ignore this test if the locking thread is 0.
510 */
511
512 if ((l->debug.state & USLOCK_TAKEN) && l->debug.lock_thread &&
513 l->debug.lock_thread == (void *) current_thread()) {
514 printf("%s: lock 0x%x already locked (at 0x%x) by",
515 caller, (integer_t) l, l->debug.lock_pc);
516 printf(" current thread 0x%x (new attempt at pc 0x%x)\n",
517 l->debug.lock_thread, pc);
518 panic(caller);
519 }
520 mp_disable_preemption();
521 usl_trace(l, cpu_number(), pc, caller);
522 mp_enable_preemption();
523}
524
525
526/*
527 * Debug checks on a usimple_lock just after acquiring it.
528 *
529 * Pre-emption has been disabled at this point,
530 * so we are safe in using cpu_number.
531 */
532void
533usld_lock_post(
534 usimple_lock_t l,
535 pc_t pc)
536{
537 register int mycpu;
538 char *caller = "successful usimple_lock";
539
540
541#if 0
542 printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n", /* (TEST/DEBUG) */
543 l->debug.lock_pc,
544 l->debug.lock_thread,
545 l->debug.state,
546 l->debug.lock_cpu,
547 l->debug.unlock_thread,
548 l->debug.unlock_cpu,
549 l->debug.unlock_pc,
550 caller);
551#endif
552
553 if (!usld_lock_common_checks(l, caller))
554 return;
555
556 if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
557 panic("%s: lock 0x%x became uninitialized",
558 caller, (integer_t) l);
559 if ((l->debug.state & USLOCK_TAKEN))
560 panic("%s: lock 0x%x became TAKEN by someone else",
561 caller, (integer_t) l);
562
563 mycpu = cpu_number();
564 l->debug.lock_thread = (void *)current_thread();
565 l->debug.state |= USLOCK_TAKEN;
566 l->debug.lock_pc = pc;
567 l->debug.lock_cpu = mycpu;
568
569 usl_stack_push(l, mycpu);
570 usl_trace(l, mycpu, pc, caller);
571}
572
573
574/*
575 * Debug checks on a usimple_lock just before
576 * releasing it. Note that the caller has not
577 * yet released the hardware lock.
578 *
579 * Preemption is still disabled, so there's
580 * no problem using cpu_number.
581 */
582void
583usld_unlock(
584 usimple_lock_t l,
585 pc_t pc)
586{
587 register int mycpu;
588 char *caller = "usimple_unlock";
589
590
591#if 0
592 printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n", /* (TEST/DEBUG) */
593 l->debug.lock_pc,
594 l->debug.lock_thread,
595 l->debug.state,
596 l->debug.lock_cpu,
597 l->debug.unlock_thread,
598 l->debug.unlock_cpu,
599 l->debug.unlock_pc,
600 caller);
601#endif
602
603 if (!usld_lock_common_checks(l, caller))
604 return;
605
606 mycpu = cpu_number();
607
608 if (!(l->debug.state & USLOCK_TAKEN))
609 panic("%s: lock 0x%x hasn't been taken",
610 caller, (integer_t) l);
611 if (l->debug.lock_thread != (void *) current_thread())
612 panic("%s: unlocking lock 0x%x, owned by thread 0x%x",
613 caller, (integer_t) l, l->debug.lock_thread);
614 if (l->debug.lock_cpu != mycpu) {
615 printf("%s: unlocking lock 0x%x on cpu 0x%x",
616 caller, (integer_t) l, mycpu);
617 printf(" (acquired on cpu 0x%x)\n", l->debug.lock_cpu);
618 panic(caller);
619 }
620 usl_trace(l, mycpu, pc, caller);
621 usl_stack_pop(l, mycpu);
622
623 l->debug.unlock_thread = l->debug.lock_thread;
624 l->debug.lock_thread = INVALID_THREAD;
625 l->debug.state &= ~USLOCK_TAKEN;
626 l->debug.unlock_pc = pc;
627 l->debug.unlock_cpu = mycpu;
628}
629
630
631/*
632 * Debug checks on a usimple_lock just before
633 * attempting to acquire it.
634 *
635 * Preemption isn't guaranteed to be disabled.
636 */
637void
638usld_lock_try_pre(
639 usimple_lock_t l,
640 pc_t pc)
641{
642 char *caller = "usimple_lock_try";
643
644 if (!usld_lock_common_checks(l, caller))
645 return;
646 mp_disable_preemption();
647 usl_trace(l, cpu_number(), pc, caller);
648 mp_enable_preemption();
649}
650
651
652/*
653 * Debug checks on a usimple_lock just after
654 * successfully attempting to acquire it.
655 *
656 * Preemption has been disabled by the
657 * lock acquisition attempt, so it's safe
658 * to use cpu_number.
659 */
660void
661usld_lock_try_post(
662 usimple_lock_t l,
663 pc_t pc)
664{
665 register int mycpu;
666 char *caller = "successful usimple_lock_try";
667
668 if (!usld_lock_common_checks(l, caller))
669 return;
670
671 if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
672 panic("%s: lock 0x%x became uninitialized",
673 caller, (integer_t) l);
674 if ((l->debug.state & USLOCK_TAKEN))
675 panic("%s: lock 0x%x became TAKEN by someone else",
676 caller, (integer_t) l);
677
678 mycpu = cpu_number();
679 l->debug.lock_thread = (void *) current_thread();
680 l->debug.state |= USLOCK_TAKEN;
681 l->debug.lock_pc = pc;
682 l->debug.lock_cpu = mycpu;
683
684#if 0
685 printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n", /* (TEST/DEBUG) */
686 l->debug.lock_pc,
687 l->debug.lock_thread,
688 l->debug.state,
689 l->debug.lock_cpu,
690 l->debug.unlock_thread,
691 l->debug.unlock_cpu,
692 l->debug.unlock_pc,
693 caller);
694#endif
695
696 usl_stack_push(l, mycpu);
697 usl_trace(l, mycpu, pc, caller);
698}
699
700
701/*
702 * Determine whether the lock in question is owned
703 * by the current thread.
704 */
705void
706usld_lock_held(
707 usimple_lock_t l)
708{
709 char *caller = "usimple_lock_held";
710
711
712#if 0
713 printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n", /* (TEST/DEBUG) */
714 l->debug.lock_pc,
715 l->debug.lock_thread,
716 l->debug.state,
717 l->debug.lock_cpu,
718 l->debug.unlock_thread,
719 l->debug.unlock_cpu,
720 l->debug.unlock_pc,
721 caller);
722#endif
723
724 if (!usld_lock_common_checks(l, caller))
725 return;
726
727 if (!(l->debug.state & USLOCK_TAKEN))
728 panic("%s: lock 0x%x hasn't been taken",
729 caller, (integer_t) l);
730 if (l->debug.lock_thread != (void *) current_thread())
731 panic("%s: lock 0x%x is owned by thread 0x%x", caller,
732 (integer_t) l, (integer_t) l->debug.lock_thread);
733
734 /*
735 * The usimple_lock is active, so preemption
736 * is disabled and the current cpu should
737 * match the one recorded at lock acquisition time.
738 */
739 if (l->debug.lock_cpu != cpu_number())
740 panic("%s: current cpu 0x%x isn't acquiring cpu 0x%x",
741 caller, cpu_number(), (integer_t) l->debug.lock_cpu);
742}
743
744
745/*
746 * Per-cpu stack of currently active usimple_locks.
747 * Requires spl protection so that interrupt-level
748 * locks plug-n-play with their thread-context friends.
749 */
750#define USLOCK_STACK_DEPTH 20
751usimple_lock_t uslock_stack[NCPUS][USLOCK_STACK_DEPTH];
752unsigned int uslock_stack_index[NCPUS];
753boolean_t uslock_stack_enabled = FALSE;
754
755
756/*
757 * Record a usimple_lock just acquired on
758 * the current processor.
759 *
760 * Preemption has been disabled by lock
761 * acquisition, so it's safe to use the cpu number
762 * specified by the caller.
763 */
764void
765usl_stack_push(
766 usimple_lock_t l,
767 int mycpu)
768{
769 boolean_t s;
770
771 if (uslock_stack_enabled == FALSE)
772 return;
773
774 DISABLE_INTERRUPTS(s);
775 assert(uslock_stack_index[mycpu] >= 0);
776 assert(uslock_stack_index[mycpu] < USLOCK_STACK_DEPTH);
777 if (uslock_stack_index[mycpu] >= USLOCK_STACK_DEPTH) {
778 printf("usl_stack_push (cpu 0x%x): too many locks (%d)",
779 mycpu, uslock_stack_index[mycpu]);
780 printf(" disabling stacks\n");
781 uslock_stack_enabled = FALSE;
782 ENABLE_INTERRUPTS(s);
783 return;
784 }
785 uslock_stack[mycpu][uslock_stack_index[mycpu]] = l;
786 uslock_stack_index[mycpu]++;
787 ENABLE_INTERRUPTS(s);
788}
789
790
791/*
792 * Eliminate the entry for a usimple_lock
793 * that had been active on the current processor.
794 *
795 * Preemption has been disabled by lock
796 * acquisition, and we haven't yet actually
797 * released the hardware lock associated with
798 * this usimple_lock, so it's safe to use the
799 * cpu number supplied by the caller.
800 */
801void
802usl_stack_pop(
803 usimple_lock_t l,
804 int mycpu)
805{
806 unsigned int i, index;
807 boolean_t s;
808
809 if (uslock_stack_enabled == FALSE)
810 return;
811
812 DISABLE_INTERRUPTS(s);
813 assert(uslock_stack_index[mycpu] > 0);
814 assert(uslock_stack_index[mycpu] <= USLOCK_STACK_DEPTH);
815 if (uslock_stack_index[mycpu] == 0) {
816 printf("usl_stack_pop (cpu 0x%x): not enough locks (%d)",
817 mycpu, uslock_stack_index[mycpu]);
818 printf(" disabling stacks\n");
819 uslock_stack_enabled = FALSE;
820 ENABLE_INTERRUPTS(s);
821 return;
822 }
823 index = --uslock_stack_index[mycpu];
824 for (i = 0; i <= index; ++i) {
825 if (uslock_stack[mycpu][i] == l) {
826 if (i != index)
827 uslock_stack[mycpu][i] =
828 uslock_stack[mycpu][index];
829 ENABLE_INTERRUPTS(s);
830 return;
831 }
832 }
833 ENABLE_INTERRUPTS(s);
834 panic("usl_stack_pop: can't find usimple_lock 0x%x", l);
835}
836
837
838/*
839 * Determine whether any usimple_locks are currently held.
840 *
841 * Caller's preemption state is uncertain. If
842 * preemption has been disabled, this check is accurate.
843 * Otherwise, this check is just a guess. We do the best
844 * we can by disabling scheduler interrupts, so at least
845 * the check is accurate w.r.t. whatever cpu we're running
846 * on while in this routine.
847 */
848void
849usld_lock_none_held()
850{
851 register int mycpu;
852 boolean_t s;
853 unsigned int locks_held;
854 char *caller = "usimple_lock_none_held";
855
856 DISABLE_INTERRUPTS(s);
857 mp_disable_preemption();
858 mycpu = cpu_number();
859 locks_held = uslock_stack_index[mycpu];
860 mp_enable_preemption();
861 ENABLE_INTERRUPTS(s);
862 if (locks_held > 0)
863 panic("%s: no locks should be held (0x%x locks held)",
864 caller, (integer_t) locks_held);
865}
866
867
868/*
869 * For very special cases, set traced_lock to point to a
870 * specific lock of interest. The result is a series of
871 * XPRs showing lock operations on that lock. The lock_seq
872 * value is used to show the order of those operations.
873 */
874usimple_lock_t traced_lock;
875unsigned int lock_seq;
876
877void
878usl_trace(
879 usimple_lock_t l,
880 int mycpu,
881 pc_t pc,
882 const char * op_name)
883{
884 if (traced_lock == l) {
885 XPR(XPR_SLOCK,
886 "seq %d, cpu %d, %s @ %x\n",
887 (integer_t) lock_seq, (integer_t) mycpu,
888 (integer_t) op_name, (integer_t) pc, 0);
889 lock_seq++;
890 }
891}
892
893
894
895#if MACH_KDB
896#define printf kdbprintf
897void db_show_all_slocks(void);
898void
899db_show_all_slocks(void)
900{
901 unsigned int i, index;
902 int mycpu = cpu_number();
903 usimple_lock_t l;
904
905 if (uslock_stack_enabled == FALSE) {
906 printf("Lock stack not enabled\n");
907 return;
908 }
909
910#if 0
911 if (!mach_slocks_init)
912 iprintf("WARNING: simple locks stack may not be accurate\n");
913#endif
914 assert(uslock_stack_index[mycpu] >= 0);
915 assert(uslock_stack_index[mycpu] <= USLOCK_STACK_DEPTH);
916 index = uslock_stack_index[mycpu];
917 for (i = 0; i < index; ++i) {
918 l = uslock_stack[mycpu][i];
919 iprintf("%d: ", i);
920 db_printsym((vm_offset_t)l, DB_STGY_ANY);
921 if (l->debug.lock_pc != INVALID_PC) {
922 printf(" locked by ");
923 db_printsym((int)l->debug.lock_pc, DB_STGY_PROC);
924 }
925 printf("\n");
926 }
927}
928#endif /* MACH_KDB */
929
930#endif /* USLOCK_DEBUG */
931
932/* #endif USIMPLE_LOCK_CALLS */
933
934/*
935 * Routine: lock_alloc
936 * Function:
937 * Allocate a lock for external users who cannot
938 * hard-code the structure definition into their
939 * objects.
940 * For now just use kalloc, but a zone is probably
941 * warranted.
942 */
943lock_t *
944lock_alloc(
945 boolean_t can_sleep,
946 etap_event_t event,
947 etap_event_t i_event)
948{
949 lock_t *l;
950
951 if ((l = (lock_t *)kalloc(sizeof(lock_t))) != 0)
952 lock_init(l, can_sleep, event, i_event);
953 return(l);
954}
955
956/*
957 * Routine: lock_free
958 * Function:
959 * Free a lock allocated for external users.
960 * For now just use kfree, but a zone is probably
961 * warranted.
962 */
963void
964lock_free(
965 lock_t *l)
966{
967 kfree((vm_offset_t)l, sizeof(lock_t));
968}
969
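/*
 * Usage sketch for clients that cannot embed the lock structure
 * (illustrative only; the event names are hypothetical):
 *
 *	lock_t *lck = lock_alloc(TRUE, EXAMPLE_EVENT, EXAMPLE_EVENT_I);
 *
 *	if (lck != (lock_t *)0) {
 *		lock_write(lck);
 *		... exclusive access ...
 *		lock_done(lck);
 *		lock_free(lck);
 *	}
 */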
970
971/*
972 * Routine: lock_init
973 * Function:
974 * Initialize a lock; required before use.
975 * Note that clients declare the "struct lock"
976 * variables and then initialize them, rather
977 * than getting a new one from this module.
978 */
979void
980lock_init(
981 lock_t *l,
982 boolean_t can_sleep,
983 etap_event_t event,
984 etap_event_t i_event)
985{
986 (void) memset((void *) l, 0, sizeof(lock_t));
987
988#if ETAP_LOCK_TRACE
989 etap_event_table_assign(&l->u.event_table_chain, event);
990 l->u.s.start_list = SD_ENTRY_NULL;
991#endif /* ETAP_LOCK_TRACE */
992
993 simple_lock_init(&l->interlock, i_event);
994 l->want_write = FALSE;
995 l->want_upgrade = FALSE;
996 l->read_count = 0;
997 l->can_sleep = can_sleep;
998
999#if ETAP_LOCK_ACCUMULATE
1000 l->cbuff_write = etap_cbuff_reserve(lock_event_table(l));
1001 if (l->cbuff_write != CBUFF_ENTRY_NULL) {
1002 l->cbuff_write->event = event;
1003 l->cbuff_write->instance = (unsigned long) l;
1004 l->cbuff_write->kind = WRITE_LOCK;
1005 }
1006 l->cbuff_read = CBUFF_ENTRY_NULL;
1007#endif /* ETAP_LOCK_ACCUMULATE */
1008}
1009
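/*
 * Usage sketch (illustrative only; "example_rw_lock" and the event
 * tags are hypothetical). lock_done() releases whichever of the
 * read or write hold the caller currently has:
 *
 *	lock_t	example_rw_lock;
 *
 *	lock_init(&example_rw_lock, TRUE, EXAMPLE_EVENT, EXAMPLE_EVENT_I);
 *
 *	lock_read(&example_rw_lock);
 *	... shared, read-only access ...
 *	lock_done(&example_rw_lock);
 *
 *	lock_write(&example_rw_lock);
 *	... exclusive access ...
 *	lock_done(&example_rw_lock);
 */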
1010
1011/*
1012 * Sleep locks. These use the same data structure and algorithm
1013 * as the spin locks, but the process sleeps while it is waiting
1014 * for the lock. These work on uniprocessor systems.
1015 */
1016
1017#define DECREMENTER_TIMEOUT 1000000
1018
1019void
1020lock_write(
1021 register lock_t * l)
1022{
1023 register int i;
1024 start_data_node_t entry = {0};
1025 boolean_t lock_miss = FALSE;
1026 unsigned short dynamic = 0;
1027 unsigned short trace = 0;
1028 etap_time_t total_time;
1029 etap_time_t stop_wait_time;
1030 pc_t pc;
1031#if MACH_LDEBUG
1032 int decrementer;
1033#endif /* MACH_LDEBUG */
1034
1035
1036 ETAP_STAMP(lock_event_table(l), trace, dynamic);
1037 ETAP_CREATE_ENTRY(entry, trace);
1038 MON_ASSIGN_PC(entry->start_pc, pc, trace);
1039
1040 simple_lock(&l->interlock);
1041
1042 /*
1043 * Link the new start_list entry
1044 */
1045 ETAP_LINK_ENTRY(l, entry, trace);
1046
1047#if MACH_LDEBUG
1048 decrementer = DECREMENTER_TIMEOUT;
1049#endif /* MACH_LDEBUG */
1050
1051 /*
1052 * Try to acquire the want_write bit.
1053 */
1054 while (l->want_write) {
1055 if (!lock_miss) {
1056 ETAP_CONTENTION_TIMESTAMP(entry, trace);
1057 lock_miss = TRUE;
1058 }
1059
1060 i = lock_wait_time[l->can_sleep ? 1 : 0];
1061 if (i != 0) {
1062 simple_unlock(&l->interlock);
1063#if MACH_LDEBUG
1064 if (!--decrementer)
1065 Debugger("timeout - want_write");
1066#endif /* MACH_LDEBUG */
1067 while (--i != 0 && l->want_write)
1068 continue;
1069 simple_lock(&l->interlock);
1070 }
1071
1072 if (l->can_sleep && l->want_write) {
1073 l->waiting = TRUE;
1074 ETAP_SET_REASON(current_thread(),
1075 BLOCKED_ON_COMPLEX_LOCK);
1076 thread_sleep_simple_lock((event_t) l,
1077 simple_lock_addr(l->interlock),
1078 THREAD_UNINT);
1079 /* interlock relocked */
1080 }
1081 }
1082 l->want_write = TRUE;
1083
1084 /* Wait for readers (and upgrades) to finish */
1085
1086#if MACH_LDEBUG
1087 decrementer = DECREMENTER_TIMEOUT;
1088#endif /* MACH_LDEBUG */
1089 while ((l->read_count != 0) || l->want_upgrade) {
1090 if (!lock_miss) {
1091 ETAP_CONTENTION_TIMESTAMP(entry,trace);
1092 lock_miss = TRUE;
1093 }
1094
1095 i = lock_wait_time[l->can_sleep ? 1 : 0];
1096 if (i != 0) {
1097 simple_unlock(&l->interlock);
1098#if MACH_LDEBUG
1099 if (!--decrementer)
1100 Debugger("timeout - wait for readers");
1101#endif /* MACH_LDEBUG */
1102 while (--i != 0 && (l->read_count != 0 ||
1103 l->want_upgrade))
1104 continue;
1105 simple_lock(&l->interlock);
1106 }
1107
1108 if (l->can_sleep && (l->read_count != 0 || l->want_upgrade)) {
1109 l->waiting = TRUE;
1110 ETAP_SET_REASON(current_thread(),
1111 BLOCKED_ON_COMPLEX_LOCK);
1112 thread_sleep_simple_lock((event_t) l,
1113 simple_lock_addr(l->interlock),
1114 THREAD_UNINT);
1115 /* interlock relocked */
1116 }
1117 }
1118
1119 /*
1120 * do not collect wait data if either the lock
1121 * was free or no wait traces are enabled.
1122 */
1123
1124 if (lock_miss && ETAP_CONTENTION_ENABLED(trace)) {
1125 ETAP_TIMESTAMP(stop_wait_time);
1126 ETAP_TOTAL_TIME(total_time,
1127 stop_wait_time,
1128 entry->start_wait_time);
1129 CUM_WAIT_ACCUMULATE(l->cbuff_write, total_time, dynamic, trace);
1130 MON_DATA_COLLECT(l,
1131 entry,
1132 total_time,
1133 WRITE_LOCK,
1134 MON_CONTENTION,
1135 trace);
1136 }
1137
1138 simple_unlock(&l->interlock);
1139
1140 /*
1141 * Set start hold time if some type of hold tracing is enabled.
1142 *
1143 * Note: if the stop_wait_time was already stamped, use
1144 * it as the start_hold_time instead of doing an
1145 * expensive bus access.
1146 *
1147 */
1148
1149 if (lock_miss && ETAP_CONTENTION_ENABLED(trace))
1150 ETAP_COPY_START_HOLD_TIME(entry, stop_wait_time, trace);
1151 else
1152 ETAP_DURATION_TIMESTAMP(entry, trace);
1153
1154}
1155
1156void
1157lock_done(
1158 register lock_t * l)
1159{
1160 boolean_t do_wakeup = FALSE;
1161 start_data_node_t entry;
1162 unsigned short dynamic = 0;
1163 unsigned short trace = 0;
1164 etap_time_t stop_hold_time;
1165 etap_time_t total_time;
1166 unsigned long lock_kind;
1167 pc_t pc;
1168
1169
1170 ETAP_STAMP(lock_event_table(l), trace, dynamic);
1171
1172 simple_lock(&l->interlock);
1173
1174 if (l->read_count != 0) {
1175 l->read_count--;
1176 lock_kind = READ_LOCK;
1177 }
1178 else
1179 if (l->want_upgrade) {
1180 l->want_upgrade = FALSE;
1181 lock_kind = WRITE_LOCK;
1182 }
1183 else {
1184 l->want_write = FALSE;
1185 lock_kind = WRITE_LOCK;
1186 }
1187
1188 /*
1189 * There is no reason to wakeup a waiting thread
1190 * if the read-count is non-zero. Consider:
1191 * we must be dropping a read lock
1192 * threads are waiting only if one wants a write lock
1193 * if there are still readers, they can't proceed
1194 */
1195
1196 if (l->waiting && (l->read_count == 0)) {
1197 l->waiting = FALSE;
1198 do_wakeup = TRUE;
1199 }
1200 /*
1201 * Collect hold data if hold tracing is
1202 * enabled.
1203 */
1204
1205 /*
1206 * NOTE: All complex locks whose tracing was on when the
1207 * lock was acquired will have an entry in the start_data
1208 * list.
1209 */
1210
1211 ETAP_UNLINK_ENTRY(l,entry);
1212 if (ETAP_DURATION_ENABLED(trace) && entry != SD_ENTRY_NULL) {
1213 ETAP_TIMESTAMP (stop_hold_time);
1214 ETAP_TOTAL_TIME (total_time,
1215 stop_hold_time,
1216 entry->start_hold_time);
1217
1218 if (lock_kind & WRITE_LOCK)
1219 CUM_HOLD_ACCUMULATE (l->cbuff_write,
1220 total_time,
1221 dynamic,
1222 trace);
1223 else {
1224 CUM_READ_ENTRY_RESERVE(l,l->cbuff_read,trace);
1225 CUM_HOLD_ACCUMULATE (l->cbuff_read,
1226 total_time,
1227 dynamic,
1228 trace);
1229 }
1230 MON_ASSIGN_PC(entry->end_pc,pc,trace);
1231 MON_DATA_COLLECT(l,entry,
1232 total_time,
1233 lock_kind,
1234 MON_DURATION,
1235 trace);
1236 }
1237
1238 simple_unlock(&l->interlock);
1239
1240 ETAP_DESTROY_ENTRY(entry);
1241
1242 if (do_wakeup)
1243 thread_wakeup((event_t) l);
1244}
1245
1246void
1247lock_read(
1248 register lock_t * l)
1249{
1250 register int i;
1251 start_data_node_t entry = {0};
1252 boolean_t lock_miss = FALSE;
1253 unsigned short dynamic = 0;
1254 unsigned short trace = 0;
1255 etap_time_t total_time;
1256 etap_time_t stop_wait_time;
1257 pc_t pc;
1258#if MACH_LDEBUG
1259 int decrementer;
1260#endif /* MACH_LDEBUG */
1261
1262 ETAP_STAMP(lock_event_table(l), trace, dynamic);
1263 ETAP_CREATE_ENTRY(entry, trace);
1264 MON_ASSIGN_PC(entry->start_pc, pc, trace);
1265
1266 simple_lock(&l->interlock);
1267
1268 /*
1269 * Link the new start_list entry
1270 */
1271 ETAP_LINK_ENTRY(l,entry,trace);
1272
1273#if MACH_LDEBUG
1274 decrementer = DECREMENTER_TIMEOUT;
1275#endif /* MACH_LDEBUG */
1276 while (l->want_write || l->want_upgrade) {
1277 if (!lock_miss) {
1278 ETAP_CONTENTION_TIMESTAMP(entry, trace);
1279 lock_miss = TRUE;
1280 }
1281
1282 i = lock_wait_time[l->can_sleep ? 1 : 0];
1283
1284 if (i != 0) {
1285 simple_unlock(&l->interlock);
1286#if MACH_LDEBUG
1287 if (!--decrementer)
1288 Debugger("timeout - wait no writers");
1289#endif /* MACH_LDEBUG */
1290 while (--i != 0 && (l->want_write || l->want_upgrade))
1291 continue;
1292 simple_lock(&l->interlock);
1293 }
1294
1295 if (l->can_sleep && (l->want_write || l->want_upgrade)) {
1296 l->waiting = TRUE;
1297 thread_sleep_simple_lock((event_t) l,
1298 simple_lock_addr(l->interlock),
1299 THREAD_UNINT);
1300 /* interlock relocked */
1301 }
1302 }
1303
1304 l->read_count++;
1305
1306 /*
1307 * Do not collect wait data if the lock was free
1308 * or if no wait traces are enabled.
1309 */
1310
1311 if (lock_miss && ETAP_CONTENTION_ENABLED(trace)) {
1312 ETAP_TIMESTAMP(stop_wait_time);
1313 ETAP_TOTAL_TIME(total_time,
1314 stop_wait_time,
1315 entry->start_wait_time);
1316 CUM_READ_ENTRY_RESERVE(l, l->cbuff_read, trace);
1317 CUM_WAIT_ACCUMULATE(l->cbuff_read, total_time, dynamic, trace);
1318 MON_DATA_COLLECT(l,
1319 entry,
1320 total_time,
1321 READ_LOCK,
1322 MON_CONTENTION,
1323 trace);
1324 }
1325 simple_unlock(&l->interlock);
1326
1327 /*
1328 * Set start hold time if some type of hold tracing is enabled.
1329 *
1330 * Note: if the stop_wait_time was already stamped, use
1331 * it instead of doing an expensive bus access.
1332 *
1333 */
1334
1335 if (lock_miss && ETAP_CONTENTION_ENABLED(trace))
1336 ETAP_COPY_START_HOLD_TIME(entry, stop_wait_time, trace);
1337 else
1338 ETAP_DURATION_TIMESTAMP(entry,trace);
1339}
1340
1341
1342/*
1343 * Routine: lock_read_to_write
1344 * Function:
1345 * Improves a read-only lock to one with
1346 * write permission. If another reader has
1347 * already requested an upgrade to a write lock,
1348 * no lock is held upon return.
1349 *
1350 * Returns TRUE if the upgrade *failed*.
1351 */
1352
1353boolean_t
1354lock_read_to_write(
1355 register lock_t * l)
1356{
1357 register int i;
1358 boolean_t do_wakeup = FALSE;
1359 start_data_node_t entry = {0};
1360 boolean_t lock_miss = FALSE;
1361 unsigned short dynamic = 0;
1362 unsigned short trace = 0;
1363 etap_time_t total_time;
1364 etap_time_t stop_time;
1365 pc_t pc;
1366#if MACH_LDEBUG
1367 int decrementer;
1368#endif /* MACH_LDEBUG */
1369
1370
1371 ETAP_STAMP(lock_event_table(l), trace, dynamic);
1372
1373 simple_lock(&l->interlock);
1374
1375 l->read_count--;
1376
1377 /*
1378 * Since the read lock is lost whether the write lock
1379 * is acquired or not, read hold data is collected here.
1380 * This, of course, is assuming some type of hold
1381 * tracing is enabled.
1382 *
1383 * Note: trace is set to zero if the entry does not exist.
1384 */
1385
1386 ETAP_FIND_ENTRY(l, entry, trace);
1387
1388 if (ETAP_DURATION_ENABLED(trace)) {
1389 ETAP_TIMESTAMP(stop_time);
1390 ETAP_TOTAL_TIME(total_time, stop_time, entry->start_hold_time);
1391 CUM_HOLD_ACCUMULATE(l->cbuff_read, total_time, dynamic, trace);
1392 MON_ASSIGN_PC(entry->end_pc, pc, trace);
1393 MON_DATA_COLLECT(l,
1394 entry,
1395 total_time,
1396 READ_LOCK,
1397 MON_DURATION,
1398 trace);
1399 }
1400
1401 if (l->want_upgrade) {
1402 /*
1403 * Someone else has requested upgrade.
1404 * Since we've released a read lock, wake
1405 * him up.
1406 */
1407 if (l->waiting && (l->read_count == 0)) {
1408 l->waiting = FALSE;
1409 do_wakeup = TRUE;
1410 }
1411
1412 ETAP_UNLINK_ENTRY(l, entry);
1413 simple_unlock(&l->interlock);
1414 ETAP_DESTROY_ENTRY(entry);
1415
1416 if (do_wakeup)
1417 thread_wakeup((event_t) l);
1418 return (TRUE);
1419 }
1420
1421 l->want_upgrade = TRUE;
1422
1423 MON_ASSIGN_PC(entry->start_pc, pc, trace);
1424
1425#if MACH_LDEBUG
1426 decrementer = DECREMENTER_TIMEOUT;
1427#endif /* MACH_LDEBUG */
1428 while (l->read_count != 0) {
1429 if (!lock_miss) {
1430 ETAP_CONTENTION_TIMESTAMP(entry, trace);
1431 lock_miss = TRUE;
1432 }
1433
1434 i = lock_wait_time[l->can_sleep ? 1 : 0];
1435
1436 if (i != 0) {
1437 simple_unlock(&l->interlock);
1438#if MACH_LDEBUG
1439 if (!--decrementer)
1440 Debugger("timeout - read_count");
1441#endif /* MACH_LDEBUG */
1442 while (--i != 0 && l->read_count != 0)
1443 continue;
1444 simple_lock(&l->interlock);
1445 }
1446
1447 if (l->can_sleep && l->read_count != 0) {
1448 l->waiting = TRUE;
1449 thread_sleep_simple_lock((event_t) l,
1450 simple_lock_addr(l->interlock),
1451 THREAD_UNINT);
1452 /* interlock relocked */
1453 }
1454 }
1455
1456 /*
1457 * do not collect wait data if the lock was free
1458 * or if no wait traces are enabled.
1459 */
1460
1461 if (lock_miss && ETAP_CONTENTION_ENABLED(trace)) {
1462 ETAP_TIMESTAMP (stop_time);
1463 ETAP_TOTAL_TIME(total_time, stop_time, entry->start_wait_time);
1464 CUM_WAIT_ACCUMULATE(l->cbuff_write, total_time, dynamic, trace);
1465 MON_DATA_COLLECT(l,
1466 entry,
1467 total_time,
1468 WRITE_LOCK,
1469 MON_CONTENTION,
1470 trace);
1471 }
1472
1473 simple_unlock(&l->interlock);
1474
1475 /*
1476 * Set start hold time if some type of hold tracing is enabled
1477 *
1478 * Note: if the stop_time was already stamped, use
1479 * it as the new start_hold_time instead of doing
1480 * an expensive VME access.
1481 *
1482 */
1483
1484 if (lock_miss && ETAP_CONTENTION_ENABLED(trace))
1485 ETAP_COPY_START_HOLD_TIME(entry, stop_time, trace);
1486 else
1487 ETAP_DURATION_TIMESTAMP(entry, trace);
1488
1489 return (FALSE);
1490}
1491
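/*
 * Typical caller pattern (a minimal sketch, not taken from this file):
 * because a failed upgrade drops the read hold entirely, the caller
 * must re-acquire the lock from scratch before proceeding:
 *
 *	lock_read(&example_rw_lock);
 *	...
 *	if (lock_read_to_write(&example_rw_lock)) {
 *		-- upgrade failed, no lock is held at this point --
 *		lock_write(&example_rw_lock);
 *	}
 *	... exclusive access either way ...
 *	lock_done(&example_rw_lock);
 */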
1492void
1493lock_write_to_read(
1494 register lock_t * l)
1495{
1496 boolean_t do_wakeup = FALSE;
1497 start_data_node_t entry = {0};
1498 unsigned short dynamic = 0;
1499 unsigned short trace = 0;
1500 etap_time_t stop_hold_time;
1501 etap_time_t total_time;
1502 pc_t pc;
1503
1504 ETAP_STAMP(lock_event_table(l), trace,dynamic);
1505
1506 simple_lock(&l->interlock);
1507
1508 l->read_count++;
1509 if (l->want_upgrade)
1510 l->want_upgrade = FALSE;
1511 else
1512 l->want_write = FALSE;
1513
1514 if (l->waiting) {
1515 l->waiting = FALSE;
1516 do_wakeup = TRUE;
1517 }
1518
1519 /*
1520 * Since we are switching from a write lock to a read lock,
1521 * the write lock data is stored and the read lock data
1522 * collection begins.
1523 *
1524 * Note: trace is set to zero if the entry does not exist.
1525 */
1526
1527 ETAP_FIND_ENTRY(l, entry, trace);
1528
1529 if (ETAP_DURATION_ENABLED(trace)) {
1530 ETAP_TIMESTAMP (stop_hold_time);
1531 ETAP_TOTAL_TIME(total_time, stop_hold_time, entry->start_hold_time);
1532 CUM_HOLD_ACCUMULATE(l->cbuff_write, total_time, dynamic, trace);
1533 MON_ASSIGN_PC(entry->end_pc, pc, trace);
1534 MON_DATA_COLLECT(l,
1535 entry,
1536 total_time,
1537 WRITE_LOCK,
1538 MON_DURATION,
1539 trace);
1540 }
1541
1542 simple_unlock(&l->interlock);
1543
1544 /*
1545 * Set start hold time if some type of hold tracing is enabled
1546 *
1547 * Note: if the stop_hold_time was already stamped, use
1548 * it as the new start_hold_time instead of doing
1549 * an expensive bus access.
1550 *
1551 */
1552
1553 if (ETAP_DURATION_ENABLED(trace))
1554 ETAP_COPY_START_HOLD_TIME(entry, stop_hold_time, trace);
1555 else
1556 ETAP_DURATION_TIMESTAMP(entry, trace);
1557
1558 MON_ASSIGN_PC(entry->start_pc, pc, trace);
1559
1560 if (do_wakeup)
1561 thread_wakeup((event_t) l);
1562}
1563
1564
1565#if 0 /* Unused */
1566/*
1567 * Routine: lock_try_write
1568 * Function:
1569 * Tries to get a write lock.
1570 *
1571 * Returns FALSE if the lock is not held on return.
1572 */
1573
1574boolean_t
1575lock_try_write(
1576 register lock_t * l)
1577{
1578 start_data_node_t entry = {0};
1579 unsigned short trace = 0;
1580 pc_t pc;
1581
1582 ETAP_STAMP(lock_event_table(l), trace, trace);
1583 ETAP_CREATE_ENTRY(entry, trace);
1584
1585 simple_lock(&l->interlock);
1586
1587 if (l->want_write || l->want_upgrade || l->read_count) {
1588 /*
1589 * Can't get lock.
1590 */
1591 simple_unlock(&l->interlock);
1592 ETAP_DESTROY_ENTRY(entry);
1593 return(FALSE);
1594 }
1595
1596 /*
1597 * Have lock.
1598 */
1599
1600 l->want_write = TRUE;
1601
1602 ETAP_LINK_ENTRY(l, entry, trace);
1603
1604 simple_unlock(&l->interlock);
1605
1606 MON_ASSIGN_PC(entry->start_pc, pc, trace);
1607 ETAP_DURATION_TIMESTAMP(entry, trace);
1608
1609 return(TRUE);
1610}
1611
1612/*
1613 * Routine: lock_try_read
1614 * Function:
1615 * Tries to get a read lock.
1616 *
1617 * Returns FALSE if the lock is not held on return.
1618 */
1619
1620boolean_t
1621lock_try_read(
1622 register lock_t * l)
1623{
1624 start_data_node_t entry = {0};
1625 unsigned short trace = 0;
1626 pc_t pc;
1627
1628 ETAP_STAMP(lock_event_table(l), trace, trace);
1629 ETAP_CREATE_ENTRY(entry, trace);
1630
1631 simple_lock(&l->interlock);
1632
1633 if (l->want_write || l->want_upgrade) {
1634 simple_unlock(&l->interlock);
1635 ETAP_DESTROY_ENTRY(entry);
1636 return(FALSE);
1637 }
1638
1639 l->read_count++;
1640
1641 ETAP_LINK_ENTRY(l, entry, trace);
1642
1643 simple_unlock(&l->interlock);
1644
1645 MON_ASSIGN_PC(entry->start_pc, pc, trace);
1646 ETAP_DURATION_TIMESTAMP(entry, trace);
1647
1648 return(TRUE);
1649}
1650#endif /* Unused */
1651
1652#if MACH_KDB
1653
1654void db_show_one_lock(lock_t *);
1655
1656
1657void
1658db_show_one_lock(
1659 lock_t *lock)
1660{
1661 db_printf("Read_count = 0x%x, %swant_upgrade, %swant_write, ",
1662 lock->read_count,
1663 lock->want_upgrade ? "" : "!",
1664 lock->want_write ? "" : "!");
1665 db_printf("%swaiting, %scan_sleep\n",
1666 lock->waiting ? "" : "!", lock->can_sleep ? "" : "!");
1667 db_printf("Interlock:\n");
1668 db_show_one_simple_lock((db_expr_t)simple_lock_addr(lock->interlock),
1669 TRUE, (db_expr_t)0, (char *)0);
1670}
1671#endif /* MACH_KDB */
1672
1673/*
1674 * The C portion of the mutex package. These routines are only invoked
1675 * if the optimized assembler routines can't do the work.
1676 */
1677
1678/*
1679 * Routine: mutex_alloc
1680 * Function:
1681 * Allocate a mutex for external users who cannot
1682 * hard-code the structure definition into their
1683 * objects.
1684 * For now just use kalloc, but a zone is probably
1685 * warranted.
1686 */
1687mutex_t *
1688mutex_alloc(
1689 etap_event_t event)
1690{
1691 mutex_t *m;
1692
1693 if ((m = (mutex_t *)kalloc(sizeof(mutex_t))) != 0)
1694 mutex_init(m, event);
1695 return(m);
1696}
1697
1698/*
1699 * Routine: mutex_free
1700 * Function:
1701 * Free a mutex allocated for external users.
1702 * For now just use kfree, but a zone is probably
1703 * warranted.
1704 */
1705void
1706mutex_free(
1707 mutex_t *m)
1708{
1709 kfree((vm_offset_t)m, sizeof(mutex_t));
1710}
1711
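/*
 * Usage sketch (illustrative only; the event name is hypothetical, and
 * mutex_lock()/mutex_unlock() are the machine-dependent fast paths that
 * call into the C routines below only on contention):
 *
 *	mutex_t *m = mutex_alloc(EXAMPLE_EVENT);
 *
 *	if (m != (mutex_t *)0) {
 *		mutex_lock(m);
 *		... critical section, may block ...
 *		mutex_unlock(m);
 *		mutex_free(m);
 *	}
 */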
1712/*
1713 * mutex_lock_wait
1714 *
1715 * Invoked in order to wait on contention.
1716 *
1717 * Called with the interlock locked and
1718 * returns it unlocked.
1719 */
1720void
1721mutex_lock_wait (
1722 mutex_t *mutex,
1723 thread_act_t holder)
1724{
1725 thread_t thread, self = current_thread();
1726#if !defined(i386)
1727 integer_t priority;
1728 spl_t s = splsched();
1729
1730 priority = self->last_processor->current_pri;
1731 if (priority < self->priority)
1732 priority = self->priority;
1733 if (priority > MINPRI_KERNEL)
1734 priority = MINPRI_KERNEL;
1735 else
1736 if (priority < BASEPRI_DEFAULT)
1737 priority = BASEPRI_DEFAULT;
1738
1739 thread = holder->thread;
1740 assert(thread->top_act == holder); /* XXX */
1741 thread_lock(thread);
1742 if (mutex->promoted_pri == 0)
1743 thread->promotions++;
1744 if (thread->priority < MINPRI_KERNEL) {
1745 thread->sched_mode |= TH_MODE_PROMOTED;
1746 if ( mutex->promoted_pri < priority &&
1747 thread->sched_pri < priority ) {
1748 KERNEL_DEBUG_CONSTANT(
1749 MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
1750 thread->sched_pri, priority, (int)thread, (int)mutex, 0);
1751
1752 set_sched_pri(thread, priority);
1753 }
1754 }
1755 thread_unlock(thread);
1756 splx(s);
1757
1758 if (mutex->promoted_pri < priority)
1759 mutex->promoted_pri = priority;
1760#endif
1761
1762 if (self->pending_promoter[self->pending_promoter_index] == NULL) {
1763 self->pending_promoter[self->pending_promoter_index] = mutex;
1764 mutex->waiters++;
1765 }
1766 else
1767 if (self->pending_promoter[self->pending_promoter_index] != mutex) {
1768 self->pending_promoter[++self->pending_promoter_index] = mutex;
1769 mutex->waiters++;
1770 }
1771
1772 assert_wait(mutex, THREAD_UNINT);
1773 interlock_unlock(&mutex->interlock);
1774
1775 thread_block(THREAD_CONTINUE_NULL);
1776}
1777
1778/*
1779 * mutex_lock_acquire
1780 *
1781 * Invoked on acquiring the mutex when there is
1782 * contention.
1783 *
1784 * Returns the current number of waiters.
1785 *
1786 * Called with the interlock locked.
1787 */
1788int
1789mutex_lock_acquire(
1790 mutex_t *mutex)
1791{
1792 thread_t thread = current_thread();
1793
1794 if (thread->pending_promoter[thread->pending_promoter_index] == mutex) {
1795 thread->pending_promoter[thread->pending_promoter_index] = NULL;
1796 if (thread->pending_promoter_index > 0)
1797 thread->pending_promoter_index--;
1798 mutex->waiters--;
1799 }
1800
1801#if !defined(i386)
1802 if (mutex->waiters > 0) {
1803 integer_t priority = mutex->promoted_pri;
1804 spl_t s = splsched();
1805
1806 thread_lock(thread);
1807 thread->promotions++;
1808 if (thread->priority < MINPRI_KERNEL) {
1809 thread->sched_mode |= TH_MODE_PROMOTED;
1810 if (thread->sched_pri < priority) {
1811 KERNEL_DEBUG_CONSTANT(
1812 MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
1813 thread->sched_pri, priority, 0, (int)mutex, 0);
1814
1815 set_sched_pri(thread, priority);
1816 }
1817 }
1818 thread_unlock(thread);
1819 splx(s);
1820 }
1821 else
1822 mutex->promoted_pri = 0;
1823#endif
1824
1825 return (mutex->waiters);
1826}
1827
1828/*
1829 * mutex_unlock_wakeup
1830 *
1831 * Invoked on unlock when there is contention.
1832 *
1833 * Called with the interlock locked.
1834 */
1835void
1836mutex_unlock_wakeup (
1837 mutex_t *mutex,
1838 thread_act_t holder)
1839{
1840#if !defined(i386)
1841 thread_t thread = current_thread();
1842
1843 if (thread->top_act != holder)
1844 panic("mutex_unlock_wakeup: mutex %x holder %x\n", mutex, holder);
1845
1846 if (thread->promotions > 0) {
1847 spl_t s = splsched();
1848
1849 thread_lock(thread);
1850 if ( --thread->promotions == 0 &&
1851 (thread->sched_mode & TH_MODE_PROMOTED) ) {
1852 thread->sched_mode &= ~TH_MODE_PROMOTED;
1853 if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
1854 KERNEL_DEBUG_CONSTANT(
1855 MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
1856 thread->sched_pri, DEPRESSPRI, 0, (int)mutex, 0);
1857
1858 set_sched_pri(thread, DEPRESSPRI);
1859 }
1860 else {
1861 if (thread->priority < thread->sched_pri) {
1862 KERNEL_DEBUG_CONSTANT(
1863 MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) |
1864 DBG_FUNC_NONE,
1865 thread->sched_pri, thread->priority,
1866 0, (int)mutex, 0);
1867 }
1868
1869 compute_priority(thread, FALSE);
1870 }
1871 }
1872 thread_unlock(thread);
1873 splx(s);
1874 }
1875#endif
1876
1877 assert(mutex->waiters > 0);
1878 thread_wakeup_one(mutex);
1879}
1880
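/*
 * Calling-protocol sketch (an outline only, inferred from the routine
 * headers above; the control flow of the real assembler fast path is
 * machine dependent):
 *
 *	lock slow path, interlock held:
 *		mutex_lock_wait(mutex, holder);	    -- blocks; returns with
 *						       the interlock unlocked
 *		retry the acquire; once it succeeds, with the interlock
 *		held again:
 *		mutex_lock_acquire(mutex);	    -- returns the number of
 *						       remaining waiters
 *
 *	unlock slow path, interlock held, waiters present:
 *		mutex_unlock_wakeup(mutex, holder); -- undoes any priority
 *						       promotion and wakes
 *						       one waiter
 */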
1881/*
1882 * mutex_pause: Called by former callers of simple_lock_pause().
1883 */
1884
1885void
1886mutex_pause(void)
1887{
1888 wait_result_t wait_result;
1889
1890 wait_result = assert_wait_timeout( 1, THREAD_UNINT);
1891 assert(wait_result == THREAD_WAITING);
1892
1893 ETAP_SET_REASON(current_thread(), BLOCKED_ON_MUTEX_LOCK);
1894
1895 wait_result = thread_block(THREAD_CONTINUE_NULL);
1896 assert(wait_result == THREAD_TIMED_OUT);
1897}
1898
1899#if MACH_KDB
1900/*
1901 * Routines to print out simple_locks and mutexes in a nicely-formatted
1902 * fashion.
1903 */
1904
1905char *simple_lock_labels = "ENTRY ILK THREAD DURATION CALLER";
1906char *mutex_labels = "ENTRY LOCKED WAITERS THREAD CALLER";
1907
1908void
1909db_show_one_simple_lock (
1910 db_expr_t addr,
1911 boolean_t have_addr,
1912 db_expr_t count,
1913 char * modif)
1914{
1915 simple_lock_t saddr = (simple_lock_t)addr;
1916
1917 if (saddr == (simple_lock_t)0 || !have_addr) {
1918 db_error ("No simple_lock\n");
1919 }
1920#if USLOCK_DEBUG
1921 else if (saddr->lock_type != USLOCK_TAG)
1922 db_error ("Not a simple_lock\n");
1923#endif /* USLOCK_DEBUG */
1924
1925 db_printf ("%s\n", simple_lock_labels);
1926 db_print_simple_lock (saddr);
1927}
1928
1929void
1930db_print_simple_lock (
1931 simple_lock_t addr)
1932{
1933
1934 db_printf ("%08x %3d", addr, *hw_lock_addr(addr->interlock));
1935#if USLOCK_DEBUG
1936 db_printf (" %08x", addr->debug.lock_thread);
1937 db_printf (" %08x ", addr->debug.duration[1]);
1938 db_printsym ((int)addr->debug.lock_pc, DB_STGY_ANY);
1939#endif /* USLOCK_DEBUG */
1940 db_printf ("\n");
1941}
1942
1943void
1944db_show_one_mutex (
1945 db_expr_t addr,
1946 boolean_t have_addr,
1947 db_expr_t count,
1948 char * modif)
1949{
1950 mutex_t * maddr = (mutex_t *)addr;
1951
1952 if (maddr == (mutex_t *)0 || !have_addr)
1953 db_error ("No mutex\n");
1954#if MACH_LDEBUG
1955 else if (maddr->type != MUTEX_TAG)
1956 db_error ("Not a mutex\n");
1957#endif /* MACH_LDEBUG */
1958
1959 db_printf ("%s\n", mutex_labels);
1960 db_print_mutex (maddr);
1961}
1962
1963void
1964db_print_mutex (
1965 mutex_t * addr)
1966{
1967 db_printf ("%08x %6d %7d",
1968 addr, *hw_lock_addr(addr->locked), addr->waiters);
1969#if MACH_LDEBUG
1970 db_printf (" %08x ", addr->thread);
1971 db_printsym (addr->pc, DB_STGY_ANY);
1972#endif /* MACH_LDEBUG */
1973 db_printf ("\n");
1974}
1975#endif /* MACH_KDB */
1976
1977#if MACH_LDEBUG
1978extern void meter_simple_lock (
1979 simple_lock_t l);
1980extern void meter_simple_unlock (
1981 simple_lock_t l);
1982extern void cyctm05_stamp (
1983 unsigned long * start);
1984extern void cyctm05_diff (
1985 unsigned long * start,
1986 unsigned long * end,
1987 unsigned long * diff);
1988
1989#if 0
1990simple_lock_data_t loser;
1991#endif
1992
1993void
1994meter_simple_lock(
1995 simple_lock_t lp)
1996{
1997#if 0
1998 cyctm05_stamp (lp->duration);
1999#endif
2000}
2001
2002int long_simple_lock_crash;
2003int long_simple_lock_time = 0x600;
2004/*
2005 * This is pretty gawd-awful. XXX
2006 */
2007decl_simple_lock_data(extern,kd_tty)
2008
2009void
2010meter_simple_unlock(
2011 simple_lock_t lp)
2012{
2013#if 0
2014 unsigned long stime[2], etime[2], delta[2];
2015
2016 if (lp == &kd_tty) /* XXX */
2017 return; /* XXX */
2018
2019 stime[0] = lp->duration[0];
2020 stime[1] = lp->duration[1];
2021
2022 cyctm05_stamp (etime);
2023
2024 if (etime[1] < stime[1]) /* XXX */
2025 return; /* XXX */
2026
2027 cyctm05_diff (stime, etime, delta);
2028
2029 if (delta[1] >= 0x10000) /* XXX */
2030 return; /* XXX */
2031
2032 lp->duration[0] = delta[0];
2033 lp->duration[1] = delta[1];
2034
2035 if (loser.duration[1] < lp->duration[1])
2036 loser = *lp;
2037
2038 assert (!long_simple_lock_crash || delta[1] < long_simple_lock_time);
2039#endif
2040}
2041#endif /* MACH_LDEBUG */
2042
2043
2044#if ETAP_LOCK_TRACE
2045
2046/*
2047 * ==============================================================
2048 * ETAP hook when initializing a usimple_lock. May be invoked
2049 * from the portable lock package or from an optimized machine-
2050 * dependent implementation.
2051 * ==============================================================
2052 */
2053
2054void
2055etap_simplelock_init (
2056 simple_lock_t l,
2057 etap_event_t event)
2058{
2059 ETAP_CLEAR_TRACE_DATA(l);
2060 etap_event_table_assign(&l->u.event_table_chain, event);
2061
2062#if ETAP_LOCK_ACCUMULATE
2063 /* reserve an entry in the cumulative buffer */
2064 l->cbuff_entry = etap_cbuff_reserve(lock_event_table(l));
2065 /* initialize the entry if one was returned */
2066 if (l->cbuff_entry != CBUFF_ENTRY_NULL) {
2067 l->cbuff_entry->event = event;
2068 l->cbuff_entry->instance = (unsigned long) l;
2069 l->cbuff_entry->kind = SPIN_LOCK;
2070 }
2071#endif /* ETAP_LOCK_ACCUMULATE */
2072}
2073
2074
2075void
2076etap_simplelock_unlock(
2077 simple_lock_t l)
2078{
2079 unsigned short dynamic = 0;
2080 unsigned short trace = 0;
2081 etap_time_t total_time;
2082 etap_time_t stop_hold_time;
2083 pc_t pc;
2084
2085 OBTAIN_PC(pc, l);
2086 ETAP_STAMP(lock_event_table(l), trace, dynamic);
2087
2088 /*
2089 * Calculate & collect hold time data only if
2090 * the hold tracing was enabled throughout the
2091 * whole operation. This prevents collection of
2092 * bogus data caused by mid-operation trace changes.
2093 *
2094 */
2095
2096 if (ETAP_DURATION_ENABLED(trace) && ETAP_WHOLE_OP(l)) {
2097 ETAP_TIMESTAMP (stop_hold_time);
2098 ETAP_TOTAL_TIME(total_time, stop_hold_time,
2099 l->u.s.start_hold_time);
2100 CUM_HOLD_ACCUMULATE(l->cbuff_entry, total_time, dynamic, trace);
2101 MON_ASSIGN_PC(l->end_pc, pc, trace);
2102 MON_DATA_COLLECT(l,
2103 l,
2104 total_time,
2105 SPIN_LOCK,
2106 MON_DURATION,
2107 trace);
2108 }
2109 ETAP_CLEAR_TRACE_DATA(l);
2110}
2111
2112/* ========================================================================
2113 * Since the the simple_lock() routine is machine dependant, it must always
2114 * be coded in assembly. The two hook routines below are used to collect
2115 * lock_stat data.
2116 * ========================================================================
2117 */
2118
2119/*
2120 * ROUTINE: etap_simplelock_miss()
2121 *
2122 * FUNCTION: This spin lock routine is called upon the first
2123 * spin (miss) of the lock.
2124 *
2125 * A timestamp is taken at the beginning of the wait period,
2126 * if wait tracing is enabled.
2127 *
2128 *
2129 * PARAMETERS:
2130 * - lock address.
2131 * - timestamp address.
2132 *
2133 * RETURNS: Wait timestamp value. The timestamp value is later used
2134 * by etap_simplelock_hold().
2135 *
2136 * NOTES: This routine is NOT ALWAYS called. The lock may be free
2137 * (never spinning). For this reason the pc is collected in
2138 * etap_simplelock_hold().
2139 *
2140 */
2141etap_time_t
2142etap_simplelock_miss (
2143 simple_lock_t l)
2144
2145{
2146 unsigned short trace = 0;
2147 unsigned short dynamic = 0;
2148 etap_time_t start_miss_time;
2149
2150 ETAP_STAMP(lock_event_table(l), trace, dynamic);
2151
2152 if (trace & ETAP_CONTENTION)
2153 ETAP_TIMESTAMP(start_miss_time);
2154
2155 return(start_miss_time);
2156}
2157
2158/*
2159 * ROUTINE: etap_simplelock_hold()
2160 *
2161 * FUNCTION: This spin lock routine is ALWAYS called once the lock
2162 * is acquired. Here, the contention time is calculated and
2163 * the start hold time is stamped.
2164 *
2165 * PARAMETERS:
2166 * - lock address.
2167 * - PC of the calling function.
2168 * - start wait timestamp.
2169 *
2170 */
2171
2172void
2173etap_simplelock_hold (
2174 simple_lock_t l,
2175 pc_t pc,
2176 etap_time_t start_hold_time)
2177{
2178 unsigned short dynamic = 0;
2179 unsigned short trace = 0;
2180 etap_time_t total_time;
2181 etap_time_t stop_hold_time;
2182
2183 ETAP_STAMP(lock_event_table(l), trace, dynamic);
2184
2185 MON_ASSIGN_PC(l->start_pc, pc, trace);
2186
2187 /* do not collect wait data if lock was free */
2188 if (!ETAP_TIME_IS_ZERO(start_hold_time) && (trace & ETAP_CONTENTION)) {
2189 ETAP_TIMESTAMP(stop_hold_time);
2190 ETAP_TOTAL_TIME(total_time,
2191 stop_hold_time,
2192 start_hold_time);
2193 CUM_WAIT_ACCUMULATE(l->cbuff_entry, total_time, dynamic, trace);
2194 MON_DATA_COLLECT(l,
2195 l,
2196 total_time,
2197 SPIN_LOCK,
2198 MON_CONTENTION,
2199 trace);
2200 ETAP_COPY_START_HOLD_TIME(&l->u.s, stop_hold_time, trace);
2201 }
2202 else
2203 ETAP_DURATION_TIMESTAMP(&l->u.s, trace);
2204}
2205
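/*
 * Sketch of how a machine-dependent simple_lock() path is expected to
 * drive these hooks (illustrative pseudo-code; not the actual
 * assembler implementation):
 *
 *	etap_time_t start_wait;
 *
 *	ETAP_TIME_CLEAR(start_wait);
 *	if (the first attempt on the lock misses)
 *		start_wait = etap_simplelock_miss(l);
 *	... spin until the lock is acquired ...
 *	etap_simplelock_hold(l, pc, start_wait);
 */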
2206void
2207etap_mutex_init (
2208 mutex_t *l,
2209 etap_event_t event)
2210{
2211 ETAP_CLEAR_TRACE_DATA(l);
2212 etap_event_table_assign(&l->u.event_table_chain, event);
2213
2214#if ETAP_LOCK_ACCUMULATE
2215 /* reserve an entry in the cumulative buffer */
2216 l->cbuff_entry = etap_cbuff_reserve(lock_event_table(l));
2217 /* initialize the entry if one was returned */
2218 if (l->cbuff_entry != CBUFF_ENTRY_NULL) {
2219 l->cbuff_entry->event = event;
2220 l->cbuff_entry->instance = (unsigned long) l;
2221 l->cbuff_entry->kind = MUTEX_LOCK;
2222 }
2223#endif /* ETAP_LOCK_ACCUMULATE */
2224}
2225
2226etap_time_t
2227etap_mutex_miss (
2228 mutex_t *l)
2229{
2230 unsigned short trace = 0;
2231 unsigned short dynamic = 0;
2232 etap_time_t start_miss_time;
2233
2234 ETAP_STAMP(lock_event_table(l), trace, dynamic);
2235
2236 if (trace & ETAP_CONTENTION)
2237 ETAP_TIMESTAMP(start_miss_time);
2238 else
2239 ETAP_TIME_CLEAR(start_miss_time);
2240
2241 return(start_miss_time);
2242}
2243
2244void
2245etap_mutex_hold (
2246 mutex_t *l,
2247 pc_t pc,
2248 etap_time_t start_hold_time)
2249{
2250 unsigned short dynamic = 0;
2251 unsigned short trace = 0;
2252 etap_time_t total_time;
2253 etap_time_t stop_hold_time;
2254
2255 ETAP_STAMP(lock_event_table(l), trace, dynamic);
2256
2257 MON_ASSIGN_PC(l->start_pc, pc, trace);
2258
2259 /* do not collect wait data if lock was free */
2260 if (!ETAP_TIME_IS_ZERO(start_hold_time) && (trace & ETAP_CONTENTION)) {
2261 ETAP_TIMESTAMP(stop_hold_time);
2262 ETAP_TOTAL_TIME(total_time,
2263 stop_hold_time,
2264 start_hold_time);
2265 CUM_WAIT_ACCUMULATE(l->cbuff_entry, total_time, dynamic, trace);
2266 MON_DATA_COLLECT(l,
2267 l,
2268 total_time,
2269 MUTEX_LOCK,
2270 MON_CONTENTION,
2271 trace);
2272 ETAP_COPY_START_HOLD_TIME(&l->u.s, stop_hold_time, trace);
2273 }
2274 else
2275 ETAP_DURATION_TIMESTAMP(&l->u.s, trace);
2276}
2277
2278void
2279etap_mutex_unlock(
2280 mutex_t *l)
2281{
2282 unsigned short dynamic = 0;
2283 unsigned short trace = 0;
2284 etap_time_t total_time;
2285 etap_time_t stop_hold_time;
2286 pc_t pc;
2287
2288 OBTAIN_PC(pc, l);
2289 ETAP_STAMP(lock_event_table(l), trace, dynamic);
2290
2291 /*
2292 * Calculate & collect hold time data only if
2293 * the hold tracing was enabled throughout the
2294 * whole operation. This prevents collection of
2295 * bogus data caused by mid-operation trace changes.
2296 *
2297 */
2298
2299 if (ETAP_DURATION_ENABLED(trace) && ETAP_WHOLE_OP(l)) {
2300 ETAP_TIMESTAMP(stop_hold_time);
2301 ETAP_TOTAL_TIME(total_time, stop_hold_time,
2302 l->u.s.start_hold_time);
2303 CUM_HOLD_ACCUMULATE(l->cbuff_entry, total_time, dynamic, trace);
2304 MON_ASSIGN_PC(l->end_pc, pc, trace);
2305 MON_DATA_COLLECT(l,
2306 l,
2307 total_time,
2308 MUTEX_LOCK,
2309 MON_DURATION,
2310 trace);
2311 }
2312 ETAP_CLEAR_TRACE_DATA(l);
2313}
2314
2315#endif /* ETAP_LOCK_TRACE */