2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
26 * Mach Operating System
27 * Copyright (c) 1989 Carnegie-Mellon University
28 * All rights reserved. The CMU software License Agreement specifies
29 * the terms and conditions for use and redistribution.
34 #include <platforms.h>
35 #include <mach_ldebug.h>
37 #include <kern/etap_options.h>
42 * When performance isn't the only concern, it's
43 * nice to build stack frames...
/*
 * Argument/local addressing depends on whether these routines build an
 * %ebp stack frame (debug/trace builds) or run frameless off %esp.
 */
45 #define BUILD_STACK_FRAMES ((MACH_LDEBUG || ETAP_LOCK_TRACE) && MACH_KDB)
47 #if BUILD_STACK_FRAMES
/* Frame builds: caller args reached through %ebp. */
50 #define L_ARG0 8(%ebp)
51 #define L_ARG1 12(%ebp)
/* ETAP locals below the frame pointer: wait-time hi/lo words, miss flag. */
53 #define SWT_HI -4(%ebp)
54 #define SWT_LO -8(%ebp)
55 #define MISSED -12(%ebp)
57 #else /* BUILD_STACK_FRAMES */
/* Frameless builds: args reached directly off %esp (no saved %ebp). */
64 #define L_ARG0 4(%esp)
65 #define L_ARG1 8(%esp)
67 #endif /* BUILD_STACK_FRAMES */
/*
 * Mutex field byte offsets, addressed off the lock pointer held in %edx.
 * NOTE(review): the #if/#else lines pairing with the #endif below are not
 * visible in this excerpt; offsets presumably differ when MACH_LDEBUG
 * debug fields (M_TYPE, M_THREAD, ...) are compiled in -- confirm against
 * the mutex structure definition.
 */
71 #define M_LOCKED 1(%edx)
72 #define M_WAITERS 2(%edx)
73 #define M_PROMOTED_PRI 4(%edx)
75 #define M_TYPE 6(%edx)
77 #define M_THREAD 14(%edx)
78 #endif /* MACH_LDEBUG */
80 #include <i386/AT386/mp/mp.h>
/* MP builds: CX(addr,reg) indexes a per-CPU array of 4-byte elements. */
82 #define CX(addr,reg) addr(,reg,4)
/* UP builds: CPU number is irrelevant; CX collapses to the bare address. */
84 #define CPU_NUMBER(reg)
85 #define CX(addr,reg) addr
86 #endif /* (NCPUS > 1) */
90 * Routines for general lock debugging.
/* Simple-lock debug field offsets, addressed off the lock pointer in %edx. */
92 #define S_TYPE 4(%edx)
94 #define S_THREAD 12(%edx)
95 #define S_DURATIONH 16(%edx)
96 #define S_DURATIONL 20(%edx)
99 * Checks for expected lock types and calls "panic" on
100 * mismatch. Detects calls to Mutex functions with
101 * type simplelock and vice versa.
/*
 * Compare the lock's tag word against the expected type tag and panic on
 * mismatch (the panic-call lines of these macros are not visible in this
 * excerpt; only the compare and the message string remain).
 */
103 #define CHECK_MUTEX_TYPE() \
104 cmpl $ MUTEX_TAG,M_TYPE ; \
110 2: String "not a mutex!" ; \
114 #define CHECK_SIMPLE_LOCK_TYPE() \
115 cmpl $ SIMPLE_LOCK_TAG,S_TYPE ; \
121 2: String "not a simple lock!" ; \
126 * If one or more simplelocks are currently held by a thread,
127 * an attempt to acquire a mutex will cause this check to fail
128 * (since a mutex lock may context switch, holding a simplelock
129 * is not a good thing).
/* Preemption-level sanity check: deliberately compiled out (#if 0 below). */
131 #if 0 /*MACH_RT - 11/12/99 - lion@apple.com disable check for now*/
132 #define CHECK_PREEMPTION_LEVEL() \
133 movl $ CPD_PREEMPTION_LEVEL,%eax ; \
134 cmpl $0,%gs:(%eax) ; \
140 2: String "preemption_level != 0!" ; \
144 #define CHECK_PREEMPTION_LEVEL()
/* Panic if this CPU's simple-lock count (per-CPU data via %gs) is non-zero. */
147 #define CHECK_NO_SIMPLELOCKS() \
148 movl $ CPD_SIMPLE_LOCK_COUNT,%eax ; \
149 cmpl $0,%gs:(%eax) ; \
155 2: String "simple_locks_held!" ; \
160 * Verifies return to the correct thread in "unlock" situations.
/*
 * CHECK_THREAD/CHECK_MYLOCK fetch the active thread from per-CPU data
 * (via %gs) into %ecx and compare it against the lock's recorded owner;
 * the METER_* macros call out to C metering hooks.  Interior lines of
 * these macros (compares, panic calls) are not visible in this excerpt.
 */
162 #define CHECK_THREAD(thd) \
163 movl $ CPD_ACTIVE_THREAD,%eax ; \
164 movl %gs:(%eax),%ecx ; \
173 2: String "wrong thread!" ; \
177 #define CHECK_MYLOCK(thd) \
178 movl $ CPD_ACTIVE_THREAD,%eax ; \
179 movl %gs:(%eax),%ecx ; \
188 2: String "mylock attempt!" ; \
192 #define METER_SIMPLE_LOCK_LOCK(reg) \
194 call EXT(meter_simple_lock) ; \
197 #define METER_SIMPLE_LOCK_UNLOCK(reg) \
199 call EXT(meter_simple_unlock) ; \
202 #else /* MACH_LDEBUG */
/*
 * Non-debug builds: all checking/metering macros expand to nothing.
 * Fix: CHECK_SIMPLE_LOCK_TYPE was the lone object-like stub here; it is
 * invoked as CHECK_SIMPLE_LOCK_TYPE() in the lock routines below, and an
 * object-like empty define would leave a stray "()" in the assembled
 * output.  Define it function-like, matching both its call sites and its
 * MACH_LDEBUG counterpart above.
 */
203 #define CHECK_MUTEX_TYPE()
204 #define CHECK_SIMPLE_LOCK_TYPE()
205 #define CHECK_THREAD(thd)
206 #define CHECK_PREEMPTION_LEVEL()
207 #define CHECK_NO_SIMPLELOCKS()
208 #define CHECK_MYLOCK(thd)
209 #define METER_SIMPLE_LOCK_LOCK(reg)
210 #define METER_SIMPLE_LOCK_UNLOCK(reg)
211 #endif /* MACH_LDEBUG */
215 * void hw_lock_init(hw_lock_t)
217 * Initialize a hardware lock.
/* NOTE(review): the ENTRY label and the instruction that zeroes %al are
   not visible in this excerpt; the store below clears the lock byte. */
221 movl L_ARG0,%edx /* fetch lock pointer */
223 movb %al,0(%edx) /* clear the lock */
228 * void hw_lock_lock(hw_lock_t)
229 * unsigned int hw_lock_to(hw_lock_t, unsigned int)
231 * Acquire lock, spinning until it becomes available.
232 * XXX: For now, we don't actually implement the timeout.
233 * MACH_RT: also return with preemption disabled.
235 ENTRY2(hw_lock_lock,hw_lock_to)
237 movl L_ARG0,%edx /* fetch lock pointer */
/* Attempt path: xchgb is implicitly locked on x86, so this is an atomic
   test-and-set of the lock byte.  NOTE(review): the setup of %cl and the
   conditional branch/return after the test are not visible here. */
239 1: DISABLE_PREEMPTION(%eax)
241 xchgb 0(%edx),%cl /* try to acquire the HW lock */
242 testb %cl,%cl /* success? */
244 movl $1,%eax /* In case this was a timeout call */
245 EMARF /* if yes, then nothing left to do */
/* Failure path: re-enable preemption and spin on a plain (non-locked)
   read until the byte looks free, then retry the atomic exchange --
   classic test-and-test-and-set to keep the bus quiet while spinning. */
248 3: ENABLE_PREEMPTION(%eax) /* no reason we can't be preemptable now */
251 2: testb %cl,0(%edx) /* spin checking lock value in cache */
252 jne 2b /* non-zero means locked, keep spinning */
253 jmp 1b /* zero means unlocked, try to grab it */
256 * void hw_lock_unlock(hw_lock_t)
258 * Unconditionally release lock.
259 * MACH_RT: release preemption level.
261 ENTRY(hw_lock_unlock)
263 movl L_ARG0,%edx /* fetch lock pointer */
/* NOTE(review): %al is presumably zeroed by a line not visible in this
   excerpt -- confirm before relying on this release path. */
265 xchgb 0(%edx),%al /* clear the lock... a mov instruction */
266 /* ...might be cheaper and less paranoid */
267 ENABLE_PREEMPTION(%eax)
272 * unsigned int hw_lock_try(hw_lock_t)
273 * MACH_RT: returns with preemption disabled on success.
/* NOTE(review): the ENTRY(hw_lock_try) label and the setup of %cl are not
   visible in this excerpt.  Try-once acquire: atomic xchg, no spinning. */
277 movl L_ARG0,%edx /* fetch lock pointer */
279 DISABLE_PREEMPTION(%eax)
281 xchgb 0(%edx),%cl /* try to acquire the HW lock */
282 testb %cl,%cl /* success? */
283 jne 1f /* lock was already held: take failure path */
285 movl $1,%eax /* success */
/* Failure: give back the preemption level and return 0. */
289 1: ENABLE_PREEMPTION(%eax) /* failure: release preemption... */
290 xorl %eax,%eax /* ...and return failure */
295 * unsigned int hw_lock_held(hw_lock_t)
296 * MACH_RT: doesn't change preemption state.
297 * N.B. Racy, of course.
/* NOTE(review): the ENTRY label and the setup of %cl are not visible in
   this excerpt.  Plain read of the lock byte -- no atomics, hence racy. */
301 movl L_ARG0,%edx /* fetch lock pointer */
304 testb %cl,0(%edx) /* check lock value */
305 jne 1f /* non-zero means locked */
306 xorl %eax,%eax /* tell caller: lock wasn't locked */
310 1: movl $1,%eax /* tell caller: lock was locked */
/* Initialize a usimple lock by clearing its hardware interlock byte.
   NOTE(review): the zeroing of %al is not visible in this excerpt. */
319 ENTRY(_usimple_lock_init)
321 movl L_ARG0,%edx /* fetch lock pointer */
323 movb %al,USL_INTERLOCK(%edx) /* unlock the HW lock */
/*
 * _simple_lock body (the ENTRY label and several guard lines are not
 * visible in this excerpt): spin-acquire the lock's hardware interlock
 * byte, then do MACH_LDEBUG ownership bookkeeping.
 */
329 movl L_ARG0,%edx /* fetch lock pointer */
331 CHECK_SIMPLE_LOCK_TYPE()
333 DISABLE_PREEMPTION(%eax)
337 xchgb USL_INTERLOCK(%edx),%cl /* try to acquire the HW lock */
338 testb %cl,%cl /* did we succeed? */
/* MACH_LDEBUG: panic if this thread already owns the lock (deadlock). */
342 CHECK_MYLOCK(S_THREAD)
345 #else /* MACH_LDEBUG */
346 jne sl_get_hw /* no, try again */
347 #endif /* MACH_LDEBUG */
/* Debug bookkeeping: bump the simple-lock count (CX indexing -- the load
   of the index register is in lines not visible here; confirm whether it
   is the CPU number) and push the lock on the lock stack. */
352 movl $ CPD_ACTIVE_THREAD,%eax
355 incl CX(EXT(simple_lock_count),%eax)
357 METER_SIMPLE_LOCK_LOCK(%edx)
363 call EXT(lock_stack_push)
366 #endif /* NCPUS == 1 */
367 #endif /* MACH_LDEBUG */
/* Try-once variant of _simple_lock: one atomic exchange, no spinning. */
372 ENTRY(_simple_lock_try)
374 movl L_ARG0,%edx /* fetch lock pointer */
376 CHECK_SIMPLE_LOCK_TYPE()
378 DISABLE_PREEMPTION(%eax)
381 xchgb USL_INTERLOCK(%edx),%cl /* try to acquire the HW lock */
382 testb %cl,%cl /* did we succeed? */
383 jne 1f /* no, return failure */
/* Success: MACH_LDEBUG bookkeeping (guard lines partly not visible). */
388 movl $ CPD_ACTIVE_THREAD,%eax
391 incl CX(EXT(simple_lock_count),%eax)
393 METER_SIMPLE_LOCK_LOCK(%edx)
399 call EXT(lock_stack_push)
402 #endif /* NCPUS == 1 */
403 #endif /* MACH_LDEBUG */
405 movl $1,%eax /* return success */
/* Failure path (its 1: label is not visible here): re-enable preemption
   and return 0. */
411 ENABLE_PREEMPTION(%eax)
413 xorl %eax,%eax /* and return failure */
/* Release a simple lock: undo debug bookkeeping, then drop the interlock. */
418 ENTRY(_simple_unlock)
420 movl L_ARG0,%edx /* fetch lock pointer */
422 CHECK_SIMPLE_LOCK_TYPE()
423 CHECK_THREAD(S_THREAD)
/* MACH_LDEBUG bookkeeping: clear recorded owner, decrement the
   simple-lock count, pop the lock stack (guards partly not visible). */
427 movl %eax,S_THREAD /* disown thread */
428 MP_DISABLE_PREEMPTION(%eax)
430 decl CX(EXT(simple_lock_count),%eax)
431 MP_ENABLE_PREEMPTION(%eax)
433 METER_SIMPLE_LOCK_UNLOCK(%edx)
439 call EXT(lock_stack_pop)
442 #endif /* NCPUS == 1 */
443 #endif /* MACH_LDEBUG */
/* Release the hardware interlock and allow preemption again. */
446 xchgb USL_INTERLOCK(%edx),%cl /* unlock the HW lock */
448 ENABLE_PREEMPTION(%eax)
/*
 * mutex_init body (ENTRY label and %eax-zeroing not visible in this
 * excerpt): clear the interlock, locked flag and counters, stamp the
 * debug type tag/owner fields, and on ETAP builds hand the mutex and its
 * event type to etap_mutex_init for tracing setup.
 */
458 movl L_ARG0,%edx /* fetch lock pointer */
460 movb %al,M_ILK /* clear interlock */
461 movb %al,M_LOCKED /* clear locked flag */
462 movw %ax,M_WAITERS /* init waiter count */
463 movw %ax,M_PROMOTED_PRI
466 movl $ MUTEX_TAG,M_TYPE /* set lock type */
467 movl %eax,M_PC /* init caller pc */
468 movl %eax,M_THREAD /* and owning thread */
471 movl L_ARG1,%ecx /* fetch event type */
472 pushl %ecx /* push event type */
473 pushl %edx /* push mutex address */
474 call EXT(etap_mutex_init) /* init ETAP data */
476 #endif /* ETAP_LOCK_TRACE */
/* Blocking mutex acquire: interlock, try the locked flag, sleep on
   contention via mutex_lock_wait, retry.  Many guard/label lines are not
   visible in this excerpt. */
481 ENTRY2(mutex_lock,_mutex_lock)
/* ETAP builds carve three stack locals: wait-time hi/lo and a miss flag. */
485 subl $12,%esp /* make room for locals */
486 movl $0,SWT_HI /* set wait time to zero (HI) */
487 movl $0,SWT_LO /* set wait time to zero (LO) */
488 movl $0,MISSED /* clear local miss marker */
489 #endif /* ETAP_LOCK_TRACE */
491 movl L_ARG0,%edx /* fetch lock pointer */
494 CHECK_NO_SIMPLELOCKS()
495 CHECK_PREEMPTION_LEVEL()
498 DISABLE_PREEMPTION(%eax)
/* Interlock grab (the xchg and ml_get_hw label are not visible here),
   then attempt to set the mutex's locked flag atomically. */
503 testb %cl,%cl /* did we succeed? */
504 jne ml_get_hw /* no, try again */
507 xchgb %cl,M_LOCKED /* try to set locked flag */
508 testb %cl,%cl /* is the mutex locked? */
509 jne ml_fail /* yes, we lose */
512 call EXT(mutex_lock_acquire)
/* MACH_LDEBUG: record owner and bump the thread's held-mutex count
   (the load of %ecx happens in lines not visible here). */
519 movl $ CPD_ACTIVE_THREAD,%eax
524 incl TH_MUTEX_COUNT(%ecx)
531 ENABLE_PREEMPTION(%eax)
/* ETAP success path: record the hold-start timestamp before returning. */
534 movl L_PC,%eax /* fetch pc */
535 pushl SWT_LO /* push wait time (low) */
536 pushl SWT_HI /* push wait time (high) */
537 pushl %eax /* push pc */
538 pushl %edx /* push mutex address */
539 call EXT(etap_mutex_hold) /* collect hold timestamp */
540 addl $16+12,%esp /* clean up stack, adjusting for locals */
541 #endif /* ETAP_LOCK_TRACE */
/* ml_fail (label not visible): take at most one wait timestamp per
   acquisition, then fall through to block. */
548 cmp $0,MISSED /* did we already take a wait timestamp? */
549 jne ml_block /* yup. carry-on */
550 pushl %edx /* push mutex address */
551 call EXT(etap_mutex_miss) /* get wait timestamp */
552 movl %eax,SWT_HI /* set wait time (high word) */
553 movl %edx,SWT_LO /* set wait time (low word) */
554 popl %edx /* clean up stack */
555 movl $1,MISSED /* mark wait timestamp as taken */
556 #endif /* ETAP_LOCK_TRACE */
/* ml_block (label not visible): sleep in mutex_lock_wait, then retry
   the whole acquisition from ml_retry. */
559 CHECK_MYLOCK(M_THREAD)
561 pushl %eax /* no promotion here yet */
562 pushl %edx /* push mutex address */
563 call EXT(mutex_lock_wait) /* wait for the lock */
565 movl L_ARG0,%edx /* refetch lock pointer */
566 jmp ml_retry /* and try again */
/* Non-blocking mutex acquire: same shape as mutex_lock but returns
   instead of sleeping.  Many guard/label/branch lines are not visible in
   this excerpt. */
568 ENTRY2(mutex_try,_mutex_try)
/* ETAP builds carve two stack locals for the (necessarily zero) wait time. */
572 subl $8,%esp /* make room for locals */
573 movl $0,SWT_HI /* set wait time to zero (HI) */
574 movl $0,SWT_LO /* set wait time to zero (LO) */
575 #endif /* ETAP_LOCK_TRACE */
577 movl L_ARG0,%edx /* fetch lock pointer */
580 CHECK_NO_SIMPLELOCKS()
582 DISABLE_PREEMPTION(%eax)
596 call EXT(mutex_lock_acquire)
/* MACH_LDEBUG: record owner and bump the thread's held-mutex count. */
603 movl $ CPD_ACTIVE_THREAD,%ecx
608 incl TH_MUTEX_COUNT(%ecx)
615 ENABLE_PREEMPTION(%eax)
/* ETAP: stamp hold start before returning success. */
618 movl L_PC,%eax /* fetch pc */
619 pushl SWT_LO /* push wait time (low) */
620 pushl SWT_HI /* push wait time (high) */
621 pushl %eax /* push pc */
622 pushl %edx /* push mutex address */
623 call EXT(etap_mutex_hold) /* get start hold timestamp */
624 addl $16,%esp /* clean up stack, adjusting for locals */
625 #endif /* ETAP_LOCK_TRACE */
629 #if MACH_LDEBUG || ETAP_LOCK_TRACE
631 addl $8,%esp /* pop stack claimed on entry */
/* Second path (the lines separating it from the first are not visible in
   this excerpt): identical owner bookkeeping and ETAP hold stamp. */
642 movl $ CPD_ACTIVE_THREAD,%ecx
647 incl TH_MUTEX_COUNT(%ecx)
654 ENABLE_PREEMPTION(%eax)
657 movl L_PC,%eax /* fetch pc */
658 pushl SWT_LO /* push wait time (low) */
659 pushl SWT_HI /* push wait time (high) */
660 pushl %eax /* push pc */
661 pushl %edx /* push mutex address */
662 call EXT(etap_mutex_hold) /* get start hold timestamp */
663 addl $16,%esp /* clean up stack, adjusting for locals */
664 #endif /* ETAP_LOCK_TRACE */
668 #if MACH_LDEBUG || ETAP_LOCK_TRACE
670 addl $8,%esp /* pop stack claimed on entry */
/*
 * mutex_unlock body (ENTRY label not visible in this excerpt): collect
 * ETAP data, grab the interlock, wake a waiter if any, otherwise clear
 * debug ownership and drop the locked flag.
 */
679 movl L_ARG0,%edx /* fetch lock pointer */
682 pushl %edx /* push mutex address */
683 call EXT(etap_mutex_unlock) /* collect ETAP data */
684 popl %edx /* restore mutex address */
685 #endif /* ETAP_LOCK_TRACE */
688 CHECK_THREAD(M_THREAD)
690 DISABLE_PREEMPTION(%eax)
/* Interlock grab (the xchg and mu_get_hw label are not visible here). */
695 testb %cl,%cl /* did we succeed? */
696 jne mu_get_hw /* no, try again */
698 cmpw $0,M_WAITERS /* are there any waiters? */
699 jne mu_wakeup /* yes, more work to do */
/* No waiters: clear debug owner, decrement the thread's held-mutex
   count, release the locked flag. */
704 movl %eax,M_THREAD /* disown thread */
705 movl $ CPD_ACTIVE_THREAD,%eax
709 decl TH_MUTEX_COUNT(%ecx)
714 xchgb %cl,M_LOCKED /* unlock the mutex */
719 ENABLE_PREEMPTION(%eax)
/* mu_wakeup (label not visible): wake one waiter, then redo the unlock. */
726 pushl %eax /* no promotion here yet */
727 pushl %edx /* push mutex address */
728 call EXT(mutex_unlock_wakeup)/* yes, wake a thread */
730 movl L_ARG0,%edx /* refetch lock pointer */
/* Release an interlock; the body between the label and the preemption
   re-enable is not visible in this excerpt. */
733 ENTRY(interlock_unlock)
740 ENABLE_PREEMPTION(%eax)
/*
 * Preemption-count entry points: adjust the per-CPU preemption level via
 * the _DISABLE/_ENABLE macros.  MACH_ASSERT builds check the level first
 * and panic on underflow (the compare/branch/panic and #if guard lines
 * are not visible in this excerpt; only the message strings remain).
 */
746 ENTRY(_disable_preemption)
748 _DISABLE_PREEMPTION(%eax)
752 ENTRY(_enable_preemption)
755 movl $ CPD_PREEMPTION_LEVEL,%eax
763 2: String "_enable_preemption: preemption_level(%d) < 0!"
766 #endif /* MACH_ASSERT */
767 _ENABLE_PREEMPTION(%eax)
771 ENTRY(_enable_preemption_no_check)
774 movl $ CPD_PREEMPTION_LEVEL,%eax
781 2: String "_enable_preemption_no_check: preemption_level <= 0!"
784 #endif /* MACH_ASSERT */
785 _ENABLE_PREEMPTION_NO_CHECK(%eax)
/*
 * MP variants of the preemption routines: the bodies are compiled in
 * only for MACH_RT kernels with NCPUS > 1; otherwise each entry point
 * reduces to an empty routine.
 */
790 ENTRY(_mp_disable_preemption)
791 #if MACH_RT && NCPUS > 1
792 _DISABLE_PREEMPTION(%eax)
793 #endif /* MACH_RT && NCPUS > 1*/
796 ENTRY(_mp_enable_preemption)
797 #if MACH_RT && NCPUS > 1
799 movl $ CPD_PREEMPTION_LEVEL,%eax
807 2: String "_mp_enable_preemption: preemption_level (%d) <= 0!"
810 #endif /* MACH_ASSERT */
811 _ENABLE_PREEMPTION(%eax)
812 #endif /* MACH_RT && NCPUS > 1 */
815 ENTRY(_mp_enable_preemption_no_check)
816 #if MACH_RT && NCPUS > 1
818 movl $ CPD_PREEMPTION_LEVEL,%eax
825 2: String "_mp_enable_preemption_no_check: preemption_level <= 0!"
828 #endif /* MACH_ASSERT */
829 _ENABLE_PREEMPTION_NO_CHECK(%eax)
830 #endif /* MACH_RT && NCPUS > 1 */
/* NOTE(review): this ret belongs to a routine whose body is not visible
   in this excerpt. */
863 ret /* %eax better not be null ! */